Example #1
def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words+1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True,
              validation_set=(testX, testY), show_metric=True, batch_size=32)
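These listing entries omit their import preambles. A minimal preamble that would make Example #1 runnable is sketched below, assuming the standard tflearn package layout; MAX_DOCUMENT_LENGTH and n_words are module-level values set by the original script, so the numbers here are placeholders:

import tensorflow as tf
import tflearn
from tflearn.data_utils import pad_sequences, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_1d, global_max_pool
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression

MAX_DOCUMENT_LENGTH = 100  # placeholder; the original script sets this
n_words = 10000            # placeholder; the original script sets this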
Example #2
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
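A hypothetical call site for this builder; the width, height, and learning-rate values are illustrative, not from the source:

model = alexnet(width=80, height=60, lr=1e-3, output=3)
# X: float array [N, 80, 60, 1], Y: one-hot labels [N, 3], supplied by the caller
# model.fit(X, Y, n_epoch=10, validation_set=0.1, show_metric=True,
#           run_id='alexnet_example')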
Example #3
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='cnn_demo')
Example #4
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """Train a 2-D CNN that regresses SDR values from MFCC feature maps.

    :param mfcc_array: training inputs shaped [n_samples, 13, 100, 1]
    :param sdr_array: target SDR values, one per sample
    :param n_epochs: number of training epochs
    :param take: integer tag appended to the TensorBoard run_id
    :return: the trained tflearn.DNN model
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Example #5
def createModel(nbClasses, imageSize):
	print("[+] Creating model...")
	convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

	convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = fully_connected(convnet, 1024, activation='elu')
	convnet = dropout(convnet, 0.5)

	convnet = fully_connected(convnet, nbClasses, activation='softmax')
	convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

	model = tflearn.DNN(convnet)
	print("    Model created! ✅")
	return model
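A hedged usage sketch; the class count and image size below are placeholders:

model = createModel(nbClasses=10, imageSize=64)
# X: grayscale images [N, 64, 64, 1], y: one-hot labels [N, 10]
# model.fit(X, y, n_epoch=20, validation_set=0.1, show_metric=True)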
Example #6
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print("CNN and doc2vec")

    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128, validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="review")
Example #7
def build_model_anything_happening():
    ### IS ANY OF THIS NECESSARY FOR LIGHT/DARK? IN GENERAL W/ STATIONARY CAMERA?
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Specify shape of the data, image prep
    network = input_data(shape=[None, 52, 64],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # Since the image position remains consistent and the frames are fairly
    # similar, the model can be spatially aware with a fully connected
    # network used directly; no convolution is needed.
    network = fully_connected(network, 2048, activation='relu')
    network = fully_connected(network, 2, activation='softmax')

    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.00003)

    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #8
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print("CNN and doc2vec 2d")

    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])


    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='review')
Example #9
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
Example #10
def neural_network_model(input_size):
    """Build a fully connected network for the given input size.

    :param input_size: feature size of each observation
    :return: tflearn model
    """
    network = input_data(shape=[None, input_size], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, learning_rate=LR, name='targets')
    model = tflearn.DNN(network, tensorboard_dir='logs/ann/ann_0')

    return model
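This builder reads a module-level LR constant and passes no optimizer, so tflearn's regression default (Adam) applies. A hedged usage sketch with an assumed LR:

LR = 1e-3  # assumed; the original script defines LR elsewhere
model = neural_network_model(input_size=4)
# model.fit({'input': X}, {'targets': Y}, n_epoch=5, show_metric=True)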
Example #11
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """Train a 1-D CNN that regresses SDR values from beat spectra.

    :param beat_spectrum_array: training inputs shaped [n_samples, 432, 1]
    :param sdr_array: target SDR values, one per sample
    :param n_epochs: number of training epochs
    :param take: integer tag appended to the TensorBoard run_id
    :return: the trained tflearn.DNN model
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
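The expected array shapes follow from the input_data and final linear layers; a dummy-data sketch with random placeholder values, purely illustrative:

import numpy as np

beat_spectrum_array = np.random.rand(8, 432, 1).astype('float32')  # [n_samples, beat_spec_len, 1]
sdr_array = np.random.rand(8, 1).astype('float32')                 # one SDR target per sample
model = train_repet_network(beat_spectrum_array, sdr_array, n_epochs=1, take=1)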
Example #12
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                        max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #13
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         name='input',
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example #14
def neural_network_model(input_size):

    network = input_data(shape=[None, input_size, 1], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')

    return model
Example #15
def build_network(self):
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
Example #16
def main():
    """Train a 1-D CNN that classifies SDR bins from generated similarity-matrix features."""
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # set up training, testing, & validation partitions
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)

    # training params
    n_classes = 10
    n_training_steps = 1000
    training_step_size = 100
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    sdr_array_1h, hist = sdrs_to_one_hots(sdr_array, n_classes, True)

    train, test, validate = split_into_sets(len(sim_mat_array), training_percent,
                                            testing_percent, validation_percent)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1], name='input')
    network = conv_1d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    # network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = fully_connected(network, 128, activation='tanh')
    # network = dropout(network, 0.5)
    network = fully_connected(network, 512, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, n_classes, activation='softmax')
    # network = fully_connected(network, 1, activation='linear')
    network = regression(network, optimizer='adagrad', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    X = np.expand_dims(sim_mat_array, -1)
    Y = np.array(sdr_array_1h)
    # X = np.expand_dims([beat_spec_array[i] for i in train], -1)
    # Y = np.array([sdr_array_1h[i] for i in train])
    # testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    # testY = np.array([sdr_array[i] for i in test])

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=0.1,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))
Example #17
def build_model():
	network = input_data(shape=[None, 128, 128, 1], name='input')
	network = conv_2d(network, nb_filter=2, filter_size=5, strides=1, activation='tanh')
	network = fully_connected(network, 1, activation='linear')
	network = regression(network, optimizer='adam', learning_rate=0.001,
						 loss='mean_square', name='target')

	model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='checkpoints/road_model1')
	return model
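This single-filter net regresses a scalar with mean-square loss; a hedged usage sketch with placeholder array names:

model = build_model()
# frames: grayscale road images [N, 128, 128, 1]; targets: scalar labels [N, 1]
# model.fit({'input': frames}, {'target': targets}, n_epoch=10, show_metric=True)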
Example #18
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]

    print('plotting')
    plot(testY, predicted)
Example #19
def build_model():
	init = tf.truncated_normal_initializer(stddev=1e-4)

	network = input_data(shape=[None, 128, 128, 1], name='input')
	network = conv_2d(network, nb_filter=2, filter_size=5, strides=2, activation='tanh', weights_init=init)
	network = fully_connected(network, 1, activation='tanh', weights_init=init)
	network = regression(network, optimizer='sgd', learning_rate=learning_rate,
						 loss='mean_square', name='target')

	model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='checkpoints/road_model1')
	return model
Example #20
def do_cnn(x,y):
    global max_document_length
    print("CNN and tf")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    #if not os.path.exists(pkl_file):
        # Training
    model.fit(trainX, trainY,
                  n_epoch=5, shuffle=True, validation_set=0.1,
                  show_metric=True, batch_size=100,run_id="webshell")
    #    model.save(pkl_file)
    #else:
    #    model.load(pkl_file)

    y_predict_list=model.predict(testX)
    #y_predict = list(model.predict(testX,as_iterable=True))

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    #print(y_test)

    do_metrics(y_test, y_predict)
Example #21
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')

    predicted = np.array(model.predict(testX))[:,0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
Example #22
def build_model_1_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3, tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
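A hypothetical call; the shape and class count are placeholders (any 4-D [None, H, W, C] input compatible with the 4x16 kernel works):

model = build_model_1_conv(learning_rate=0.01,
                           input_shape=[None, 128, 40, 1],
                           nb_classes=5,
                           base_path='.',
                           drop=0.5)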
Example #23
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, output, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
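Note that frame_count and lr are accepted but unused in the body above. A hedged call sketch with illustrative dimensions:

model = sentnet_LSTM_gray(width=80, height=60, frame_count=None, lr=1e-3)
# sequences: [N, 80, 60] arrays treated as 80 timesteps of 60 features each
# model.fit({'input': sequences}, {'output1': labels}, n_epoch=5, show_metric=True)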
Example #24
def main():
    pickle_folder = 'pickles_combined'


    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder, beat_spec_max)

    train, test, validate = split_into_sets(len(beat_spec_array), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    predicted = np.array(model.predict(testX))[:,0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
Example #25
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been made!!!?')
    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #26
def build_network(image_size, batch_size=None, n_channels=3):
    network = input_data(shape=[batch_size, image_size[0], image_size[1], n_channels],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 16, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, num_classes, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    return network
Example #27
    def generate_network(self):
        """ Return tflearn cnn network.
        """
        print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
        print(type(self.image_size), type(self.n_epoch),
              type(self.batch_size), type(self.person_ids))
        if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
        # if self.image_size is None or self.n_epoch is None or \
        #     self.batch_size is None or self.person_ids is None:
            raise ValueError("Insufficient values to generate network.\n"
                             "Need (n_epoch, int), (batch_size, int),"
                             "(image_size, list), (person_ids, list).")

        # Real-time data preprocessing
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = ImageAugmentation()
        img_aug.add_random_rotation(max_angle=25.)
        img_aug.add_random_flip_leftright()

        # Convolutional network building
        network = input_data(
            shape=[None, self.image_size[0], self.image_size[1], 3],
            data_preprocessing=img_prep,
            data_augmentation=img_aug)
        network = conv_2d(network, self.image_size[0], self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = fully_connected(network, self.image_size[0] * 2**4,
                                  activation='relu')
        network = dropout(network, 0.5)
        network = fully_connected(network, self.person_num,
                                  activation='softmax')
        network = regression(network, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        return network
Example #28
def setup_model(checkpoint_path=None):
    """Sets up a deep neural network for image classification, based on the setup
    described in the references below.

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721

    """
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    if checkpoint_path:
        model = tflearn.DNN(network, tensorboard_verbose=3,
                            checkpoint_path=checkpoint_path)
    else:
        model = tflearn.DNN(network, tensorboard_verbose=3)

    return model
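A hedged usage sketch; the checkpoint path and training arguments are illustrative, loosely following the fit idiom used elsewhere in this listing:

model = setup_model(checkpoint_path='checkpoints/classifier.tfl.ckpt')  # path is a placeholder
# X, Y: 32x32x3 images and one-hot binary labels, supplied by the caller
# model.fit(X, Y, n_epoch=100, shuffle=True, validation_set=(X_test, Y_test),
#           show_metric=True, batch_size=96, run_id='classifier')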
Example #29
def main():
    """Train an MFCC-based CNN SDR regressor and save the model."""
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                    feature, fg_or_bg, sdr_type)

    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after {0} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
Example #30
def get_nn_model(checkpoint_path='nn_motor_model', session=None):
    # Input is a single value (raw motor value)
    network = input_data(shape=[None, 1], name='input')

    # Hidden layer no. 1
    network = fully_connected(network, 12, activation='linear')

    # Output layer
    network = fully_connected(network, 1, activation='tanh')

    # regression
    network = regression(network, loss='mean_square', metric='accuracy', name='target')

    # Verbosity yay nay
    model = tflearn.DNN(network, tensorboard_verbose=3, checkpoint_path=checkpoint_path, session=session)
    return model
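A hedged sketch of how this one-input, one-output net might be fit; the array names are placeholders:

model = get_nn_model()
# raw: [N, 1] raw motor values; calibrated: [N, 1] targets within tanh range (-1, 1)
# model.fit({'input': raw}, {'target': calibrated}, n_epoch=10, show_metric=True)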
Example #31
img_pre = ImagePreprocessing()
img_pre.add_featurewise_zero_center()  # Zero-center every sample; if no mean is given, it is computed over all samples.
img_pre.add_featurewise_stdnorm()  # Scale every sample by a std; if none is given, it is computed over all samples.

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()  # random left/right flips
img_aug.add_random_rotation(max_angle=25.)  # random rotations

# Convolutional network building
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_pre,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')  # first conv layer: 32 filters, 3x3, ReLU
network = max_pool_2d(network, 2)  # max pooling, stride 2
network = conv_2d(network, 64, 3, activation='relu')  # second conv layer: 64 filters
network = conv_2d(network, 64, 3, activation='relu')  # third conv layer: 64 filters
network = max_pool_2d(network, 2)  # max pooling
network = fully_connected(network, 512, activation='relu')  # fully connected layer, 512 units
network = dropout(network, 0.5)  # keep 50% of the units
network = fully_connected(network, 10, activation='softmax')  # 10-way softmax output for CIFAR-10
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)  # Adam optimizer, categorical cross-entropy, learning rate 0.001
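The snippet ends at the regression layer; a hedged completion that wraps and trains it, mirroring the DNN pattern used by the other examples (X, Y, X_test, Y_test are assumed CIFAR-10 arrays):

model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=(X_test, Y_test),
#           show_metric=True, batch_size=96, run_id='cifar10_cnn')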
Example #32
X_test = pad_sequences(X_test, maxlen=model_size, value=0.)

n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)

# pickle.dump (X_train, open ("xtrain.p", b))
# pickle.dump (X_test, open ("xtest.p", b))

# X_train = pickle.load (open ("xtrain.p", rb))
# X_test = pickle.load (open ("xtest.p", rb))

### Models

# Building convolutional network
print('Build CNN')
network = input_data(shape=[None, model_size], name='input')
network = tflearn.embedding(network, input_dim=n_words, output_dim=cnn_size)
branch1 = conv_1d(network,
                  cnn_size,
                  3,
                  padding='valid',
                  activation=activation_function,
                  regularizer="L2")
branch2 = conv_1d(network,
                  cnn_size,
                  4,
                  padding='valid',
                  activation=activation_function,
                  regularizer="L2")
branch3 = conv_1d(network,
                  cnn_size,
                  5,
                  padding='valid',
                  activation=activation_function,
                  regularizer="L2")
Example #33
import tflearn
from tflearn.data_utils import pad_sequences, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.estimator import regression


trainX, trainY, testX, testY = getGlassData()


# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=10, value=0.)
testX = pad_sequences(testX, maxlen=10, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, 6)
testY = to_categorical(testY, 6)

# Network building
net = input_data(shape=[None, 10])
net = embedding(net, input_dim=1000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 6, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0.,tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=(testX, testY),
          show_metric=True, batch_size=32, n_epoch=1000)

model.save('./saved/tf/bidirectionRNN/model.tfl')
Example #34
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import highway_conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression

datapath = '../../data/mnist'

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True, data_dir=datapath)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
#highway convolutions with pooling and dropout
for i in range(3):
    for j in [3, 2, 1]:
        network = highway_conv_2d(network, 16, j, activation='elu')
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)

network = fully_connected(network, 128, activation='elu')
network = fully_connected(network, 256, activation='elu')
network = fully_connected(network, 10, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='target')
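The listing stops after the regression layer; a hedged training step consistent with the MNIST data loaded above:

model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': X}, {'target': Y}, n_epoch=5,
          validation_set=({'input': testX}, {'target': testY}),
          show_metric=True, run_id='highway_mnist')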
Example #35
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]  # number of labels
    index_offset = np.arange(num_labels) * num_classes
    # print(index_offset)
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

# y_train = dense_to_one_hot(y_train, n_classes)  # convert to one-hot
y_train = to_categorical(y_train, n_classes)  # convert to one-hot


# # Building convolutional network
network = input_data(shape=[None, img_h, img_w, img_c], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)

network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)

network = fully_connected(network, 128, activation='tanh')
network = dropout(network, dropout_)

network = fully_connected(network, 256, activation='tanh')
network = dropout(network, dropout_)

network = fully_connected(network, n_classes, activation='softmax')
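Example #35 stops before the estimator layers; a hedged completion in the style of the surrounding examples (X_train is an assumed name for the training images):

network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit({'input': X_train}, {'target': y_train}, n_epoch=10, show_metric=True)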
Example #36
class Thai_sentiment():

    file_path = './corpus/Combined_inhousedata_UTF8-4.csv'
    file_path3 = './trainingdataset/combined_inhousedata-UTF8-traindataset-1.csv'
    data, labels = load_csv(file_path,
                            target_column=0,
                            categorical_labels=True,
                            n_classes=2)
    testdata, testlabels = load_csv(file_path3,
                                    target_column=0,
                                    categorical_labels=True,
                                    n_classes=2)

    def preprocess_server(data):
        rlist = []
        preprocessdata = []
        for i in range(len(data)):
            x = requests.get('http://174.138.26.245:5000/preprocess/' +
                             data[i][0])
            resu = x.json()
            preprocessdata.append(resu['result'])
        for i in range(len(preprocessdata)):
            r = requests.get('http://174.138.26.245:5000/tokenize/' +
                             preprocessdata[i])
            result = r.json()
            rlist.append(result['result'])
        return rlist

    def get_uniquewords(listdata):
        f = open('./uniqueword/combined_inhousedata_UTF8-4_uniquewords.csv',
                 'w')

        uniquewords = []
        for line in range(len(listdata)):
            words = listdata[line]
            inner_data = []
            for word in words:
                if word not in uniquewords:
                    uniquewords.append(word)
                    f.write(word + '\n')
        f.close()
        return uniquewords

    def preprocess_vector(listdata, uniquewords):
        sentences = []
        vectors = []
        mainvector = []
        #f = open(file_path, 'r')
        for line in range(len(listdata)):
            words = listdata[line]
            inner_data = []
            for word in words:
                inner_data.append(word)
            sentences.append(inner_data)

        for sentence in sentences:
            inner_vector = []
            vectors = []
            for word in uniquewords:
                if word in sentence:
                    inner_vector.append(1)
                else:
                    inner_vector.append(0)
            vectors.append(inner_vector)
            mainvector.append(vectors)
        return np.array(mainvector, dtype=np.float32)

    pdata = preprocess_server(data)
    unique_words = get_uniquewords(pdata)
    data = preprocess_vector(pdata, unique_words)
    resultdata = preprocess_server(testdata)
    resultdata = preprocess_vector(resultdata, unique_words)

    neurons = len(unique_words)

    # shuffle the dataset
    data, labels = shuffle(data, labels)

    reset_default_graph()
    network = input_data(shape=[None, 1, neurons])
    network = conv_1d(network, 8, 3, activation='relu')
    network = max_pool_1d(network, 3)

    network = conv_1d(network, 16, 3, activation='relu')
    network = max_pool_1d(network, 3)

    network = fully_connected(network, 8, activation='relu')
    network = dropout(network, 0.5)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.01,
                         loss='categorical_crossentropy')

    model = tflearn.DNN(network)

    model.fit(data,
              labels,
              n_epoch=100,
              shuffle=True,
              validation_set=(resultdata, testlabels),
              show_metric=True,
              batch_size=None,
              snapshot_epoch=True,
              run_id='task-classifier')
    model.save(
        "./model/thaitext-classifier-combined_inhousedata-UTF8-4-100.tfl")
    print(
        "Network trained and saved as thaitext-classifier-combined_inhousedata-UTF8-4-100.tfl"
    )

    result = model.evaluate(resultdata, testlabels)
    print("Evaluation result: %s" % result)

    tp = 0
    fp = 0
    tn = 0
    fn = 0
    predict = model.predict(resultdata)
    for i in range(len(testlabels)):
        pred = predict[i]
        label = testlabels[i]
        if label[1] == 1:  # ground truth is positive
            if pred[1] > 0.5:  # predicted positive
                tp += 1
            else:  # predicted negative: false negative
                fn += 1
        else:  # ground truth is negative
            if pred[0] > 0.5:  # predicted negative
                tn += 1
            else:  # predicted positive: false positive
                fp += 1

    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    f1 = (2 * precision * recall) / (precision + recall)
    print("Precision: %s; Recall: %s" % (precision, recall))
    print("Acc: %s, F1: %s" % (accuracy, f1))
Example #37
    lab = [0] * npeaks
    lab[spectrums[i].npeaks - 1] = 1
    testy.append(lab)
    spectrums[i].savefig('figs/{0}_{1}.jpg'.format(i, spectrums[i].npeaks))

#----------------------------------------------------
trainx = np.array(trainx)
trainy = np.array(trainy)
testx = np.array(testx)
testy = np.array(testy)
trainx = trainx.reshape([-1, numx, 1])

testx = testx.reshape([-1, numx, 1])
print(testx.shape)
# Building convolutional network
network = input_data(shape=[None, numx, 1], name='input')
network = conv_1d(network, 16, 3, activation='relu')
network = max_pool_1d(network, 2)
network = batch_normalization(network)
network = conv_1d(network, 32, 3, activation='relu')
network = max_pool_1d(network, 2)
network = conv_1d(network, 64, 3, activation='relu')
network = max_pool_1d(network, 2)
network = batch_normalization(network)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
# network = fully_connected(network, 128, activation='relu')
# network = dropout(network, 0.5)
network = fully_connected(network, npeaks, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy')
Example #38
import itertools
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
import random
import math
from sklearn.metrics import confusion_matrix, accuracy_score
from scipy.misc import imresize

# Building 'AlexNet'
#Image Augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_blur(sigma_max=3.)
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_flip_updown()

network = input_data(shape=[None, 227, 227, 3], data_augmentation=img_aug)
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
Example #39
dropout_rate = 0.5

# Defines a name for the model.
model_name = 'DeepMusicGenreClassifier'

# Creates training and validation data.
train = create_labelled_data(training_data_dir, 'training.npy')
validation = create_labelled_data(validation_data_dir, 'validation.npy')

# Use below if data is already stored in the respective files:
# train = np.load('training.npy')
# validation = np.load('validation.npy')

# 1st Layer of CNN: Input Layer
# Input dimensions 'img_height' and 'img_width' resized as required
cnn = input_data(shape=[None, img_height, img_width, 1], name='input')

# 2nd Layer of CNN: Convolutional Layer + Max Pooling (64)
# A (3 x 3) kernel with default stride of 1 with a 'relu' activation function is applied
# A (3 x 3) pooling region is selected
cnn = conv_2d(cnn, 64, kernel, activation='relu')
cnn = max_pool_2d(cnn, pool)

# 3rd Layer of CNN: Convolutional Layer + Max Pooling (128)
# A (3 x 3) kernel with default stride of 1 with a 'relu' activation function is applied
# A (3 x 3) pooling region is selected
cnn = conv_2d(cnn, 128, kernel, activation='relu')
cnn = max_pool_2d(cnn, pool)

# 4th Layer of CNN: Convolutional Layer + Max Pooling (256)
# A (3 x 3) kernel with default stride of 1 with a 'relu' activation function is applied
Example #40
0
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers import regression
from tflearn.data_utils import to_categorical

import cv2
import numpy as np

BATCH_SIZE = 32
IMG_SIZE = 75
N_CLASSES = 9
LR = 0.001
N_EPOCHS = 50

tf.reset_default_graph()

network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1])  #1

network = conv_2d(network, 32, 3, activation='relu')  #2
network = max_pool_2d(network, 2)  #3

network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)

network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)

network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)

network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
Example #41
0
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import dataextract as dx
import cv2
import numpy as np

convnet = input_data(shape=[None, 200, 200, 1], name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 128, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 256, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 128, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
Example #42
0
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import pickle

# Data loading and preprocessing
train_data = pickle.load(open('main_data.p','rb'))
test_data = pickle.load(open('test_data.p','rb'))

X, Y = train_data[:, 0:2500], train_data[:, 2500:]
X_test, Y_test = test_data[:, 0:2500], test_data[:, 2500:]
X = X.reshape([-1,50,50,1])
X_test = X_test.reshape([-1,50,50,1])

cnn_network = input_data(shape=[None, 50, 50, 1])

cnn_network = conv_2d(cnn_network, 30, 3, activation='relu')
cnn_network = max_pool_2d(cnn_network, 2)

cnn_network = conv_2d(cnn_network, 30, 3, activation='relu')
cnn_network = max_pool_2d(cnn_network, 2)

cnn_network = conv_2d(cnn_network, 40, 3, activation='relu')
cnn_network = max_pool_2d(cnn_network, 2)

cnn_network = conv_2d(cnn_network, 40, 3, activation='relu')
cnn_network = max_pool_2d(cnn_network, 2)

cnn_network = conv_2d(cnn_network, 40, 3, activation='relu')
cnn_network = max_pool_2d(cnn_network, 2)
Example #43
0
def inceptionv3(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):  # despite the name, this builds a GoogLeNet (Inception-v1)-style graph
    network = input_data(shape=[None, width, height,3], name='input')
    conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
    inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3,  activation='relu', name = 'inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')

    # merge the inception_3a__
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)

    inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
    inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3,  activation='relu',name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5,  name = 'inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1,  name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')

    #merge the inception_3b_*
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
    inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3,  activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5,  activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1,  name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')

    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')


    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4b_5_5')

    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1,  name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')

    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')


    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256,  filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64,  filter_size=5, activation='relu', name='inception_4c_5_5')

    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')

    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')

    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1,  name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')

    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')

    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128,  filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1,  name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')


    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat')

    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')


    inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5,  activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1,  name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')

    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat')


    inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384,  filter_size=3,activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5,  activation='relu', name='inception_5b_5_5' )
    inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1,  name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat')

    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    
    loss = fully_connected(pool5_7_7, output,activation='softmax')


    
    network = regression(loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    
    model = tflearn.DNN(network,
                        max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log')


    return model
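
A hedged usage sketch for the builder above; the input resolution, batch size, and hyperparameters are assumptions, while the 'input'/'targets' names come from the graph definition:

import numpy as np

model = inceptionv3(224, 224, frame_count=1, lr=1e-3, output=9)
X_dummy = np.random.rand(2, 224, 224, 3).astype(np.float32)        # [N, width, height, 3]
y_dummy = np.eye(9, dtype=np.float32)[np.random.randint(0, 9, 2)]  # one-hot targets
model.fit({'input': X_dummy}, {'targets': y_dummy}, n_epoch=1, show_metric=True)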
Example #44
0
train_file = '../images/sampling/train.txt'
test_file = '../images/sampling/test.txt'

X, Y = image_preloader(train_file,
                       image_shape=(256, 256),
                       mode='file',
                       categorical_labels=True,
                       normalize=True)
testX, testY = image_preloader(test_file,
                               image_shape=(256, 256),
                               mode='file',
                               categorical_labels=True,
                               normalize=True)

network = input_data(shape=[None, 256, 256], name='input')
network = tflearn.layers.core.reshape(network, [-1, 256, 256, 1],
                                      name='Reshape')
network = conv_2d(network, 128, 1, activation='relu', regularizer="L2")
network = conv_2d(network, 128, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
# Build neural network and train
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='target')
model = tflearn.DNN(network, tensorboard_verbose=3)
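
A hedged continuation: training on the preloaded splits and evaluating on the held-out files (epoch count, batch size, and run_id are assumptions):

model.fit(X, Y, n_epoch=10, batch_size=32, shuffle=True,
          validation_set=(testX, testY), show_metric=True,
          run_id='sampling_cnn')
print(model.evaluate(testX, testY))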
Example #45
0
    np.save('test_data.npy', testing_data)
    return testing_data

train_data = create_train_data()
# If you have already created the dataset:
#train_data = np.load('train_data.npy')


import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
tf.reset_default_graph()

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)

convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)

convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)

convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)

convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
Example #46
0
def getReseau(index, nb_class):
    data = json.load(open("models/" + str(index) + "_" + str(nb_class) + "/result.json"))
    settings = data['settings']

    size = settings['size']
    nb_filter = settings['nb_filter']
    filter_size = settings['filter_size']

    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # Define our network architecture:

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, size, size, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    reseau = index

    if reseau == 1:
        # Step 1: Convolution
        network = conv_2d(network, nb_filter, filter_size, activation='relu')

        # Step 2: Max pooling
        network = max_pool_2d(network, 2)

        # Step 3: Convolution again
        network = conv_2d(network, nb_filter * 4, filter_size, activation='relu')

        # Step 4: Convolution yet again
        network = conv_2d(network, nb_filter * 4, filter_size, activation='relu')

        # Step 5: Max pooling again
        network = max_pool_2d(network, 2)

        # Step 6: Fully-connected 512 node neural network
        network = fully_connected(network, nb_filter * 16, activation='relu')

        # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
        network = dropout(network, 0.5)


    elif reseau == 2:
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 512, activation='relu')
        network = fully_connected(network, 512, activation='relu')

    elif reseau == 3:
        network = conv_2d(network, 32, 3, activation='relu')
        network = avg_pool_2d(network, 2)
        network = conv_2d(network, 32, 3, activation='relu')
        network = avg_pool_2d(network, 2)
        network = conv_2d(network, 32, 3, activation='relu')
        network = avg_pool_2d(network, 2)
        network = fully_connected(network, 512, activation='relu')
        network = fully_connected(network, 512, activation='relu')
        network = dropout(network, 0.5)

    elif reseau == 4:
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 5, padding='valid', activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 5, padding='valid', activation='relu')
        network = fully_connected(network, 512, activation='relu')
        network = dropout(network, 0.5)

    elif reseau == 5:
        network = conv_2d(network, 64, 3, activation='relu')
        network = conv_2d(network, 64, 3, activation='relu')
        network = avg_pool_2d(network, 2)
        network = conv_2d(network, 32, 3, activation='relu')
        network = conv_2d(network, 32, 3, activation='relu')
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 512, activation='relu')
        network = fully_connected(network, 512, activation='relu')

    # Step 8: Fully-connected neural network with three outputs (0=isn't a bird, 1=is a bird) to make the final prediction
    # network = fully_connected(network, ld.getLabelsNumber(), activation='softmax')
    network = fully_connected(network, nb_class, activation='softmax')
    # Tell tflearn how we want to train the network
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=settings['learning_rate'])

    # Wrap the network in a model object
    model = tflearn.DNN(network,
                        tensorboard_verbose=0)

    return model
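
A hedged usage sketch for getReseau; the checkpoint filename and the 32x32 input size are assumptions (the real size comes from result.json):

import numpy as np

model = getReseau(2, 10)                           # architecture variant 2, 10 classes
model.load("models/2_10/model.tfl")                # assumed checkpoint name
dummy = np.zeros((32, 32, 3), dtype=np.float32)    # must match settings['size']
print(model.predict([dummy]))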
Example #47
0
def construct_inceptionv1onfire(x, y):

    # Build network as per architecture in [Dunnings/Breckon, 2018]

    network = input_data(shape=[None, y, x, 3])

    conv1_7_7 = conv_2d(network,
                        64,
                        5,
                        strides=2,
                        activation='relu',
                        name='conv1_7_7_s2')

    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)

    conv2_3_3_reduce = conv_2d(pool1_3_3,
                               64,
                               1,
                               activation='relu',
                               name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce,
                        128,
                        3,
                        activation='relu',
                        name='conv2_3_3')

    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3,
                            kernel_size=3,
                            strides=2,
                            name='pool2_3_3_s2')

    inception_3a_1_1 = conv_2d(pool2_3_3,
                               64,
                               1,
                               activation='relu',
                               name='inception_3a_1_1')

    inception_3a_3_3_reduce = conv_2d(pool2_3_3,
                                      96,
                                      1,
                                      activation='relu',
                                      name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce,
                               128,
                               filter_size=3,
                               activation='relu',
                               name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce,
                               32,
                               filter_size=5,
                               activation='relu',
                               name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(
        pool2_3_3,
        kernel_size=3,
        strides=1,
    )
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool,
                                    32,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3a_pool_1_1')

    # merge the inception_3a__
    inception_3a_output = merge([
        inception_3a_1_1, inception_3a_3_3, inception_3a_5_5,
        inception_3a_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    inception_3b_1_1 = conv_2d(inception_3a_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce,
                               192,
                               filter_size=3,
                               activation='relu',
                               name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce,
                               96,
                               filter_size=5,
                               name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3b_pool_1_1')

    #merge the inception_3b_*
    inception_3b_output = merge([
        inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,
        inception_3b_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output,
                            kernel_size=3,
                            strides=2,
                            name='pool3_3_3')
    inception_4a_1_1 = conv_2d(pool3_3_3,
                               192,
                               filter_size=1,
                               activation='relu',
                               name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3,
                                      96,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce,
                               208,
                               filter_size=3,
                               activation='relu',
                               name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce,
                               48,
                               filter_size=5,
                               activation='relu',
                               name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4a_pool_1_1')

    inception_4a_output = merge([
        inception_4a_1_1, inception_4a_3_3, inception_4a_5_5,
        inception_4a_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4a_output')

    pool5_7_7 = avg_pool_2d(inception_4a_output, kernel_size=5, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')
    network = regression(loss,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    model = tflearn.DNN(network,
                        checkpoint_path='sp-inceptiononv1onfire',
                        max_checkpoints=1,
                        tensorboard_verbose=2)

    return model
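
A hedged usage sketch; the 224x224 resolution and the dummy frame are assumptions, and which softmax index means "fire" depends on how the training labels were encoded:

import numpy as np

model = construct_inceptionv1onfire(224, 224)      # (x, y) = (width, height)
frame = np.random.rand(224, 224, 3).astype(np.float32)
print(model.predict([frame]))                      # [[p_class0, p_class1]]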
Example #48
0
X_val = np.array([i[0]
                  for i in validate_data]).reshape(-1, int(IMAGE_HEIGHT),
                                                   int(image_width), 1)
y_val = [i[1] for i in validate_data]

X_test = np.array([i[0] for i in test_data]).reshape(-1, int(IMAGE_HEIGHT),
                                                     int(image_width), 1)
y_test = [i[1] for i in test_data]
filename_test = [i[2] for i in test_data]
labels = ['empty', 'low', 'medium', 'high']

# Convolutional Neural Network
tf.reset_default_graph()

convnet = input_data(shape=[None, int(IMAGE_HEIGHT),
                            int(image_width), 1],
                     name='input')
# Downsample the raw input first: every later feature map shrinks, which makes
# the network noticeably faster to train at the cost of some fine detail.
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 16, 7, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 32, 7, activation='relu')
convnet = conv_2d(convnet, 16, 5, activation='relu')
convnet = fully_connected(convnet, 64, activation='relu')
convnet = dropout(convnet, 0.5)
convnet = fully_connected(convnet, 4,
                          activation='softmax')  # Because 4 categories
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
Example #49
0
# while training our network, we will hold out 500 images to test the training.
egitim = egitim_verisi[:-500]
test = egitim_verisi[-500:]

X_egitim = np.array([i[0] for i in egitim]).reshape(-1, RESIM_BOYUTU, RESIM_BOYUTU, 1)
y_egitim = [i[1] for i in egitim]
X_test = np.array([i[0] for i in test]).reshape(-1, RESIM_BOYUTU, RESIM_BOYUTU, 1)
y_test = [i[1] for i in test]

### BUILDING THE ARCHITECTURE ###

tf.reset_default_graph()

# define the input dimensions of our network
convnet = input_data(shape=[None, RESIM_BOYUTU, RESIM_BOYUTU, 1], name='input')

# convolution layer with 32 filters of size 5x5 and relu activation
convnet = conv_2d(convnet, 32, 5, activation='relu')

# max-pooling layer with a 5x5 window
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
Example #50
0
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

train, test, _ = imdb.load_data(path='imdb.pkl',
                                n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test
print(len(trainX[0]))
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
print(len(trainX[0]))
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

network = input_data(shape=[None, 100], name='input')
network = tflearn.embedding(network, input_dim=10000, output_dim=128)

branch1 = conv_1d(network,
                  128,
                  3,
                  padding='valid',
                  activation='relu',
                  regularizer='L2')
branch2 = conv_1d(network,
                  128,
                  4,
                  padding='valid',
                  activation='relu',
                  regularizer='L2')
branch3 = conv_1d(network,
Example #51
0
test_features, test_label = feat_ext('test_list.txt')
test_features = pad(test_features)
test_label, test_dic = label_onehot(test_label)
test_X = test_features.reshape([-1, 49, 24, 1])
print(test_X.shape)
test_Y = test_label
print(test_Y.shape)

# The Convolutional Neural Network Structure
# A two-convolutional-layer network; each conv layer is followed by max pooling.

convnet = input_data(shape=[None, 49, 24, 1], name='input')
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 12, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='targets')
model = tflearn.DNN(convnet)
model.fit({'input': X}, {'targets': Y},
Example #52
0
        img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE),
                         (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])

    np.save('test_data.npy', testing_data)
    return testing_data


train_data = create_train_data()

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

CONVNET = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

CONVNET = conv_2d(CONVNET, 32, 2, activation='relu')
CONVNET = max_pool_2d(CONVNET, 2)

CONVNET = conv_2d(CONVNET, 64, 2, activation='relu')
CONVNET = max_pool_2d(CONVNET, 2)

CONVNET = conv_2d(CONVNET, 32, 2, activation='relu')
CONVNET = max_pool_2d(CONVNET, 2)

CONVNET = conv_2d(CONVNET, 64, 2, activation='relu')
CONVNET = max_pool_2d(CONVNET, 2)

CONVNET = conv_2d(CONVNET, 32, 2, activation='relu')
CONVNET = max_pool_2d(CONVNET, 2)
Example #53
0
    def test_tflearn_convnet(self):

        '''
        Test of a stock convnet built with the TFLearn wrapper library for TensorFlow,
        to measure what accuracy can be achieved on the current image dataset.

        Currently achieves about 90% accuracy, which is acceptable, but this
        particular classification problem should allow much higher accuracy:
        at least 95% seems plausible, because Xenopus tadpoles have a very
        distinct pattern and shape.
        '''

        N_EPOCHS = 100
        # My training dataset is currently too small to run more epochs,
        # the convnet is probably overfitting already

        self.load_data('train_data.npy', 'test_data.npy', "", save_type="np")

        # VAL_SPLIT is assumed to be a fraction of the dataset; the original
        # multiplied it by len(self.train_data[0]) (the size of one sample) by mistake
        split = int(self.VAL_SPLIT * len(self.train_data))
        train = self.train_data[:-split]
        val = self.train_data[-split:]

        X_train = np.array([i[0] for i in train]).reshape(-1, self.IMG_SIZE, self.IMG_SIZE, 1)
        y_train = [i[1] for i in train]
        X_test = np.array([i[0] for i in val]).reshape(-1, self.IMG_SIZE, self.IMG_SIZE, 1)
        y_test = [i[1] for i in val]

        tf.reset_default_graph()
        convnet = input_data(shape=[None, self.IMG_SIZE, self.IMG_SIZE, 1], name='input')
        convnet = conv_2d(convnet, 32, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)
        convnet = conv_2d(convnet, 64, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)
        convnet = conv_2d(convnet, 128, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)
        convnet = conv_2d(convnet, 64, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)
        convnet = conv_2d(convnet, 32, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)
        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)
        convnet = fully_connected(convnet, 2, activation='softmax')
        convnet = regression(convnet, optimizer='adam', learning_rate=self.LR, loss='categorical_crossentropy', name='targets')

        model = tflearn.DNN(convnet, tensorboard_dir='log', tensorboard_verbose=0)

        model.fit({'input': X_train}, {'targets': y_train}, n_epoch=N_EPOCHS,
                  validation_set=({'input': X_test}, {'targets': y_test}),
                  snapshot_step=TadpoleConvNet.VAL_SPLIT, show_metric=True, run_id=self.MODEL_NAME)

        model.save("tadpole_model")

        # Test model on testing dataset

        fig = plt.figure()

        for num, data in enumerate(self.test_data[:30]):  # visualize classifications for the first 30 test images

            img_num = data[1]
            img_data = data[0]

            y = fig.add_subplot(5, 6, num + 1)  # nrows, ncols --> dependent on how many images you're showing in the frame
            orig = img_data
            data = img_data.reshape(self.IMG_SIZE, self.IMG_SIZE, 1)
            model_out = model.predict([data])[0]

            if np.argmax(model_out) == 1:
                str_label = 'not tadpole'      # detecting tadpoles is the only classification of interest
            else:
                str_label = 'tadpole'

            y.imshow(orig, cmap='gray')
            plt.title(str_label)
            y.axes.get_xaxis().set_visible(False)
            y.axes.get_yaxis().set_visible(False)

        plt.show()

        numb_right = 0
        numb_wrong = 0
        total_numb = 0

        # Run predictions on entire testing data set and calculate accuracy
        for data in self.test_data:

            clss = data[1][1]
            img_data = data[0]
            img_data = img_data.reshape(self.IMG_SIZE, self.IMG_SIZE, 1)
            predict = model.predict([img_data])[0]

            if np.argmax(predict) == clss:
                numb_right += 1
            else:
                numb_wrong += 1
            total_numb += 1

        print("\nClassified {} correcly out of {} --> accuracy {:.3f}".format(numb_right, total_numb, float(numb_right)/total_numb))
Example #54
0
        im.close()
labellist = np.array(labellist)
labellist = labellist.reshape(-1, 1)
enc = sklearn.preprocessing.OneHotEncoder(sparse=False)
enc.fit(labellist)
labellist = enc.transform(labellist)
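
Note that sparse=False was deprecated in scikit-learn 1.2 and removed in 1.4; on current versions the equivalent is (a hedged sketch):

enc = sklearn.preprocessing.OneHotEncoder(sparse_output=False)
labellist = enc.fit_transform(labellist)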

# 789 of the samples are labelled "yes"

###END DATA PROCESSING###
#Split train test validate
xtrain, x2, ytrain, y2 = train_test_split(image_list, labellist, test_size=0.2)
xvalid, xtest, yvalid, ytest = train_test_split(x2, y2, test_size=0.5)

#start of building net
network = input_data(shape=[None, 436, 640, 3])
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = batch_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           64,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce,
                    192,
Example #55
0
def get_network():
    # The conv layers below are built with bias=False, so explicit bias tensors
    # are added by hand; biases2 matches the final single-plane 19x19 map.
    biases = zeros(shape=[9, 19, 1, 192])
    biases2 = zeros(shape=[19, 19, 1])
    network = input_data(shape=[None, 19, 19, 24], name='input')
    network = conv_2d(network,
                      192,
                      5,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev5),
                      bias=False) + biases[0]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[1]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[2]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[3]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[4]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[5]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[6]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[7]
    network = conv_2d(network,
                      192,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases[8]
    network = conv_2d(network,
                      1,
                      3,
                      activation='elu',
                      weights_init=truncated_normal(stddev=stddev3),
                      bias=False) + biases2
    network = fully_connected(network, 19 * 19, activation='softmax')
    momentum = Momentum(learning_rate=0.002)
    network = regression(network,
                         optimizer=momentum,
                         loss='categorical_crossentropy',
                         name='target')
    return network
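
A hedged sketch of wrapping the graph in a trainable model; the stddev values are assumptions, and zeros/truncated_normal/Momentum must be imported as in the original module:

import numpy as np

stddev3, stddev5 = 0.05, 0.05                        # assumed weight-init scales
model = tflearn.DNN(get_network(), tensorboard_verbose=0)
board = np.zeros((1, 19, 19, 24), dtype=np.float32)  # assumed empty-board encoding
move_probs = model.predict(board)                    # softmax over 19*19 = 361 points
print(np.argmax(move_probs))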
Example #56
0
    - [Flower Dataset (17)](http://www.robots.ox.ac.uk/~vgg/data/flowers/17/)
"""

from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Building 'AlexNet'
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
Example #57
0
import numpy as np
from PIL import Image
from tflearn.data_utils import resize_image

tf.reset_default_graph()

#Load Datasets
X, Y = image_preloader('C:\\Users\\lukas\\Documents\\Uni\\KI\\kat\\ROOT',
                       image_shape=(160, 180),
                       mode='folder',
                       categorical_labels=True,
                       normalize=True)

#Convolutional Neural Network
network = input_data(shape=[None, 180, 160, 4])
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

#training
model = tflearn.DNN(network, tensorboard_verbose=3)
model.fit(X,
Example #58
0
fashion_model.add(Conv2D(32, kernel_size=(12, 3), activation='relu', input_shape=(28, 28, 1), padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(AveragePooling2D((2, 2), padding='same'))  # Keras's class is AveragePooling2D, not AvePooling2D
fashion_model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation='relu'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(Dense(num_classes, activation='softmax'))

tf.reset_default_graph()
convnet = input_data(shape=[None, 32440, 1025, 1], name='train_data')

convnet = conv_2d(convnet, 12, 3, activation='relu')
convnet = avg_pool_2d(convnet, 2)  # tflearn's op is avg_pool_2d and takes the incoming tensor first
...
convnet = conv_2d(convnet, 24, 3, activation='relu')
convnet = avg_pool_2d(convnet, 2)
...
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = avg_pool_2d(convnet, 2)
...
model = tflearn.DNN(convnet, tensorboard_dir='log')



Example #59
0
train = train_data[:-500]
# Make sure the data is normalized
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Create extra synthetic training data by flipping, rotating and blurring the
# images on our data set.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

# Define our network architecture:

network = input_data(shape=[None, 128, 128, 1],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

#Step 1: Convolution
network = conv_2d(network, 128, 1, activation='relu')

# Step 2: Max pooling
network = max_pool_2d(network, 2)

# Step 3: Convolution again
network = conv_2d(network, 256, 1, activation='relu')

# Step 4: Convolution yet again
network = conv_2d(network, 256, 1, activation='relu')

# Step 5: Max pooling again
Example #60
0
train_IC50 = read_labels("./data/train_ic50")

test_protein = prepare_data(data_dir, "./data/test_sps", vocab_size_protein,
                            vocab_protein, protein_MAX_size, 1)
test_compound = prepare_data(data_dir, "./data/test_smile",
                             vocab_size_compound, vocab_compound,
                             comp_MAX_size, 0)
test_IC50 = read_labels("./data/test_ic50")

## separating train,dev, test data
compound_train, compound_dev, IC50_train, IC50_dev, protein_train, protein_dev = train_dev_split(
    train_protein, train_compound, train_IC50, dev_perc, comp_MAX_size,
    protein_MAX_size, batch_size)

## RNN for protein
prot_data = input_data(shape=[None, protein_MAX_size])
prot_embd = tflearn.embedding(prot_data,
                              input_dim=vocab_size_protein,
                              output_dim=GRU_size_prot)
prot_gru_1 = tflearn.gru(prot_embd,
                         GRU_size_prot,
                         initial_state=prot_init_state_1,
                         trainable=True,
                         return_seq=True,
                         restore=False)
prot_gru_1 = tf.stack(prot_gru_1, axis=1)
prot_gru_2 = tflearn.gru(prot_gru_1,
                         GRU_size_prot,
                         initial_state=prot_init_state_2,
                         trainable=True,
                         return_seq=True,