def build_network(self):
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network, optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
def createModel(nbClasses, imageSize):
    print("[+] Creating model...")
    convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

    convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 1024, activation='elu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, nbClasses, activation='softmax')
    convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

    model = tflearn.DNN(convnet)
    print(" Model created! ✅")
    return model
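A minimal usage sketch for the builder above (not from the original source; the fake data, class count, and image size are illustrative assumptions):

# Usage sketch (illustrative, not from the original source)
import numpy as np
X = np.random.rand(32, 64, 64, 1)            # 32 fake grayscale images (assumed shape)
Y = np.eye(5)[np.random.randint(0, 5, 32)]   # one-hot labels over 5 assumed classes
model = createModel(nbClasses=5, imageSize=64)
model.fit(X, Y, n_epoch=1, show_metric=True)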
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='cnn_demo')
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """
    :param mfcc_array: array of MFCC feature matrices (one 13x100 matrix per example)
    :param sdr_array: array of target SDR values, one per example
    :param n_epochs: number of training epochs
    :param take: index of this training run (used in the run_id)
    :return: the trained tflearn.DNN model
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop",
                                     loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
        return model
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy', learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         name='input',
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore:
        model.save(_id + '-model.tflearn')
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model
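Since this variant returns the compiled tflearn.DNN, a caller trains and checkpoints it directly. A sketch (not from the original source; the sizes, learning rate, and random data are assumptions):

# Sketch: train the compiled model returned above (assumed hyperparameters and fake data)
import numpy as np
WIDTH, HEIGHT, LR = 80, 60, 1e-3
X = np.random.rand(16, WIDTH, HEIGHT, 1)        # fake grayscale frames
Y = np.eye(3)[np.random.randint(0, 3, 16)]      # one-hot over the 3 outputs
model = alexnet(WIDTH, HEIGHT, LR, output=3)
model.fit({'input': X}, {'targets': Y}, n_epoch=1, show_metric=True,
          run_id='alexnet_demo')
model.save('alexnet_demo.model')                # hypothetical path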
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print("CNN and doc2vec 2d")
    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])

    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='review')
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu, one_layer_relu_conv, two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9,
                                           lr_decay=0.0002, name="Momentum")
    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
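A hedged example of selecting one of the named architectures (not from the original source; tile size, band count, and data are assumed):

# Sketch: pick an architecture by name and train briefly on fake tiles
import numpy as np
tiles = np.random.rand(8, 64, 64, 3)             # assumed tile_size=64, 3 bands
labels = np.eye(2)[np.random.randint(0, 2, 8)]   # road / not-road one-hot labels
net = model_for_type("two_layer_relu_conv", tile_size=64, on_band_count=3)
net.fit(tiles, labels, n_epoch=1, show_metric=True)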
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load),
                                            training_percent,
                                            testing_percent,
                                            validation_percent)
    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd',
                                 loss='mean_square', learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10, snapshot_step=1000, show_metric=True,
              run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:, 0]
    print('plotting')
    plot(testY, predicted)
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
def generate_network(self):
    """ Return tflearn cnn network. """
    print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
    print(type(self.image_size), type(self.n_epoch),
          type(self.batch_size), type(self.person_ids))
    if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
        # if self.image_size is None or self.n_epoch is None or \
        #         self.batch_size is None or self.person_ids is None:
        raise ValueError("Insufficient values to generate network.\n"
                         "Need (n_epoch, int), (batch_size, int),"
                         "(image_size, list), (person_ids, list).")

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_flip_leftright()

    # Convolutional network building
    network = input_data(
        shape=[None, self.image_size[0], self.image_size[1], 3],
        data_preprocessing=img_prep,
        data_augmentation=img_aug)
    network = conv_2d(network, self.image_size[0],
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, self.image_size[0] * 2,
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = conv_2d(network, self.image_size[0] * 2,
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, self.image_size[0] * 2**4,
                              activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, self.person_num, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy', learning_rate=0.001)
    print('Model has been made!!!?')

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV:
        makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile:
        writeTest(pred)
def build_network(image_size, batch_size=None, n_channels=3):
    network = input_data(shape=[batch_size, image_size[0], image_size[1], n_channels],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 16, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, num_classes, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)
    return network
def setup_model(checkpoint_path=None):
    """Sets up a deep belief network for image classification, based on the
    set up described in the reference below.

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721
    """
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    if checkpoint_path:
        model = tflearn.DNN(network, tensorboard_verbose=3,
                            checkpoint_path=checkpoint_path)
    else:
        model = tflearn.DNN(network, tensorboard_verbose=3)
    return model
def main():
    """
    Trains a CNN to predict SDR from MFCC cluster features and saves the model.
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                               feature, fg_or_bg, sdr_type)
    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop',
                                 loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs, snapshot_step=1000,
              show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    # str() needed here: concatenating a float to a string raises a TypeError
    print('Finished training after ' + str(elapsed) + ' seconds. Saving...')

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder,
                             'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    model.save(model_output_file)
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                        max_checkpoints=10, tensorboard_verbose=3,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV:
        makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile:
        writeTest(pred)
def convolutional_neural_network(width=5, height=6):
    """Create the neural network model.

    Args:
        width: Width of the pseudo image
        height: Height of the pseudo image

    Returns:
        convnet: Output

    """
    # Initialize key variables
    conv1_filter_count = 32
    conv2_filter_count = 64
    fc_units = 1024
    image_height = height
    image_width = width
    filter_size = 2
    pooling_kernel_size = 2
    keep_probability = 0.6
    fully_connected_units = 10

    # Create the convolutional network stuff
    convnet = input_data(
        shape=[None, image_width, image_height, 1], name='input')

    convnet = conv_2d(
        convnet, conv1_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = conv_2d(
        convnet, conv2_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = fully_connected(convnet, fc_units, activation='relu')
    convnet = dropout(convnet, keep_probability)

    convnet = fully_connected(
        convnet, fully_connected_units, activation='softmax')
    convnet = regression(
        convnet, optimizer='adam', learning_rate=0.01,
        loss='categorical_crossentropy', name='targets')

    return convnet
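Because convolutional_neural_network() returns the regression layer rather than a compiled model, the caller still wraps it in tflearn.DNN. A sketch (not from the original source; the fake data shapes follow the default width=5, height=6):

# Sketch: wrap the returned graph and train on fake pseudo-images
import numpy as np
import tflearn
convnet = convolutional_neural_network(width=5, height=6)
model = tflearn.DNN(convnet, tensorboard_verbose=0)
X = np.random.rand(20, 5, 6, 1)               # assumed data: 20 pseudo images
Y = np.eye(10)[np.random.randint(0, 10, 20)]  # one-hot over 10 output units
model.fit({'input': X}, {'targets': Y}, n_epoch=1, show_metric=True)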
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print("CNN and word2vec2d")
    y_test = testY
    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=5, shuffle=True,
              validation_set=(testX, testY),
              show_metric=True, run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
def vggnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'VGG Network'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_vgg',
                        max_checkpoints=1, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=500, shuffle=True, show_metric=True,
              batch_size=32, snapshot_step=500, snapshot_epoch=False,
              run_id='vgg')
def build_model_2_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3,
                        tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
def create_model(learning_rate, input_shape, nb_classes, base_path, drop=1):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
def main():
    """
    Trains a CNN architecture and plots the results over a validation set.

    Returns:
        None
    """
    # Load the SDR and hist data
    data = load_data('reverb_pan_full_sdr.txt', 'pickle/')

    # split data into train and test sets
    test_percent = 0.15
    train, test, validate = split_into_sets(len(data['sdr']), 1 - test_percent,
                                            test_percent, 0)
    x_train = np.expand_dims([data['input'][i] for i in train], -1)
    y_train = np.expand_dims([data['sdr'][i] for i in train], -1)
    x_test = np.expand_dims([data['input'][i] for i in test], -1)
    y_test = np.expand_dims([data['sdr'][i] for i in test], -1)

    # construct the CNN.
    inp = input_data(shape=[None, 50, 50, 1], name='input')

    # two convolutional layers with max pooling
    conv1 = conv_2d(inp, 32, [5, 5], activation='relu', regularizer="L2")
    max_pool = max_pool_2d(conv1, 2)
    conv2 = conv_2d(max_pool, 64, [5, 5], activation='relu', regularizer="L2")
    max_pool2 = max_pool_2d(conv2, 2)

    # two fully connected layers
    full = fully_connected(max_pool2, 128, activation='tanh')
    full = dropout(full, 0.8)
    full2 = fully_connected(full, 256, activation='tanh')
    full2 = dropout(full2, 0.8)

    # output regression node
    out = fully_connected(full2, 1, activation='linear')
    network = regression(out, optimizer='sgd', learning_rate=0.01,
                         name='target', loss='mean_square')

    model = tflearn.DNN(network, tensorboard_verbose=1,
                        checkpoint_path='checkpoint.p',
                        tensorboard_dir='tmp/tflearn_logs/')
    model.fit({'input': x_train}, {'target': y_train}, n_epoch=1000,
              validation_set=(x_test, y_test), snapshot_step=10000,
              run_id='convnet_duet_3x3')

    predicted = np.array(model.predict(x_test))[:, 0]
    plot(y_test, predicted)
def train_neural_net(convolution_patch_size,
                     bands_to_use,
                     image_size,
                     train_images,
                     train_labels,
                     test_images,
                     test_labels,
                     number_of_batches,
                     batch_size):
    on_band_count = 0
    for b in bands_to_use:
        if b == 1:
            on_band_count += 1

    train_images = train_images.astype(numpy.float32)
    train_images = (train_images - 127.5) / 127.5
    test_images = test_images.astype(numpy.float32)
    test_images = (test_images - 127.5) / 127.5

    # Convolutional network building
    network = input_data(shape=[None, image_size, image_size, on_band_count])
    network = conv_2d(network, 32, convolution_patch_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, convolution_patch_size, activation='relu')
    network = conv_2d(network, 64, convolution_patch_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # batch_size was originally 96
    # n_epoch was originally 50
    # each epoch is 170 steps I think

    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(train_images, train_labels,
              n_epoch=int(number_of_batches / 100), shuffle=False,
              validation_set=(test_images, test_labels),
              show_metric=True, batch_size=batch_size,
              run_id='cifar10_cnn')

    return model.predict(test_images)
def cnn():
    network = input_data(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input')
    network = conv_2d(network, 8, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, CODE_LEN * MAX_CHAR, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    return network
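As with the previous builder, cnn() returns only the graph; a hedged sketch of wiring it up (not from the original source; the constant values and placeholder targets are assumptions):

# Sketch: assumed values for the module-level constants cnn() reads
import numpy as np
import tflearn
IMAGE_HEIGHT, IMAGE_WIDTH, CODE_LEN, MAX_CHAR = 60, 160, 4, 36  # assumed
network = cnn()
model = tflearn.DNN(network, tensorboard_verbose=0)
X = np.random.rand(10, IMAGE_HEIGHT, IMAGE_WIDTH, 1)   # fake captcha images
Y = np.random.rand(10, CODE_LEN * MAX_CHAR)            # placeholder targets
model.fit({'input': X}, {'target': Y}, n_epoch=1, show_metric=True)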
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         name='input',
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV:
        makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile:
        writeTest(pred)
def create_cnn_layers():
    shape = [None, IMAGE_STD_HEIGHT, IMAGE_STD_WIDTH, RGB_COLOR_COUNT]
    # input_layer = Input(name='input', shape=shape)
    input_layer = input_data(name='input', shape=shape)
    # h = Convolution2D(22, 5, 5, activation='relu', dim_ordering=dim_ordering)(input_layer)
    h = conv_2d_specialized(input_layer, 22, [5, 5])
    POOL_SIZE = [2, 2]
    # h = MaxPooling2D(pool_size=POOL_SIZE)(h)
    h = max_pool_2d(h, POOL_SIZE, padding='valid')
    h = local_response_normalization(h)
    # h = Convolution2D(44, 3, 3, activation='relu', dim_ordering=dim_ordering)(h)
    h = conv_2d_specialized(h, 44, [3, 3])
    # h = MaxPooling2D(pool_size=POOL_SIZE)(h)
    h = max_pool_2d(h, POOL_SIZE, padding='valid')
    h = local_response_normalization(h)
    # h = Dropout(0.25)(h)
    h = dropout(h, 1 - 0.25)
    # last_cnn_layer = Flatten()(h)
    last_cnn_layer = flatten(h)
    return input_layer, last_cnn_layer
def stop_dnn():
    img_pre_processing = ImagePreprocessing()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=10.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_pre_processing,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
def create_alexnet(num_classes, restore=False):
    # Building 'AlexNet'
    network = input_data(shape=[None, 224, 224, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    # Final classifier layer was missing in the original; without it,
    # num_classes and restore were never used. restore=restore lets this
    # layer be re-initialized when fine-tuning with a different class count.
    network = fully_connected(network, num_classes, activation='softmax',
                              restore=restore)
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy', learning_rate=0.001)
    print('Model has been made!!!?')

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_epoch=False, snapshot_step=200,
              show_metric=True, batch_size=batchNum, shuffle=True,
              run_id='resnext_cifar10')
    if modelStore:
        model.save(_id + '-model.tflearn')
h5f = h5py.File('dataset.h5', 'r')
X = h5f['X']
Y = h5f['Y']

#img_aug = tflearn.ImageAugmentation()
#img_aug.add_random_flip_leftright()
#img_aug.add_random_90degrees_rotation (rotations=[0, 2])

network = input_data(shape=[None, 300, 300, 3])
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2,
                        name='pool2_3_3_s2')  # completed: the source cut off mid-call; name follows the GoogLeNet snippets below
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d  # added: conv layers were used but never imported
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist

if __name__ == '__main__':
    x, y, test_x, test_y = mnist.load_data(one_hot=True)
    x = x.reshape([-1, 28, 28, 1])
    test_x = test_x.reshape([-1, 28, 28, 1])

    # renamed from 'input_data' so the variable no longer shadows the imported function
    net = input_data(shape=[None, 28, 28, 1], name='input')
    # nb_filter is the number of output channels; filter_size gives a 5x5 kernel
    conv1 = conv_2d(net, nb_filter=32, filter_size=5, activation='relu')
    # kernel_size gives a 2x2 pooling window
    pool1 = max_pool_2d(conv1, kernel_size=2)
    conv2 = conv_2d(pool1, nb_filter=64, filter_size=5, activation='relu')
    pool2 = max_pool_2d(conv2, kernel_size=2)
    fc = fully_connected(pool2, n_units=1024, activation='relu')
    fc = dropout(fc, 0.8)
    output = fully_connected(fc, n_units=10, activation='softmax')
    network = regression(output, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='targets')
X_train, y_train, X_test, y_test = mnist.load_data(
    data_dir="/home/user/dataset/MNIST_data/", one_hot=True)
X_train = X_train.reshape([-1, 28, 28, 1])
X_test = X_test.reshape([-1, 28, 28, 1])

# define a placeholder to recv data
net = input_data(shape=[None, 28, 28, 1], name="input")

# layer1
net = conv_2d(net, nb_filter=32, filter_size=[5, 5],
              activation="relu", padding='same')
net = max_pool_2d(
    net, kernel_size=[2, 2])  # strides default to be the same as kernel_size

# layer2
net = conv_2d(net, nb_filter=64, filter_size=[5, 5],
              activation="relu", padding='same')
net = max_pool_2d(net, kernel_size=[2, 2])

####################################################################
# No need to reshape
####################################################################
# layer3
net = fully_connected(net, n_units=500, activation="relu")
params = {
    'conv_filter': 5,
    'pool_width': 3,
    'pool_stride': 2,
    'epoch': 50,
    'id': 'cnn_try'
}

# Build CNN
input_data = input_data(shape=[None, 32, 32, 3], data_augmentation=img_aug)
conv1 = conv_2d(input_data, 64, params['conv_filter'], activation='relu', regularizer='L2')
pool1 = max_pool_2d(conv1, params['pool_width'], params['pool_stride'])
lrn1 = local_response_normalization(pool1)
conv2 = conv_2d(lrn1, 64, params['conv_filter'], activation='relu', regularizer='L2')
pool2 = max_pool_2d(conv2, params['pool_width'], params['pool_stride'])
lrn2 = local_response_normalization(pool2)
conv3 = conv_2d(lrn2, 128, params['conv_filter'], activation='relu', regularizer='L2')
# conda install tflearn or pip install tflearn
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, fully_connected, dropout
from tflearn.layers.estimator import regression

model = input_data(shape=[None, 50, 50, 1], name='input')

# first convolution layer
model = conv_2d(model, 46, 5, activation='relu')
model = max_pool_2d(model, 5)

# second convolution layer
model = conv_2d(model, 42, 5, activation='relu')
model = max_pool_2d(model, 5)

# third convolution layer
model = conv_2d(model, 38, 5, activation='relu')
model = max_pool_2d(model, 5)

# fully connected layer
model = fully_connected(model, 1024, activation='relu')
model = dropout(model, 0.7)
model = fully_connected(model, 2, activation='softmax')
    np.save('test_data.npy', testing_data)
    return testing_data

train_data = create_train_data()
train_data = np.load('train_data.npy')

# Convolutional Neural Network
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

# saving our model after every session, and reloading it if we have a saved version
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)  # completed: reload the saved weights, per the comment above
def create_googlenet(num_classes):
    # Building 'GoogleNet'
    network = input_data(shape=[None, 227, 227, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # 3a
    inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, name='inception_3a_pool')
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3,
                                 inception_3a_5_5, inception_3a_pool_1_1],
                                mode='concat', axis=3)

    # 3b
    inception_3b_1_1 = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3,
                                 inception_3b_5_5, inception_3b_pool_1_1],
                                mode='concat', axis=3, name='inception_3b_output')
    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    # 4a
    inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3,
                                 inception_4a_5_5, inception_4a_pool_1_1],
                                mode='concat', axis=3, name='inception_4a_output')

    # 4b
    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')  # name fixed: original copy-pasted 'inception_4a_1_1'
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3,
                                 inception_4b_5_5, inception_4b_pool_1_1],
                                mode='concat', axis=3, name='inception_4b_output')

    # 4c
    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3,
                                 inception_4c_5_5, inception_4c_pool_1_1],
                                mode='concat', axis=3, name='inception_4c_output')

    # 4d
    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3,
                                 inception_4d_5_5, inception_4d_pool_1_1],
                                mode='concat', axis=3, name='inception_4d_output')

    # 4e
    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3,
                                 inception_4e_5_5, inception_4e_pool_1_1],
                                axis=3, mode='concat')
    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    # 5a
    inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')
    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3,
                                 inception_5a_5_5, inception_5a_pool_1_1],
                                axis=3, mode='concat')

    # 5b
    inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3, activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3,
                                 inception_5b_5_5, inception_5b_pool_1_1],
                                axis=3, mode='concat')

    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    # fc
    loss = fully_connected(pool5_7_7, num_classes, activation='softmax')
    network = regression(loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.01)
    return network
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import pyautogui

cam = cv2.VideoCapture(0)
cam.set(3, 200)
cam.set(4, 200)
#cam.set(cv2.CAP_PROP_FPS, 3)

img_size = 224
boolean = 1
KEY = 0

convnet = input_data(shape=[None, img_size, img_size, 1], name='input')
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
MaxPool2D_1 = max_pool_2d(convnet, 5)
convnet = fully_connected(MaxPool2D_1, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.0001,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')
                       (IMG_SIZE, IMG_SIZE))  # Resizing the image to 64*64
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data


#############################################################################
# Building the model
tf.reset_default_graph()
train_data = create_train_data()

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

convnet = conv_2d(convnet, 32, 5,
                  activation='relu')  # Convolution Layer-1 with 32 filters of size 5x5
convnet = max_pool_2d(convnet, 5)  # Max pooling with a 5x5 window

convnet = conv_2d(convnet, 64, 5,
                  activation='relu')  # Convolution Layer-2 with 64 filters of size 5x5
convnet = max_pool_2d(convnet, 5)  # Max pooling with a 5x5 window

convnet = conv_2d(convnet, 32, 5,
                  activation='relu')  # Convolution Layer-3 with 32 filters of size 5x5
convnet = max_pool_2d(convnet, 5)  # Max pooling with a 5x5 window

convnet = fully_connected(convnet, 1024,
                          activation='relu')  # Fully Connected Layer-4 with 1024 neurons
convnet = dropout(convnet, 0.4)  # Dropout rate set to 0.4

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='targets')  # completed from the matching snippets above; LR assumed defined earlier
    train_y.append(training[img][1])

for img in range(len(testing)):
    test_x.append((testing[img][0]))
    test_y.append(testing[img][1])

print(np.array(train_x).shape)
print(np.array(train_y).shape)

train_x = np.array(train_x).reshape(3686, 128, 128, 1)
train_y = np.array(train_y)
test_x = np.array(test_x).reshape(1579, 128, 128, 1)
test_y = np.array(test_y)
print(type(train_x))

convnet = input_data(shape=[None, 128, 128, 1], name='input')

convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 256, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = fully_connected(convnet, 4096, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2048, activation='relu')
convnet = dropout(convnet, 0.8)
def analysis(filepath):
    verify_data = process_verify_data(filepath)

    str_label = "Cannot make a prediction."
    status = "Error"

    tf.reset_default_graph()
    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

    '''
    # relu:
        ReLU is used in the middle / hidden layers of the network.
        It is essentially the function max(0, x): the activation is
        either zero or positive, never negative.
    # softmax:
        Softmax is used for the output layer in multi-class classification
        problems. It is essentially e^(x_i) / sum_j e^(x_j), which outputs
        a vector of probabilities of each class.
    '''

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('Model loaded successfully.')
    else:
        print('Error: Create a model using neural_network.py first.')

    img_data, img_name = verify_data[0], verify_data[1]

    orig = img_data
    data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
    model_out = model.predict([data])[0]

    if np.argmax(model_out) == 0:
        str_label = 'Healthy'
    elif np.argmax(model_out) == 1:
        str_label = 'Bacterial'
    elif np.argmax(model_out) == 2:
        str_label = 'Viral'
    elif np.argmax(model_out) == 3:
        str_label = 'Lateblight'

    if str_label == 'Healthy':
        status = 'Healthy'
    else:
        status = 'Unhealthy'

    result = 'Status: ' + status + '.'
    if str_label != 'Healthy':
        result += '\nDisease: ' + str_label + '.'

    return result
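A quick numeric check of the softmax formula described in the comment block above (illustrative only; the logits are arbitrary):

# Softmax demo: turn raw logits into a probability vector
import numpy as np
logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.sum(np.exp(logits))
print(probs)        # approx. [0.659, 0.242, 0.099]
print(probs.sum())  # 1.0 -- valid probability distribution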
testy = pd.read_csv("data/csvTestLabel 10k x 1.csv", header=None) #Process data trainx = trainx.values.astype('float32').reshape([-1, 28, 28, 1]) testx = testx.values.astype('float32').reshape([-1, 28, 28, 1]) trainy = trainy.values.astype('int32') trainy = to_categorical(trainy, 10) testy = testy.values.astype('int32') testy = to_categorical(testy, 10) # Building convolutional network network = input_data(shape=[None, 28, 28, 1], name='input') network = conv_2d(network, 32, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = local_response_normalization(network) network = conv_2d(network, 64, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = local_response_normalization(network) network = fully_connected(network, 128, activation='tanh') network = dropout(network, 0.8) network = fully_connected(network, 256, activation='tanh') network = dropout(network, 0.8) network = fully_connected(network, 10, activation='softmax') network = regression(network, optimizer='adam', learning_rate=0.01, loss='categorical_crossentropy', name='target')
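# The block above defines the network but stops before training. A minimal
# continuation under the same variable names; the epoch count and run_id are
# illustrative assumptions, not values from the original.
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': trainx}, {'target': trainy}, n_epoch=10,
          validation_set=({'input': testx}, {'target': testy}),
          snapshot_step=100, show_metric=True, run_id='csv_mnist')
print(model.evaluate(testx, testy))  # [accuracy] on the 10k held-out rows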
# Modified by: Ajinkya Malhotra
#
# Model that defines all the architecture used for the neural network
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.metrics import Accuracy

acc = Accuracy()

network = input_data(shape=[None, 100, 100, 1])

# Conv layers ------------------------------------
network = conv_2d(network, 64, 3, strides=1, activation='tanh')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 64, 3, strides=1, activation='tanh')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 64, 3, strides=1, activation='tanh')
network = conv_2d(network, 64, 3, strides=1, activation='relu')
network = conv_2d(network, 64, 3, strides=1, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Fully Connected Layers -------------------------
network = fully_connected(network, 1024, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 1024, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 7, activation='softmax')
network = regression(network, optimizer='momentum', metric=acc,
                     loss='categorical_crossentropy')
# The call was cut off after the optimizer in the source; the loss and the
# wiring of the otherwise-unused Accuracy metric are assumed completions.
def construct_inceptionv1onfire(x, y): # from Dunnings/Breckon research paper 2018 network = input_data(shape=[None, y, x, 3]) conv1_7_7 = conv_2d(network, 64, 5, strides=2, activation='relu', name='conv1_7_7_s2') pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2) pool1_3_3 = local_response_normalization(pool1_3_3) conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce') conv2_3_3 = conv_2d(conv2_3_3_reduce, 128, 3, activation='relu', name='conv2_3_3') conv2_3_3 = local_response_normalization(conv2_3_3) pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2') inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1') inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce') inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3') inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce') inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5') inception_3a_pool = max_pool_2d( pool2_3_3, kernel_size=3, strides=1, ) inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1') # merge the inception_3a__ inception_3a_output = merge([ inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1 ], mode='concat', axis=3) inception_3b_1_1 = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1') inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce') inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3') inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce') inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name='inception_3b_5_5') inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool') inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1') #merge the inception_3b_* inception_3b_output = merge([ inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1 ], mode='concat', axis=3, name='inception_3b_output') pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3') inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce') inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3') inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce') inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5') inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool') inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1') inception_4a_output = merge([ inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1 ], mode='concat', axis=3, name='inception_4a_output') pool5_7_7 = 
avg_pool_2d(inception_4a_output, kernel_size=5, strides=1) pool5_7_7 = dropout(pool5_7_7, 0.4) loss = fully_connected(pool5_7_7, 2, activation='softmax') network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=0.001) model = tflearn.DNN(network, checkpoint_path='sp-inceptiononv1onfire', max_checkpoints=1, tensorboard_verbose=2) return model
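# A hedged usage sketch for construct_inceptionv1onfire above. The frame size,
# the input file name, the class ordering, and reuse of the checkpoint name as
# the weights file are all illustrative assumptions, not values from the original.
import cv2
import numpy as np

model = construct_inceptionv1onfire(224, 224)
model.load('sp-inceptiononv1onfire', weights_only=True)  # assumed weights file

frame = cv2.imread('frame.png')  # assumed example frame
frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
out = model.predict([frame])[0]
print('fire' if np.argmax(out) == 0 else 'no fire')  # class order assumed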
                          normalize=True)
X_test = np.reshape(X_test, (-1, 128, 128, 3))

# Read a single test image with cv2 and scale its pixel values to [0, 1]
img = cv2.imread('1.bmp')
x = img.reshape((128, 128, 3)).astype(np.float32) / 255

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

convnet = input_data(shape=[None, 128, 128, 3],
                     data_preprocessing=img_prep,
                     name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 512, activation='relu')
convnet = dropout(convnet, 0.6)

convnet = fully_connected(convnet, 4, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet)
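# The block above builds the graph and the DNN wrapper but never loads weights
# or classifies the image. A minimal continuation; the checkpoint name is an
# illustrative assumption.
model.load('convnet_128.tflearn')  # assumed checkpoint name
pred = model.predict([x])[0]       # x is the normalized 128x128x3 image above
print('class:', np.argmax(pred), 'probabilities:', pred)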
def inceptionv3(width, height, frame_count, lr, output=9, model_name = 'inceptionv3.model'): network = input_data(shape=[None, width, height,3], name='input') conv1_7_7 = conv_2d(network, 64, 28, strides=4, activation='relu', name = 'conv1_7_7_s2') pool1_3_3 = max_pool_2d(conv1_7_7, 9,strides=4) pool1_3_3 = local_response_normalization(pool1_3_3) conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce') conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,12, activation='relu', name='conv2_3_3') conv2_3_3 = local_response_normalization(conv2_3_3) pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=12, strides=2, name='pool2_3_3_s2') inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1') inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce') inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=12, activation='relu', name = 'inception_3a_3_3') inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' ) inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=15, activation='relu', name= 'inception_3a_5_5') inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=12, strides=1, ) inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1') # merge the inception_3a__ inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3) inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' ) inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce') inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=9, activation='relu',name='inception_3b_3_3') inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce') inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=15, name = 'inception_3b_5_5') inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=12, strides=1, name='inception_3b_pool') inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1') #merge the inception_3b_* inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output') pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3') inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce') inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3') inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce') inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5') inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool') inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1') inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output') 
    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')

    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3, name='inception_4c_output')

    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')

    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output,
kernel_size=3, strides=1, name='inception_4e_pool') inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1') inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat') pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3') inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1') inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce') inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3') inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce') inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5') inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool') inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1') inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat') inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1') inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce') inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3') inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce') inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' ) inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool') inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1') inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat') pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1) pool5_7_7 = dropout(pool5_7_7, 0.45) loss = fully_connected(pool5_7_7, output,activation='softmax') network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log') return model
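# A hedged usage sketch for inceptionv3 above. The frame geometry, learning
# rate, epoch count, and the training arrays X and Y are all illustrative
# placeholders, not values from the original.
WIDTH, HEIGHT, LR = 160, 120, 1e-3  # assumed frame size and learning rate
model = inceptionv3(WIDTH, HEIGHT, frame_count=1, lr=LR, output=9)

# X: [n, WIDTH, HEIGHT, 3] frames, Y: one-hot labels over 9 classes (assumed)
model.fit({'input': X}, {'targets': Y}, n_epoch=8,
          snapshot_step=500, show_metric=True, run_id='inceptionv3')
model.save('inceptionv3.model')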
def analysis():
    import cv2
    import numpy as np
    import os
    from tqdm import tqdm

    verify_dir = 'testpicture'
    IMG_SIZE = 50
    LR = 1e-3
    MODEL_NAME = 'healthyvsunhealthy-{}-{}.model'.format(LR, '2conv-basic')

    def process_verify_data():
        verifying_data = []
        for img in tqdm(os.listdir(verify_dir)):
            path = os.path.join(verify_dir, img)
            img_num = img.split('.')[0]
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            verifying_data.append([np.array(img), img_num])
        np.save('verify_data.npy', verifying_data)
        return verifying_data

    verify_data = process_verify_data()

    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression
    import tensorflow as tf

    tf.reset_default_graph()

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('Model loaded!')

    import matplotlib.pyplot as plt

    fig = plt.figure()

    for num, data in enumerate(verify_data):
        img_num = data[1]
        img_data = data[0]

        y = fig.add_subplot(3, 4, num + 1)
        data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
        model_out = model.predict([data])[0]

        if np.argmax(model_out) == 0:
            str_label = 'healthy'
        elif np.argmax(model_out) == 1:
            str_label = 'bacterial'
        elif np.argmax(model_out) == 2:
            str_label = 'viral'
        elif np.argmax(model_out) == 3:
            str_label = 'lateblight'

        status = "HEALTHY" if str_label == 'healthy' else "UNHEALTHY"

        message = tk.Label(text='Status: ' + status, background="lightgreen",
                           fg="Brown", font=("", 15))
        message.grid(column=0, row=3, padx=10, pady=10)

        if str_label == 'bacterial':
            diseasename = "Bacterial Spot"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=bact)
            button3.grid(column=0, row=6, padx=10, pady=10)
        elif str_label == 'viral':
            diseasename = "Yellow leaf curl virus"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=vir)
            button3.grid(column=0, row=6, padx=10, pady=10)
        elif str_label == 'lateblight':
            diseasename = "Late Blight"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=latebl)
            button3.grid(column=0, row=6, padx=10, pady=10)
        else:
            r = tk.Label(text='Plant is healthy', background="lightgreen",
                         fg="Black", font=("", 15))
            r.grid(column=0, row=4, padx=10, pady=10)

    button = tk.Button(text="Exit", command=exit)
    button.grid(column=0, row=9, padx=20, pady=20)
def define_network(self):
    """
    Defines CNN architecture
    :return: CNN model
    """
    # My CNN 1 (type1)

    # # For data normalization
    # img_prep = ImagePreprocessing()
    # img_prep.add_featurewise_zero_center()
    # img_prep.add_featurewise_stdnorm()
    #
    # # Data augmentation: flipped, rotated, blurred, etc. images to enlarge the dataset
    # img_aug = ImageAugmentation()
    # img_aug.add_random_flip_leftright()
    # img_aug.add_random_rotation(max_angle=25.0)
    # img_aug.add_random_blur(sigma_max=3.0)
    #
    # self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
    #                           data_augmentation=img_aug,
    #                           data_preprocessing=img_prep)
    # self.network = conv_2d(self.network, 64, 5, activation='relu')
    # self.network = max_pool_2d(self.network, 3, strides=2)
    # self.network = conv_2d(self.network, 64, 5, activation='relu')
    # self.network = max_pool_2d(self.network, 3, strides=2)
    # self.network = conv_2d(self.network, 128, 4, activation='relu')
    # self.network = dropout(self.network, 0.3)
    # self.network = fully_connected(self.network, 3072, activation='relu')
    # self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    # self.network = regression(self.network, optimizer='adam', loss='categorical_crossentropy')
    # self.model = tflearn.DNN(self.network,
    #                          checkpoint_path=os.path.join(CHECKPOINTS_PATH, 'emotion_recognition'),
    #                          max_checkpoints=1, tensorboard_verbose=0)

    # My CNN 2 (type2)

    # Data augmentation: flipped, rotated, blurred, etc. images to enlarge the dataset
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.0)
    img_aug.add_random_blur(sigma_max=3.0)

    self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
                              data_augmentation=img_aug)
    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.2)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.25)
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.25)
    self.network = fully_connected(self.network, 1024, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = dropout(self.network, 0.45)
    self.network = fully_connected(self.network, 1024, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = dropout(self.network, 0.45)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network, optimizer='adam', loss='categorical_crossentropy')

    self.model = tflearn.DNN(self.network,
                             checkpoint_path=os.path.join(CHECKPOINTS_PATH, 'emotion_recognition'),
                             max_checkpoints=1, tensorboard_verbose=0)
    return self.model
from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.normalization import local_response_normalization from tflearn.layers.estimator import regression # Data loading and basic transformations import tflearn.datasets.mnist as mnist data_dir = "datasets/MNIST" X, Y, X_test, Y_test = mnist.load_data(data_dir=data_dir, one_hot=True) X = X.reshape([-1, 28, 28, 1]) X_test = X_test.reshape([-1, 28, 28, 1]) # Building the network CNN = input_data(shape=[None, 28, 28, 1], name='input') CNN = conv_2d(CNN, 32, 5, activation='relu', regularizer='L2') CNN = max_pool_2d(CNN, 2) CNN = local_response_normalization(CNN) CNN = conv_2d(CNN, 64, 5, activation='relu', regularizer='L2') CNN = max_pool_2d(CNN, 2) CNN = local_response_normalization(CNN) CNN = fully_connected(CNN, 1024, activation=None) CNN = dropout(CNN, 0.5) CNN = fully_connected(CNN, 10, activation='softmax') CNN = regression(CNN, optimizer='adam', learning_rate=0.0001, loss='categorical_crossentropy', name='target') # Training the network
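# The comment above announces training but the block ends there. A minimal
# completion consistent with the surrounding variable names; the epoch count
# and run_id are illustrative assumptions.
model = tflearn.DNN(CNN, tensorboard_verbose=0)
model.fit({'input': X}, {'target': Y}, n_epoch=3,
          validation_set=({'input': X_test}, {'target': Y_test}),
          snapshot_step=1000, show_metric=True, run_id='mnist_cnn')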
def foo(img_fn, model_fn='../data/model/model_weights'):
    """Detect a licence plate in img_fn, segment its characters, and
    return the predicted string."""
    img = cv2.imread(img_fn, cv2.IMREAD_GRAYSCALE)

    # Detect plate candidates with a Haar cascade
    haar_fn = '../data/haarcascade_russian_plate_number.xml'
    haar = cv2.CascadeClassifier(haar_fn)
    detected = haar.detectMultiScale(img)
    plates = []
    for x, y, w, h in detected:
        obj = img[y:y + h, x:x + w]
        plates.append(obj)

    # Binarize the first plate and label connected components
    chars = plates[0] < filters.threshold_minimum(plates[0])
    labeled_chars, _ = ndi.label(chars)
    labeled_chars = (labeled_chars > 1).astype(np.int8)
    contours = measure.find_contours(labeled_chars, .1)

    # Extract per-character bounding boxes, sorted left to right
    letters = []
    for contour in contours:
        xs, ys = zip(*contour)
        x = int(min(xs))
        y = int(min(ys))
        w = int(max(xs) - x + 2)
        h = int(max(ys) - y + 2)
        if w < 15:  # skip fragments too small to be characters
            continue
        letters.append((y, x, h, w))
    letters = sorted(letters)
    letters_img = [plates[0][x:x + w, y:y + h] for y, x, h, w in letters]
    letters_img = [i for i in letters_img if i[0, 0] > 127]

    # Drop outliers much larger than the median character crop
    sizes = [image.size for image in letters_img]
    median = np.median(sizes)
    allowed_size = median + median / 4
    letters_img = [image for image in letters_img if image.size < allowed_size]

    # Resize each character to 64x64, padding the width with white columns
    size = 64
    normalized_img = []
    for i in letters_img:
        ratio = i.shape[0] / i.shape[1]
        img1 = transform.resize(i, [size, int(size / ratio)], mode='constant')
        width = img1.shape[1]
        missing = (size - width) // 2
        ones = np.ones([size, missing])
        img2 = np.append(ones, img1, 1)
        img3 = np.append(img2, ones, 1)
        if 2 * missing + width != size:
            one = np.ones([size, 1])
            img4 = np.append(img3, one, 1)
        else:
            img4 = img3
        normalized_img.append(img4 * 255)

    # Character classifier: three conv/pool blocks and two dense layers
    net_input = input_data(shape=[None, 64, 64, 1])
    conv1 = conv_2d(net_input, nb_filter=4, filter_size=5, strides=[1, 1, 1, 1], activation='relu')
    max_pool1 = max_pool_2d(conv1, kernel_size=2)
    conv2 = conv_2d(max_pool1, nb_filter=8, filter_size=5, strides=[1, 2, 2, 1], activation='relu')
    max_pool2 = max_pool_2d(conv2, kernel_size=2)
    conv3 = conv_2d(max_pool2, nb_filter=12, filter_size=4, strides=[1, 1, 1, 1], activation='relu')
    max_pool3 = max_pool_2d(conv3, kernel_size=2)
    fc1 = fully_connected(max_pool3, n_units=200, activation='relu')
    drop1 = dropout(fc1, keep_prob=.5)
    fc2 = fully_connected(drop1, n_units=36, activation='softmax')
    net = regression(fc2)

    model = DNN(network=net)
    model.load(model_file=model_fn)

    # Predict one character at a time and map class indices to symbols
    labels = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
    predicted = []
    for i in normalized_img:
        y = model.predict(i.reshape([1, 64, 64, 1]))
        y_pred = np.argmax(y[0])
        predicted.append(labels[y_pred])
    return ''.join(predicted)
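# A hedged usage example for foo; the image path is an illustrative
# assumption, not a file from the original project.
if __name__ == '__main__':
    print(foo('../data/examples/plate.jpg'))  # assumed example path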