def augmentation(self, max_angle=5., sigma_max=3., flip_left_right=True, random_rotation=True, random_blur=True):
    try:
        if not any([flip_left_right, random_rotation, random_blur]):
            raise ValueError
        image_aug = ImageAugmentation()
        if flip_left_right:
            # adds left/right-flipped copies of the training images
            image_aug.add_random_flip_leftright()
        if random_rotation:
            # rotates random training images by up to max_angle degrees
            image_aug.add_random_rotation(max_angle=max_angle)
        if random_blur:
            # blurs random training images with a sigma up to sigma_max
            image_aug.add_random_blur(sigma_max=sigma_max)
        self.data_augmentation = image_aug
    except ValueError:
        print("No augmentation selected!")
def net_nodule2d_swethasubramanian(image_dims):
    # image augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    # image pre-processing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = layers.core.input_data(shape=[None, image_dims[0], image_dims[1], image_dims[2], image_dims[3]],
                                 dtype=tf.float32,
                                 data_preprocessing=img_prep,
                                 data_augmentation=img_aug)
    net = layers.conv.conv_2d(net, 50, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.core.fully_connected(net, 512, activation='relu')
    net = layers.core.dropout(net, 0.5)
    net = layers.core.fully_connected(net, 2, activation='softmax')
    net = layers.estimator.regression(net, optimizer='adam',
                                      loss='categorical_crossentropy',
                                      learning_rate=0.001)
    return net
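# A minimal usage sketch (not part of the original source): wrap the graph returned by
# net_nodule2d_swethasubramanian in a tflearn.DNN and train it. The function name
# train_nodule2d, X, Y and image_dims are hypothetical; only tflearn.DNN, fit and save
# are assumed from the tflearn API.
def train_nodule2d(X, Y, image_dims, n_epoch=10):
    net = net_nodule2d_swethasubramanian(image_dims)
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=n_epoch, validation_set=0.1, show_metric=True)
    model.save('nodule2d.tfl')
    return model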
def network(img_shape, name, LR): img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # # # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_blur (sigma_max=3.0) img_aug.add_random_flip_leftright() img_aug.add_random_flip_updown() img_aug.add_random_90degrees_rotation(rotations=[0, 2]) # Building 'AlexNet' network = input_data(shape=img_shape, name=name, data_preprocessing=img_prep, data_augmentation=img_aug ) network = conv_2d(network, 96, 11, strides=4, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 2, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=LR, name='targets') return network
def network(img_shape, name, LR):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3.0)
    img_aug.add_random_90degrees_rotation(rotations=[0, 2])
    network = input_data(shape=img_shape, name=name,
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # def rete(img_shape, name, LR):
    #     network = input_data(shape=img_shape, name=name)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=LR, name='targets')
    return network
def network():
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.8)
    # Normalization of the data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Create random new data (the more you have, the better)
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    # The network input must match the inputs of the data set
    network = input_data(shape=[None, 100, 100, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    """
    Creation of the different hidden layers
    ================ Editing section ================
    """
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 2, activation='relu')
    network = conv_2d(network, 64, 2, activation='relu')
    network = max_pool_2d(network, 2)
    # Fully connected layer, then drop part of the activations in order not to overfit
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.7)
    """
    ====================== End of Editing section ======================
    """
    network = fully_connected(network, 120, activation='softmax')
    # Training hyper-parameters
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    # Creation of the deep neural network with the backup name
    # tensorboard_verbose=0 is optimal for calculation time
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        checkpoint_path='dog_classifier.tfl.ckpt')
    return model
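# A minimal usage sketch (not from the original source): train the dog classifier
# returned by network() above. X and Y stand in for the 100x100 RGB training images
# and their one-hot labels; the epoch count and batch size are illustrative assumptions.
def train_dog_classifier(X, Y, n_epoch=50, batch_size=64):
    model = network()
    model.fit(X, Y, n_epoch=n_epoch, shuffle=True, show_metric=True,
              batch_size=batch_size, run_id='dog-classifier')
    model.save('dog_classifier.tfl')
    return model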
def create_network(optimiser):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_samplewise_stdnorm()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation to add variance to the data
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3)
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer=optimiser,
                         loss='categorical_crossentropy',
                         learning_rate=0.002)
    return network
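# A minimal usage sketch (not part of the original source): build the graph with a chosen
# optimiser and wrap it in a DNN for training. X, Y, X_test and Y_test are assumed to be
# a CIFAR-10-style 32x32 RGB data set; the epoch count and batch size are assumptions.
def train_with_optimiser(X, Y, X_test, Y_test, optimiser='adam'):
    network = create_network(optimiser)
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(X_test, Y_test),
              show_metric=True, batch_size=96)
    return model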
def CNN_Model_Creation(): # Make sure the data is normalized img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping, rotating and blurring the # images on our data set. img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) # Define our network architecture: # Input is a 512x512 image with 3 color channels (red, green and blue) network = input_data(shape=[None, 512, 512,3], data_preprocessing=img_prep, data_augmentation=img_aug) # Step 1: Convolution network = conv_2d(network, 32, 5, activation='relu') network = conv_2d(network, 32, 3, activation='relu') # Step 2: Max pooling network = max_pool_2d(network, 2) # Step 3: Convolution again network = conv_2d(network, 64, 3, activation='relu') # Step 4: Convolution yet again network = conv_2d(network, 64, 3, activation='relu') # Step 5: Max pooling again network = max_pool_2d(network, 2) network = conv_2d(network, 96, 2, activation='relu') network = max_pool_2d(network, 2) # Step 6: Fully-connected 512 node neural network network = fully_connected(network, 512, activation='relu') # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, 0.5) # Step 8: Fully-connected neural network with two outputs (0=isn't a cancer, 1=is a cancer) to make the final prediction network = fully_connected(network, 2, activation='softmax') #momentum = tflearn.optimizers.Momentum(learning_rate=0.05, momentum=0.7, lr_decay=0.5) # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy') # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0) return model
def build_network(self, loadModel=False): """ 构建模型 """ # Smaller 'AlexNet' # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py print('[+] Building CNN') img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() # img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=0.3) # 输入数据 http://tflearn.org/layers/core/#input-data self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1], data_augmentation=img_aug) # self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1]) # 卷积层 http://tflearn.org/layers/conv/#convolution-2d # 激活函数 http://tflearn.org/activations/ self.network = conv_2d(self.network, 64, 3, activation='relu') # self.gap1 = global_avg_pool(self.network) # 池化层 http://tflearn.org/layers/conv/#max-pooling-2d self.network = max_pool_2d(self.network, 2, strides=2) # 卷积层 self.network = conv_2d(self.network, 96, 3, activation='relu') # self.gap2 = global_avg_pool(self.network) # 池化层 self.network = max_pool_2d(self.network, 2, strides=2) # 卷积层 self.network = conv_2d(self.network, 128, 3, activation='relu') self.network = global_avg_pool(self.network) # 全连接层 http://tflearn.org/layers/core/#fully-connected self.network = fully_connected(self.network, 2048, activation='relu', weight_decay=0.001) # dropout随机将部分输出改为0,避免过拟合 http://tflearn.org/layers/core/#dropout self.network = dropout(self.network, 0.8) # 全连接层:softmax分类 # self.network = merge([self.gap1, self.gap2, self.gap3], mode="concat", name="concat") self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax') # 定义损失函数和优化器 http://tflearn.org/layers/estimator/#regression self.network = regression(self.network, # http://tflearn.org/optimizers/ optimizer='Adam', # optimizer='SGD', # http://tflearn.org/objectives/ loss='categorical_crossentropy', learning_rate=0.001) # 定义模型 http://tflearn.org/models/dnn/#deep-neural-network-model self.model = tflearn.DNN( self.network, checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition', tensorboard_dir='c:\\tmp\\tflearn_logs', max_checkpoints=1, tensorboard_verbose=2 ) if loadModel: self.load_model()
def augmentation(self):
    """ Create extra synthetic training data by flipping, rotating and
    blurring the images on our data set. """
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    return img_aug
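# A minimal usage sketch (not from the original source): the ImageAugmentation object
# returned by augmentation() above is meant to be handed to input_data. `builder` is any
# object exposing that method, and the 32x32 RGB shape is an assumption for illustration.
def build_input_with_augmentation(builder, image_size=32):
    img_aug = builder.augmentation()
    return input_data(shape=[None, image_size, image_size, 3],
                      data_augmentation=img_aug)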
def main(data_dir, hdf5, name): batch_size = 256 num_epochs = 10 learning_rate = 0.001 X, Y, X_test, Y_test = get_data(data_dir, hdf5) X, Y = shuffle(X, Y) img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) network = input_data(shape=[None, 32, 32, 3], data_preprocessing=img_prep, data_augmentation=img_aug) # Step 1: Convolution network = conv_2d(network, 32, 3, activation='relu') # Step 2: Max pooling network = max_pool_2d(network, 2) # Step 3: Convolution network = conv_2d(network, 64, 3, activation='relu') # Step 4: Convolution network = conv_2d(network, 64, 3, activation='relu') # Step 5: Max pooling network = max_pool_2d(network, 2) # Step 6: Fully-connected 512 node neural network network = fully_connected(network, 512, activation='relu') # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, 0.5) # Step 8: Fully-connected neural network with two outputs (0=isn't a bird, 1=is a bird) to make the final prediction network = fully_connected(network, 2, activation='softmax') # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='bird-classifier.tfl.ckpt') # Train it! We'll do 100 training passes and monitor it as it goes. model.fit(X, Y, n_epoch=100, shuffle=True, validation_set=(X_test, Y_test), show_metric=True, batch_size=96, snapshot_epoch=True, run_id='bird-classifier') # Save model when training is complete to a file model.save("bird-classifier.tfl") print("Network trained and saved as bird-classifier.tfl!")
def load_model(model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
    '''
    Load the trained tensorflow model

    Args:
        model_path (str) : path to particle-classifier
                           e.g. '/mnt/ARRAY/classifier/model/particle-classifier.tfl'

    Returns:
        model (tf model object) : loaded tfl model
        class_labels : column names read from header.tfl.txt
    '''
    path, filename = os.path.split(model_path)
    header = pd.read_csv(os.path.join(path, 'header.tfl.txt'))
    OUTPUTS = len(header.columns)
    class_labels = header.columns

    tf.reset_default_graph()

    # Same network definition as in tfl_tools scripts
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.75)
    network = fully_connected(network, OUTPUTS, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=model_path)
    model.load(model_path)

    return model, class_labels
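# A minimal usage sketch (not from the original source): classify a single 32x32 RGB
# particle image with the model loaded above. `img` is a hypothetical numpy array of
# shape (32, 32, 3), and numpy is assumed to be imported as np.
def classify_particle(img, model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
    model, class_labels = load_model(model_path)
    prediction = model.predict([img])  # list with one probability vector
    return class_labels[np.argmax(prediction[0])]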
def train_model(images,labels,input_size,kernel_size,cwd_data,cwd_checkpoint,run_name,num_epoch=40,num_labels=6): '''Trains a CNN network and saves the trained model in cwd_checkpoint path :param np.float32 images: RGB images with shape (training_size, input_size, input_size, 3) :param np.float32 labels: labels with shape (training_size, num_labels) :param int input_size: width and height of images :param int kernel_size: kernel size of network :param str cwd_data: path to data folder :param str cwd_data: path to checkpoint folder :param int num_epoch: number of epochs model should train for :param int num_labels: number of classes the network trains for ''' # Real-time data preprocessing img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=360.) img_aug.add_random_blur(sigma_max=3.) img_aug.add_random_flip_updown() # Convolutional network building network = input_data(shape=[None, input_size, input_size, 3], data_preprocessing=img_prep, data_augmentation=img_aug) network = conv_2d(network, input_size/2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size*2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size*2*2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, num_labels, activation='softmax') network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) # Train using classifier model = tflearn.DNN(network, tensorboard_verbose=0,tensorboard_dir=cwd_data,checkpoint_path=cwd_checkpoint,max_checkpoints=2) #model.load(cwd_data+'oct-cvn-48bal-6c-114300') model.fit(images, labels, n_epoch=num_epoch, validation_set=0.1, show_metric=True, run_id=run_name, snapshot_epoch=True)
def setup_model(checkpoint_path=None):
    """Sets up a deep belief network for image classification based on the set-up
    described in the references below.

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721
    """
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    if checkpoint_path:
        model = tflearn.DNN(network, tensorboard_verbose=3,
                            checkpoint_path=checkpoint_path)
    else:
        model = tflearn.DNN(network, tensorboard_verbose=3)
    return model
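# A minimal usage sketch (not part of the original source): build the model with a
# checkpoint prefix and train it on a hypothetical two-class data set. X, Y, X_val,
# Y_val, the epoch count and batch size are illustrative assumptions.
def train_classifier(X, Y, X_val, Y_val):
    model = setup_model(checkpoint_path='classifier.tfl.ckpt')
    model.fit(X, Y, n_epoch=50, validation_set=(X_val, Y_val),
              show_metric=True, batch_size=96, run_id='classifier')
    return model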
def load_model():
    tf.reset_default_graph()
    image_aug = ImageAugmentation()
    image_aug.add_random_blur(1)
    image_aug.add_random_flip_leftright()

    net = input_data(shape=[None] + [W, W, 3], data_augmentation=image_aug)
    net = conv_2d(net, 96, 5, strides=2, activation='relu')
    net = batch_normalization(net)
    net = max_pool_2d(net, 2)
    net = dropout(net, 0.8)
    net = conv_2d(net, 256, 5, strides=2, activation='relu')
    net = batch_normalization(net)
    net = max_pool_2d(net, 2)
    net = dropout(net, 0.8)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = batch_normalization(net)
    net = max_pool_2d(net, 2)
    net = dropout(net, 0.8)
    net = fully_connected(net, 1024, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 1024, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, NUM_CLASSES, activation='softmax')
    net = regression(net, optimizer='adam', loss='categorical_crossentropy',
                     learning_rate=0.0001)
    clf = tflearn.DNN(net)
    clf.load('../results/checkpoint_1543800554.7789965/tmp.model')
    return clf
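# A minimal usage sketch (not from the original source): run inference with the
# classifier loaded above. `image` is a hypothetical numpy array of shape (W, W, 3);
# W and NUM_CLASSES are module-level constants assumed by load_model(), and numpy is
# assumed to be imported as np.
def predict_class(image):
    clf = load_model()
    probs = clf.predict([image])[0]
    return int(np.argmax(probs))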
def network(img_shape, name, LR): img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # # # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_blur(sigma_max=3.0) img_aug.add_random_flip_leftright() img_aug.add_random_flip_updown() img_aug.add_random_90degrees_rotation(rotations=[0, 2]) # Building Residual Network network = tflearn.input_data(shape=img_shape, name=name, data_preprocessing=img_prep, data_augmentation=img_aug) network = tflearn.conv_2d(network, 16, 3, regularizer='L2', weight_decay=0.0001) network = tflearn.resnext_block(network, n, 16, 32) network = tflearn.resnext_block(network, 1, 32, 32, downsample=True) network = tflearn.resnext_block(network, n - 1, 32, 32) network = tflearn.resnext_block(network, 1, 64, 32, downsample=True) network = tflearn.resnext_block(network, n - 1, 64, 32) network = tflearn.batch_normalization(network) network = tflearn.activation(network, 'relu') network = tflearn.global_avg_pool(network) # Regression network = tflearn.fully_connected(network, 2, activation='softmax') opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True) network = tflearn.regression(network, optimizer=opt, name='targets', loss='categorical_crossentropy') return network
def __init__(self):
    image_size = 32
    self.image_size = image_size
    # Same network definition as before
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    network = input_data(shape=[None, image_size, image_size, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4, activation='softmax')
    acc = Accuracy(name="Accuracy")
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005, metric=acc)
    # self.model = tflearn.DNN(network, checkpoint_path='AllData.tflearn', max_checkpoints=3,
    #                          tensorboard_verbose=3, tensorboard_dir='tmp/tflearn_logs/')
    self.model = tflearn.DNN(network)
    self.model.load("Candidates/AllData_final.tflearn_Nov25")
def getReseau(): size = settings.size nb_filter = settings.nb_filter filter_size = settings.filter_size # Make sure the data is normalized img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping, rotating and blurring the # images on our data set. img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) # Define our network architecture: # Input is a 32x32 image with 3 color channels (red, green and blue) network = input_data(shape=[None, size, size, 3], data_preprocessing=img_prep, data_augmentation=img_aug) reseau = settings.reseau if reseau == 1: # Step 1: Convolution network = conv_2d(network, nb_filter, filter_size, activation='relu') # Step 2: Max pooling network = max_pool_2d(network, 2) # Step 3: Convolution again network = conv_2d(network, nb_filter * 2, filter_size, activation='relu') # Step 4: Convolution yet again network = conv_2d(network, nb_filter * 2, filter_size, activation='relu') # Step 5: Max pooling again network = max_pool_2d(network, 2) # Step 6: Fully-connected 512 node neural network network = fully_connected(network, nb_filter * 8, activation='relu') # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, 0.5) elif reseau == 2: network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 512, activation='relu') network = fully_connected(network, 512, activation='relu') elif reseau == 3: network = conv_2d(network, 32, 3, activation='relu') network = avg_pool_2d(network, 2) network = conv_2d(network, 32, 3, activation='relu') network = avg_pool_2d(network, 2) network = conv_2d(network, 32, 3, activation='relu') network = avg_pool_2d(network, 2) network = fully_connected(network, 512, activation='relu') network = fully_connected(network, 512, activation='relu') network = dropout(network, 0.5) elif reseau == 4: network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 5, padding='valid', activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 5, padding='valid', activation='relu') network = fully_connected(network, 512, activation='relu') network = dropout(network, 0.5) elif reseau == 5: network = conv_2d(network, 64, 3, activation='relu') network = conv_2d(network, 64, 3, activation='relu') network = avg_pool_2d(network, 2) network = conv_2d(network, 32, 3, activation='relu') network = conv_2d(network, 32, 3, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 512, activation='relu') network = fully_connected(network, 512, activation='relu') # Step 8: Fully-connected neural network with three outputs (0=isn't a bird, 1=is a bird) to make the final prediction network = fully_connected(network, ld.getLabelsNumber(), activation='softmax') # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=settings.learning_rate) # Wrap the network in a model object # model = 
tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='dataviz-classifier.tfl.ckpt')
    model = tflearn.DNN(network, tensorboard_verbose=0)  # , checkpoint_path='data-classifier/dataviz-classifier.tfl.ckpt')
    return model
def test(labels_orig, filenames, n_epoch, n_classes, image_size, conv_pass, logfile): print() print('n_epochs: ', n_epoch) print('selected image size: ', image_size) print('number of convolution block passes: ', conv_pass) print() dirname = '/data/shared/images/grocerystore/' sz = int(image_size) print('loading data') X = pickle.load(open("grocery_dataset_X.pkl", "rb")) X_test = pickle.load(open("grocery_dataset_X_test.pkl", "rb")) Y = pickle.load(open("grocery_dataset_Y.pkl", "rb")) Y_test = pickle.load(open("grocery_dataset_Y_test.pkl", "rb")) print('image preparations') X, Y = shuffle(X, Y) # Make sure the data is normalized img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping, rotating and blurring the images img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) print('defining network architecture') # Define network architecture:Input is a 32x32 image with 3 color channels #network = input_data(shape=[None, 32, 32, 3],data_preprocessing=img_prep,data_augmentation=img_aug) network = input_data(shape=[None, sz, sz, 3], data_preprocessing=img_prep, data_augmentation=img_aug) #2 passes through convolution and pooling for j in range(1, (int(conv_pass) + 1)): print('convolution and pooling sequence: ', str(j)) # Step 1: Convolution network = conv_2d(network, 32, 3, activation='relu') # Step 2: Max pooling network = max_pool_2d(network, 2) # Step 3: Convolution again network = conv_2d(network, 64, 3, activation='relu') # Step 4: Convolution yet again network = conv_2d(network, 64, 3, activation='relu') # Step 5: Max pooling again network = max_pool_2d(network, 2) print('finished convolution and pooling') # Step 6: Fully-connected 512 node neural network network = fully_connected(network, 512, activation='relu') # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, 0.5) # Step 8: Fully-connected neural network with five outputs to make the final prediction network = fully_connected(network, 5, activation='softmax') # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) print('wrapping network in a model') # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='grocery-classifier.tfl.ckpt') # Train it! We'll do 100 training passes and monitor it as it goes. print('training with ' + str(n_epoch) + ' passes...') # batch_size was 96 for 32x32 image; n*image size bs = 3 * sz model.fit(X, Y, int(n_epoch), validation_set=(X_test, Y_test), show_metric=True, batch_size=bs) accuracy_score = model.evaluate(X_test, Y_test) accuracy = '... accuracy: %0.4f%%' % (accuracy_score[0] * 100) print(accuracy) print('saving results') message = str(accuracy) + ' with ' + str(n_epoch) + ' passes' logstatus(os.getcwd(), logfile, message, 'a')
def evaluate(index, zone): if (not os.path.isfile('models/zone' + str(zone) + '/' + str(index) + '/checkpoint')): return [], [], [] # Same network definition as before img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) #adding here if (zone == 13 or zone == 14): dimX = 256 dimY = 75 elif (zone == 11 or zone == 12): dimX = 256 dimY = 75 elif (zone == 10): dimX = 237 dimY = 80 elif (zone == 9): dimX = 50 dimY = 80 elif (zone == 8): if (index == 1 or index == 13): dimX = 225 dimY = 80 elif (index == 9): dimX = 237 dimY = 80 elif (zone == 6 or zone == 7): dimX = 256 dimY = 60 elif (zone == 5): dimX = 512 dimy = 80 else: dimX = 0 dimY = 0 network = input_data( shape=[None, dimY, dimX, 1], #zone14,13 #network = input_data(shape=[None, 80, 512, 1], #zone5 data_preprocessing=img_prep, data_augmentation=img_aug) network = conv_2d(network, 32, 3, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, 64, 3, activation='relu') network = conv_2d(network, 64, 3, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, 2, activation='softmax') network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='models/zone' + str(zone) + '/' + str(index) + '/TSA-Zone' + str(zone) + '-Angle-' + str(index) + '.tfl') #zone14 model.load("models/zone" + str(zone) + "/" + str(index) + "/TSA-Zone" + str(zone) + "-Angle-" + str(index) + ".tfl") #zone14 apsid = [] predArray = [] bnotb = [] mypath = "../aps/test_data/" onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] tp = 0 tn = 0 fp = 0 fn = 0 for fname in onlyfiles: if fname.endswith(".aps"): # Load the image file #img = scipy.ndimage.imread(args.image, mode="RGB") apsid.append(fname.split('.')[0]) single_image = sg.get_single_image("../aps/test_data/" + fname, index) img = sg.convert_to_grayscale(single_image) crop_dim = sg.get_crop_dimensions(index, zone) img = img[crop_dim[0]:crop_dim[1], crop_dim[2]:crop_dim[3]] img = np.asfarray(img).reshape(dimY, dimX, 1) #zone14,13 # Predict prediction = model.predict([img]) #print(prediction) predArray.append(prediction) # Check the result. is_threat = np.argmax(prediction[0]) == 1 bnotb.append(is_threat) final_result = [] filename = 'test_labels.csv' with open(filename) as csvfile: readCSV = csv.reader(csvfile, delimiter=',') for row in readCSV: final_result.append(row) if is_threat: print("That's detetced: " + str(fname)) flag = True for value in final_result: if value[0] + ".aps" == fname: label = int(value[1]) if zone == label: tp = tp + 1 flag = False break if flag: fp = fp + 1 else: flag = True for value in final_result: if value[0] + ".aps" == fname: label = int(value[1]) if zone == label: fn = fn + 1 flag = False break if flag: tn = tn + 1 print('True positives', tp) print('False positives', fp) print('True negatives', tn) print('False negatives', fn) return apsid, predArray, bnotb
def test_attribute(featurenum, n_epoch, n_images, image_size, conv_pass, training, logfile): print() print('iteration: ', featurenum) print('n_epochs: ', n_epoch) print('number of images: ', n_images) print('selected image size: ', image_size) print('number of convolution block passes: ', conv_pass) print('training set (%): ', training) print() attrvalues = {} with open('list_attr_celeba.txt') as f: numimages = int(f.readline()) attrnames = f.readline().split() for line in f: tokens = line.strip().split() attrvalues[tokens[0]] = np.array([int(a) for a in tokens[1:]]) numfeatures = 1 selfeatures = [featurenum] selected = ([attrnames[i] for i in selfeatures]) print('current rule is: ', selected) selfeaturesvec = -np.ones([ 40, ]) for i in selfeatures: selfeaturesvec[i] = 1 selfeaturesvec = selfeaturesvec.astype(int) # create labels labelsall = [] X = [] for k in attrvalues.keys(): if all(attrvalues[k][selfeatures] == 1): labelsall.append(1) else: labelsall.append(0) print('resizing images') num = int(n_images) sz = int(image_size) rids = np.random.permutation(numimages)[0:num] data = np.zeros([num, sz, sz, 3]) labels = np.zeros([num, 2]) for i in range(num): sampleid = rids[i] #error fix 0 image fname = './img_align_celeba/' + str(sampleid + 1).zfill(6) + '.jpg' #print(fname) img = Image.open(fname) attrs = attrvalues[str(sampleid + 1).zfill(6) + '.jpg'] img1 = img.resize([sz, sz]) imgarr = np.array(img1.getdata()).reshape(img1.size[0], img1.size[1], 3) data[i, :, :, :] = imgarr labels[i, labelsall[sampleid]] = 1 # possible eror here print('splitting into training and test data') #split data into train and test (cast to int) ''' spl = int(math.floor(num/2)) X = data[0:spl,:,:,:] X_test = data[spl:,:,:,:] Y = labels[0:spl,:] Y_test = labels[spl:,:] ''' tr = int(math.floor(num * (int(training) / 100))) te = int(num - tr) print("training and testing numbers: ", tr, te) tot = tr + te print("checking for equality ", tot, num) X = data[0:tr, :, :, :] X_test = data[tr:, :, :, :] Y = labels[0:te, :] Y_test = labels[te:, :] print('pickling data') # save data pickle.dump(X, open('face_dataset_X.pkl', 'wb')) pickle.dump(Y, open('face_dataset_Y.pkl', 'wb')) pickle.dump(X_test, open('face_dataset_X_test.pkl', 'wb')) pickle.dump(Y_test, open('face_dataset_Y_test.pkl', 'wb')) print('loading data') X = pickle.load(open("face_dataset_X.pkl", "rb")) X_test = pickle.load(open("face_dataset_X_test.pkl", "rb")) Y = pickle.load(open("face_dataset_Y.pkl", "rb")) Y_test = pickle.load(open("face_dataset_Y_test.pkl", "rb")) print('image preparations') print("not shuffling when training not equal to test size") #X, Y = shuffle(X, Y) # Make sure the data is normalized img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping, rotating and blurring the images img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) 
print('defining network architecture') # Define network architecture:Input is a 32x32 image with 3 color channels #network = input_data(shape=[None, 32, 32, 3],data_preprocessing=img_prep,data_augmentation=img_aug) network = input_data(shape=[None, sz, sz, 3], data_preprocessing=img_prep, data_augmentation=img_aug) #2 passes through convolution and pooling for j in range(1, (int(conv_pass) + 1)): print('convolution and pooling sequence: ', str(j)) # Step 1: Convolution network = conv_2d(network, 32, 3, activation='relu') # Step 2: Max pooling network = max_pool_2d(network, 2) # Step 3: Convolution again network = conv_2d(network, 64, 3, activation='relu') # Step 4: Convolution yet again network = conv_2d(network, 64, 3, activation='relu') # Step 5: Max pooling again network = max_pool_2d(network, 2) print('finished convolution and pooling') # Step 6: Fully-connected 512 node neural network network = fully_connected(network, 512, activation='relu') # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, 0.5) # Step 8: Fully-connected neural network with two outputs (0=isn't a face, 1=is a face) to make the final prediction network = fully_connected(network, 2, activation='softmax') # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) print('wraping network in a model') # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='face-classifier.tfl.ckpt') # Train it! We'll do 100 training passes and monitor it as it goes. print('training with ' + str(n_epoch) + ' passes...') # batch_size was 96 for 32x32 image; n*image size bs = 3 * sz model.fit(X, Y, int(n_epoch), validation_set=(X_test, Y_test), show_metric=True, batch_size=bs) accuracy_score = model.evaluate(X_test, Y_test) accuracy = '... accuracy: %0.4f%%' % (accuracy_score[0] * 100) print(accuracy) print('saving results') message = 'feature #' + str(featurenum) + ': ' + selected[0] + ' ' + str( accuracy) + ' with ' + str(n_epoch) + ' passes' logstatus(os.getcwd(), logfile, message, 'a')
X2_FAST = genfromtxt('X2(FAST).csv', delimiter=',')
testX_FAST = genfromtxt('testX(FAST).csv', delimiter=',')

X1_FAST = X1_FAST.reshape([-1, 29, 29, 1])
X2_FAST = X2_FAST.reshape([-1, 29, 29, 1])
X_FAST = np.concatenate((X1_FAST, X2_FAST), axis=0)
testX_FAST = testX_FAST.reshape([-1, 29, 29, 1])

X = np.concatenate((X, X_canny), axis=3)
X = np.concatenate((X, X_FAST), axis=3)
testX = np.concatenate((testX, testX_canny), axis=3)
testX = np.concatenate((testX, testX_FAST), axis=3)

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_rotation(max_angle=10.)
img_aug.add_random_blur(sigma_max=1.5)

# Building convolutional network
network = input_data(shape=[None, 29, 29, 3], name='input', data_augmentation=img_aug)
network = conv_2d(network, 16, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2)
def network(img_shape, name, LR): img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # # # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_blur (sigma_max=3.0) img_aug.add_random_flip_leftright() img_aug.add_random_flip_updown() img_aug.add_random_90degrees_rotation(rotations=[0, 2]) network = input_data(shape=img_shape, name=name, data_preprocessing=img_prep, data_augmentation=img_aug ) conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3'))) conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3'))) conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3'))) maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3') conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1'))) conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3'))) maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3') tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1'))) tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1'))) tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5'))) tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1'))) tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3'))) tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3'))) tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3') tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None,name='Conv2d_5b_b3_0b_1x1'))) tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3) net = repeat(tower_5b_out, 10, block35, scale=0.17) tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3'))) tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1'))) tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3'))) tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3'))) tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3') net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3) net = repeat(net, 20, block17, scale=0.1) tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1'))) # tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1'))) tower_conv0_1 = 
relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1'))) tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1'))) # tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,3, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3'))) tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,1, bias=False, strides=2, padding='VALID',activation=None, name='COnv2d_1a_3x3'))) tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1'))) tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None))) # tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3'))) tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3'))) # tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3') tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3') net = merge([tower_conv0_1, tower_conv1_1,tower_conv2_2, tower_pool], mode='concat', axis=3) net = repeat(net, 9, block8, scale=0.2) net = block8(net, activation=None) net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1'))) net = avg_pool_2d(net, net.get_shape().as_list()[1:3],strides=2, padding='VALID', name='AvgPool_1a_8x8') net = flatten(net) net = dropout(net, dropout_keep_prob) loss = fully_connected(net, num_classes,activation='softmax') network = tflearn.regression(loss, optimizer='RMSprop', loss='categorical_crossentropy', learning_rate=0.0001, name='targets') return network
def initialize_network(): img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping, rotating and blurring the # images on our data set. img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) # Define our network architecture: # Input is a 32x32 image with 3 color channels (red, green and blue) network = input_data(shape=[None, 128, 128, 3], data_preprocessing=img_prep, data_augmentation=img_aug) dropout_chance = 0.5 print("Step 1") # Step 1: Convolution network = conv_2d(network, 32, 3, activation='relu') print("Step 2") # Step 2: Max pooling network = max_pool_2d(network, 2) #network = dropout(network, dropout_chance) print("Step 3") # Step 3: Convolution again network = conv_2d(network, 64, 3, activation='relu') #network = max_pool_2d(network, 2) #network = dropout(network, dropout_chance) print("Step 4") # Step 4: Convolution yet again network = conv_2d(network, 64, 4, activation='relu') #network = max_pool_2d(network, 2) #network = dropout(network, dropout_chance) print("Step 5") # Step 5: Max pooling again network = max_pool_2d(network, 2) print("Step 6") # Step 6: Fully-connected 512 node neural network network = fully_connected(network, 512, activation='relu') print("Step 7") # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting #network = dropout(network, dropout_chance) # Step 6: Fully-connected 512 node neural network #network = fully_connected(network, 512, activation='relu') print("Step 7") # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting network = dropout(network, dropout_chance) print("Step 8") # Step 8: Fully-connected neural network with two outputs (0=isn't a bird, 1=is a bird) to make the final prediction network = fully_connected(network, 7, activation='softmax') print("Step 9") # Tell tflearn how we want to train the network network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0) return model
# Real-time data preprocessing and normalizing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# One-hot encoded output: transform the labels to a binary matrix of width 10,
# since there are 10 classes
Y = to_categorical(Y, 10)
Y_test = to_categorical(Y_test, 10)

# Creating extra synthetic data by flipping, rotating and blurring the images
# (real-time data augmentation)
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

# Defining the network architecture:
# input is a 32 by 32 image with 3 color channels (RGB)
# Convolutional network building
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

# Step 1: convolution
network = conv_2d(network, 32, 3, activation='relu')
# Step 2: max pooling
network = max_pool_2d(network, 2)
# Step 3: convolution
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
from tflearn.data_augmentation import ImageAugmentation

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_crop([224, 224], 10)
img_aug.add_random_blur()
img_aug.add_random_rotation(max_angle=25.)

X, Y = image_preloader('files_list', image_shape=(224, 224), mode='file',
                       categorical_labels=True, normalize=True,
                       files_extension=['.jpg', '.jpeg', '.png'])

network = input_data(shape=[None, 224, 224, 3], data_augmentation=img_aug)
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
# Loading dataset
X_train, Y_train, X_test, Y_test = pickle.load(open("full_dataset.pkl", "rb"))

# Shuffle dataset
X_train, Y_train = shuffle(X_train, Y_train)

# Normalise dataset
preprocessed_img = ImagePreprocessing()
preprocessed_img.add_featurewise_zero_center()
preprocessed_img.add_featurewise_stdnorm()

# Augmenting dataset
augmented_image = ImageAugmentation()
augmented_image.add_random_flip_leftright()
augmented_image.add_random_rotation(max_angle=25.)
augmented_image.add_random_blur(sigma_max=3.)

# Network
neural_net = input_data(shape=[None, 32, 32, 3],
                        data_preprocessing=preprocessed_img,
                        data_augmentation=augmented_image)
neural_net = conv_2d(neural_net, 32, 3, activation='relu')
neural_net = max_pool_2d(neural_net, 2)
neural_net = conv_2d(neural_net, 64, 3, activation='relu')
neural_net = conv_2d(neural_net, 64, 3, activation='relu')
def label_on_fly(im, model_name,cwd_checkpoint, stride, box_size): '''Converts pixels of image into labels Goes through smaller image and prepares it in the same way the images were prepared for training the model which will be used to predict a label Goes through the image line by line to avoid using too much memory :param PIL image im: for prediction, :parammodel to load, :param path to model, :param stride of scanning image :param box_size :return: rgb values of the label for each pixel :rtype: int tuples ''' input_size = box_size kernel_size = get_kernel_size(input_size) # Real-time data preprocessing im_prep = ImagePreprocessing() im_prep.add_featurewise_zero_center() im_prep.add_featurewise_stdnorm() # Real-time data augmentation im_aug = ImageAugmentation() im_aug.add_random_flip_leftright() im_aug.add_random_rotation(max_angle=360.) im_aug.add_random_blur(sigma_max=3.) im_aug.add_random_flip_updown() # Convolutional network building network = input_data(shape=[None, input_size, input_size, 3], data_preprocessing=im_prep, data_augmentation=im_aug) network = conv_2d(network, input_size/2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size*2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, input_size*2*2, kernel_size, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, 6, activation='softmax') network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) # Defining model model = tflearn.DNN(network, tensorboard_verbose=0,tensorboard_dir=cwd_checkpoint,checkpoint_path=cwd_checkpoint) print('Loading model:', model_name) model.load(model_name) print('Sucessfully loaded model') max_box_size = box_size labels_predcited = [] #Define the width and the height of the image to be cut up in smaller images width, height = im.size box = 0.2*width,0.2*height,0.8*width,0.8*height im = im.crop(box) width, height = im.size #Go through the height (y-axes) of the image for i in xrange(int((height-max_box_size)/stride)): center_point_y = max_box_size/2+i*stride im_temp = [] predictions_temp = [] labels_temp = [] #Go through the width (x-axes) of the image using the same centerpoint independent of boxsize for j in xrange(int((width- max_box_size)/stride)): center_point_x = max_box_size/2+j*stride box = center_point_x-box_size/2, center_point_y-box_size/2, center_point_x+box_size/2,center_point_y+box_size/2 im_temp.append(im.crop(box)) predictions_temp = model.predict(im_temp) #labels_temp = [colorizer(get_label_from_cnn(predictions_temp[k])) for k in xrange(len(predictions_temp))] labels_temp = [get_label_from_cnn(predictions_temp[k]) for k in xrange(len(predictions_temp))] labels_predcited.append(labels_temp) print('Line %s done' % str(i)) labels_final = [item for m in xrange(len(labels_predcited)) for item in labels_predcited[m]] return(labels_final)
images, labels = shuffle(images, labels)
images_test, labels_test = shuffle(images_test, labels_test)

# create preprocessor to normalize images
img_preprocessor = ImagePreprocessing()
img_preprocessor.add_featurewise_zero_center()
img_preprocessor.add_featurewise_stdnorm()

# distort images
img_distortion = ImageAugmentation()
# only flip left/right for shape training
# img_distortion.add_random_flip_leftright()
img_distortion.add_random_rotation(max_angle=360)
img_distortion.add_random_blur(sigma_max=1.)

###
### network architecture
###
network = input_data(shape=[None, 64, 64, 1],
                     data_preprocessing=img_preprocessor,
                     data_augmentation=img_distortion)

# convolution
network = conv_2d(network, 64, 4, activation='relu')

# max pooling
'''
# preProc = tflearn.DataPreprocessing()
# preProc.add_custom_preprocessing(ResizeImg)
# images = np.reshape(images, (-1, 128, 128, 1))
# labels = np.reshape(labels, (-1, 2))

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation()
img_aug.add_random_blur()

acc = tflearn.metrics.Accuracy()

network = input_data(shape=[None, 227, 227, 3], data_augmentation=img_aug)
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
X, Y, X_test, Y_test = pickle.load(open("full_dataset.pkl", "rb"), encoding='latin1')

# Shuffle the data
X, Y = shuffle(X, Y)

# Make sure the data is normalized
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Create extra synthetic training data by flipping, rotating and blurring the
# images on our data set.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

# Define our network architecture:
# Input is a 32x32 image with 3 color channels (red, green and blue)
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

# Step 1: Convolution
network = conv_2d(network, 32, 3, activation='relu')

# Step 2: Max pooling
network = max_pool_2d(network, 2)

# Step 3: Convolution again
import pickle

X, Y, X_test, Y_test = pickle.load(open("full_dataset.pkl", "rb"))
X, Y = shuffle(X, Y)

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

# define network architecture
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
def createCNN(args): size_entry_filter = 32 size_mid_filter = 64 filter_size = 3 color_channels = 3 model_main_activation = 'relu' model_exit_activation = 'softmax' max_pool_kernel_size = 2 model_learning_rate = 0.001 num_classes = args['num_classes'] ################################### # Image transformations ################################### # normalisation of images img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping & rotating images img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) ################################### # Define network architecture ################################### # Input is a image_size x image_size image with 3 color channels (red, green and blue) network = input_data( shape=[None, args['size'], args['size'], color_channels], data_preprocessing=img_prep, data_augmentation=img_aug) # 1: Convolution layer with 32 filters, each 3x3x3 network = conv_2d(network, size_entry_filter, filter_size, activation=model_main_activation) # 2: Max pooling layer network = max_pool_2d(network, max_pool_kernel_size) # 3: Convolution layer with 64 filters network = conv_2d(network, size_mid_filter, filter_size, activation=model_main_activation) # 4: Convolution layer with 64 filters network = conv_2d(network, size_mid_filter, filter_size, activation=model_main_activation) # 5: Max pooling layer network = max_pool_2d(network, max_pool_kernel_size) # 6: Convolution layer with 64 filters network = conv_2d(network, size_mid_filter, filter_size, activation=model_main_activation) # 7: Convolution layer with 64 filters network = conv_2d(network, size_mid_filter, filter_size, activation=model_main_activation) # 8: Max pooling layer network = max_pool_2d(network, max_pool_kernel_size) # 9: Fully-connected 512 node layer network = fully_connected(network, 512, activation=model_main_activation) # 10: Dropout layer to combat overfitting network = dropout(network, 0.5) # 11: Fully-connected layer with two outputs network = fully_connected(network, num_classes, activation=model_exit_activation) # Configure how the network will be trained acc = Accuracy(name="Accuracy") network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=model_learning_rate, metric=acc) #main_dir = os.path.dirname(os.path.dirname(os.getcwd())) # Wrap the network in a model object model = tflearn.DNN(network, tensorboard_verbose=0, max_checkpoints=2, best_checkpoint_path=os.path.join( os.path.dirname(os.path.dirname(os.getcwd())), args['id'] + '.tfl.ckpt'), best_val_accuracy=args['accuracy']) return model
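# A minimal usage sketch (not from the original source): createCNN above expects an
# args dict with 'num_classes', 'size', 'id' and 'accuracy' keys; the values below are
# illustrative assumptions only.
def build_example_model():
    args = {'num_classes': 2, 'size': 32, 'id': 'example-run', 'accuracy': 0.9}
    model = createCNN(args)
    return model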
def network(img_shape, name, LR):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3.0)
    img_aug.add_random_90degrees_rotation(rotations=[0, 2])

    network = input_data(shape=img_shape, name=name,
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1)
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
    # Merge the inception_3a_* branches
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3,
                                 inception_3a_5_5, inception_3a_pool_1_1],
                                mode='concat', axis=3)

    inception_3b_1_1 = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')
    # Merge the inception_3b_* branches
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3,
                                 inception_3b_5_5, inception_3b_pool_1_1],
                                mode='concat', axis=3, name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3,
                                 inception_4a_5_5, inception_4a_pool_1_1],
                                mode='concat', axis=3, name='inception_4a_output')

    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3,
                                 inception_4b_5_5, inception_4b_pool_1_1],
                                mode='concat', axis=3, name='inception_4b_output')

    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3,
                                 inception_4c_5_5, inception_4c_pool_1_1],
                                mode='concat', axis=3, name='inception_4c_output')

    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3,
                                 inception_4d_5_5, inception_4d_pool_1_1],
                                mode='concat', axis=3, name='inception_4d_output')

    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3,
                                 inception_4e_5_5, inception_4e_pool_1_1],
                                axis=3, mode='concat')

    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')
    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3,
                                 inception_5a_5_5, inception_5a_pool_1_1],
                                axis=3, mode='concat')

    inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3, activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3,
                                 inception_5b_5_5, inception_5b_pool_1_1],
                                axis=3, mode='concat')

    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    # class_num is assumed to be defined in the enclosing scope
    # (e.g. 2 for the binary classification used elsewhere in this file).
    loss = fully_connected(pool5_7_7, class_num, activation='softmax')
    network = regression(loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=LR, name='targets')
    return network
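A minimal sketch of training the GoogLeNet-style network returned above, assuming class_num is defined at module level; the input shape, epoch count, batch size, run id and file name are illustrative assumptions, and the placeholder arrays only show the expected shapes.

import numpy as np
import tflearn
from tflearn.data_utils import to_categorical

class_num = 2  # assumed binary task, as elsewhere in this file
net = network([None, 227, 227, 3], 'input', LR=0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)

# Placeholder data just to illustrate the feed dicts; real images/labels go here.
X = np.random.rand(32, 227, 227, 3).astype('float32')
Y = to_categorical(np.random.randint(0, class_num, 32), class_num)

model.fit({'input': X}, {'targets': Y}, n_epoch=1,
          show_metric=True, batch_size=8, run_id='googlenet-run')
model.save('googlenet.tflearn')  # hypothetical file name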