def res_graph(X):
    """Classify X with the pretrained ResNet cat/dog model and return predictions."""
    # Feature-wise normalization applied to every input image.
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    # Augmentation pipeline (active during training only; harmless at inference).
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    aug.add_random_crop([64, 64], padding=4)
    depth = 5  # residual blocks per stage
    x = input_data(shape=[None, 64, 64, 3],
                   data_preprocessing=prep,
                   data_augmentation=aug)
    x = tflearn.conv_2d(x, 16, 3, regularizer='L2', weight_decay=0.0001)
    # Three residual stages: 16 -> 32 -> 64 filters, downsampling between stages.
    x = tflearn.residual_block(x, depth, 16)
    x = tflearn.residual_block(x, 1, 32, downsample=True)
    x = tflearn.residual_block(x, depth - 1, 32)
    x = tflearn.residual_block(x, 1, 64, downsample=True)
    x = tflearn.residual_block(x, depth - 1, 64)
    x = tflearn.batch_normalization(x)
    x = tflearn.activation(x, 'relu')
    x = tflearn.global_avg_pool(x)
    x = tflearn.fully_connected(x, 2, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    x = tflearn.regression(x, optimizer=mom, loss='categorical_crossentropy')
    model = tflearn.DNN(x)
    # rnn typo -> res (checkpoint file name kept as saved on disk)
    model.load('model\\res\\jun_rnn_cat_dog.tflearn')
    return model.predict(X)
def simple_graph(X):
    """Run the pretrained 'simple' CNN cat/dog classifier on X and return predictions."""
    # Feature-wise normalization of inputs.
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    # Augmentation settings (only used while training).
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    aug.add_random_crop([64, 64], padding=4)
    net = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    # Four conv/max-pool stages, then a dense head.
    for filters in (32, 64, 64, 64):
        net = conv_2d(net, filters, 3, activation='relu')
        net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0005)
    simple_model = tflearn.DNN(net)
    simple_model.load('model\\simple\\jun_simple_cat_dog_final.tflearn')
    return simple_model.predict(X)
def createNetwork(self, input_size):
    """Assemble and return the (untrained) tflearn graph for 4-class classification.

    input_size is both the image side length and the base filter count.
    """
    # NOTE: preprocessing/augmentation objects are constructed but NOT wired
    # into input_data below — the original kept that hookup disabled.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    # Input: square RGB images of side input_size.
    net = input_data(shape=[None, input_size, input_size, 3])
    net = conv_2d(net, input_size, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, input_size * 2, 3, activation='relu')
    net = conv_2d(net, input_size * 2, 3, activation='relu')
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4, activation='softmax')
    return regression(net, optimizer='adam',
                      loss='categorical_crossentropy',
                      learning_rate=0.001)
def create_network(optimiser):
    """Build a CIFAR-style 10-class CNN; `optimiser` is forwarded to regression()."""
    # Per-sample and per-feature standard normalization.
    prep = ImagePreprocessing()
    prep.add_samplewise_stdnorm()
    prep.add_featurewise_stdnorm()
    # Synthetic variance: random blur, horizontal flips and small rotations.
    aug = ImageAugmentation()
    aug.add_random_blur(sigma_max=3)
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 10, activation='softmax')
    return regression(net, optimizer=optimiser,
                      loss='categorical_crossentropy',
                      learning_rate=0.002)
def network(): tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.8) # Normalization of the data img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create random new data (more you have, better is) img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) img_aug.add_random_blur(sigma_max=3.) #Input network must match inputs of the data set network = input_data(shape=[None, 100, 100, 3], data_preprocessing=img_prep, data_augmentation=img_aug) """ Creation of the different hidden layers ================ Editing section ================ """ network = conv_2d(network, 64, 3, strides=2, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, 64, 3, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, 64, 2, activation='relu') network = conv_2d(network, 64, 2, activation='relu') network = max_pool_2d(network, 2) #Fully connected layer then we drop a part of the data in order to not overfit network = fully_connected(network, 4096, activation='relu') network = dropout(network, 0.7) """ ====================== End of Editing section ====================== """ network = fully_connected(network, 120, activation='softmax') # Training hyper-parameters network = regression(network, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001) #Creation of the deep neural network with the back up name #tensorboard_verbose=0 is the most optimal for the calculation time model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='dog_classifier.tfl.ckpt') return model
def CNN_Model_Creation():
    """Build and return an (untrained) binary classifier DNN over 512x512 RGB
    images (0 = isn't a cancer, 1 = is a cancer)."""
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    # Define our network architecture:
    # Input is a 512x512 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 512, 512, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # Step 1: Convolution
    network = conv_2d(network, 32, 5, activation='relu')
    network = conv_2d(network, 32, 3, activation='relu')
    # Step 2: Max pooling
    network = max_pool_2d(network, 2)
    # Step 3: Convolution again
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 4: Convolution yet again
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 5: Max pooling again
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 96, 2, activation='relu')
    network = max_pool_2d(network, 2)
    # Step 6: Fully-connected 512 node neural network
    network = fully_connected(network, 512, activation='relu')
    # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
    network = dropout(network, 0.5)
    # Step 8: Fully-connected neural network with two outputs (0=isn't a cancer, 1=is a cancer) to make the final prediction
    network = fully_connected(network, 2, activation='softmax')
    #momentum = tflearn.optimizers.Momentum(learning_rate=0.05, momentum=0.7, lr_decay=0.5)
    # Tell tflearn how we want to train the network
    network = regression(network, optimizer='adam', loss='categorical_crossentropy')
    # Wrap the network in a model object
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
def alex_graph(X):
    """Run the pretrained AlexNet-style cat/dog classifier on X and return predictions."""
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    aug.add_random_crop([64, 64], padding=4)
    net = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=prep, data_augmentation=aug)
    # AlexNet-like feature extractor: conv / pool / LRN stacks.
    net = conv_2d(net, 64, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: two tanh FC layers with dropout, then a 2-way softmax.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0005)
    alex_model = tflearn.DNN(net)
    alex_model.load('model\\alex\\jun_ALEX_cat_dog_final.tflearn')
    return alex_model.predict(X)
def build_model():
    """Build and return an (untrained) DNN wrapping the standard 32x32 RGB
    10-class CNN (conv-pool-conv-conv-pool + dense head)."""
    # Feature-wise normalization of the inputs.
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    # Horizontal flips and small rotations for training-time augmentation.
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 10, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
    return tflearn.DNN(net, tensorboard_verbose=0)
def data_augmentation():
    """Return an ImageAugmentation doing random horizontal flips and
    rotations of up to 25 degrees."""
    augmenter = ImageAugmentation()
    augmenter.add_random_flip_leftright()
    augmenter.add_random_rotation(max_angle=25.)
    return augmenter
def net_nodule2d_swethasubramanian(image_dims):
    """Build a 2-class nodule classifier network.

    image_dims: sequence of four values used to build the input shape.
    NOTE(review): the input placeholder is 5-D ([None, d0, d1, d2, d3]) but
    feeds conv_2d layers, which take 4-D input — confirm image_dims is what
    the conv layers actually expect.
    """
    # image augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    # image pre-processing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = layers.core.input_data(shape=[None, image_dims[0], image_dims[1],
                                        image_dims[2], image_dims[3]],
                                 dtype=tf.float32,
                                 data_preprocessing=img_prep,
                                 data_augmentation=img_aug)
    net = layers.conv.conv_2d(net, 50, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.core.fully_connected(net, 512, activation='relu')
    net = layers.core.dropout(net, 0.5)
    net = layers.core.fully_connected(net, 2, activation='softmax')
    net = layers.estimator.regression(net, optimizer='adam',
                                      loss='categorical_crossentropy',
                                      learning_rate=0.001)
    return net
def create_own_model():
    """Build a small CNN classification graph over grayscale images.

    Uses the module-level globals img_width, img_height and nb_classes.
    Returns the tflearn regression layer, ready to wrap in a DNN.
    """
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    convnet = input_data(shape=[None, img_width, img_height, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    convnet = conv_2d(convnet, 28, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 28, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = fully_connected(convnet, 512, activation='relu')
    # BUG FIX: the dropout result was assigned to a typo'd name ('oonvnet'),
    # which silently dropped the dropout layer from the graph.
    convnet = dropout(convnet, 0.2)
    convnet = fully_connected(convnet, nb_classes, activation='softmax')
    model = regression(convnet, optimizer='adam', learning_rate=0.001,
                       loss='categorical_crossentropy', name='targets')
    return model
def augmentation(self, max_angle=5., sigma_max=3., flip_left_right=True,
                 random_rotation=True, random_blur=True):
    """Configure self.data_augmentation from the selected augmentation flags.

    :param max_angle: max rotation angle (degrees) when random_rotation is on
    :param sigma_max: max blur sigma when random_blur is on
    :param flip_left_right: add left/right-flipped copies of training images
    :param random_rotation: add randomly rotated copies of training images
    :param random_blur: add randomly blurred copies of training images
    :raises ValueError: if every augmentation flag is False
    """
    # BUG FIX: the original wrapped the setup in try/except ValueError, but
    # the raise happened *before* the try, so the handler (and its
    # "No augmentation selected!" message) was dead code. Fail fast instead.
    if not any([flip_left_right, random_blur, random_rotation]):
        raise ValueError("No augmentation selected!")
    image_aug = ImageAugmentation()
    if flip_left_right:
        # adds left- and right-flipped images to the training data
        image_aug.add_random_flip_leftright()
    if random_rotation:
        # rotates random training data by a specified angle (degrees)
        image_aug.add_random_rotation(max_angle=max_angle)
    if random_blur:
        # blurs random training data by a specified sigma
        image_aug.add_random_blur(sigma_max=sigma_max)
    self.data_augmentation = image_aug
def create_model(nb_classes):
    """Build a small strided CNN over 60x60 grayscale inputs with an
    nb_classes-way softmax output; returns the regression layer."""
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    net = input_data(shape=[None, 60, 60, 1],
                     data_preprocessing=prep, data_augmentation=aug)
    # Two identical strided conv + pool stages.
    for _ in range(2):
        net = conv_2d(net, 30, 3, strides=2, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
    net = fully_connected(net, 128, activation='relu')
    net = fully_connected(net, nb_classes, activation='softmax')
    return regression(net, optimizer='adam',
                      loss='categorical_crossentropy',
                      learning_rate=0.001)
def build_network(self): ''' This method builds the network architecture for the CNN model. Modify as needed. ''' # Image Preprocessing img_preprocess = DataPreprocessing() img_preprocess.add_samplewise_zero_center() img_preprocess.add_featurewise_stdnorm() # Image Augmentation img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=5.0) # Input Layer self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], data_preprocessing=img_preprocess, data_augmentation=img_aug) # Convolution Layer 1 self.network = conv_2d(self.network, 64, 5, activation='relu') self.network = batch_normalization(self.network) self.network = max_pool_2d(self.network, 3, strides=2) # Convolution Layer 2 self.network = conv_2d(self.network, 64, 5, activation='relu') self.network = batch_normalization(self.network) self.network = max_pool_2d(self.network, 3, strides=2) # Convolution Layer 3 self.network = conv_2d(self.network, 128, 4, activation='relu') self.network = batch_normalization(self.network) self.network = dropout(self.network, 0.2) # Penultimate FC Layer self.network = fully_connected(self.network, 3072, activation='relu') self.network = batch_normalization(self.network) # Final FC Layer self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax') # Create network optimizer = Momentum(learning_rate=0.01, lr_decay=0.99, decay_step=250) # Learning function self.network = regression(self.network, optimizer=optimizer, loss='categorical_crossentropy') # Create model self.model = tflearn.DNN(self.network, tensorboard_dir=TENSORBOARD_PATH, checkpoint_path=CHECKPOINT_PATH, max_checkpoints=1, tensorboard_verbose=1)
def snack_detection_worker(): global snack_image # image preprocessors for neural network input img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Create extra synthetic training data by flipping & rotating images img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) # setting up neural network network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug) # 1: Convolution layer with 32 filters, each 3x3x3 network = conv_2d(network, 32, 5, activation='relu') network = max_pool_2d(network, 5) network = conv_2d(network, 64, 5, activation='relu') network = max_pool_2d(network, 5) network = conv_2d(network, 128, 5, activation='relu') network = max_pool_2d(network, 5) network = conv_2d(network, 64, 5, activation='relu') network = max_pool_2d(network, 5) network = conv_2d(network, 32, 5, activation='relu') network = max_pool_2d(network, 5) network = fully_connected(network, 1024, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 2, activation='softmax') network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy') #TODO: change checkpoint path model = tflearn.DNN(network, checkpoint_path='model_cat_dog_7.tflearn', max_checkpoints=3, tensorboard_verbose=3, tensorboard_dir='tmp/tflearn_logs/') model.load('reference code/model_cat_dog_6_final.tflearn') while True: #print (image) image = cv2.resize(snack_image, (64, 64)) if (image.all() != 0): print("detecting snacks") test(model, image)
def main(_):
    """Train the CIFAR-10 CNN on data from FLAGS.buckets and save the model
    under FLAGS.checkpointDir.

    WARNING: deletes FLAGS.checkpointDir recursively before training.
    """
    print(FLAGS.buckets)
    print(FLAGS.checkpointDir)
    print(FLAGS.test_para)
    # Start with a clean checkpoint directory.
    if tf.gfile.Exists(FLAGS.checkpointDir):
        tf.gfile.DeleteRecursively(FLAGS.checkpointDir)
    tf.gfile.MakeDirs(FLAGS.checkpointDir)
    dirname = os.path.join(FLAGS.buckets, "")
    (X, Y), (X_test, Y_test) = load_data(dirname)
    print("load data done")
    X, Y = shuffle(X, Y)
    # One-hot encode the 10-class labels.
    Y = to_categorical(Y, 10)
    Y_test = to_categorical(Y_test, 10)
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=50, shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True, batch_size=96, run_id='cifar10_cnn')
    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")
    print(model_path)
    model.save(model_path)
def augmentation(self):
    """Create extra synthetic training data by flipping, rotating and
    blurring the images in our data set.

    Returns the configured ImageAugmentation object.
    """
    augmenter = ImageAugmentation()
    augmenter.add_random_flip_leftright()
    augmenter.add_random_rotation(max_angle=25.)
    augmenter.add_random_blur(sigma_max=3.)
    return augmenter
def main(data_dir, hdf5, name):
    """Train the bird/not-bird CNN on data from data_dir and save it as
    bird-classifier.tfl.

    NOTE(review): batch_size, num_epochs and learning_rate below are unused —
    fit() hardcodes n_epoch=100 / batch_size=96 and regression() hardcodes
    learning_rate=0.001. The `name` parameter is also unused.
    """
    batch_size = 256
    num_epochs = 10
    learning_rate = 0.001
    X, Y, X_test, Y_test = get_data(data_dir, hdf5)
    X, Y = shuffle(X, Y)
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # Step 1: Convolution
    network = conv_2d(network, 32, 3, activation='relu')
    # Step 2: Max pooling
    network = max_pool_2d(network, 2)
    # Step 3: Convolution
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 4: Convolution
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 5: Max pooling
    network = max_pool_2d(network, 2)
    # Step 6: Fully-connected 512 node neural network
    network = fully_connected(network, 512, activation='relu')
    # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
    network = dropout(network, 0.5)
    # Step 8: Fully-connected neural network with two outputs (0=isn't a bird, 1=is a bird) to make the final prediction
    network = fully_connected(network, 2, activation='softmax')
    # Tell tflearn how we want to train the network
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    # Wrap the network in a model object
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        checkpoint_path='bird-classifier.tfl.ckpt')
    # Train it! We'll do 100 training passes and monitor it as it goes.
    model.fit(X, Y, n_epoch=100, shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True, batch_size=96,
              snapshot_epoch=True, run_id='bird-classifier')
    # Save model when training is complete to a file
    model.save("bird-classifier.tfl")
    print("Network trained and saved as bird-classifier.tfl!")
def net(X, Y, save_model=False): tflearn.config.init_graph(gpu_memory_fraction=1) # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) # Real-time data preprocessing img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # use Transfer learning for better performance # Building convolutional network network = input_data(shape=[None, 48, 48, 3], name='input') network = conv_2d(network, 32, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = max_pool_2d(network, 2) network = local_response_normalization(network) network = conv_2d(network, 2, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = conv_2d(network, 2, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = local_response_normalization(network) network = fully_connected(network, 328, activation='relu') network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 256, activation='relu') network = fully_connected(network, 128, activation='relu') # network = dropout(network, 0.8) network = fully_connected(network, len(set(Y)) + 1, activation='softmax') network = regression(network, optimizer='adam', learning_rate=0.001, loss='softmax_categorical_crossentropy', name='target') # Training model = tflearn.DNN(network, tensorboard_verbose=3) # print(Y) print(np.eye(len(set(Y)) + 1)[Y]) model.fit({'input': X}, {'target': np.array(np.eye(len(set(Y)) + 1)[Y])}, n_epoch=15, batch_size=50, validation_set=0.3, snapshot_step=15000, show_metric=True, run_id='face_recogn') if os.path.exists('fr.tflearn.index'): print('Loading pre-trained model') model.load('fr.tflearn') if save_model: model.save('fr.tflearn') return model
def train_rec():
    """Train a 10-digit MNIST CNN and return the fitted tflearn model.

    Side effects: publishes the input and pre-regression output tensors via
    the module-level globals rec_input / rec_network.
    """
    X, Y, X_test, Y_test = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    X_test = X_test.reshape([-1, 28, 28, 1])
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation (no horizontal flips: digits are not
    # mirror-symmetric).
    img_aug = ImageAugmentation()
    img_aug.add_random_rotation(max_angle=25.)
    # Convolutional network building
    inputs = input_data(
        shape=[None, 28, 28, 1],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name="inputs")
    network = conv_2d(inputs, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.6)
    # BUG FIX: the output layer used activation='relu', but
    # categorical_crossentropy expects a probability distribution over the
    # 10 classes — use softmax.
    network = fully_connected(network, 10, activation='softmax')
    network = regression(
        network,
        optimizer='adam',
        loss='categorical_crossentropy',
        learning_rate=0.001)
    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=3)
    global rec_input, rec_network
    rec_input, rec_network = inputs, network
    model.fit(
        X, Y, n_epoch=20, shuffle=True,
        validation_set=(X_test, Y_test),
        show_metric=True, batch_size=128, run_id='mnist')
    return model
def build_network(output_dims=None):
    """Build one tflearn graph per entry of output_dims (a list of class counts).

    Networks share a common conv trunk; the i-th one receives i extra
    conv/conv/pool stages before its own FC head. Returns a single network
    when output_dims has exactly one entry, otherwise the list of networks.
    """
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    networks = []
    for i, output_dim in enumerate(output_dims):
        # BUG FIX: the original used xrange, which does not exist on
        # Python 3; range behaves identically for iteration.
        for j in range(i):
            network = conv_2d(network, 64, 3, activation='relu',
                              name="unique_Conv2D_{}".format(3 + 2 * j))
            network = conv_2d(network, 64, 3, activation='relu',
                              name="unique_Conv2D_{}".format(3 + 2 * j + 1))
            network = max_pool_2d(network, 2)
        network = fully_connected(network, 512, activation='relu',
                                  name="unique_FullyConnected")
        network = dropout(network, 0.5)
        cur_network = fully_connected(network, output_dim,
                                      activation='softmax',
                                      name="unique_FullyConnected_1")
        cur_network = regression(cur_network, optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.001)
        networks.append(cur_network)
    if len(networks) == 1:
        return networks[0]
    return networks
def load_model(
        model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
    '''
    Load the trained tensorflow model

    Args:
        model_path (str) : path to particle-classifier
                           e.g. '/mnt/ARRAY/classifier/model/particle-classifier.tfl'

    Returns:
        model (tf model object) : loaded tfl model
        class_labels : column names of header.tfl.txt (one per output class)
    '''
    path, filename = os.path.split(model_path)
    # The number of output classes is derived from the saved header file.
    header = pd.read_csv(os.path.join(path, 'header.tfl.txt'))
    OUTPUTS = len(header.columns)
    class_labels = header.columns
    tf.reset_default_graph()
    # Same network definition as in tfl_tools scripts — must match the
    # architecture the checkpoint was trained with.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.75)
    network = fully_connected(network, OUTPUTS, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        checkpoint_path=model_path)
    model.load(model_path)
    return model, class_labels
def generate_network(self):
    """ Return tflearn cnn network.

    Reads self.image_size (list), self.n_epoch (int), self.batch_size (int),
    self.person_ids (list) and self.person_num / self.IMAGE_CHANNEL_NUM.
    Raises ValueError when the configuration attributes have the wrong types.
    """
    print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
    print(type(self.image_size), type(self.n_epoch),
          type(self.batch_size), type(self.person_ids))
    # Validate configuration types before building the graph.
    if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
        # if self.image_size is None or self.n_epoch is None or \
        #         self.batch_size is None or self.person_ids is None:
        raise ValueError("Insufficient values to generate network.\n"
                         "Need (n_epoch, int), (batch_size, int),"
                         "(image_size, list), (person_ids, list).")
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_flip_leftright()
    # Convolutional network building; filter counts scale with image width.
    network = input_data(
        shape=[None, self.image_size[0], self.image_size[1], 3],
        data_preprocessing=img_prep,
        data_augmentation=img_aug)
    network = conv_2d(network, self.image_size[0],
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, self.image_size[0] * 2,
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = conv_2d(network, self.image_size[0] * 2,
                      self.IMAGE_CHANNEL_NUM, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, self.image_size[0] * 2**4,
                              activation='relu')
    network = dropout(network, 0.5)
    # One softmax output per known person.
    network = fully_connected(network, self.person_num, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
def load_model():
    """Load the pretrained 3-class snack model (chips/drinks/chocs) into the
    module-level `model` global."""
    global model
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()
    # Flip/rotate augmentation (training-time only).
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    net = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    # Five conv/pool stages with a 32-64-128-64-32 filter pyramid.
    for filters in (32, 64, 128, 64, 32):
        net = conv_2d(net, filters, 5, activation='relu')
        net = max_pool_2d(net, 5)
    net = fully_connected(net, 1024, activation='relu')
    net = dropout(net, 0.8)
    net = fully_connected(net, 3, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=1e-3,
                     loss='categorical_crossentropy')
    # TODO: change checkpoint path
    model = tflearn.DNN(
        net,
        checkpoint_path='model_chips_drinks_chocs_canteen_cp.tflearn',
        max_checkpoints=3,
        tensorboard_verbose=3,
        tensorboard_dir='tmp/tflearn_logs/')
    model.load('training/model_chips_drinks_chocs_canteen.tflearn')
def train_model(images, labels, input_size, kernel_size, cwd_data,
                cwd_checkpoint, run_name, num_epoch=40, num_labels=6):
    '''Trains a CNN network and saves the trained model in cwd_checkpoint path

    :param np.float32 images: RGB images with shape (training_size, input_size, input_size, 3)
    :param np.float32 labels: labels with shape (training_size, num_labels)
    :param int input_size: width and height of images
    :param int kernel_size: kernel size of network
    :param str cwd_data: path to data folder (also used for tensorboard logs)
    :param str cwd_checkpoint: path to checkpoint folder
    :param str run_name: run id used for tensorboard logging
    :param int num_epoch: number of epochs model should train for
    :param int num_labels: number of classes the network trains for
    '''
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=360.)
    img_aug.add_random_blur(sigma_max=3.)
    img_aug.add_random_flip_updown()

    # Convolutional network building
    network = input_data(shape=[None, input_size, input_size, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # BUG FIX: floor division — on Python 3, `input_size / 2` is a float,
    # which is not a valid filter count for conv_2d.
    network = conv_2d(network, input_size // 2, kernel_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, input_size, kernel_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, input_size * 2, kernel_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, input_size * 4, kernel_size, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, num_labels, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.001)

    # Train using classifier; keep the 2 most recent checkpoints.
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        tensorboard_dir=cwd_data,
                        checkpoint_path=cwd_checkpoint,
                        max_checkpoints=2)
    model.fit(images, labels, n_epoch=num_epoch, validation_set=0.1,
              show_metric=True, run_id=run_name, snapshot_epoch=True)
def vgg_graph(X):
    """Run the saved VGG16-style cat/dog classifier on X and return its
    softmax predictions."""
    tflearn.config.init_training_mode()
    tf.reset_default_graph()

    # Input normalization
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Augmentation pipeline (only relevant at training time)
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    aug.add_random_crop([64, 64], padding=4)

    net = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    # VGG16 convolutional trunk: (filter count, number of conv layers)
    for n_filters, n_convs in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(n_convs):
            net = conv_2d(net, n_filters, 3, activation='relu')
        net = max_pool_2d(net, 2, strides=2)
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='rmsprop',
                     loss='categorical_crossentropy', learning_rate=0.0001)

    vgg_model = tflearn.DNN(net)
    vgg_model.load('model\\vgg\\jun_vgg_cat_dog_final.tflearn')
    return vgg_model.predict(X)
def get_data():
    """Load CIFAR-10 from the pickled python batches, one-hot the labels,
    and build the preprocessing / augmentation pipelines.

    Returns (X, Y, X_test, Y_test, img_prep, img_aug).
    """
    data_norm = True
    data_augmentation = True

    # The pickled CIFAR-10 batches use bytes keys under Python 3.
    batch_files = ('../cifar-10-batches-py/data_batch_1',
                   '../cifar-10-batches-py/data_batch_2',
                   '../cifar-10-batches-py/data_batch_3',
                   '../cifar-10-batches-py/data_batch_4',
                   '../cifar-10-batches-py/data_batch_5')
    batches = [unpickle(name) for name in batch_files]
    X = np.concatenate([get_proper_images(b[b'data']) for b in batches])
    Y = np.concatenate([onehot_labels(b[b'labels']) for b in batches])

    test_batch = unpickle('../cifar-10-batches-py/test_batch')
    X_test = get_proper_images(test_batch[b'data'])
    Y_test = onehot_labels(test_batch[b'labels'])

    # Feature-wise zero-centering + std normalization (when enabled)
    img_prep = ImagePreprocessing()
    if data_norm:
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

    # Flips, rotations and random crops (when enabled)
    img_aug = ImageAugmentation()
    if data_augmentation:
        img_aug.add_random_flip_leftright()
        img_aug.add_random_rotation(max_angle=30.)
        img_aug.add_random_crop((32, 32), 6)

    return X, Y, X_test, Y_test, img_prep, img_aug
def tflearn_cifar():
    """Train a small CNN image classifier on CIFAR-10."""
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
    X_train, Y_train = shuffle(X_train, Y_train)
    Y_train = to_categorical(Y_train, nb_classes=10)
    Y_test = to_categorical(Y_test, nb_classes=10)

    # Zero-center the dataset (mean over the whole set) and STD-normalize
    # (std over the whole set).
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Augment with random horizontal flips and rotations.
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)

    # Model definition
    net = input_data(shape=(None, 32, 32, 3),
                     data_preprocessing=prep,
                     data_augmentation=aug)
    net = conv_2d(net, 32, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation="relu")
    net = conv_2d(net, 64, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation="relu")
    net = dropout(net, 0.5)
    net = fully_connected(net, 10, activation="softmax")
    net = regression(net, optimizer="adam",
                     loss="categorical_crossentropy", learning_rate=0.001)

    # Train the model
    model = DNN(net, tensorboard_verbose=0)
    model.fit(X_train, Y_train, n_epoch=50, shuffle=True,
              validation_set=(X_test, Y_test), show_metric=True,
              batch_size=96, run_id="cifar10_cnn")
def setup_model(checkpoint_path=None):
    """Build the two-class CNN described in "Machine Learning is Fun!
    Part 3".

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721
    """
    # Make sure the data is normalized
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Extra synthetic training data: flips, rotations and blur
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)
    aug.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy', learning_rate=0.001)

    if checkpoint_path:
        return tflearn.DNN(net, tensorboard_verbose=3,
                           checkpoint_path=checkpoint_path)
    return tflearn.DNN(net, tensorboard_verbose=3)
def build_network(output_dims=None):
    """Build a shared conv trunk with one softmax/regression head per
    entry in output_dims.

    Returns the single head when output_dims has exactly one entry,
    otherwise the list of heads.
    """
    # Real-time data preprocessing
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)

    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    # Conv/pool pyramid, doubling the filter count each stage
    for n_filters in (16, 32, 64, 128, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
        net = max_pool_2d(net, 2)
    # Four 512-unit fully connected layers
    for _ in range(4):
        net = fully_connected(net, 512, activation='relu')

    heads = []
    for output_dim in output_dims:
        head = fully_connected(
            net, output_dim, activation='softmax',
            name="unique_FullyConnected_output_dim_{}".format(output_dim))
        head = regression(head, optimizer='adam',
                          loss='categorical_crossentropy',
                          learning_rate=0.001)
        heads.append(head)
    return heads[0] if len(heads) == 1 else heads
def build_network(output_dims=None, get_hidden_reps=False):
    """Build a small conv trunk with one softmax/regression head per
    entry in output_dims (a list of class counts).

    :param output_dims: list of class counts; one head is built per entry
    :param get_hidden_reps: when True, return the 512-unit hidden layer
        (before dropout and the heads) instead of the heads
    :returns: the single head when output_dims has one entry, otherwise
        the list of heads; or the hidden layer when get_hidden_reps
    """
    # Real-time data augmentation only — featurewise normalization is
    # deliberately disabled in this variant.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    network = input_data(
        shape=[None, 32, 32, 3],
        data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    if get_hidden_reps:
        return network
    network = dropout(network, 0.5)

    networks = []
    for output_dim in output_dims:
        cur_network = fully_connected(
            network, output_dim, activation='softmax',
            name="unique_FullyConnected_output_dim_{}".format(output_dim))
        # NOTE: intentionally tiny learning rate (1e-7) kept from the
        # original implementation.
        cur_network = regression(cur_network, optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.0000001)
        networks.append(cur_network)
    # Removed dead code: a no-op attribute access on
    # tf.nn.sparse_softmax_cross_entropy_with_logits (and its inline
    # `import tensorflow as tf`) that had no effect on the graph.
    if len(networks) == 1:
        return networks[0]
    return networks
def init_model(self):
    """Build the 64x64 fire/smoke CNN and load its trained weights into
    ``self.model``."""
    # Input normalization
    prep = ImagePreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Flip/rotate augmentation
    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=25.)

    net = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    # Two conv stages (stride 1, 'same' padding), each followed by 2x2 pooling
    conv_1 = conv_2d(net, 16, 3, 1, 'same', activation='relu', name='conv_1')
    conv_2 = conv_2d(conv_1, 16, 3, 1, 'same', activation='relu', name='conv_2')
    net = max_pool_2d(conv_2, 2, 2, 'same')
    conv_3 = conv_2d(net, 16, 3, 1, 'same', activation='relu', name='conv_3')
    conv_4 = conv_2d(conv_3, 1, 3, 1, 'same', activation='relu', name='conv_4')
    net = max_pool_2d(conv_4, 2, 2, 'same')
    # Two 100-unit hidden layers into a 3-way softmax
    hidden_1 = fully_connected(net, 100, activation='relu')
    hidden_2 = fully_connected(hidden_1, 100, activation='relu')
    output = fully_connected(hidden_2, 3, activation='softmax')
    self.model = tflearn.DNN(output)
    self.model.load('model_fire_smoke_6_final.tflearn', weights_only=False)
def stop_dnn():
    """Return the (unwrapped) two-class CNN graph for the stop detector."""
    # Preprocessing pipeline is created but has no steps added.
    prep = ImagePreprocessing()

    aug = ImageAugmentation()
    aug.add_random_flip_leftright()
    aug.add_random_rotation(max_angle=10.)

    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=prep,
                     data_augmentation=aug)
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy', learning_rate=0.001)
    return net
def generate_image_augumentation(self):
    """Return the real-time augmentation pipeline: random horizontal
    flips plus rotations of up to 25 degrees.

    NOTE(review): the method name misspells "augmentation"; kept as-is
    because callers depend on it.
    """
    augmentation = ImageAugmentation()
    augmentation.add_random_flip_leftright()
    augmentation.add_random_rotation(max_angle=25.)
    return augmentation
# Data loading and preprocessing from tflearn.datasets import cifar10 (X, Y), (X_test, Y_test) = cifar10.load_data() X, Y = shuffle(X, Y) Y = to_categorical(Y) Y_test = to_categorical(Y_test) # Real-time data preprocessing img_prep = ImagePreprocessing() img_prep.add_featurewise_zero_center() img_prep.add_featurewise_stdnorm() # Real-time data augmentation img_aug = ImageAugmentation() img_aug.add_random_flip_leftright() img_aug.add_random_rotation(max_angle=25.) # Convolutional network building network = input_data(shape=[None, 32, 32, 3], data_preprocessing=img_prep, data_augmentation=img_aug) network = conv_2d(network, 32, 3, activation='relu') network = max_pool_2d(network, 2) network = conv_2d(network, 64, 3, activation='relu') network = conv_2d(network, 64, 3, activation='relu') network = max_pool_2d(network, 2) network = fully_connected(network, 512, activation='relu') network = dropout(network, 0.5) network = fully_connected(network, 10, activation='softmax') network = regression(network, optimizer='adam', loss='categorical_crossentropy',
from tflearn.datasets import cifar10

# Load CIFAR-10 and one-hot encode the labels into 10 classes.
(X, Y), (X_test, Y_test) = cifar10.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y, 10)
Y_test = to_categorical(Y_test, 10)

# Data preprocessing: featurewise zero-center + std-normalize
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Data augmentation: horizontal flips plus rotations (default max angle)
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation()

# Building the CNN
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug,
                     name='first_layer')
# NOTE(review): this max-pool is applied directly to the input layer —
# there is no conv layer before it; confirm this is intended.
network = max_pool_2d(network, 2)  # Max pooling layer
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')  # Multiple convolution layers
network = max_pool_2d(network, 2)  # Max pooling layer
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')  # Layer responsible for prediction
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Training using classifier
model = tflearn.DNN(network, tensorboard_verbose=2)