Example #1
def network(img_shape, name, LR):

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3.0)
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    img_aug.add_random_90degrees_rotation(rotations=[0, 2])

    # Building 'AlexNet'
    network = input_data(shape=img_shape, name=name,
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=LR, name='targets')
    return network
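
These snippets omit their imports. A minimal sketch of the TFLearn imports they generally assume, plus a hypothetical training call for the function above (the arrays X and Y are placeholder data, not part of the original source):

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

# Hypothetical usage: wrap the graph in a DNN and train on placeholder data.
X = np.random.rand(8, 227, 227, 3).astype('float32')   # placeholder images
Y = np.eye(2)[np.random.randint(0, 2, size=8)]          # placeholder one-hot labels
net = network([None, 227, 227, 3], 'input', 0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit({'input': X}, {'targets': Y}, n_epoch=1, batch_size=4, show_metric=True)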
Example #2
def res_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)

    n = 5
    net = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    # note: 'rnn' in the filename is a typo for 'res'; kept to match the saved checkpoint
    model.load('model\\res\\jun_rnn_cat_dog.tflearn')
    res_result = model.predict(X)
    return res_result
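
A hedged usage sketch for the function above: `predict` returns per-class probabilities, so the class index is the argmax (the placeholder batch below assumes the saved checkpoint is available on disk):

import numpy as np

X = np.random.rand(1, 64, 64, 3)          # placeholder batch of 64x64 RGB images
probs = np.asarray(res_graph(X))          # per-class probabilities, shape (1, 2)
labels = np.argmax(probs, axis=1)         # 0 or 1 for the two output classes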
Example #3
def simple_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)
    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    conv = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005)
    simple_model = tflearn.DNN(network)
    simple_model.load('model\\simple\\jun_simple_cat_dog_final.tflearn')
    simple_result = simple_model.predict(X)
    return simple_result
Example #4
def build_model():
    logging.info('building model')
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    encoder = input_data(shape=(None, IMAGE_INPUT_SIZE[0], IMAGE_INPUT_SIZE[1],
                                3), data_preprocessing=img_prep)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = dropout(encoder, 0.25)  # you can have noisy input instead
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    encoder = conv_2d(encoder, 8, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)
    
    decoder = conv_2d(encoder, 8, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, 3, 7)

    # Size of the encoded representation (all dimensions except the batch dimension)
    encoded_size = np.prod([int(d) for d in encoder.get_shape().as_list()[1:]])
    
    original_img_size = np.prod(IMAGE_INPUT_SIZE) * 3
    
    percentage = round(encoded_size / original_img_size, 2) * 100
    logging.debug('the encoded representation is {}% of the original \
image'.format(percentage))
    
    return regression(decoder, optimizer='adadelta',
                      loss='binary_crossentropy', learning_rate=0.005)
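
Since this is an autoencoder, the regression target is the input image itself. A minimal training sketch (the 64x64 value for IMAGE_INPUT_SIZE and the image array are placeholders, not from the original source):

import numpy as np
import tflearn

# Placeholder data: 16 RGB images matching IMAGE_INPUT_SIZE (assumed 64x64 here).
X = np.random.rand(16, 64, 64, 3).astype('float32')

model = tflearn.DNN(build_model(), tensorboard_verbose=0)
model.fit(X, X, n_epoch=1, batch_size=8, show_metric=True)   # autoencoder: targets == inputs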
Example #5
def create_model(nb_classes):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    network = input_data(shape=[None, 60, 60, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    network = conv_2d(network, 30, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 30, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = fully_connected(network, 128, activation='relu')
    network = fully_connected(network, nb_classes, activation='softmax')
    model = regression(network,
                       optimizer='adam',
                       loss='categorical_crossentropy',
                       learning_rate=0.001)

    return model
Example #6
def data_preprocessing():
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    return img_prep
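
Both featurewise steps normally compute their statistics from the training data at fit time; if memory serves, TFLearn also accepts precomputed values, which is convenient for inference-only pipelines. A hedged sketch (the numbers are placeholders):

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center(mean=118.2)   # placeholder: precomputed dataset mean
img_prep.add_featurewise_stdnorm(std=61.7)         # placeholder: precomputed dataset std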
Example #7
    def createNetwork(self, input_size):

        # Real-time data preprocessing
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_rotation(max_angle=25.)

        # Convolutional network building
        # network = input_data(shape=[None, input_size, input_size, 3],
        # data_preprocessing=img_prep,
        # data_augmentation=img_aug)

        network = input_data(shape=[None, input_size, input_size, 3])

        network = conv_2d(network, input_size, 3, activation='relu')
        network = max_pool_2d(network, 2)
        network = conv_2d(network, input_size * 2, 3, activation='relu')
        network = conv_2d(network, input_size * 2, 3, activation='relu')
        # network = max_pool_2d(network, 2)
        network = fully_connected(network, 512, activation='relu')
        network = dropout(network, 0.5)
        network = fully_connected(network, 4, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        return network
Example #8
def create_network(optimiser):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_samplewise_stdnorm()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation to add variance to the data
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3)
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network,
                         optimizer=optimiser,
                         loss='categorical_crossentropy',
                         learning_rate=0.002)

    return network
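
The `optimiser` argument can be either an optimizer name string or a TFLearn optimizer object. A hedged usage sketch (hyper-parameter values are placeholders):

import tensorflow as tf
import tflearn

net = create_network('sgd')                                   # built-in optimizer by name

tf.reset_default_graph()                                      # fresh graph before rebuilding
mom = tflearn.optimizers.Momentum(learning_rate=0.002, momentum=0.9)
net = create_network(mom)                                     # preconfigured optimizer object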
Example #9
def network():

    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.8)

    # Normalization of the data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create additional synthetic data (the more you have, the better)
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # The input shape must match the dataset images (100x100 RGB)
    network = input_data(shape=[None, 100, 100, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    """
		Creation of the different hidden layers

		================
		Editing section
		================
	"""
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2)

    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)

    network = conv_2d(network, 64, 2, activation='relu')
    network = conv_2d(network, 64, 2, activation='relu')
    network = max_pool_2d(network, 2)

    # Fully connected layer, then dropout to reduce overfitting
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.7)
    """
		======================
		End of Editing section
		======================
	"""

    network = fully_connected(network, 120, activation='softmax')

    # Training hyper-parameters
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Create the deep neural network with a checkpoint (backup) path;
    # tensorboard_verbose=0 keeps the logging overhead minimal
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path='dog_classifier.tfl.ckpt')

    return model
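
A minimal training sketch for the returned model; the 100x100x3 input and 120-way softmax follow from the graph above, while the data arrays and epoch count are placeholders:

import numpy as np

model = network()
X = np.random.rand(32, 100, 100, 3).astype('float32')   # placeholder RGB images
Y = np.eye(120)[np.random.randint(0, 120, size=32)]      # placeholder one-hot labels
model.fit(X, Y, n_epoch=1, validation_set=0.1, batch_size=16, show_metric=True)
model.save('dog_classifier.tfl')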
Example #10
def build_model():
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    return tflearn.DNN(network, tensorboard_verbose=0)
Example #11
def network(img_shape, name, LR):
    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3.0)
    img_aug.add_random_90degrees_rotation(rotations=[0, 2])

    network = input_data(shape=img_shape,
                         name=name,
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # def rete(img_shape, name, LR):
    #     network = input_data(shape=img_shape, name=name)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=LR,
                         name='targets')
    return network
Example #12
def net_nodule2d_swethasubramanian(image_dims):

    #image augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    
    #image pre-processing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    
    net = layers.core.input_data(shape=[None, image_dims[0], image_dims[1], image_dims[2], image_dims[3]], dtype=tf.float32, data_preprocessing=img_prep, data_augmentation=img_aug)
    
    net = layers.conv.conv_2d(net, 50, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.conv_2d(net, 64, 3, activation='relu')
    net = layers.conv.max_pool_2d(net, 2)
    net = layers.core.fully_connected(net, 512, activation='relu')
    net = layers.core.dropout(net, 0.5)
    net = layers.core.fully_connected(net, 2, activation='softmax')

    net = layers.estimator.regression(net, optimizer='adam',
                                      loss='categorical_crossentropy',
                                      learning_rate=0.001)
    return net
Example #13
def create_own_model():

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    convnet = input_data(shape=[None, img_width, img_height, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    convnet = conv_2d(convnet, 28, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 28, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = fully_connected(convnet, 512, activation='relu')
    convnet = dropout(convnet, 0.2)

    convnet = fully_connected(convnet, nb_classes, activation='softmax')

    model = regression(convnet,
                       optimizer='adam',
                       learning_rate=0.001,
                       loss='categorical_crossentropy',
                       name='targets')

    return model
Example #14
def transfer(prjname, basenet, numclasses, dims):
    # load data
    tf.reset_default_graph()
    X1, Y1 = image_preloader(".\\data\\train\\classes",
                             image_shape=tuple(dims)[0:2],
                             mode="folder",
                             categorical_labels=True,
                             normalize=True,
                             files_extension=[".jpg", ".png"],
                             filter_channel=True)

    # retrain basenet on new dataset
    img_prep = ImagePreprocessing()
    # 2FIX: normalization should be done per class and per channel
    img_prep.add_featurewise_zero_center()
    #img_prep.add_featurewise_stdnorm()

    x1 = tflearn.input_data(shape=np.insert(np.asarray(dims, dtype=object), 0,
                                            None).tolist(),
                            data_preprocessing=img_prep,
                            name="input")
    mt1 = templates.fetch("vgg16")

    numepochs = 2
    validpct = 0.1
    batchsize = 3
    model1 = nets.train(mt1, x1, X1, Y1, numclasses, numepochs, validpct,
                        batchsize, "vgg16" + prjname)

    model1.save(".\\logs\\ckp\\" + "vgg16goteborg\\" + "vgg16" + prjname +
                ".ckpt")
Example #15
def CNN_Model_Creation():
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    
    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    
    # Define our network architecture:
    
    # Input is a 512x512 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 512, 512, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
                         
    # Step 1: Convolution
    network = conv_2d(network, 32, 5, activation='relu')
    
    network = conv_2d(network, 32, 3, activation='relu')
    
    # Step 2: Max pooling
    network = max_pool_2d(network, 2)
    
    # Step 3: Convolution again
    network = conv_2d(network, 64, 3, activation='relu')
    
    # Step 4: Convolution yet again
    network = conv_2d(network, 64, 3, activation='relu')
    
    # Step 5: Max pooling again
    network = max_pool_2d(network, 2)
    
    network = conv_2d(network, 96, 2, activation='relu')
    
    network = max_pool_2d(network, 2)
    
    # Step 6: Fully-connected 512 node neural network
    network = fully_connected(network, 512, activation='relu')
    
    # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
    network = dropout(network, 0.5)
    
    # Step 8: Fully-connected neural network with two outputs (0=isn't a cancer, 1=is a cancer) to make the final prediction
    network = fully_connected(network, 2, activation='softmax')
    
    #momentum = tflearn.optimizers.Momentum(learning_rate=0.05, momentum=0.7, lr_decay=0.5)    
    
    # Tell tflearn how we want to train the network
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy')
    
    # Wrap the network in a model object
    model = tflearn.DNN(network, tensorboard_verbose=0)

    return model
Example #16
def alex_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)
    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    network = conv_2d(network, 64, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005)
    alex_model = tflearn.DNN(network)
    alex_model.load('model\\alex\\jun_ALEX_cat_dog_final.tflearn')
    alex_result = alex_model.predict(X)
    return alex_result
Example #17
    def preprocessing(self):
        """
		Make sure the data is normalized
		"""
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()
        return img_prep
Example #18
def snack_detection_worker():
    global snack_image

    # image preprocessors for neural network input
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping & rotating images
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # setting up neural network
    network = input_data(shape=[None, 64, 64, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # 1: Convolution layer with 32 filters, each 5x5x3
    network = conv_2d(network, 32, 5, activation='relu')
    network = max_pool_2d(network, 5)

    network = conv_2d(network, 64, 5, activation='relu')
    network = max_pool_2d(network, 5)

    network = conv_2d(network, 128, 5, activation='relu')
    network = max_pool_2d(network, 5)

    network = conv_2d(network, 64, 5, activation='relu')
    network = max_pool_2d(network, 5)

    network = conv_2d(network, 32, 5, activation='relu')
    network = max_pool_2d(network, 5)

    network = fully_connected(network, 1024, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=1e-3,
                         loss='categorical_crossentropy')

    #TODO: change checkpoint path
    model = tflearn.DNN(network,
                        checkpoint_path='model_cat_dog_7.tflearn',
                        max_checkpoints=3,
                        tensorboard_verbose=3,
                        tensorboard_dir='tmp/tflearn_logs/')

    model.load('reference code/model_cat_dog_6_final.tflearn')

    while True:
        #print (image)
        image = cv2.resize(snack_image, (64, 64))
        if (image.all() != 0):
            print("detecting snacks")
            test(model, image)
Example #19
def main(_):
    print(FLAGS.buckets)
    print(FLAGS.checkpointDir)
    print(FLAGS.test_para)

    if tf.gfile.Exists(FLAGS.checkpointDir):
        tf.gfile.DeleteRecursively(FLAGS.checkpointDir)
    tf.gfile.MakeDirs(FLAGS.checkpointDir)

    dirname = os.path.join(FLAGS.buckets, "")
    (X, Y), (X_test, Y_test) = load_data(dirname)
    print("load data done")

    X, Y = shuffle(X, Y)
    Y = to_categorical(Y, 10)
    Y_test = to_categorical(Y_test, 10)

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X,
              Y,
              n_epoch=50,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=96,
              run_id='cifar10_cnn')
    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")
    print(model_path)
    model.save(model_path)
Example #20
  def build_network(self):
    print('---------------------Building CNN---------------------')
    img_preProcess=ImagePreprocessing()
    img_preProcess.add_featurewise_zero_center()
    img_preProcess.add_featurewise_stdnorm()

    # Mean: 189.80002318
    # STD: 85.4885473338
    #img_aug=ImageAugmentation()
    #img_aug.add_random_flip_leftright()
    #img_aug.add_random_rotation(max_angle=25)
    #img_aug.add_random_blur(sigma_max=3)

    self.network = input_data(shape = [None, constants.IMAGE_HEIGHT, constants.IMAGE_WIDTH, 3])

    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = max_pool_2d(self.network, 2, strides=2)

    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = max_pool_2d(self.network, 2, strides=2)

    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 2, strides=2)

    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = max_pool_2d(self.network, 2, strides=2)

    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = max_pool_2d(self.network, 2, strides=2)

    self.network = fully_connected(self.network, 4096, activation='relu')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 4096, activation='relu')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 5, activation='softmax')

    self.network = regression(self.network, optimizer='adam',
                              loss='categorical_crossentropy',
                              learning_rate=0.0001)
    self.model = tflearn.DNN(
      self.network,
      tensorboard_dir=constants.DATA_PATH,
      checkpoint_path =constants.DATA_PATH + '/gun_checkpoint',
      max_checkpoints = 1,
      tensorboard_verbose = 2
    )
    self.load_model()
    print('-----------------------Model Loaded----------------------')
Example #21
def predict(prjname,basenet,predictclass,perclass,numclasses,dims,withtransfer):
    
    # load data
    tf.reset_default_graph()
    X1, Y1 = image_preloader(".\\data\\test\\classes",
                            image_shape=tuple(dims)[0:2],
                            mode="folder",
                            categorical_labels=True,
                            normalize=True,
                            files_extension=[".jpg",".png"],
                            filter_channel=True)
    
    # retrain vgg16 on goteborg
    img_prep = ImagePreprocessing()
    # 2FIX: normalization should be done per channel
    img_prep.add_featurewise_zero_center()
    #img_prep.add_featurewise_stdnorm()
    
    x1 = tflearn.input_data(shape=np.insert(np.asarray(dims, dtype=object),0,None).tolist(),
                            data_preprocessing=img_prep,
                            name="input")
    
    if withtransfer:
        checkpoint = "vgg16goteborg"
    else:
        checkpoint = "vgg16"
        
    mt1 = templates.fetch(checkpoint)
    #numclasses = 5
    model1, _, codelist = nets.getcodes(mt1, x1, numclasses)
    
    # prepare input for second stage
    # 2DO: should X2 be normalized? is ReLUing sufficient?
    pixclasses = numclasses + 1
    X2 = codes.formatcodes(codelist,model1,x1,X1)
    Y2 = utils.OHpixlabels(predictclass, perclass, pixclasses, dims[0], "test")
    
    # use second net on codes
    tf.reset_default_graph()

    x2 = tflearn.input_data(shape=np.insert(np.asarray(list(X2.shape)[1:], dtype=object),0,None).tolist(),
                            name='mlp_input')
    
    mt2 = templates.fetch("pixelnet2.2")
    model2, _, _ = nets.getcodes(mt2, x2, pixclasses)
    model2.load(".\\logs\\ckp\\" + "pixelnetgoteborg\\" + "pixelnet" + prjname + ".ckpt",weights_only=True)

    outv = model2.predict(X2)
    outv = np.asarray(outv)
    pixclass = np.argmax(outv[0,:,:,:], axis=2)
    
    preddir = ".\\data\\test\\preds"
    os.makedirs(preddir, exist_ok=True)
    np.save(preddir + "\\pred_0.npy", pixclass)
    print("bien!")
Example #22
def build_net():

    n = 5

    tflearn.config.init_training_mode()

    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    # img_aug.add_random_crop([48, 48], padding=8)

    # Building Residual Network
    net = tflearn.input_data(shape=[None, 48, 48, 1],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net,
                          nb_filter=16,
                          filter_size=3,
                          regularizer='L2',
                          weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    # Regression
    net = tflearn.fully_connected(net, 7, activation='softmax')
    mom = tflearn.Momentum(learning_rate=0.1,
                           lr_decay=0.0001,
                           decay_step=32000,
                           staircase=True,
                           momentum=0.9)
    net = tflearn.regression(net,
                             optimizer=mom,
                             loss='categorical_crossentropy')
    print("make model")
    model = tflearn.DNN(
        net,
        checkpoint_path='upload/Resmodels/model_resnet_emotion',
        max_checkpoints=10,
        tensorboard_verbose=0,
        clip_gradients=0.)
    print("load model start")
    model.load('upload/Resmodels/model_resnet_emotion-10500')
    print("load model success")

    return model
Example #23
def ANN(WIDTH, HEIGHT, CHANNELS, LABELS):
    dropout_value = 0.35

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Building the network
    network = input_data(shape=[None, WIDTH, HEIGHT, CHANNELS],
                         data_preprocessing=img_prep,
                         name='input')

    # Branch 1
    branch1 = conv_2d(network,
                      10, [2, 2],
                      activation='relu',
                      name='B1Conv2d_2x2')
    #branch1 = dropout(branch1, dropout_value)

    # Branch 2
    branch2 = conv_2d(branch1,
                      10, [2, 2],
                      activation='relu',
                      name='B2Conv2d_2x2')
    #branch2 = dropout(branch2, dropout_value)

    # Fully connected 1
    full_1 = fully_connected(branch2, 100, activation='relu')
    full_1 = dropout(full_1, dropout_value)

    # Fully connected 2
    full_2 = fully_connected(full_1, 100, activation='relu')
    full_2 = dropout(full_2, dropout_value)

    # Output layer
    network = fully_connected(full_2, LABELS, activation='softmax')
    '''
	network = tflearn.regression(network, optimizer = 'momentum',
	                         loss  = 'categorical_crossentropy',
	                         learning_rate = 0.1)
	'''
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')

    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        tensorboard_dir='./logs',
                        best_checkpoint_path='./checkpoints/best/best_val',
                        max_checkpoints=1)

    return model
Example #24
def train_rec():
    X, Y, X_test, Y_test = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    X_test = X_test.reshape([-1, 28, 28, 1])
    #X, Y = shuffle(X, Y)
    #Y = to_categorical(Y,10)
    #Y_test = to_categorical(Y_test,10)

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    #img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Convolutional network building
    inputs = input_data(
        shape=[None, 28, 28, 1],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name="inputs")
    network = conv_2d(inputs, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)

    network = fully_connected(network, 128, activation='relu')
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.6)
    network = fully_connected(network, 10, activation='softmax')  # class probabilities for the categorical cross-entropy loss
    network = regression(
        network,
        optimizer='adam',
        loss='categorical_crossentropy',
        learning_rate=0.001)

    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=3)
    global rec_input, rec_network
    rec_input, rec_network = inputs, network
    model.fit(
        X,
        Y,
        n_epoch=20,
        shuffle=True,
        validation_set=(X_test, Y_test),
        show_metric=True,
        batch_size=128,
        run_id='mnist')
    return model
Example #25
def net(X, Y, save_model=False):
    tflearn.config.init_graph(gpu_memory_fraction=1)

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # use Transfer learning for better performance
    # Building convolutional network
    network = input_data(shape=[None, 48, 48, 3], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 2, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 2, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 328, activation='relu')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = fully_connected(network, 128, activation='relu')
    # network = dropout(network, 0.8)
    network = fully_connected(network, len(set(Y)) + 1, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='softmax_categorical_crossentropy',
                         name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=3)
    # print(Y)
    print(np.eye(len(set(Y)) + 1)[Y])
    model.fit({'input': X}, {'target': np.array(np.eye(len(set(Y)) + 1)[Y])},
              n_epoch=15,
              batch_size=50,
              validation_set=0.3,
              snapshot_step=15000,
              show_metric=True,
              run_id='face_recogn')
    if os.path.exists('fr.tflearn.index'):
        print('Loading pre-trained model')
        model.load('fr.tflearn')
    if save_model:
        model.save('fr.tflearn')
    return model
Example #26
def ANN(WIDTH, HEIGHT, CHANNELS, LABELS):

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Building the network
    network = input_data(shape=[None, WIDTH, HEIGHT, CHANNELS],
                         data_preprocessing=img_prep,
                         name='input')

    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 128, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)

    # Output layer
    merged_layers = fully_connected(network, LABELS, activation='softmax')
    network = regression(merged_layers,
                         optimizer='adam',
                         learning_rate=0.0005,
                         loss='categorical_crossentropy',
                         name='target')

    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        tensorboard_dir='./logs',
                        best_checkpoint_path='./checkpoints/best/best_val',
                        max_checkpoints=1)
    return model
Example #27
    def setup_image_preprocessing(self):
        """ Setup image preprocessing """
        # normalization of images
        self.tf_img_prep = ImagePreprocessing()
        self.tf_img_prep.add_featurewise_zero_center()
        self.tf_img_prep.add_featurewise_stdnorm()

        # Randomly create extra image data by rotating and flipping images
        self.tf_img_aug = ImageAugmentation()
        self.tf_img_aug.add_random_flip_leftright()
        self.tf_img_aug.add_random_rotation(max_angle=30.)
Example #28
def main(data_dir, hdf5, name):
    batch_size = 256
    num_epochs = 10
    learning_rate = 0.001
    X, Y, X_test, Y_test = get_data(data_dir, hdf5)
    X, Y = shuffle(X, Y)
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # Step 1: Convolution
    network = conv_2d(network, 32, 3, activation='relu')
    # Step 2: Max pooling
    network = max_pool_2d(network, 2)
    # Step 3: Convolution
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 4: Convolution
    network = conv_2d(network, 64, 3, activation='relu')
    # Step 5: Max pooling
    network = max_pool_2d(network, 2)
    # Step 6: Fully-connected 512 node neural network
    network = fully_connected(network, 512, activation='relu')
    # Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
    network = dropout(network, 0.5)
    # Step 8: Fully-connected neural network with two outputs (0=isn't a bird, 1=is a bird) to make the final prediction
    network = fully_connected(network, 2, activation='softmax')
    # Tell tflearn how we want to train the network
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    # Wrap the network in a model object
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path='bird-classifier.tfl.ckpt')
    # Train it! We'll do 100 training passes and monitor it as it goes.
    model.fit(X,
              Y,
              n_epoch=100,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=96,
              snapshot_epoch=True,
              run_id='bird-classifier')
    # Save model when training is complete to a file
    model.save("bird-classifier.tfl")
    print("Network trained and saved as bird-classifier.tfl!")
Example #29
def build_network(output_dims=None):
    # outputdims is a list of num_classes
    # Real-time data preprocessing

    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)

    networks = []
    for i, output_dim in enumerate(output_dims):
        for j in range(i):
            network = conv_2d(network,
                              64,
                              3,
                              activation='relu',
                              name="unique_Conv2D_{}".format(3 + 2 * j))
            network = conv_2d(network,
                              64,
                              3,
                              activation='relu',
                              name="unique_Conv2D_{}".format(3 + 2 * j + 1))
            network = max_pool_2d(network, 2)
        network = fully_connected(network,
                                  512,
                                  activation='relu',
                                  name="unique_FullyConnected")
        network = dropout(network, 0.5)
        cur_network = fully_connected(network,
                                      output_dim,
                                      activation='softmax',
                                      name="unique_FullyConnected_1")
        cur_network = regression(cur_network,
                                 optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.001)
        networks.append(cur_network)

    if len(networks) == 1:
        return networks[0]
    return networks
Example #30
def load_model(
        model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
    '''
    Load the trained tensorflow model

    Args:
        model_path (str)        : path to particle-classifier e.g.
                                  '/mnt/ARRAY/classifier/model/particle-classifier.tfl'

    Returns:
        model (tf model object) : loaded tfl model from load_model()
    '''
    path, filename = os.path.split(model_path)
    header = pd.read_csv(os.path.join(path, 'header.tfl.txt'))
    OUTPUTS = len(header.columns)
    class_labels = header.columns

    tf.reset_default_graph()

    # Same network definition as in tfl_tools scripts
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.75)
    network = fully_connected(network, OUTPUTS, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path=model_path)
    model.load(model_path)

    return model, class_labels
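
A hedged inference sketch for the loaded model (the image batch is placeholder data and assumes the default checkpoint exists; `class_labels` holds the header columns returned above):

import numpy as np

model, class_labels = load_model()
imgs = np.random.rand(4, 32, 32, 3)                # placeholder batch of 32x32 RGB images
probs = np.asarray(model.predict(imgs))
predicted = [class_labels[i] for i in np.argmax(probs, axis=1)]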