Example #1
def res_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)

    n = 5
    net = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    # 'rnn' in the checkpoint filename is a typo for 'res'; it is kept as-is
    # so the path matches the saved model file.
    model.load('model\\res\\jun_rnn_cat_dog.tflearn')
    res_result = model.predict(X)
    return res_result
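
A minimal usage sketch for the graph functions in these examples, assuming X is a batch of 64x64 RGB images; the file name and the [0, 1] scaling below are illustrative assumptions and may not match the preprocessing used at training time.

import numpy as np
from PIL import Image

# Hypothetical input: load one image, force RGB, resize to 64x64, add a batch axis.
img = Image.open('sample.jpg').convert('RGB').resize((64, 64))
X = np.asarray(img, dtype=np.float32).reshape(1, 64, 64, 3) / 255.0
probs = res_graph(X)   # e.g. [[p_class0, p_class1]]; class order depends on training labels
print(probs)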
Example #2
def alex_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)
    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    network = conv_2d(network, 64, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005)
    alex_model = tflearn.DNN(network)
    alex_model.load('model\\alex\\jun_ALEX_cat_dog_final.tflearn')
    alex_result = alex_model.predict(X)
    return alex_result
Example #3
def simple_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)
    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    conv = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    conv = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(conv, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005)
    simple_model = tflearn.DNN(network)
    simple_model.load('model\\simple\\jun_simple_cat_dog_final.tflearn')
    simple_result = simple_model.predict(X)
    return simple_result
Example #4
def vgg_graph(X):
    # Reset the default graph before initializing TFLearn's training mode,
    # otherwise the reset would discard the ops the initialization just created.
    tf.reset_default_graph()
    tflearn.config.init_training_mode()

    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)

    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)

    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 128, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')

    network = regression(network, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)
    vgg_model = tflearn.DNN(network)
    vgg_model.load('model\\vgg\\jun_vgg_cat_dog_final.tflearn')
    vgg_result = vgg_model.predict(X)
    return vgg_result
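
Because each *_graph function above builds its own TFLearn network, running more than one of them in the same process generally requires resetting the default graph between calls (as vgg_graph already does). Below is a hedged ensemble sketch; averaging the four softmax outputs is an assumption, not part of the original code.

import numpy as np
import tensorflow as tf

def ensemble_predict(X):
    results = []
    for graph_fn in (simple_graph, alex_graph, vgg_graph, res_graph):
        tf.reset_default_graph()               # start each model in a fresh graph
        results.append(np.asarray(graph_fn(X)))
    return np.mean(results, axis=0)            # average the class probabilities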
Example #5
def get_data():
    data_norm = True
    data_augmentation = True

    data1 = unpickle('../cifar-10-batches-py/data_batch_1')
    data2 = unpickle('../cifar-10-batches-py/data_batch_2')
    data3 = unpickle('../cifar-10-batches-py/data_batch_3')
    data4 = unpickle('../cifar-10-batches-py/data_batch_4')
    data5 = unpickle('../cifar-10-batches-py/data_batch_5')
    # print(list(data1.keys()))
    # X = np.concatenate((get_proper_images(data1['data']),
    #                     get_proper_images(data2['data']),
    #                     get_proper_images(data3['data']),
    #                     get_proper_images(data4['data']),
    #                     get_proper_images(data5['data'])))
    X = np.concatenate(
        (get_proper_images(data1[b'data']), get_proper_images(data2[b'data']),
         get_proper_images(data3[b'data']), get_proper_images(data4[b'data']),
         get_proper_images(data5[b'data'])))
    # Y = np.concatenate((onehot_labels(data1['labels']),
    #                     onehot_labels(data2['labels']),
    #                     onehot_labels(data3['labels']),
    #                     onehot_labels(data4['labels']),
    #                     onehot_labels(data5['labels'])))
    Y = np.concatenate(
        (onehot_labels(data1[b'labels']), onehot_labels(data2[b'labels']),
         onehot_labels(data3[b'labels']), onehot_labels(data4[b'labels']),
         onehot_labels(data5[b'labels'])))

    # X_test = get_proper_images(unpickle('../cifar-10-batches-py/test_batch')['data'])
    # Y_test = onehot_labels(unpickle('../cifar-10-batches-py/test_batch')['labels'])

    X_test = get_proper_images(
        unpickle('../cifar-10-batches-py/test_batch')[b'data'])
    Y_test = onehot_labels(
        unpickle('../cifar-10-batches-py/test_batch')[b'labels'])

    img_prep = ImagePreprocessing()
    if data_norm:
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    if data_augmentation:
        img_aug.add_random_flip_leftright()
        img_aug.add_random_rotation(max_angle=30.)
        img_aug.add_random_crop((32, 32), 6)

    return X, Y, X_test, Y_test, img_prep, img_aug
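
A sketch of how the values returned by get_data() might be wired into a TFLearn model; the small CNN and the hyperparameters below are placeholders chosen only to show how the returned objects are used, not the original training code.

import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression

X, Y, X_test, Y_test, img_prep, img_aug = get_data()

net = input_data(shape=[None, 32, 32, 3],
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
net = conv_2d(net, 32, 3, activation='relu')
net = max_pool_2d(net, 2)
net = fully_connected(net, 10, activation='softmax')   # 10 CIFAR-10 classes
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, validation_set=(X_test, Y_test), n_epoch=10, show_metric=True)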
Example #6
def build_network():

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True)

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([IMG_WIDTH, IMG_HEIGHT], padding=4)

    # Note (Ashis): no transition layer is used here, so every densenet_block
    # uses the same nb_layers and growth rate (k); a transition layer would
    # normally sit between two adjacent densenet_blocks to balance them.

    # By default, dropout is False and downsample is True.

    # Building the DenseNet
    net = input_data(shape=[None, IMG_WIDTH, IMG_HEIGHT, CHANNELS],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    net = conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = densenet_block(net, nb_layers, k)
    #no transition layer
    net = densenet_block(net, nb_layers, k)
    #no transition layer

    #net = densenet_block(net, nb_layers, k)   #Ignore one

    #no transition layer
    net = tflearn.global_avg_pool(net)

    # Regression
    net = fully_connected(net, CLASS_NUMBER, activation='softmax')
    #opt = tflearn.optimizers.Nesterov(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    #opt = tflearn.optimizers.AdaGrad (learning_rate=0.01, initial_accumulator_value=0.01)

    net = regression(net,
                     optimizer="adam",
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
    # Training
    model = tflearn.DNN(net,
                        checkpoint_path='model_densenet',
                        max_checkpoints=10,
                        tensorboard_verbose=0,
                        clip_gradients=0.)
    return model
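
A hypothetical training call for build_network(); X, Y, X_val, Y_val are assumed to be arrays shaped [N, IMG_WIDTH, IMG_HEIGHT, CHANNELS] and [N, CLASS_NUMBER], and the batch size, epoch count, and run_id are placeholders.

model = build_network()
model.fit(X, Y, n_epoch=50, validation_set=(X_val, Y_val),
          show_metric=True, batch_size=128, run_id='densenet')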
Example #7
    def Define():
        img_aug = ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_crop((48, 48), 6)
        img_aug.add_random_rotation(max_angle=25.)

        network = input_data(shape=[None, 48, 48, 1],
                             data_augmentation=img_aug)  #48 x 48 grayscale
        network = conv_2d(network, 64, 5, activation='relu')
        #network = local_response_normalization(network)
        network = max_pool_2d(network, 3, strides=2)
        network = conv_2d(network, 64, 5, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = conv_2d(network, 128, 4, activation='relu')
        network = dropout(network, 0.3)
        network = fully_connected(network, 3072, activation='tanh')
        network = fully_connected(network, 7, activation='softmax')

        return network
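
Define() only returns the layer stack, so a training op and an estimator still have to be attached before fitting. A hedged sketch, assuming regression and tflearn are imported as in the other examples; the optimizer and learning rate are placeholders, and since Define sits inside a class in the original, it may need to be reached through that class.

network = regression(Define(), optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
model = tflearn.DNN(network, tensorboard_verbose=0)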
Example #8
class BatchGenerator():
    def __init__(self, data_dir, phase, config=None):
        if phase.upper() in ['TRAIN', 'VAL', 'TEST']:
            self.dataset_list = config['{}_DATASET'.format(phase.upper())]
        else:
            raise ValueError("phase must be one of 'train', 'val', or 'test'")

        # In the classical (single-dataset) setting this would not be a list.
        self.data_root = data_dir
        self.dataset_size = DATASET_SIZE
        self.phase = phase
        # Note: self.dataset is overwritten on every iteration, so only the
        # last entry of dataset_list is actually kept.
        for i, dname in enumerate(self.dataset_list):
            load_dir = os.path.join(data_dir, phase, dname + '.npy')
            self.dataset = np.load(load_dir)

        self.n_classes = len(self.dataset)
        self.hw = 32 if self.dataset_list[0]=='cl_cifar10' else 84

        y = []
        for i in range(len(self.dataset)):
            y.append(np.zeros([len(self.dataset[i])])+i)
        self.x = np.reshape(self.dataset, [-1,self.hw,self.hw,3]) / 255.
        self.y = np.reshape(y, [-1])

        self.aug = ImageAugmentation()
        self.aug.add_random_flip_leftright()
        self.aug.add_random_crop([32,32], padding=4)
#        self.aug.add_random_rotation(max_angle=25.)

    def get_batch(self, batch_size, onehot=True, aug=False):
        rndidx = np.random.choice(len(self.y), size=batch_size, replace=False)
        x = self.x[rndidx]
        y = self.y[rndidx]
        if onehot:
            y1hot = np.zeros([batch_size, self.n_classes])
            y1hot[np.arange(batch_size), y.astype(int)] = 1
            y = y1hot
        if aug:
            x = self.aug.apply(x)
        return x, y
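
A hypothetical way to use BatchGenerator; the config dictionary, data directory, and batch size are placeholders, and the module-level DATASET_SIZE constant is assumed to be defined elsewhere.

config = {'TRAIN_DATASET': ['cl_cifar10']}          # expects <data_dir>/train/cl_cifar10.npy
gen = BatchGenerator('/path/to/data', 'train', config=config)
x_batch, y_batch = gen.get_batch(64, onehot=True, aug=True)
print(x_batch.shape, y_batch.shape)                 # e.g. (64, 32, 32, 3) (64, n_classes)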
Example #9
def get_data():
    data_norm = True
    data_augmentation = False

    # f = h5py.File('naive_dataset_large.h5', 'r')
    f = h5py.File('naive_dataset_small.h5', 'r')
    g = f['training']
    X = np.zeros((0, 1024))
    Y = []
    for item in g:
        X = np.vstack((X, np.array(g[item]['spectra'])))
        Y += [onehot_labels(np.array(g[item]['label']))]

    Y = np.array(Y)

    g = f['testing']
    X_test = np.zeros((0, 1024))
    Y_test = []
    for item in g:
        X_test = np.vstack((X_test, np.array(g[item]['spectra'])))
        Y_test += [onehot_labels(np.array(g[item]['label']))]

    Y_test = np.array(Y_test)

    img_prep = ImagePreprocessing()
    if data_norm:
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    if data_augmentation:
        img_aug.add_random_flip_leftright()
        img_aug.add_random_rotation(max_angle=30.)
        img_aug.add_random_crop((32, 32), 6)

    return X, Y, X_test, Y_test, img_prep, img_aug
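
Here X holds flat 1024-point spectra, so it would have to be reshaped before a 2-D convolutional network can consume it; treating each spectrum as a 32x32 single-channel image is only an assumption (chosen because 1024 = 32 * 32), matching the (32, 32) crop configured above.

X, Y, X_test, Y_test, img_prep, img_aug = get_data()
X = X.reshape([-1, 32, 32, 1])            # assumed layout: 32x32, one channel
X_test = X_test.reshape([-1, 32, 32, 1])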
Example #10
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import dataset

img_dir = '/home/chrisjan/project/training/koi/images/train/'
dataset_file = '/home/chrisjan/project/training/koi/data/koi_dataset.pkl'

X_train, X_label, Y_test, Y_label = dataset.load_dataset(img_dir, dataset_file, shuffle_data=True, one_hot=True)

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_crop((400, 300), 50)
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

network = input_data(shape=[None, 400, 300, 3], data_preprocessing=img_prep, data_augmentation=img_aug)

conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

# 3a
Example #11

trainY = to_categorical(trainY, 2)
testY = to_categorical(testY, 2)


# Image transformations

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_crop([64, 64], padding=4)

network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)

network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
Example #12
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
from tflearn.data_augmentation import ImageAugmentation


img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_crop([224, 224], 10)
img_aug.add_random_blur()
img_aug.add_random_rotation(max_angle=25.)

X, Y = image_preloader('files_list',
                       image_shape=(224, 224),
                       mode='file',
                       categorical_labels=True,
                       normalize=True,
                       files_extension=['.jpg', '.jpeg', '.png'])


network = input_data(shape=[None, 224, 224, 3], data_augmentation=img_aug)
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
Example #13
    x = x.astype("float")
    x_test = x_test.astype("float")
    # Data pre-processing: per-pixel standardization (the Python 2 xrange loops
    # below are equivalent to the vectorized x = (x - mean) / std).
    for i in xrange(len(x)):
        for j in xrange(32):
            for k in xrange(32):
                x[i][j][k] = (x[i][j][k] - mean) / std
    for i in xrange(len(x_test)):
        for j in xrange(32):
            for k in xrange(32):
                x_test[i][j][k] = (x_test[i][j][k] - mean) / std
    print('data pre-processing finished.')
#data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop((32, 32), 4)
#input
network = input_data(shape=[None, 32, 32, 3], data_augmentation=img_aug)
#conv1
network = conv_2d(network,
                  192,
                  5,
                  activation='relu',
                  weights_init=tflearn.initializations.normal(stddev=0.01),
                  weight_decay=0.0001)
network = conv_2d(network,
                  160,
                  1,
                  activation='relu',
                  weights_init=tflearn.initializations.normal(stddev=0.05),
                  weight_decay=0.0001)
Example #14
    data[i, :, :] = im[:, :]

X = data.reshape([-1, inputSize, inputSize, 1])

# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_samplewise_zero_center()
img_prep.add_samplewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
#img_aug.add_random_rotation(max_angle=3.)
img_aug.add_random_90degrees_rotation(rotations=[0, 1, 2, 3])
img_aug.add_random_crop((inputSize, inputSize), 5)
img_aug.add_random_blur(sigma_max=1.0)

# Convolutional network building
network = input_data(shape=[None, inputSize, inputSize, 1],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 4, activation='softmax')
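
The excerpt stops at the softmax layer; a hedged sketch of how it might be completed, assuming regression and tflearn are imported as in the other examples. The optimizer and learning rate are placeholders, and the labels (Y) are not shown in the excerpt.

network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit(X, Y, n_epoch=30, shuffle=True, show_metric=True)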
Example #15
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
from tflearn.data_augmentation import ImageAugmentation

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_crop([224, 224], 10)
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur()

X, Y = image_preloader('files_list',
                       image_shape=(224, 224),
                       mode='file',
                       categorical_labels=True,
                       normalize=True,
                       files_extension=['.jpg', '.png'])

network = input_data(shape=[None, 224, 224, 3], data_augmentation=img_aug)
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
Example #16
(X, Y), (X_test, Y_test) = load_data.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y, num_classes)
Y_test = to_categorical(Y_test, num_classes)


# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=180.)
img_aug.add_random_crop(image_size, padding=6)

def build_network(image_size, batch_size=None, n_channels=3):
    network = input_data(shape=[batch_size, image_size[0], image_size[1], n_channels],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    network = conv_2d(network, 16, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, num_classes, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    return network
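
A hypothetical training call for build_network(); the batch size, epoch count, and run_id are placeholders, while image_size, X, Y, X_test, and Y_test come from the code above.

network = build_network(image_size)
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=20, validation_set=(X_test, Y_test),
          show_metric=True, batch_size=64, run_id='small_cnn')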
Example #17
def google_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)
    network = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
    inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu',
                                    name='inception_3a_pool_1_1')

    # merge the inception_3a_* branches
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1],
                                mode='concat', axis=3)

    inception_3b_1_1 = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu',
                                      name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu',
                                      name='inception_3b_5_5_reduce')
    # Note: unlike the other 5x5 branches, no activation is specified here.
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1, activation='relu',
                                    name='inception_3b_pool_1_1')

    # merge the inception_3b_*
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1],
                                mode='concat', axis=3, name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
    inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu',
                                    name='inception_4a_pool_1_1')

    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1],
                                mode='concat', axis=3, name='inception_4a_output')

    # Note: the name below duplicates 'inception_4a_1_1' (likely a copy-paste
    # slip); it is left unchanged so the saved checkpoint still loads.
    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu',
                                      name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu',
                                      name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')

    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu',
                                    name='inception_4b_pool_1_1')

    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1],
                                mode='concat', axis=3, name='inception_4b_output')

    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',
                                      name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu',
                                      name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')

    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu',
                                    name='inception_4c_pool_1_1')

    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1],
                                mode='concat', axis=3, name='inception_4c_output')

    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu',
                                      name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu',
                                      name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu',
                                    name='inception_4d_pool_1_1')

    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1],
                                mode='concat', axis=3, name='inception_4d_output')

    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu',
                                      name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu',
                                      name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu',
                                    name='inception_4e_pool_1_1')

    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5, inception_4e_pool_1_1], axis=3,
                                mode='concat')

    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1, activation='relu',
                                    name='inception_5a_pool_1_1')

    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,
                                mode='concat')

    inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu',
                                      name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3, activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu',
                                      name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu',
                                    name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3,
                                mode='concat')
    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)
    network = fully_connected(pool5_7_7, 2, activation='softmax')

    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0005)
    google_model = tflearn.DNN(network)
    google_model.load('model\\google\\jun_glnet_cat_dog_final.tflearn')
    google_result = google_model.predict(X)
    return google_result
Example #18
from imagenet_classes import class_names
import csv

tflearn.config.init_graph(num_cores=4, gpu_memory_fraction=0.5)

dataset_file = '../../dataset/val.txt'
r = csv.reader(open(dataset_file, 'r'), delimiter=' ')
X, Y = image_preloader(dataset_file, image_shape=(256, 256), mode='file',
                       categorical_labels=True, normalize=True)


img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_crop((224,224))
img_aug.add_random_flip_leftright()

inp = input_data(shape=[None, 224, 224, 3], data_preprocessing=img_prep, data_augmentation=img_aug, name='input')

conv1_1 = conv_2d(inp, 64, 3, activation='relu', name="conv1_1")
conv1_2 = conv_2d(conv1_1, 64, 3, activation='relu', name="conv1_2")
pool1 = max_pool_2d(conv1_2, 2, strides=2)

conv2_1 = conv_2d(pool1, 128, 3, activation='relu', name="conv2_1")
conv2_2 = conv_2d(conv2_1, 128, 3, activation='relu', name= "conv2_2")
pool2 = max_pool_2d(conv2_2, 2, strides=2)

conv3_1 = conv_2d(pool2, 256, 3, activation='relu', name="conv3_1")
conv3_2 = conv_2d(conv3_1, 256, 3, activation='relu', name="conv3_2")
conv3_3 = conv_2d(conv3_2, 256, 3, activation='relu', name="conv3_3")