Example 1
    def test_apply_activation(self):
        lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
        x = tf.constant(-0.25, tf.float32)

        with tf.Session() as sess:
            # Case 1: 'linear'
            self.assertEqual(
                sess.run(tflearn.activation(x, 'linear')),
                -0.25)

            # Case 2: 'relu'
            self.assertEqual(
                sess.run(tflearn.activation(x, 'relu')),
                0)

            # Case 3: 'leaky_relu'
            self.assertAlmostEqual(
                sess.run(tflearn.activation(x, 'leaky_relu')),
                -0.025, places=TestActivations.PLACES)

            # Case 4: 'tanh'
            self.assertAlmostEqual(
                sess.run(tflearn.activation(x, 'tanh')),
                -0.2449, places=TestActivations.PLACES)

            # Case 5: lrelu_02 (callable)
            self.assertAlmostEqual(
                sess.run(tflearn.activation(x, lrelu_02)),
                -0.05, places=TestActivations.PLACES)
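
For reference, the values asserted above can be reproduced without a TensorFlow session. This is a minimal NumPy sketch (assuming only that leaky_relu scales negative inputs by alpha, with a default alpha of 0.1 implied by the -0.025 assertion); it is not part of the original test:

import numpy as np

def leaky_relu(x, alpha=0.1):
    # Negative inputs are scaled by alpha; positive inputs pass through.
    return x if x > 0 else alpha * x

x = -0.25
print(max(x, 0.0))            # 'relu'       -> 0.0
print(leaky_relu(x))          # 'leaky_relu' -> -0.025
print(round(np.tanh(x), 4))   # 'tanh'       -> -0.2449
print(leaky_relu(x, 0.2))     # lrelu_02     -> -0.05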
Example 2
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example 3
    def run(self):

        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        # Resnet model below:  Adapted from tflearn website
        self.n = 5  # 32-layer ResNet

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
        net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
        net = tflearn.regression(net, optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                            max_checkpoints=10, tensorboard_verbose=0,
                            clip_gradients=0.)

        self.model.load('model.tfl')

        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cap = cv2.VideoCapture(0)

        # Main loop: capture the live webcam feed, crop each detected face and
        # run it through the trained model for emotion recognition
        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                self.image_processing(roi_gray, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
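
The self.image_processing helper called in the loop is not shown in this example. A hypothetical minimal sketch of such a method, based only on the 48x48x1 input and 7-class softmax defined above (names and preprocessing are assumptions, not the author's code):

import cv2
import numpy as np

def image_processing(self, roi_gray, img):
    # Resize the grayscale face crop to the 48x48x1 input expected by the
    # network, scale to [0, 1], and predict the 7-class emotion distribution.
    face = cv2.resize(roi_gray, (48, 48)).astype(np.float32) / 255.0
    face = face.reshape([1, 48, 48, 1])
    probs = self.model.predict(face)[0]
    # The original presumably also overlays the predicted label onto img.
    return int(np.argmax(probs))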
Example 4
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 3
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(Y[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest), snapshot_epoch=False,
              snapshot_step=500, show_metric=True, batch_size=batchNum, shuffle=True,
              run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example 5
File: ddpg.py Project: ataitler/DQN
    def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400, activation='relu')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)

        net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')

        # linear layer connected to 1 output representing Q(s,a) 
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
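
The two temporary layers above exist only to expose their weight matrices: the merge computes relu(net @ t1.W + action @ t2.W + t2.b). A small NumPy sketch of that shape arithmetic (batch size and a_dim are hypothetical values chosen for illustration):

import numpy as np

batch, a_dim = 5, 1                      # hypothetical sizes
net = np.random.randn(batch, 400)        # output of the 400-unit state layer
action = np.random.randn(batch, a_dim)
W1 = np.random.randn(400, 300)           # plays the role of t1.W
W2 = np.random.randn(a_dim, 300)         # plays the role of t2.W
b2 = np.zeros(300)                       # plays the role of t2.b
merged = np.maximum(net @ W1 + action @ W2 + b2, 0.0)  # the 'relu' merge
print(merged.shape)                      # (5, 300), fed into the Q(s, a) output layer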
Example 6
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example 7
 def build_residual_network(self, network, res_n=5):
     #     data_augmentation=self.generate_image_augumentation())
     network = tflearn.conv_2d(network, 16, 3, regularizer='L2',
                               weight_decay=0.0001)
     network = tflearn.residual_block(network, res_n, 16)
     network = tflearn.residual_block(network, 1, 32, downsample=True)
     network = tflearn.residual_block(network, res_n - 1, 32)
     network = tflearn.residual_block(network, 1, 64, downsample=True)
     network = tflearn.residual_block(network, res_n - 1, 64)
     network = tflearn.batch_normalization(network)
     network = tflearn.activation(network, 'relu')
     network = tflearn.global_avg_pool(network)
     # Regression
     network = tflearn.fully_connected(network, 2, activation='softmax')
     mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000,
                            staircase=True)
     network = tflearn.regression(network, optimizer=mom,
                                  loss='categorical_crossentropy')
     return network
Example 8
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    # 'n' is used by the resnext blocks below but was never defined in the
    # original snippet; a typical small depth value is assumed here.
    n = 5
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Example 9
    def create_critic_network(self):
        inputs = tf.placeholder(
            tf.float32, [None, self.s_dim[0], self.s_dim[1], self.s_dim[2]])
        action = tf.placeholder(tf.float32, [None, self.a_dim])

        net = tflearn.layers.conv.conv_2d(incoming=inputs,
                                          nb_filter=2,
                                          filter_size=[1, 3],
                                          strides=1,
                                          padding="valid",
                                          activation="relu",
                                          weights_init=self.initializer,
                                          weight_decay=0.0)

        width = net.get_shape()[2]

        net = tflearn.layers.conv.conv_2d(incoming=net,
                                          nb_filter=10,
                                          filter_size=[1, width],
                                          strides=1,
                                          padding="valid",
                                          activation="relu",
                                          weights_init=self.initializer,
                                          regularizer="L2",
                                          weight_decay=5e-09)

        net = tflearn.layers.conv.conv_2d(incoming=net,
                                          nb_filter=1,
                                          filter_size=1,
                                          padding="valid",
                                          weights_init=self.initializer,
                                          regularizer="L2",
                                          weight_decay=5e-08)

        net = tflearn.fully_connected(
            net,
            20,
            weights_init=self.initializer,
        )
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)

        # Add the action tensor
        # Two temporary layers are used to get the corresponding weights and biases
        t1 = tflearn.fully_connected(
            net,
            10,
            weights_init=self.initializer,
        )
        t2 = tflearn.fully_connected(
            action,
            10,
            weights_init=self.initializer,
        )

        net = tflearn.activation(tf.matmul(net, t1.W) +
                                 tf.matmul(action, t2.W) + t2.b,
                                 activation="relu")

        out = tflearn.fully_connected(
            net,
            1,
            weights_init=self.initializer,
        )

        return inputs, action, out
Example 10
def residual_block(incoming,
                   nb_blocks,
                   out_channels,
                   downsample=False,
                   downsample_strides=2,
                   activation='relu',
                   batch_norm=True,
                   bias=True,
                   weights_init='variance_scaling',
                   bias_init='zeros',
                   regularizer='L2',
                   weight_decay=0.0001,
                   trainable=True,
                   restore=True,
                   reuse=False,
                   scope=None,
                   name="ResidualBlock",
                   is_training=True):
    """ Residual Block.

	A residual block as described in MSRA's Deep Residual Network paper.
	Full pre-activation architecture is used here.

	Input:
		4-D Tensor [batch, height, width, in_channels].

	Output:
		4-D Tensor [batch, new height, new width, nb_filter].

	Arguments:
		incoming: `Tensor`. Incoming 4-D Layer.
		nb_blocks: `int`. Number of layer blocks.
		out_channels: `int`. The number of convolutional filters of the
			convolution layers.
		downsample: `bool`. If True, apply downsampling using
			'downsample_strides' for strides.
		downsample_strides: `int`. The strides to use when downsampling.
		activation: `str` (name) or `function` (returning a `Tensor`).
			Activation applied to this layer (see tflearn.activations).
			Default: 'relu'.
		batch_norm: `bool`. If True, apply batch normalization.
		bias: `bool`. If True, a bias is used.
		weights_init: `str` (name) or `Tensor`. Weights initialization.
			(see tflearn.initializations) Default: 'variance_scaling'.
		bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
			(see tflearn.initializations) Default: 'zeros'.
		regularizer: `str` (name) or `Tensor`. Add a regularizer to this
			layer weights (see tflearn.regularizers). Default: 'L2'.
		weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
		trainable: `bool`. If True, weights will be trainable.
		restore: `bool`. If True, this layer weights will be restored when
			loading a model.
		reuse: `bool`. If True and 'scope' is provided, this layer variables
			will be reused (shared).
		scope: `str`. Define this layer scope (optional). A scope can be
			used to share variables between layers. Note that scope will
			override name.
		name: A name for this layer (optional). Default: 'ResidualBlock'.
		is_training: True for training mode and False for val or test mode.
	References:
		- Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
			Zhang, Shaoqing Ren, Jian Sun. 2015.
		- Identity Mappings in Deep Residual Networks. Kaiming He, Xiangyu
			Zhang, Shaoqing Ren, Jian Sun. 2015.

	Links:
		- [http://arxiv.org/pdf/1512.03385v1.pdf]
			(http://arxiv.org/pdf/1512.03385v1.pdf)
		- [Identity Mappings in Deep Residual Networks]
			(https://arxiv.org/pdf/1603.05027v2.pdf)

	"""
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.variable_scope(values=[incoming],
                           name_or_scope=scope,
                           default_name=name,
                           reuse=reuse) as scope:
        name = scope.name  # TODO
        for i in range(nb_blocks):

            identity = resnet

            if not downsample:
                downsample_strides = 1

            if batch_norm:
                resnet = batch_normalization(resnet,
                                             is_training,
                                             name=name + '_bn_' + str(i) +
                                             '_1')
            resnet = tflearn.activation(resnet, activation)

            resnet = tflearn.conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)

            if batch_norm:
                resnet = batch_normalization(resnet,
                                             is_training,
                                             name=name + '_bn_' + str(i) +
                                             '_2')
            resnet = tflearn.activation(resnet, activation)

            resnet = tflearn.conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)

            # Downsampling
            if downsample_strides > 1:
                identity = tflearn.avg_pool_2d(identity, 1, downsample_strides)

            # Projection to new dimension
            '''
			if in_channels != out_channels:
				ch = (out_channels - in_channels)//2
				identity = tf.pad(identity,
								  [[0, 0], [0, 0], [0, 0], [ch, ch]])
				in_channels = out_channels
			'''
            resnet = resnet + identity

    return resnet
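
A minimal usage sketch of the block defined above. Assumptions: batch_norm=False (so the custom batch_normalization helper used inside is not needed), and out_channels equal to the input's 16 channels, since the channel-padding projection is commented out in this version and a widening block would otherwise fail at the shortcut addition:

import tflearn

x = tflearn.input_data(shape=[None, 32, 32, 16])
y = residual_block(x, nb_blocks=2, out_channels=16, batch_norm=False)
print(y.get_shape().as_list())  # [None, 32, 32, 16]: shape is preserved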
Example 11
img_prep.add_featurewise_stdnorm()

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_crop([64, 64], padding=4)


# Define network
net = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 2, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net, checkpoint_path='jun_rnn_cat_dog.tflearn',
                    max_checkpoints=1, tensorboard_verbose=3, tensorboard_dir='tmp/tflearn_logs/')
model.fit(trainX, trainY, n_epoch=100, validation_set=(testX, testY), shuffle=True,
          show_metric=True, batch_size=64, snapshot_step=200,
          snapshot_epoch=False, run_id='jun_rnn_cat_dog')
model.save('jun_rnn_cat_dog_final.tflearn')
# note: 'rnn' in the names above is a typo for 'res' (ResNet)
Example 12
    def make_core_network(net, regularizer='L2'):
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_split, 1])
        print("reshaped input", net.get_shape())
        net = tflearn.conv_1d(net, 64, 16, 2)
        #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
        print("cov1", net.get_shape())
        net = tflearn.batch_normalization(net)
        print("bn1", net.get_shape())
        net = tflearn.activation(net, 'relu')
        print("relu1", net.get_shape())

        # Residual blocks
        '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn10", net.get_shape())'''

        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True,
                                          is_first_block=True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn10", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn12", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn14", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn16", net.get_shape())
        '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn18", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn20", net.get_shape())'''

        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        #net = tflearn.global_avg_pool(net)
        # LSTM
        print("before LSTM, before reshape", net.get_shape())
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_dim // n_split, 512])
        print("before LSTM", net.get_shape())
        net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
        print("after LSTM", net.get_shape())
        #net = tflearn.layers.recurrent.lstm(net, n_units=512)
        #print("after LSTM", net.get_shape())
        net = dropout(net, 0.5)

        # Regression
        feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
        net = tflearn.dropout(feature_layer, 0.5)
        net = tflearn.fully_connected(net, 4, activation='softmax')
        print("dense", net.get_shape())
        return net, feature_layer
Example 13
def residual_bottleneck(incoming,
                        nb_blocks,
                        bottleneck_size,
                        out_channels,
                        downsample=False,
                        downsample_strides=2,
                        activation='relu',
                        batch_norm=True,
                        bias=True,
                        weights_init='variance_scaling',
                        bias_init='zeros',
                        regularizer='L2',
                        weight_decay=0.0001,
                        trainable=True,
                        restore=True,
                        name="ResidualBottleneck"):
    """ Residual Bottleneck.

    A residual bottleneck block as described in MSRA's Deep Residual Network
    paper. Full pre-activation architecture is used here.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        bottleneck_size: `int`. The number of convolutional filter of the
            bottleneck convolutional layer.
        out_channels: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'variance_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: 'L2'.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        name: A name for this layer (optional). Default: 'ResidualBottleneck'.

    References:
        - Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2015.
        - Identity Mappings in Deep Residual Networks. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        - [http://arxiv.org/pdf/1512.03385v1.pdf]
            (http://arxiv.org/pdf/1512.03385v1.pdf)
        - [Identity Mappings in Deep Residual Networks]
            (https://arxiv.org/pdf/1603.05027v2.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):

            identity = resnet

            if not downsample:
                downsample_strides = 1

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 1, downsample_strides,
                             'valid', 'linear', bias, weights_init, bias_init,
                             regularizer, weight_decay, trainable, restore)

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 3, 1, 'same', 'linear',
                             bias, weights_init, bias_init, regularizer,
                             weight_decay, trainable, restore)

            resnet = conv_2d(resnet, out_channels, 1, 1, 'valid', activation,
                             bias, weights_init, bias_init, regularizer,
                             weight_decay, trainable, restore)

            # Downsampling
            if downsample_strides > 1:
                identity = tflearn.avg_pool_2d(identity, 1, downsample_strides)

            # Projection to new dimension (zero-pad the shortcut's channels)
            if in_channels != out_channels:
                ch = (out_channels - in_channels) // 2
                identity = tf.pad(identity, [[0, 0], [0, 0], [0, 0], [ch, ch]])
                in_channels = out_channels

            # Residual connection, applied on every block
            resnet = resnet + identity
            resnet = tflearn.activation(resnet, activation)

    return resnet
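
The channel-padding projection near the end of the loop can be illustrated in isolation. A small NumPy sketch (shapes are hypothetical) of how the shortcut is zero-padded on the channel axis so it can be added to a wider output:

import numpy as np

identity = np.ones((1, 8, 8, 16), dtype=np.float32)  # hypothetical shortcut tensor
out_channels = 64
in_channels = identity.shape[-1]
ch = (out_channels - in_channels) // 2               # 24 zero channels on each side
padded = np.pad(identity, [(0, 0), (0, 0), (0, 0), (ch, ch)], mode='constant')
print(padded.shape)  # (1, 8, 8, 64): now addable to the block's output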
Example 14
    def build_net(self, X, reuse=False):
        net = tflearn.layers.conv_3d(X,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv1')
        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='linear',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv2')
        net = tflearn.layers.normalization.batch_normalization(net,
                                                               reuse=reuse,
                                                               scope='batch1')
        net = tflearn.activation(net, 'relu')

        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv3')
        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv4')

        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv5')
        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv6')
        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv7')
        net = tflearn.layers.conv_3d(net,
                                     30,
                                     3,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='conv8')

        net = tflearn.layers.conv_3d(net,
                                     300,
                                     1,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='fc1')
        net = tflearn.layers.core.dropout(net, 0.5)
        net = tflearn.layers.conv_3d(net,
                                     300,
                                     1,
                                     activation='relu',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='fc2')
        net = tflearn.layers.core.dropout(net, 0.5)
        net = tflearn.layers.conv_3d(net,
                                     2,
                                     1,
                                     activation='linear',
                                     padding='valid',
                                     reuse=reuse,
                                     scope='output')
        return net
Example 15
    def build_net(self, X, PD, reuse=False):
        # Using TFLearn wrappers for network building
        init = tflearn.initializations.xavier()
        with tf.variable_scope("level1", reuse=reuse):
            net1_bmode = tflearn.layers.conv_3d(X,
                                                4,
                                                3,
                                                activation='prelu',
                                                padding='same',
                                                regularizer='L2',
                                                reuse=reuse,
                                                scope='conv1-1-bmode',
                                                weights_init=init)
            net1_pd = tflearn.layers.conv_3d(PD,
                                             4,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv1-1-pd',
                                             weights_init=init)
            net1 = tflearn.layers.merge_ops.merge([net1_bmode, net1_pd],
                                                  axis=4,
                                                  mode='concat')
            net1 = tflearn.activation(net1, 'prelu')

        with tf.variable_scope("level2", reuse=reuse):
            net2_in = tflearn.layers.conv_3d(net1,
                                             16,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds1',
                                             weights_init=init)
            net2 = tflearn.layers.conv_3d(net2_in,
                                          16,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv2-1',
                                          weights_init=init)
            net2 = tflearn.layers.conv_3d(net2,
                                          16,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv2-2',
                                          weights_init=init)
            net2 = tflearn.activation(net2, 'prelu')
            net2 = tf.add(net2_in, net2)

        with tf.variable_scope("level3"):
            net3_in = tflearn.layers.conv_3d(net2,
                                             32,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds2',
                                             weights_init=init)
            net3 = tflearn.layers.conv_3d(net3_in,
                                          32,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-1',
                                          weights_init=init)
            net3 = tflearn.layers.conv_3d(net3,
                                          32,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-2',
                                          weights_init=init)
            net3 = tflearn.layers.conv_3d(net3,
                                          32,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-3',
                                          weights_init=init)
            net3 = tflearn.activation(net3, 'prelu')
            net3 = tf.add(net3_in, net3)

        with tf.variable_scope("level4"):
            net4_in = tflearn.layers.conv_3d(net3,
                                             64,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds3',
                                             weights_init=init)
            net4 = tflearn.layers.conv_3d(net4_in,
                                          64,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-1',
                                          weights_init=init)
            net4 = tflearn.layers.conv_3d(net4,
                                          64,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-2',
                                          weights_init=init)
            net4 = tflearn.layers.conv_3d(net4,
                                          64,
                                          3,
                                          activation='prelu',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-3',
                                          weights_init=init)
            net4 = tflearn.activation(net4, 'prelu')
            net4 = tf.add(net4_in, net4)

        with tf.variable_scope("bottom"):
            netbot_in = tflearn.layers.conv_3d(net4,
                                               128,
                                               2,
                                               strides=2,
                                               activation='prelu',
                                               padding='same',
                                               regularizer='L2',
                                               reuse=reuse,
                                               scope='ds4',
                                               weights_init=init)
            netbot = tflearn.layers.conv_3d(netbot_in,
                                            128,
                                            3,
                                            activation='prelu',
                                            padding='same',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='convbot-1',
                                            weights_init=init)
            netbot = tflearn.layers.conv_3d(netbot,
                                            128,
                                            3,
                                            activation='prelu',
                                            padding='same',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='convbot-2',
                                            weights_init=init)
            netbot = tflearn.layers.conv_3d(netbot,
                                            128,
                                            3,
                                            activation='prelu',
                                            padding='same',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='convbot-3',
                                            weights_init=init)
            netbot = tflearn.activation(netbot, 'prelu')
            netbot = tf.add(netbot_in, netbot)

        with tf.variable_scope("level4_up"):
            net4_up = tflearn.layers.conv.conv_3d_transpose(
                netbot,
                64,
                2, [net4.get_shape().as_list()[1]] * 3,
                strides=2,
                activation='prelu',
                padding='same',
                regularizer='L2',
                reuse=reuse,
                scope='trans1',
                weights_init=init)
            net4_up_concat = tflearn.layers.merge_ops.merge([net4_up, net4],
                                                            axis=4,
                                                            mode='concat')
            net4_up = tflearn.layers.conv_3d(net4_up_concat,
                                             128,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv5-1',
                                             weights_init=init)
            net4_up = tflearn.layers.conv_3d(net4_up,
                                             128,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv5-2',
                                             weights_init=init)
            net4_up = tflearn.layers.conv_3d(net4_up,
                                             128,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv5-3',
                                             weights_init=init)
            net4_up = tflearn.activation(net4_up, 'prelu')
            net4_up = tf.add(net4_up_concat, net4_up)

        with tf.variable_scope("level3_up"):
            net3_up = tflearn.layers.conv.conv_3d_transpose(
                net4_up,
                32,
                2, [net3.get_shape().as_list()[1]] * 3,
                strides=2,
                activation='prelu',
                padding='same',
                regularizer='L2',
                reuse=reuse,
                scope='trans2',
                weights_init=init)
            net3_up_concat = tflearn.layers.merge_ops.merge([net3_up, net3],
                                                            axis=4,
                                                            mode='concat')
            net3_up = tflearn.layers.conv_3d(net3_up_concat,
                                             64,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv6-1',
                                             weights_init=init)
            net3_up = tflearn.layers.conv_3d(net3_up,
                                             64,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv6-2',
                                             weights_init=init)
            net3_up = tflearn.layers.conv_3d(net3_up,
                                             64,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv6-3',
                                             weights_init=init)
            net3_up = tflearn.activation(net3_up, 'prelu')
            net3_up = tf.add(net3_up_concat, net3_up)

        with tf.variable_scope("level2_up"):
            net2_up = tflearn.layers.conv.conv_3d_transpose(
                net3_up,
                16,
                2, [net2.get_shape().as_list()[1]] * 3,
                strides=2,
                activation='prelu',
                padding='same',
                regularizer='L2',
                reuse=reuse,
                scope='trans3',
                weights_init=init)
            net2_up_concat = tflearn.layers.merge_ops.merge([net2_up, net2],
                                                            axis=4,
                                                            mode='concat')
            net2_up = tflearn.layers.conv_3d(net2_up_concat,
                                             32,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv7-1',
                                             weights_init=init)
            net2_up = tflearn.layers.conv_3d(net2_up,
                                             32,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv7-2',
                                             weights_init=init)
            net2_up = tflearn.activation(net2_up, 'prelu')
            net2_up = tf.add(net2_up_concat, net2_up)

        with tf.variable_scope("level1_up"):
            net1_up = tflearn.layers.conv.conv_3d_transpose(
                net2_up,
                8,
                2, [net1.get_shape().as_list()[1]] * 3,
                strides=2,
                activation='prelu',
                padding='same',
                regularizer='L2',
                reuse=reuse,
                scope='trans4',
                weights_init=init)
            net1_up_concat = tflearn.layers.merge_ops.merge([net1_up, net1],
                                                            axis=4,
                                                            mode='concat')
            net1_up = tflearn.layers.conv_3d(net1_up_concat,
                                             16,
                                             3,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='conv8-1',
                                             weights_init=init)
            net1_up = tflearn.activation(net1_up, 'prelu')
            net1_up = tf.add(net1_up_concat, net1_up)

        with tf.variable_scope("out"):
            net_fc1 = tflearn.layers.conv_3d(net1_up,
                                             8,
                                             1,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='fc1',
                                             weights_init=init)
            net_fc1 = tflearn.layers.normalization.batch_normalization(
                net_fc1, reuse=reuse, scope='batch_fc_1')
            net_fc1 = tflearn.activation(net_fc1, "prelu")
            net_out = tflearn.layers.conv_3d(net_fc1,
                                             2,
                                             1,
                                             activation='sigmoid',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='output',
                                             weights_init=init)

        return net_out, net1_bmode
Example 16
                        dest='test_path',
                        type=str,
                        help='Test Data Path')
    config = parser.parse_args()

    #Load Test data
    image_count = (3, 6)
    patch_count = 20
    X = generate_patches(img2numpy_arr(config.test_path), image_count,
                         patch_count)

    # Building Residual Network
    net = tl.input_data(shape=[None, 42, 42, 3])
    net = tl.conv_2d(net, 32, 3)
    net = tl.batch_normalization(net)
    net = tl.activation(net, 'relu')
    net = tl.shallow_residual_block(net, 4, 32, regularizer='L2')
    net = tl.shallow_residual_block(net,
                                    1,
                                    32,
                                    downsample=True,
                                    regularizer='L2')
    net = tl.shallow_residual_block(net, 4, 64, regularizer='L2')
    net = tl.shallow_residual_block(net,
                                    1,
                                    64,
                                    downsample=True,
                                    regularizer='L2')
    net = tl.shallow_residual_block(net, 5, 64, regularizer='L2')
    net = tl.global_avg_pool(net)
Example no. 17
0
def get_deep_mimic_feats(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 4, downsample=True)
    print("resn4", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 4, downsample=True)
    # print("resn6", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 4, downsample=True)
    # print("resn8", net.get_shape())


    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    print("before reshape", net.get_shape())

    # net = tf.reshape(net, [-1, n_dim//n_split*net.get_shape()[-2], net.get_shape()[-1]])
    # LSTM
    ############ reshape for sub_seq 
    before_reshaped_shape = net.get_shape().as_list()
    net = tf.reshape(net, [-1, n_dim//n_split, before_reshaped_shape[1]*before_reshaped_shape[2]])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(64), BasicLSTMCell(64))
    print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(net, optimizer='adam', loss='mean_square')


    # Training
    model = tflearn.DNN(net)
    model.load('../model/mimic/mimic_model_offline_v4.1')

    pred = []
    num_of_test = len(test_data)
    cur_data = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i !=0:
            tmp_testX = np.array(cur_data, dtype=np.float32)
            pred.extend(model.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
    
    return pred
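
A minimal, hypothetical driver for get_deep_mimic_feats, assuming the function above is importable, its checkpoint '../model/mimic/mimic_model_offline_v4.1' exists, and shown only to illustrate the expected input shape:

import numpy as np

# Each record is a flat 6000-sample signal; the function reshapes batches to
# [-1, 6000, 1] internally and flushes a prediction batch roughly every 2000 records.
dummy_records = np.random.randn(4096, 6000).astype(np.float32)
softmax_outputs = get_deep_mimic_feats(dummy_records)
print(len(softmax_outputs), len(softmax_outputs[0]))  # 4096 predictions, 4 classes each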
Example no. 18
0
 def _build_network(self, layers):
     network = tf.transpose(self.input_tensor, [0, 2, 3, 1])
     # [batch, assets, window, features]
     network = network / network[:, :, -1, 0, None, None]
     for layer_number, layer in enumerate(layers):
         if layer["type"] == "DenseLayer":
             network = tflearn.layers.core.fully_connected(network,
                                                           int(layer["neuron_number"]),
                                                           layer["activation_function"],
                                                           regularizer=layer["regularizer"],
                                                           weight_decay=layer["weight_decay"] )
             self.add_layer_to_dict(layer["type"], network)
         elif layer["type"] == "Activation":
             network = tflearn.activation(network, activation=layer["activation_type"])
         elif layer["type"] == "DropOut":
             network = tflearn.layers.core.dropout(network, layer["keep_probability"])
         elif layer["type"] == "EIIE_Dense":
             width = network.get_shape()[2]
             network = tflearn.layers.conv_2d(network, int(layer["filter_number"]),
                                              [1, width],
                                              [1, 1],
                                              "valid",
                                              layer["activation_function"],
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
         elif layer["type"] == "ConvLayer":
             network = tflearn.layers.conv_2d(network, int(layer["filter_number"]),
                                              allint(layer["filter_shape"]),
                                              allint(layer["strides"]),
                                              layer["padding"],
                                              layer["activation_function"],
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
         elif layer["type"] == "DilatedConvLayer":
             network = tflearn.layers.conv.atrous_conv_2d(network, int(layer["filter_number"]),
                                              allint(layer["filter_shape"]),
                                              int(layer["rate"]),
                                              layer["padding"],
                                              layer["activation_function"],
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
         elif layer["type"] == "MaxPooling":
             network = tflearn.layers.conv.max_pool_2d(network, layer["strides"])
         elif layer["type"] == "AveragePooling":
             network = tflearn.layers.conv.avg_pool_2d(network, layer["strides"])
         elif layer["type"] == "LocalResponseNormalization":
             network = tflearn.layers.normalization.local_response_normalization(network)
         elif layer["type"] == "BatchNormalization":
             #network = tf.nn.batch_normalization(network, 0.0, 1.0,0.0,0.0,0.0)
             network = tflearn.layers.normalization.batch_normalization(network, trainable=False)
             self.add_layer_to_dict(layer["type"], network)
         elif layer["type"] == "ResidualTCN":
             #From "Probabilistic Forecasting with Temporal Convolutional Neural Network"
             #Originally used for retail sales prediction
             start = network
             network = tflearn.layers.conv.atrous_conv_2d(network, int(layer["filter_number"]),
                                              allint(layer["filter_shape"]),
                                              int(layer["rate"]),
                                              "same",
                                              layer["activation_function"],
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             network = tflearn.layers.core.dropout(network, layer["keep_probability"])
             network = tflearn.layers.normalization.batch_normalization(network)  
             network = tflearn.activation(network, activation="ReLU")
             network = tflearn.layers.conv.atrous_conv_2d(network, int(layer["filter_number"]),
                                              allint(layer["filter_shape"]),
                                              int(layer["rate"]),
                                              "same",
                                              layer["activation_function"],
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             network = tflearn.layers.core.dropout(network, layer["keep_probability"])
             network = tflearn.layers.normalization.batch_normalization(network) 
             network = network + start
         elif layer["type"] == "EIIE_Output":
             width = network.get_shape()[2]
             network = tflearn.layers.conv_2d(network, 1, [1, width], padding="valid",
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
             network = network[:, :, 0, 0]
             btc_bias = tf.ones((self.input_num, 1))
             self.add_layer_to_dict(layer["type"], network)
             network = tf.concat([btc_bias, network], 1)
             network = tflearn.layers.core.activation(network, activation="softmax")
             #network = (network)*2
             self.add_layer_to_dict(layer["type"], network, weights=False)
         elif layer["type"] == "EIIE_ShortSell_Reinvest":
             borrowamount = 1
             width = network.get_shape()[2]
             long = tflearn.layers.conv_2d(network, 1, [1, width], padding="valid",
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             short = tflearn.layers.conv_2d(network, 1, [1, width], padding="valid",
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             #reinvest = tflearn.layers.conv_2d(network, 1, [1, width], padding="valid",
             #                                 regularizer=layer["regularizer"],
             #                                 weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
             long = long[:, :, 0, 0]
             short = short[:, :, 0, 0]
             #reinvest = reinvest[:, :, 0, 0]
             btc_bias = tf.ones((self.input_num, 1))
             self.add_layer_to_dict(layer["type"], network)
             long = tf.concat([btc_bias, long], 1)
             #short = tf.concat([btc_bias, short], 1)
             #reinvest = tf.concat([btc_bias, reinvest], 1)
             
             long = tflearn.layers.core.activation(long, activation="softmax")
             short = tflearn.layers.core.activation(short, activation="softmax")
             short = tf.concat([short, tf.zeros((self.input_num, 1))], 1)
            # reinvest = tflearn.layers.core.activation(reinvest, activation="softmax")
             
             network = (1+layer["borrow_amount"])*long - short*layer["borrow_amount"]
             #network = (network)*2
             self.add_layer_to_dict(layer["type"], network, weights=False)
             
         elif layer["type"] == "Output_WithW":
             network = tflearn.flatten(network)
             network = tf.concat([network,self.previous_w], axis=1)
             network = tflearn.fully_connected(network, self._rows+1,
                                               activation="softmax",
                                               regularizer=layer["regularizer"],
                                               weight_decay=layer["weight_decay"])
         elif layer["type"] == "CNN_LSTM":
             network = tf.transpose(network, [0, 2, 3, 1])
             resultlist = []
             reuse = False
             for i in range(self._rows):
                 if i > 0:
                     reuse = True
                 result = tflearn.layers.simple_rnn(network[:, :, :, i],
                                                  int(layer["neuron_number"]),
                                                  dropout=0,
                                                  scope="lstm"+str(layer_number),
                                                  reuse=reuse)
                 resultlist.append(result)
             network = tf.stack(resultlist)
             network = tf.transpose(network, [1, 0, 2])
             network = tf.reshape(network, [-1, self._rows, 1, int(layer["neuron_number"])])                    
                 
         elif layer["type"] == "TCCBlock":
             start = network 
             """tflearn.layers.conv_2d(network, int(layer["filter_number"]), 
                                                          [1,1],
                                                          padding="same",
                                                          activation="ReLU",
                                                          weight_decay=0.0)"""
             network = tflearn.layers.conv.atrous_conv_2d(network, 
                                                          int(layer["filter_number"]), 
                                                          [1,3], 
                                                          int(layer["dilation_rate"]), 
                                                          padding="same",
                                                          activation="ReLU",
                                                          regularizer=layer["regularizer"],
                                                          weight_decay=0)
             network = tflearn.layers.normalization.local_response_normalization(network)
             network = tflearn.activation(network, activation="ReLU")
             network = tflearn.layers.dropout(network, layer["keep_prob"])
             network = tflearn.layers.conv.atrous_conv_2d(network, 
                                                          int(layer["filter_number"]),
                                                          [1,3], 
                                                          int(layer["dilation_rate"]), 
                                                          padding="same",
                                                          activation="ReLU",
                                                          regularizer=layer["regularizer"],
                                                          weight_decay=0)
             network = tflearn.layers.normalization.local_response_normalization(network)
             network = tflearn.activation(network, activation="ReLU")
             network = tflearn.layers.dropout(network, layer["keep_prob"])
             network = tflearn.layers.conv.conv_2d(network, 
                                                       int(layer["filter_number"]), 
                                                       [self._rows, 1], 
                                                       padding="same",
                                                       activation="ReLU",
                                                       regularizer=layer["regularizer"],
                                                       weight_decay=0)
             network = tflearn.activation(network, activation="ReLU")
             network = tflearn.layers.dropout(network, layer["keep_prob"])
             
             network = tf.concat([network, start], axis=3)
             network = tflearn.layers.conv_2d(network, int(layer["filter_number"]), 
                                                          [1,1],
                                                          padding="same",
                                                          activation="ReLU",
                                                          weight_decay=0)
             #network = tflearn.activation(network+start, activation="ReLU")
             self.add_layer_to_dict(layer["type"], network)
             
         elif layer["type"] == "EIIE_Output_WithW":
             width = network.get_shape()[2]
             height = network.get_shape()[1]
             features = network.get_shape()[3]
             network = tf.reshape(network, [self.input_num, int(height), 1, int(width*features)])
             w = tf.reshape(self.previous_w, [-1, int(height), 1, 1])
             network = tf.concat([network, w], axis=3)
             network = tflearn.layers.conv_2d(network, 1, [1, 1], padding="valid",
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
             network = network[:, :, 0, 0]
             #btc_bias = tf.zeros((self.input_num, 1))
             btc_bias = tf.get_variable("btc_bias", [1, 1], dtype=tf.float32,
                                    initializer=tf.zeros_initializer)
             # self.add_layer_to_dict(layer["type"], network, weights=False)
             btc_bias = tf.tile(btc_bias, [self.input_num, 1])
             network = tf.concat([btc_bias, network], 1)
             self.voting = network
             self.add_layer_to_dict('voting', network, weights=False)
             network = tflearn.layers.core.activation(network, activation="softmax")
             self.add_layer_to_dict('softmax_layer', network, weights=False)
             
         elif layer["type"] == "EIIE_ShortSell_NoReinvest":
             network = tflearn.layers.conv_2d(network, 1, [1, 1], padding="valid",
                                              regularizer=layer["regularizer"],
                                              weight_decay=layer["weight_decay"])
             self.add_layer_to_dict(layer["type"], network)
             network = network[:, :, 0, 0]
             #btc_bias = tf.zeros((self.input_num, 1))
             btc_bias = tf.get_variable("btc_bias", [1, 1], dtype=tf.float32,
                                    initializer=tf.zeros_initializer)
             # self.add_layer_to_dict(layer["type"], network, weights=False)
             btc_bias = tf.tile(btc_bias, [self.input_num, 1])
             network = tf.concat([btc_bias, network], 1)
             self.voting = network
             self.add_layer_to_dict('voting', network, weights=False)
             avg = tfp.stats.percentile(network, layer["short_percentile"])
             sub = network-avg
             normalized = tf.math.abs(sub)
             sign = tf.math.sign(sub)
             network = tf.nn.softmax(normalized)*sign
             #network = tflearn.layers.core.activation(network, activation="softmax")
             self.add_layer_to_dict('softmax_layer', network, weights=False)
         elif layer["type"] == "EIIE_LSTM" or\
                         layer["type"] == "EIIE_RNN":
             network = tf.transpose(network, [0, 2, 3, 1])
             resultlist = []
             reuse = False
             for i in range(self._rows):
                 if i > 0:
                     reuse = True
                 if layer["type"] == "EIIE_LSTM":
                     result = tflearn.layers.lstm(network[:, :, :, i],
                                                  int(layer["neuron_number"]),
                                                  dropout=layer["dropouts"],
                                                  scope="lstm"+str(layer_number),
                                                  reuse=reuse)
                 else:
                     result = tflearn.layers.simple_rnn(network[:, :, :, i],
                                                        int(layer["neuron_number"]),
                                                        dropout=layer["dropouts"],
                                                        scope="rnn"+str(layer_number),
                                                        reuse=reuse)
                 resultlist.append(result)
             network = tf.stack(resultlist)
             network = tf.transpose(network, [1, 0, 2])
             network = tf.reshape(network, [-1, self._rows, 1, int(layer["neuron_number"])])
         else:
             raise ValueError("layer type {} is not supported.".format(layer["type"]))
     return network
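
For reference, a layer configuration that would exercise several of the branches above might look roughly like the sketch below; the key names are taken from the branches in _build_network, while the concrete values are illustrative placeholders rather than the project's real defaults:

# Hypothetical `layers` argument for _build_network (values are placeholders).
layers = [
    {"type": "ConvLayer", "filter_number": 3, "filter_shape": [1, 2],
     "strides": [1, 1], "padding": "valid", "activation_function": "relu",
     "regularizer": "L2", "weight_decay": 5e-9},
    {"type": "EIIE_Dense", "filter_number": 10, "activation_function": "relu",
     "regularizer": "L2", "weight_decay": 5e-9},
    {"type": "DropOut", "keep_probability": 0.8},
    {"type": "EIIE_Output_WithW", "regularizer": "L2", "weight_decay": 5e-8},
]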
Example no. 19
0
import tensorflow as tf
import tflearn
import random
import numpy as np

#X = [[random.random(),random.random()] for x in range(1000)]
X = np.random.random([10,4,4,2])
Y = [[0] for x in X]

g1 = tf.Graph()
g2 = tf.Graph()

with g1.as_default():
  input_layer = tflearn.input_data(shape=[None, 4, 4, 2])
  net = tflearn.conv_2d(input_layer, 256, 3, activation=None)
  net = tflearn.batch_normalization(net)
  net = tflearn.activation(net, activation='relu')
  # block 2
  tmp = tflearn.conv_2d(net, 256, 3, activation=None)
  tmp = tflearn.batch_normalization(tmp)
  tmp = tflearn.activation(tmp, activation='relu')
  tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
  tmp = tflearn.batch_normalization(tmp)
  net = tflearn.activation(net + tmp, activation='relu')
  final = tflearn.fully_connected(net, 1, activation='tanh')
  sgd = tflearn.optimizers.SGD(learning_rate=0.01, lr_decay=0.95, decay_step=200000)
  regression = tflearn.regression(final, optimizer=sgd, loss='mean_square',  metric='R2')
  m = tflearn.DNN(regression)

with g2.as_default():
  input_layer = tflearn.input_data(shape=[None, 4, 4, 2])
  net = tflearn.conv_2d(input_layer, 128, 3, activation=None)
Example no. 20
0
	def _create_nn(self):
		with tf.variable_scope(self.scope):
			# type, arrival, progress, resource
			input = tflearn.input_data(shape=[None, self.state_dim[0], self.state_dim[1]], name="input") # row is info type, column is job

			if pm.JOB_CENTRAL_REPRESENTATION or pm.ATTRIBUTE_CENTRAL_REPRESENTATION:
				if pm.JOB_CENTRAL_REPRESENTATION:
					fc_list = []
					for i in range(self.state_dim[1]):
						if pm.FIRST_LAYER_TANH:
							fc1 = tflearn.fully_connected(input[:, :, i], self.state_dim[0], activation="tanh", name="job_" + str(i))
						else:
							fc1 = tflearn.fully_connected(input[:, :, i], self.state_dim[0], activation="relu", name="job_"+str(i))
						if pm.BATCH_NORMALIZATION:
							fc1 = tflearn.batch_normalization(fc1, name="job_"+str(i)+"_bn")
						fc_list.append(fc1)
				else:
					j = 0
					fc_list = []
					for (key, enable) in pm.INPUTS_GATE:  # INPUTS_GATE=[("TYPE",True), ("STAY",False), ("PROGRESS",False), ("DOM_RESR",False), ("WORKERS",True)]
						if enable:
							if pm.FIRST_LAYER_TANH:
								fc1 = tflearn.fully_connected(input[:, j], pm.SCHED_WINDOW_SIZE, activation="tanh", name=key)
							else:
								fc1 = tflearn.fully_connected(input[:, j], pm.SCHED_WINDOW_SIZE, activation="relu", name=key)
							if pm.BATCH_NORMALIZATION:
								fc1 = tflearn.batch_normalization(fc1, name=key+"_bn")
							fc_list.append(fc1)
							j += 1
				if len(fc_list) == 1:
					merge_net = fc_list[0]
					if pm.BATCH_NORMALIZATION:
						merge_net = tflearn.batch_normalization(merge_net)
				else:
					merge_net = tflearn.merge(fc_list, 'concat', name="merge_net_1")
					if pm.BATCH_NORMALIZATION:
						merge_net = tflearn.batch_normalization(merge_net, name="merge_net_1_bn")
				dense_net_1 = tflearn.fully_connected(merge_net, pm.NUM_NEURONS_PER_FCN, activation='relu', name='dense_net_1')
			else:
				dense_net_1 = tflearn.fully_connected(input, pm.NUM_NEURONS_PER_FCN, activation='relu', name='dense_net_1')
			if pm.BATCH_NORMALIZATION:
				dense_net_1 = tflearn.batch_normalization(dense_net_1, name='dense_net_1_bn')

			for i in range(1, pm.NUM_FCN_LAYERS):
				dense_net_1 = tflearn.fully_connected(dense_net_1, pm.NUM_NEURONS_PER_FCN, activation='relu', name='dense_net_' + str(i + 1))
				if pm.BATCH_NORMALIZATION:
					dense_net_1 = tflearn.batch_normalization(dense_net_1, name='dense_net_' + str(i + 1) + 'bn')

			if pm.JOB_CENTRAL_REPRESENTATION and pm.NN_SHORTCUT_CONN:  # add shortcut the last layer
				fc2_list = []
				for fc in fc_list:
					merge_net_2 = tflearn.merge([fc, dense_net_1], 'concat')
					if pm.PS_WORKER:
						if pm.BUNDLE_ACTION:
							fc2 = tflearn.fully_connected(merge_net_2, 3, activation='linear')
						else:
							fc2 = tflearn.fully_connected(merge_net_2, 2, activation='linear')
					else:
						fc2 = tflearn.fully_connected(merge_net_2, 1, activation='linear')
					fc2_list.append(fc2)

				if pm.SKIP_TS:
					fc2 = tflearn.fully_connected(dense_net_1, 1, activation='linear')
					fc2_list.append(fc2)
				merge_net_3 = tflearn.merge(fc2_list, 'concat')
				output = tflearn.activation(merge_net_3, activation="softmax", name="policy_output")
			else:
				output = tflearn.fully_connected(dense_net_1, self.action_dim, activation="softmax", name="policy_output")
			return input, output
Example no. 21
0
def get_resnet_feature(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_dim//n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(net, 0.5)
    # net, feature_layer = tflearn.fully_connected(net, 4, activation='softmax', return_logit = True)
    feature_layer = tflearn.fully_connected(net, 4, activation='softmax')
    print('feature_layer: ', feature_layer.get_shape())
    print("dense", net.get_shape())
    net = tflearn.regression(net, optimizer='adam',#momentum',
                             loss='categorical_crossentropy')
                             #,learning_rate=0.1)
    print('final output: ', net.get_shape())
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/'+run_id)
    
    # print(tflearn.variables.get_all_variables())

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    tmp_feature = []
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i !=0:
            #tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
            tmp_testX = np.array(cur_data, dtype=np.float32)
            tmp_feature.extend(m2.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
            pre.extend(model.predict(tmp_testX))
            print(i, len(tmp_feature), len(tmp_feature[0]))

    tmp_feature = np.array(tmp_feature)

    return tmp_feature
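
The interesting pattern above is the second tflearn.DNN built on an intermediate layer while sharing the first model's session, which turns a trained classifier into a feature extractor. A self-contained toy version of just that trick (made-up layer sizes, no checkpoint) could look like:

import numpy as np
import tensorflow as tf
import tflearn

tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 16])
hidden = tflearn.fully_connected(net, 8, activation='relu')     # layer to expose
out = tflearn.fully_connected(hidden, 2, activation='softmax')
out = tflearn.regression(out, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(out)
# model.load('some_checkpoint')  # in the snippet above the weights come from disk

# Re-use the same session so the second DNN evaluates the already-built graph
# only up to `hidden`, returning 8-dimensional feature vectors.
features = tflearn.DNN(hidden, session=model.session)
feature_vectors = features.predict(np.random.rand(3, 16))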
Example no. 22
0
def deep_bottleneck(incoming,
                    nb_layers,
                    nb_filter,
                    bottlenet_size,
                    activation='relu',
                    batch_norm=True,
                    bias=False,
                    weights_init='uniform_scaling',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    name="DeepBottleneck"):
    """ Deep Bottleneck.

    As described in MSRA's Deep Residual Network paper.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_layers: `int`. Number of layer blocks.
        nb_filter: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        bottlenet_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'DeepBottleneck'.

    """
    resnet = incoming
    # Build the bottleneck block layers. This block doesn't need its own
    # `build inference` step like other layers, because it only composes
    # pre-existing layers and doesn't define any new ops.
    with tf.name_scope(name):
        for i in range(nb_layers):
            with tf.name_scope('ResidualLayer'):
                with tf.name_scope("in"):
                    residual = conv_2d(resnet, nb_filter, 1, 1, 'valid',
                                       'linear', bias, weights_init, bias_init,
                                       regularizer, weight_decay, trainable,
                                       restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("bottleneck"):
                    residual = conv_2d(residual, bottlenet_size, 3, 1, 'same',
                                       'linear', bias, weights_init, bias_init,
                                       regularizer, weight_decay, trainable,
                                       restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("out"):
                    residual = conv_2d(residual, nb_filter, 1, 1, 'valid',
                                       'linear', bias, weights_init, bias_init,
                                       regularizer, weight_decay, trainable,
                                       restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)

                resnet = resnet + residual

    return resnet
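
A short usage sketch for deep_bottleneck, assuming the same `from tflearn.layers.conv import conv_2d` import that the function body above relies on; the layer sizes here are arbitrary:

import tflearn
from tflearn.layers.conv import conv_2d

net = tflearn.input_data(shape=[None, 32, 32, 3])
net = conv_2d(net, 64, 3, activation='relu')
# two residual layer blocks: 64 outer filters around a 16-filter bottleneck
net = deep_bottleneck(net, nb_layers=2, nb_filter=64, bottlenet_size=16,
                      batch_norm=True, regularizer='L2')
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)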
Example no. 23
0
#network = local_response_normalization(network)
network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.batch_normalization(network)
#network = local_response_normalization(network)
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.batch_normalization(network)
#network = local_response_normalization(network)
network = fully_connected(network, 4096)
network = tflearn.batch_normalization(network)
network = dropout(network, 0.5)
network = tflearn.activation(network, activation='tanh')
network = fully_connected(network, 2048)
network = dropout(network, 0.5)
network = tflearn.activation(network, activation='tanh')
network = fully_connected(
    network, 50, activation='softmax')  #50 means the number of speakers
network = regression(network,
                     optimizer='Adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

# Training
model = tflearn.DNN(network,
                    checkpoint_path='model_alexnet',
                    max_checkpoints=1,
                    tensorboard_verbose=2)
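
A hypothetical training call to go with the model above; X and Y stand in for whatever feature batches and one-hot speaker labels the surrounding script prepares (they are not shown in the snippet):

# X: input feature batches, Y: one-hot labels over the 50 speakers (placeholders).
model.fit(X, Y,
          n_epoch=10,
          validation_set=0.1,
          shuffle=True,
          show_metric=True,
          batch_size=64,
          snapshot_epoch=True,
          run_id='speaker_alexnet')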
Example no. 24
0
    def _build_model(self):
        """
        Builds the Tensorflow graph.
        """

        # Placeholders for our input
        # Our input is a stack of 4 grayscale frames of shape 84, 84 each
        self.X_pl = tf.placeholder(shape=[None, 84, 84, 4],
                                   dtype=tf.uint8,
                                   name="X")
        # The TD target value
        self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name="y")
        # Integer id of which action was selected
        self.actions_pl = tf.placeholder(shape=[None],
                                         dtype=tf.int32,
                                         name="actions")

        X = tf.to_float(self.X_pl) / 255.0
        batch_size = tf.shape(self.X_pl)[0]

        # Three convolutional layers
        features = tflearn.conv_2d(X,
                                   32,
                                   8,
                                   strides=4,
                                   activation='relu',
                                   name='conv1')
        features = tflearn.conv_2d(features,
                                   64,
                                   4,
                                   strides=2,
                                   activation='relu',
                                   name='conv2')
        features = tflearn.conv_2d(features,
                                   64,
                                   3,
                                   strides=1,
                                   activation='relu',
                                   name='conv3')

        # rnn
        features_rnn = tflearn.layers.core.flatten(features)
        fc1 = tflearn.fully_connected(features_rnn, 64)
        fc2 = tflearn.fully_connected(fc1, 32)
        fc_fb = tflearn.fully_connected(fc2, 64)

        net = tflearn.activation(tf.matmul(features_rnn, fc1.W) + fc1.b,
                                 activation='relu')
        for i in range(PLAN_LAYERS - 1):
            net = tflearn.activation(tf.matmul(net, fc2.W) + fc2.b,
                                     activation='relu')
            net = tflearn.activation(tf.matmul(net, fc_fb.W) +
                                     tf.matmul(features_rnn, fc1.W) + fc_fb.b +
                                     fc1.b,
                                     activation='relu')
        net = tflearn.activation(tf.matmul(net, fc2.W) + fc2.b,
                                 activation='relu')

        # Fully connected layers
        net = tflearn.fully_connected(net, 512)
        self.predictions = tflearn.fully_connected(net, len(VALID_ACTIONS))

        # Get the predictions for the chosen actions only
        gather_indices = tf.range(batch_size) * tf.shape(
            self.predictions)[1] + self.actions_pl
        self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]),
                                            gather_indices)

        # Calculate the loss
        self.losses = tf.squared_difference(self.y_pl, self.action_predictions)
        self.loss = tf.reduce_mean(self.losses)

        # Optimizer Parameters from original paper
        self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
        self.train_op = self.optimizer.minimize(
            self.loss, global_step=tf.contrib.framework.get_global_step())

        # Summaries for Tensorboard
        self.summaries = tf.summary.merge([
            tf.summary.scalar("loss", self.loss),
            tf.summary.histogram("loss_hist", self.losses),
            tf.summary.histogram("q_values_hist", self.predictions),
            tf.summary.scalar("max_q_value", tf.reduce_max(self.predictions))
        ])
Example no. 25
0
    def _create_critic(self, prefix=''):
        inputs_shape = [None] + [i for i in self.i_dim]
        inputs = tflearn.input_data(shape=inputs_shape)

        action_shape = [None] + [i for i in self.a_dim]
        action = tflearn.input_data(shape=action_shape)

        layer = inputs
        for i in range(self.num_hidden_layers):
            weights_init = tflearn.initializations.uniform(
                minval=-1 / sqrt(self.layer_size[i]),
                maxval=1 / sqrt(self.layer_size[i]))

            if 'dropout' in self.layer_other[i + 1]:
                dropout = self.dropout
            else:
                dropout = None

            if self.layer_type[i + 1] == 'fc':
                new_layer = tflearn.fully_connected(layer,
                                                    self.layer_size[i + 1],
                                                    name="{}Layer{}".format(
                                                        prefix, i),
                                                    weights_init=weights_init)
            elif self.layer_type[i + 1] == 'rnn':
                new_layer = tflearn.simple_rnn(layer,
                                               self.layer_size[i + 1],
                                               name="{}Layer{}".format(
                                                   prefix, i),
                                               weights_init=weights_init,
                                               return_seq=False,
                                               activation='linear',
                                               dropout=dropout,
                                               dynamic=True)
            else:
                raise ValueError('Unsupported layer {}'.format(i))

            if i == self.num_hidden_layers - 2:  # last layer is actor
                break

            if self.batch_norm:
                new_layer = tflearn.layers.normalization.batch_normalization(
                    new_layer, name="{}Layer{}_norm".format(prefix, i))

            if self.layer_activation[i + 1] == 'linear':
                new_layer = tflearn.activations.linear(new_layer)
            elif self.layer_activation[i + 1] == 'relu':
                new_layer = tflearn.activations.relu(new_layer)
            elif self.layer_activation[i + 1] == 'tanh':
                new_layer = tflearn.activations.tanh(new_layer)
            elif self.layer_activation[i + 1] == 'sigmoid':
                new_layer = tflearn.activations.sigmoid(new_layer)

            if i < self.num_hidden_layers - 1:
                layer = new_layer

        action_init = tflearn.initializations.uniform(
            minval=-1 / sqrt(self.layer_size[-3]),
            maxval=1 / sqrt(self.layer_size[-3]))
        if self.layer_type[-1] == 'fc':
            action_layer = tflearn.fully_connected(
                action,
                self.layer_size[-1],
                name="{}LayerAction".format(prefix),
                weights_init=action_init)
        else:
            raise ValueError('Unsupported actor layer')

        if self.layer_activation[-1] == 'relu':
            net = tflearn.activation(tf.matmul(layer, new_layer.W) +
                                     tf.matmul(action, action_layer.W) +
                                     action_layer.b,
                                     activation='relu')
        else:
            raise ValueError('Unsupported actor activation')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        new_layer = tflearn.fully_connected(net,
                                            1,
                                            weights_init=w_init,
                                            name="{}Output".format(prefix))

        return inputs, action, new_layer
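
For context, the Q-head above follows the common DDPG-style trick of merging the state and action streams by multiplying against each layer's weight matrices directly. A standalone toy version of just that merge (sizes made up, independent of this class) might look like:

import tensorflow as tf
import tflearn

state = tflearn.input_data(shape=[None, 8])
action = tflearn.input_data(shape=[None, 2])
s_fc = tflearn.fully_connected(state, 32)
a_fc = tflearn.fully_connected(action, 32)
# combine both linear projections before the shared nonlinearity,
# mirroring the 'relu' branch in _create_critic above
merged = tflearn.activation(tf.matmul(state, s_fc.W) +
                            tf.matmul(action, a_fc.W) + a_fc.b,
                            activation='relu')
q_value = tflearn.fully_connected(merged, 1)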
Example no. 26
0
def get_deep_centerwave_feature(test_data):

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    _, n_dim = test_data.shape

    ############################### model
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    net = tflearn.avg_pool_1d(net, kernel_size=5, strides=5)
    print("avg_pool_1d", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn6", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    # print("resn8", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    # print("resn10", net.get_shape())

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    print("feature_layer", feature_layer.get_shape())
    net = feature_layer
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(net,
                             optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.01)
    ###############################

    ### load
    model = tflearn.DNN(net)
    model.load(
        '../model/model_deep_centerwave_0810_all/model_deep_centerwave_resnet')

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    out_feature = []
    pred = []
    num_of_test = len(test_data)
    for i in range(num_of_test):
        tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
        out_feature.append(m2.predict(tmp_test_data)[0])
        # pred.append(model.predict(tmp_test_data)[0])

    out_feature = np.array(out_feature)

    # ### eval
    # print(len(pred), pred[0], all_label[0])
    # MyEval.F1Score3_num(pred, all_label[:num_of_test])

    return out_feature
Example no. 27
0
    def build_net(self, X, reuse=False):
        # Using TFLearn wrappers for network building
        init = tflearn.initializations.xavier()
        with tf.variable_scope("level0_0", reuse=reuse):
            net0_0 = self.conv_layer(X, 8, '0_0', reuse, init)
            pool0_0 = tflearn.layers.conv.max_pool_3d(net0_0, 2, strides=2)

        with tf.variable_scope("level1_0", reuse=reuse):
            net1_0 = self.conv_layer(pool0_0, 16, '1_0', reuse, init)
            pool1_0 = tflearn.layers.conv.max_pool_3d(net1_0, 2, strides=2)

        with tf.variable_scope("level0_1", reuse=reuse):
            up0_1 = self.deconv_layer(net1_0, net0_0, 8, '1_2', reuse, init)
            net0_1 = tflearn.layers.merge_ops.merge([up0_1, net0_0],
                                                    axis=4,
                                                    mode='concat')
            net0_1 = self.conv_layer(net0_1, 8, '1_2', reuse, init, True)

        with tf.variable_scope("level2_0"):
            net2_0 = self.conv_layer(pool1_0, 32, '4', reuse, init)
            pool2_0 = tflearn.layers.conv.max_pool_3d(net2_0, 2, strides=2)

        with tf.variable_scope("level1_1", reuse=reuse):
            up1_1 = self.deconv_layer(net2_0, net1_0, 16, '2_2', reuse, init)
            net1_1 = tflearn.layers.merge_ops.merge([up1_1, net1_0],
                                                    axis=4,
                                                    mode='concat')
            net1_1 = self.conv_layer(net1_1, 16, '2_2', reuse, init, True)

        with tf.variable_scope("level0_2", reuse=reuse):
            up0_2 = self.deconv_layer(net1_1, net0_1, 8, '1_3', reuse, init)
            net0_2 = tflearn.layers.merge_ops.merge([up0_2, net0_0, net0_1],
                                                    axis=4,
                                                    mode='concat')
            net0_2 = self.conv_layer(net0_2, 8, '1_3', reuse, init, True)

        with tf.variable_scope("level3_0"):
            net3_0 = self.conv_layer(pool2_0, 64, '3_0', reuse, init)
            pool3_0 = tflearn.layers.conv.max_pool_3d(net3_0, 2, strides=2)

        with tf.variable_scope("level2_1", reuse=reuse):
            up2_1 = self.deconv_layer(net3_0, net2_0, 32, '2_1', reuse, init)
            net2_1 = tflearn.layers.merge_ops.merge([up2_1, net2_0],
                                                    axis=4,
                                                    mode='concat')
            net2_1 = self.conv_layer(net2_1, 32, '2_1', reuse, init, True)

        with tf.variable_scope("level1_2", reuse=reuse):
            up1_2 = self.deconv_layer(net2_1, net1_1, 16, '1_2', reuse, init)
            net1_2 = tflearn.layers.merge_ops.merge([up1_2, net1_0, net1_1],
                                                    axis=4,
                                                    mode='concat')
            net1_2 = self.conv_layer(net1_2, 16, '1_2', reuse, init, True)

        with tf.variable_scope("level0_3", reuse=reuse):
            up0_3 = self.deconv_layer(net1_2, net0_2, 8, '0_3', reuse, init)
            net0_3 = tflearn.layers.merge_ops.merge(
                [up0_3, net0_0, net0_1, net0_2], axis=4, mode='concat')
            net0_3 = self.conv_layer(net0_3, 8, '0_3', reuse, init, True)

        with tf.variable_scope("level4_0"):
            net4_0 = self.conv_layer(pool3_0, 128, '4_0', reuse, init)

        with tf.variable_scope("level3_1"):
            up3_1 = self.deconv_layer(net4_0, net3_0, 64, '3_1', reuse, init)
            net3_1 = tflearn.layers.merge_ops.merge([up3_1, net3_0],
                                                    axis=4,
                                                    mode='concat')
            net3_1 = self.conv_layer(net3_1, 64, '3_1', reuse, init, True)

        with tf.variable_scope("level2_2"):
            up2_2 = self.deconv_layer(net3_1, net2_1, 32, '2_2', reuse, init)
            net2_2 = tflearn.layers.merge_ops.merge([up2_2, net2_0, net2_1],
                                                    axis=4,
                                                    mode='concat')
            net2_2 = self.conv_layer(net2_2, 32, '2_2', reuse, init, True)

        with tf.variable_scope("level1_3"):
            up1_3 = self.deconv_layer(net2_2, net1_2, 16, '1_3', reuse, init)
            net1_3 = tflearn.layers.merge_ops.merge(
                [up1_3, net1_0, net1_1, net1_2], axis=4, mode='concat')
            net1_3 = self.conv_layer(net1_3, 16, '1_3', reuse, init, True)

        with tf.variable_scope("level0_4"):
            up0_4 = self.deconv_layer(net1_3, net0_3, 8, '0_4', reuse, init)
            net0_4 = tflearn.layers.merge_ops.merge(
                [up0_4, net0_0, net0_1, net0_2, net0_3], axis=4, mode='concat')
            net0_4 = self.conv_layer(net0_4, 8, '0_4', reuse, init, True)

        with tf.variable_scope("out"):
            net_fc1 = tflearn.layers.conv_3d(net0_4,
                                             8,
                                             1,
                                             activation='linear',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='fc1',
                                             weights_init=init)
            net_fc1 = tflearn.layers.normalization.batch_normalization(
                net_fc1, reuse=reuse, scope='batch_fc_1')
            net_fc1 = tflearn.activation(net_fc1, "prelu")
            net_out = tflearn.layers.conv_3d(net_fc1,
                                             2,
                                             1,
                                             activation='linear',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='output',
                                             weights_init=init)

        return net_out, net0_0
Example no. 28
0
def construct_dnn():
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.5)
    tflearn.config.init_training_mode()
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_90degrees_rotation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    input_layer = tflearn.input_data(shape=[None, 15, 15, 3],
                                     data_augmentation=img_aug)
    # block 1
    net = tflearn.conv_2d(input_layer, 256, 3, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    # res block 1
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 2
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 3
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 4
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 5
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 6
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 7
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 8
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # value head
    net = tflearn.conv_2d(net, 1, 1, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    final = tflearn.fully_connected(net, 1, activation='tanh')
    # optmizer
    sgd = tflearn.optimizers.SGD(learning_rate=0.01,
                                 lr_decay=0.95,
                                 decay_step=300000)
    regression = tflearn.regression(final,
                                    optimizer=sgd,
                                    loss='mean_square',
                                    metric='R2')
    model = tflearn.DNN(regression)
    return model
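
A hypothetical smoke test for construct_dnn; the random boards and target values below are placeholders for whatever the surrounding project actually feeds in:

import numpy as np

model = construct_dnn()
X = np.random.rand(128, 15, 15, 3).astype(np.float32)            # fake 15x15x3 board planes
Y = np.random.uniform(-1, 1, size=(128, 1)).astype(np.float32)   # fake value targets
model.fit(X, Y, n_epoch=1, batch_size=32, show_metric=True, run_id='value_net_demo')
value = model.predict(X[:1])   # tanh head -> a value in [-1, 1]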
Example no. 29
0
    def build_net(self, X, reuse=False):
        # Using TFLearn wrappers for network building
        init = tflearn.initializations.xavier()
        with tf.variable_scope("level1", reuse=reuse):
            net1 = tflearn.layers.conv_3d(X,
                                          16,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv1-1',
                                          weights_init=init)
            net1 = tflearn.activation(net1, 'prelu')

        with tf.variable_scope("level2", reuse=reuse):
            net2_in = tflearn.layers.conv_3d(net1,
                                             32,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds1',
                                             weights_init=init)
            net2 = tflearn.layers.conv_3d(net2_in,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv2-1',
                                          weights_init=init)
            net2 = tflearn.activation(net2, 'prelu')

        with tf.variable_scope("level1_2", reuse=reuse):
            net1_2 = tflearn.layers.conv.conv_3d_transpose(net2,
                                                           16,
                                                           2, [36] * 3,
                                                           strides=2,
                                                           activation='prelu',
                                                           padding='same',
                                                           regularizer='L2',
                                                           reuse=reuse,
                                                           scope='trans1',
                                                           weights_init=init)
            net1_2 = tflearn.layers.merge_ops.merge([
                net1_2,
                tf.slice(net1, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16])
            ],
                                                    'elemwise_sum',
                                                    name='merge1')
            net1_2 = tflearn.layers.conv_3d(net1_2,
                                            16,
                                            3,
                                            activation='linear',
                                            padding='valid',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='conv1-2',
                                            weights_init=init)
            net1_2 = tflearn.activation(net1_2, 'prelu')

        with tf.variable_scope("level3"):
            net3_in = tflearn.layers.conv_3d(net2,
                                             64,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds2',
                                             weights_init=init)
            net3 = tflearn.layers.conv_3d(net3_in,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-1',
                                          weights_init=init)
            net3 = tflearn.activation(net3, 'prelu')

        with tf.variable_scope("level2_2", reuse=reuse):
            net2_2 = tflearn.layers.conv.conv_3d_transpose(net2,
                                                           32,
                                                           2, [20] * 3,
                                                           strides=2,
                                                           activation='prelu',
                                                           padding='same',
                                                           regularizer='L2',
                                                           reuse=reuse,
                                                           scope='trans2',
                                                           weights_init=init)
            net2_2 = tflearn.layers.merge_ops.merge([
                net2_2,
                tf.slice(net2, [0] + [(36 - 20) // 2] * 3 + [0],
                         [-1] + [20] * 3 + [32])
            ],
                                                    'elemwise_sum',
                                                    name='merge2')
            net2_2 = tflearn.layers.conv_3d(net2_2,
                                            32,
                                            3,
                                            activation='linear',
                                            padding='valid',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='conv2-2',
                                            weights_init=init)
            net2_2 = tflearn.activation(net2_2, 'prelu')

        with tf.variable_scope("level1_3", reuse=reuse):
            net1_3 = tflearn.layers.conv.conv_3d_transpose(net2_2,
                                                           16,
                                                           2, [36] * 3,
                                                           strides=2,
                                                           activation='prelu',
                                                           padding='same',
                                                           regularizer='L2',
                                                           reuse=reuse,
                                                           scope='trans3',
                                                           weights_init=init)
            net1_3 = tflearn.layers.merge_ops.merge([
                net1_3,
                tf.slice(net1, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16]),
                tf.slice(net1_2, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16])
            ],
                                                    'elemwise_sum',
                                                    name='merge3')
            net1_3 = tflearn.layers.conv_3d(net1_3,
                                            16,
                                            3,
                                            activation='linear',
                                            padding='valid',
                                            regularizer='L2',
                                            reuse=reuse,
                                            scope='conv1-3',
                                            weights_init=init)
            net1_3 = tflearn.activation(net1_3, 'prelu')

        with tf.variable_scope("level4"):
            net4_in = tflearn.layers.conv_3d(net3,
                                             128,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds3',
                                             weights_init=init)
            net4 = tflearn.layers.conv_3d(net4_in,
                                          128,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-1',
                                          weights_init=init)
            net4 = tflearn.activation(net4, 'prelu')

        with tf.variable_scope("level5"):
            net5 = tflearn.layers.conv.conv_3d_transpose(net4,
                                                         64,
                                                         2, [12] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans4',
                                                         weights_init=init)
            net5 = tflearn.layers.merge_ops.merge([
                net5,
                tf.slice(net3, [0] + [(16 - 12) // 2] * 3 + [0],
                         [-1] + [12] * 3 + [64])
            ],
                                                  'elemwise_sum',
                                                  name='merge4')
            net5 = tflearn.layers.conv_3d(net5,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv5-1',
                                          weights_init=init)
            net5 = tflearn.activation(net5, 'prelu')

        with tf.variable_scope("level6"):
            net6 = tflearn.layers.conv.conv_3d_transpose(net5,
                                                         32,
                                                         2, [20] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans5',
                                                         weights_init=init)
            net6 = tflearn.layers.merge_ops.merge([
                net6,
                tf.slice(net2, [0] + [(36 - 20) // 2] * 3 + [0],
                         [-1] + [20] * 3 + [32]),
                tf.slice(net2_2, [0] + [(36 - 20) // 2] * 3 + [0],
                         [-1] + [20] * 3 + [32])
            ],
                                                  'elemwise_sum',
                                                  name='merge5')
            net6 = tflearn.layers.conv_3d(net6,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv6-1',
                                          weights_init=init)
            net6 = tflearn.activation(net6, 'prelu')

        with tf.variable_scope("level7"):
            net7 = tflearn.layers.conv.conv_3d_transpose(net6,
                                                         16,
                                                         2, [36] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans6',
                                                         weights_init=init)
            net7 = tflearn.layers.merge_ops.merge([
                net7,
                tf.slice(net1, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16]),
                tf.slice(net1_2, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16]),
                tf.slice(net1_3, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [16])
            ],
                                                  'elemwise_sum',
                                                  name='merge6')
            net7 = tflearn.layers.conv_3d(net7,
                                          16,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv7-1',
                                          weights_init=init)
            net7 = tflearn.activation(net7, 'prelu')

        with tf.variable_scope("out"):
            net_fc1 = tflearn.layers.conv_3d(net7,
                                             16,
                                             1,
                                             activation='linear',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='fc1',
                                             weights_init=init)
            net_fc1 = tflearn.activation(net_fc1, 'prelu')
            net_out = tflearn.layers.conv_3d(net_fc1,
                                             2,
                                             1,
                                             activation='linear',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='output',
                                             weights_init=init)

        return net_out, net1
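The method returns raw two-channel voxel logits (plus the level-1 features). Below is a minimal, self-contained sketch of turning such logits into a voxel-wise softmax cross-entropy objective; the 36^3 feature shape, the stand-in 1x1x1 conv (used instead of the full network so the snippet builds on its own), and the Adam learning rate are assumptions:

import tensorflow as tf
import tflearn

feats = tf.placeholder(tf.float32, shape=[None, 36, 36, 36, 16])   # assumed feature map
labels = tf.placeholder(tf.int32, shape=[None, 36, 36, 36])        # integer class per voxel

logits = tflearn.layers.conv_3d(feats, 2, 1, activation='linear')  # 2-class voxel logits
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)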
Esempio n. 30
0
    def build_net(self,
                  X,
                  reuse=False,
                  segment_size_in=segment_size_in,
                  feats=None):
        # Using TFLearn wrappers for network building
        init = tflearn.initializations.variance_scaling()
        with tf.variable_scope("level1"):
            net1 = tflearn.layers.conv_3d(X,
                                          16,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv1-1',
                                          weights_init=init)
            net1 = tflearn.activation(net1, 'prelu')

        with tf.variable_scope("level2"):
            net2_in = tflearn.layers.conv_3d(net1,
                                             32,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds1',
                                             weights_init=init)
            net2 = tflearn.layers.conv_3d(net2_in,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv2-1',
                                          weights_init=init)
            net2 = tflearn.layers.merge_ops.merge([net2, net2_in],
                                                  'elemwise_sum',
                                                  name='merge2')
            net2 = tflearn.activation(net2, 'prelu')

        with tf.variable_scope("level3"):
            net3_in = tflearn.layers.conv_3d(net2,
                                             64,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds2',
                                             weights_init=init)
            net3 = tflearn.layers.conv_3d(net3_in,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-1',
                                          weights_init=init)
            net3 = tflearn.layers.merge_ops.merge([net3, net3_in],
                                                  'elemwise_sum',
                                                  name='merge3')
            net3 = tflearn.activation(net3, 'prelu')

        with tf.variable_scope("level4"):
            net4_in = tflearn.layers.conv_3d(net3,
                                             128,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds3',
                                             weights_init=init)
            net4 = tflearn.layers.conv_3d(net4_in,
                                          128,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-1',
                                          weights_init=init)
            net4 = tflearn.layers.merge_ops.merge([net4, net4_in],
                                                  'elemwise_sum',
                                                  name='merge4')
            net4 = tflearn.activation(net4, 'prelu')

        with tf.variable_scope("level5"):
            net5_in = tflearn.layers.conv_3d(net4,
                                             256,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds4',
                                             weights_init=init)
            net5 = tflearn.layers.conv_3d(net5_in,
                                          256,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv5-1',
                                          weights_init=init)
            net5 = tflearn.layers.merge_ops.merge([net5, net5_in],
                                                  'elemwise_sum',
                                                  name='merge5')
            net5 = tflearn.activation(net5, 'prelu')

        with tf.variable_scope("level6"):
            net6 = tflearn.layers.conv.conv_3d_transpose(net5,
                                                         128,
                                                         2, (segment_size_in //
                                                             8).tolist(),
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans1',
                                                         weights_init=init)
            net6_in = tf.concat(
                [net6, net4], axis=4)  # skip connection: channel concat (no cropping needed with 'same' padding)
            net6 = tflearn.layers.conv_3d(net6_in,
                                          256,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv6-1',
                                          weights_init=init)
            net6 = tflearn.layers.merge_ops.merge([net6, net6_in],
                                                  'elemwise_sum',
                                                  name='merge6')
            net6 = tflearn.activation(net6, 'prelu')

        with tf.variable_scope("level7"):
            net7 = tflearn.layers.conv.conv_3d_transpose(net6,
                                                         64,
                                                         2, (segment_size_in //
                                                             4).tolist(),
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans2',
                                                         weights_init=init)
            net7_in = tf.concat(
                [net7, net3], axis=4)  # skip connection: channel concat with level-3 features
            net7 = tflearn.layers.conv_3d(net7_in,
                                          128,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv7-1',
                                          weights_init=init)
            net7 = tflearn.layers.merge_ops.merge([net7, net7_in],
                                                  'elemwise_sum',
                                                  name='merge7')
            net7 = tflearn.activation(net7, 'prelu')

        with tf.variable_scope("level8"):
            net8 = tflearn.layers.conv.conv_3d_transpose(net7,
                                                         32,
                                                         2, (segment_size_in //
                                                             2).tolist(),
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='same',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans3',
                                                         weights_init=init)
            net8_in = tf.concat(
                [net8, net2], axis=4)  # skip connection: channel concat with level-2 features
            net8 = tflearn.layers.conv_3d(net8_in,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv8-1',
                                          weights_init=init)
            net8 = tflearn.layers.merge_ops.merge([net8, net8_in],
                                                  'elemwise_sum',
                                                  name='merge8')
            net8 = tflearn.activation(net8, 'prelu')

        with tf.variable_scope("level9"):
            net9 = tflearn.layers.conv.conv_3d_transpose(
                net8,
                16,
                2,
                segment_size_in.tolist(),
                strides=2,
                activation='prelu',
                padding='same',
                regularizer='L2',
                reuse=reuse,
                scope='trans4',
                weights_init=init)
            net9_in = tf.concat(
                [net9, net1], axis=4)  # skip connection: channel concat with level-1 features
            net9 = tflearn.layers.conv_3d(net9_in,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='same',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv9-1',
                                          weights_init=init)
            net9 = tflearn.layers.merge_ops.merge([net9, net9_in],
                                                  'elemwise_sum',
                                                  name='merge9')
            net9 = tflearn.activation(net9, 'prelu')

        with tf.variable_scope("out"):
            net_fc1 = tflearn.layers.conv_3d(net9,
                                             32,
                                             1,
                                             activation='linear',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='fc1',
                                             weights_init=init)
            net_fc1 = tflearn.activation(net_fc1, 'prelu')
            net_fc1 = tflearn.layers.core.dropout(net_fc1, 0.5)
            net_out = tflearn.layers.conv_3d(net_fc1,
                                             1,
                                             1,
                                             activation='linear',
                                             padding='same',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='output',
                                             weights_init=init)

        return net_out, net_fc1
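This variant ends in a single-channel 3-D output with dropout on the penultimate features, so a sigmoid plus soft Dice loss is a natural pairing. A minimal sketch under that assumption (the 32^3 shapes and the smoothing constant are illustrative; this is not necessarily the author's loss):

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, 32, 32, 32, 1])
masks = tf.placeholder(tf.float32, shape=[None, 32, 32, 32, 1])

probs = tf.sigmoid(logits)
smooth = 1.0
intersection = tf.reduce_sum(probs * masks, axis=[1, 2, 3, 4])
union = tf.reduce_sum(probs, axis=[1, 2, 3, 4]) + tf.reduce_sum(masks, axis=[1, 2, 3, 4])
dice_loss = 1.0 - tf.reduce_mean((2.0 * intersection + smooth) / (union + smooth))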
import argparse
import tflearn as tl  # assumption: the `tl` alias used below refers to tflearn

if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument('--t', action='store', dest='test_path', type=str, help='Test Data Path')
	config = parser.parse_args()

	#Load Test data
	image_count = (3,6)
	patch_count = 20
	X = generate_patches(img2numpy_arr(config.test_path), image_count, patch_count)

	# Building Residual Network
	net = tl.input_data(shape=[None, 42, 42, 3])
	net = tl.conv_2d(net, 32, 3)
	net = tl.batch_normalization(net)
	net = tl.activation(net, 'relu')
	net = tl.shallow_residual_block(net, 4, 32, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 32, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 4, 64, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 64, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 5, 64, regularizer='L2')
	net = tl.global_avg_pool(net)
	
	# Regression
	net = tl.fully_connected(net, 9, activation='softmax')
	mom = tl.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
	net = tl.regression(net, optimizer=mom,
									 loss='categorical_crossentropy')
	# Training
Esempio n. 32
0
# Data loading
import tflearn
import tflearn.data_utils as du
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()
# Data pre-processing
X, mean = du.featurewise_zero_center(X)
X, std = du.featurewise_std_normalization(X)
testX = du.featurewise_zero_center(testX, mean)
testX = du.featurewise_std_normalization(testX, std)
Y = du.to_categorical(Y, 10)
testY = du.to_categorical(testY, 10)

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 32, 3)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.shallow_residual_block(net, 4, 32, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 32, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 4, 64, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 64, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 5, 128, regularizer='L2')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
Esempio n. 33
0
    def build_net(self,
                  X,
                  reuse=False,
                  segment_size_in=segment_size_in,
                  feats=None):
        init = tflearn.initializations.xavier()
        with tf.variable_scope("level1"):
            net1 = tflearn.layers.conv_3d(X,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv1-1',
                                          weights_init=init)
            net1 = tflearn.activation(net1, 'prelu')

        with tf.variable_scope("level2"):
            net2_in = tflearn.layers.conv_3d(net1,
                                             64,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds1',
                                             weights_init=init)
            net2 = tflearn.layers.conv_3d(net2_in,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv2-1',
                                          weights_init=init)
            net2 = tflearn.activation(net2, 'prelu')

        with tf.variable_scope("level3"):
            net3_in = tflearn.layers.conv_3d(net2,
                                             128,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds2',
                                             weights_init=init)
            net3 = tflearn.layers.conv_3d(net3_in,
                                          128,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv3-1',
                                          weights_init=init)
            net3 = tflearn.activation(net3, 'prelu')

        with tf.variable_scope("level4"):
            net4_in = tflearn.layers.conv_3d(net3,
                                             256,
                                             2,
                                             strides=2,
                                             activation='prelu',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='ds3',
                                             weights_init=init)
            net4 = tflearn.layers.conv_3d(net4_in,
                                          256,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv4-1',
                                          weights_init=init)
            net4 = tflearn.activation(net4, 'prelu')

        with tf.variable_scope("level5"):
            net5 = tflearn.layers.conv.conv_3d_transpose(net4,
                                                         128,
                                                         2, [12] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='valid',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans2',
                                                         weights_init=init)
            net5 = tflearn.layers.merge_ops.merge([
                net5,
                tf.slice(net3, [0] + [(16 - 12) // 2] * 3 + [0],
                         [-1] + [12] * 3 + [128])
            ],
                                                  'elemwise_sum',
                                                  name='merge5')
            net5 = tflearn.layers.conv_3d(net5,
                                          128,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv5-1',
                                          weights_init=init)
            net5 = tflearn.activation(net5, 'prelu')

        with tf.variable_scope("level6"):
            net6 = tflearn.layers.conv.conv_3d_transpose(net5,
                                                         64,
                                                         2, [20] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='valid',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans3',
                                                         weights_init=init)
            net6 = tflearn.layers.merge_ops.merge([
                net6,
                tf.slice(net2, [0] + [(36 - 20) // 2] * 3 + [0],
                         [-1] + [20] * 3 + [64])
            ],
                                                  'elemwise_sum',
                                                  name='merge6')
            net6 = tflearn.layers.conv_3d(net6,
                                          64,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv6-1',
                                          weights_init=init)
            net6 = tflearn.activation(net6, 'prelu')

        with tf.variable_scope("level7"):
            net7 = tflearn.layers.conv.conv_3d_transpose(net6,
                                                         32,
                                                         2, [36] * 3,
                                                         strides=2,
                                                         activation='prelu',
                                                         padding='valid',
                                                         regularizer='L2',
                                                         reuse=reuse,
                                                         scope='trans4',
                                                         weights_init=init)
            net7 = tflearn.layers.merge_ops.merge([
                net7,
                tf.slice(net1, [0] + [(74 - 36) // 2] * 3 + [0],
                         [-1] + [36] * 3 + [32])
            ],
                                                  'elemwise_sum',
                                                  name='merge9')
            net7 = tflearn.layers.conv_3d(net7,
                                          32,
                                          3,
                                          activation='linear',
                                          padding='valid',
                                          regularizer='L2',
                                          reuse=reuse,
                                          scope='conv9-1',
                                          weights_init=init)
            net7 = tflearn.activation(net7, 'prelu')

        with tf.variable_scope("out"):
            net_fc1 = tflearn.layers.conv_3d(net7,
                                             32,
                                             1,
                                             activation='linear',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='fc1',
                                             weights_init=init)
            # net_fc1 = tflearn.layers.core.dropout(net_fc1, 0.5)
            net_fc1 = tflearn.activation(net_fc1, 'prelu')
            net_out = tflearn.layers.conv_3d(net_fc1,
                                             2,
                                             1,
                                             activation='linear',
                                             padding='valid',
                                             regularizer='L2',
                                             reuse=reuse,
                                             scope='output',
                                             weights_init=init)

        return net_out, net_fc1
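At inference time, two-channel voxel logits like the output above are typically turned into per-voxel probabilities and a hard label map. A short, self-contained sketch (shapes left symbolic):

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, None, None, None, 2])
probs = tf.nn.softmax(logits)       # per-voxel class probabilities over the last axis
pred = tf.argmax(logits, axis=-1)   # hard label per voxel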
Esempio n. 34
0
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d


def shallow_residual_block(incoming, nb_blocks, out_channels,
                           downsample=False, downsample_strides=2,
                           activation='relu', batch_norm=True, bias=False,
                           weights_init='uniform_scaling', bias_init='zeros',
                           regularizer=None, weight_decay=0.0001,
                           trainable=True, restore=True,
                           name="ShallowResidualBlock"):
    """ Shallow Residual Block.

    A shallow residual block as described in MSRA's Deep Residual Network
    paper.

    Notice: Because TensorFlow doesn't support strides larger than the filter
    size, average pooling is used as a workaround, which decreases performance.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        out_channels: `int`. The number of convolutional filters of the
            convolution layers.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer's weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'ShallowResidualBlock'.

    References:
        Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
        Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        [http://arxiv.org/pdf/1512.03385v1.pdf]
        (http://arxiv.org/pdf/1512.03385v1.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):
            with tf.name_scope('ResidualBlock'):

                identity = resnet

                if downsample:
                    resnet = conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)
                else:
                    resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                     'linear', bias, weights_init,
                                     bias_init, regularizer, weight_decay,
                                     trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)
                resnet = tflearn.activation(resnet, activation)

                resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                 'linear', bias, weights_init,
                                 bias_init, regularizer, weight_decay,
                                 trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                # TensorFlow can't accept kernel size < strides, so average
                # pooling or resizing is used for downsampling instead.

                # Downsampling
                if downsample:
                    #identity = avg_pool_2d(identity, downsample_strides,
                    #                       downsample_strides)
                    size = resnet.get_shape().as_list()
                    identity = tf.image.resize_nearest_neighbor(identity,
                                                                [size[1],
                                                                 size[2]])

                # Projection to new dimension
                if in_channels != out_channels:
                    in_channels = out_channels
                    identity = conv_2d(identity, out_channels, 1, 1, 'same',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)

                resnet = resnet + identity
                resnet = tflearn.activation(resnet, activation)

    return resnet
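A usage sketch for shallow_residual_block, mirroring the CIFAR-style stacks used elsewhere on this page (channel counts are illustrative; the imports added at the top of the snippet are assumed):

net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 32, 3)
net = shallow_residual_block(net, 4, 32, regularizer='L2')
net = shallow_residual_block(net, 1, 32, downsample=True, regularizer='L2')
net = tflearn.global_avg_pool(net)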
Esempio n. 35
0
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d


def residual_bottleneck(incoming, nb_blocks, bottleneck_size, out_channels,
                        downsample=False, downsample_strides=2,
                        activation='relu', batch_norm=True, bias=True,
                        weights_init='variance_scaling', bias_init='zeros',
                        regularizer='L2', weight_decay=0.0001,
                        trainable=True, restore=True, name="ResidualBottleneck"):
    """ Residual Bottleneck.

    A residual bottleneck block as described in MSRA's Deep Residual Network
    paper. Full pre-activation architecture is used here.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        bottleneck_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        out_channels: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'variance_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer's weights (see tflearn.regularizers). Default: 'L2'.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'ResidualBottleneck'.

    References:
        - Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2015.
        - Identity Mappings in Deep Residual Networks. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        - [http://arxiv.org/pdf/1512.03385v1.pdf]
            (http://arxiv.org/pdf/1512.03385v1.pdf)
        - [Identity Mappings in Deep Residual Networks]
            (https://arxiv.org/pdf/1603.05027v2.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):

            identity = resnet

            if not downsample:
                downsample_strides = 1

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 1,
                             downsample_strides, 'valid',
                             'linear', bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 3, 1, 'same',
                             'linear', bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            resnet = conv_2d(resnet, out_channels, 1, 1, 'valid',
                             activation, bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            # Downsampling
            if downsample_strides > 1:
                identity = tflearn.avg_pool_2d(identity, 1,
                                               downsample_strides)

            # Projection to new dimension
            if in_channels != out_channels:
                ch = (out_channels - in_channels)//2
                identity = tf.pad(identity,
                                  [[0, 0], [0, 0], [0, 0], [ch, ch]])
                in_channels = out_channels

            resnet = resnet + identity
            resnet = tflearn.activation(resnet, activation)

    return resnet
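A usage sketch for residual_bottleneck (bottleneck/output widths are illustrative; the imports added at the top of the snippet are assumed):

net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 64, 3)
net = residual_bottleneck(net, 3, 16, 64)
net = residual_bottleneck(net, 1, 32, 128, downsample=True)
net = residual_bottleneck(net, 3, 32, 128)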
Esempio n. 36
0
import tensorflow as tf
import tflearn
from tflearn.layers.core import dropout
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell


def get_model():
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(
        net,
        optimizer='adam',  # alternative: 'momentum'
        loss='categorical_crossentropy')
    # optionally pass learning_rate=0.1 to the regression layer
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/' + run_id)

    all_names = tflearn.variables.get_all_variables()
    print(all_names[0])
    ttt = model.get_weights(all_names[0])
    print(type(ttt))
    print(ttt)

    # tflearn.variables.get_value(all_names[0], xxx)

    return all_names
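
A minimal sketch (not part of the original snippet) of how the returned variable list can be combined with model.get_weights to inspect individual tensors of the loaded model; the 'Conv2D'/'/W' name filter is an assumption about tflearn's default layer scoping and may need adjusting to the actual graph.

# Hedged sketch: print the shape of every convolutional weight tensor of a
# loaded tflearn.DNN model, assuming tflearn's default 'Conv2D.../W' naming.
def dump_conv_weight_shapes(model):
    for var in tflearn.variables.get_all_variables():
        if 'Conv2D' in var.name and var.name.endswith('/W:0'):  # assumed naming
            weights = model.get_weights(var)   # returns a numpy array
            print(var.name, weights.shape)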
Esempio n. 37
0
network = tflearn.resnext_block(network,
                                nb_blocks=1,
                                out_channels=32,
                                cardinality=32,
                                downsample=True)  # projection ("dashed-line") shortcut

network = tflearn.resnext_block(network,
                                nb_blocks=n,
                                out_channels=64,
                                cardinality=32)
network = tflearn.resnext_block(network,
                                nb_blocks=1,
                                out_channels=32,
                                cardinality=32,
                                downsample=True)  # projection ("dashed-line") shortcut

network = tflearn.batch_normalization(network)  # batch normalization
network = tflearn.activation(network, 'relu')  # activation function
network = tflearn.global_avg_pool(network)  # global average pooling
# Fully connected (output) layer
network = tflearn.fully_connected(network, 10, activation='softmax')
# Optimizer settings:
# dynamic learning rate, starting at 0.1 and multiplied by lr_decay every decay_step steps
opt = tflearn.Momentum(learning_rate=0.1, lr_decay=0.1, decay_step=32000)
network = tflearn.regression(network, optimizer=opt)

# Training
model = tflearn.DNN(network,
                    checkpoint_path='./model/resnet/model_resnet',
                    tensorboard_dir='./logs')
model.fit(X, Y, n_epoch=200, validation_set=(X_test, Y_test), batch_size=128)
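
For reference, a small sketch (an illustration, not part of the snippet) of the learning-rate arithmetic the Momentum optimizer above performs: with staircase decay the rate drops by a factor of lr_decay once every decay_step steps; with the default staircase=False the exponent is the continuous ratio step / decay_step instead.

# Hedged sketch of the decayed learning rate for the optimizer configured
# above (learning_rate=0.1, lr_decay=0.1, decay_step=32000), assuming
# staircase behaviour.
def decayed_lr(step, base_lr=0.1, lr_decay=0.1, decay_step=32000):
    return base_lr * (lr_decay ** (step // decay_step))

# e.g. decayed_lr(31999) -> 0.1, decayed_lr(32000) -> ~0.01, decayed_lr(64000) -> ~0.001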
def densenet_block(incoming, nb_layers, growth, bottleneck=True,
                   downsample=True, downsample_strides=2, activation='relu',
                   batch_norm=True, dropout=False, dropout_keep_prob=0.5,
                   weights_init='variance_scaling', regularizer='L2',
                   weight_decay=0.0001, bias=True, bias_init='zeros',
                   trainable=True, restore=True, reuse=False, scope=None,
                   name="DenseNetBlock"):
    densenet = incoming

    with tf.variable_scope(scope, default_name=name, values=[incoming],
                           reuse=reuse) as scope:

        for i in range(nb_layers):

            # Identity
            conn = densenet

            # 1x1 Conv layer of the bottleneck block
            if bottleneck:
                if batch_norm:
                    densenet = tflearn.batch_normalization(densenet)
                densenet = tflearn.activation(densenet, activation)
                densenet = conv_2d(densenet, nb_filter=growth,
                                   filter_size=1,
                                   bias=bias,
                                   weights_init=weights_init,
                                   bias_init=bias_init,
                                   regularizer=regularizer,
                                   weight_decay=weight_decay,
                                   trainable=trainable,
                                   restore=restore)

            # 3x3 Conv layer
            if batch_norm:
                densenet = tflearn.batch_normalization(densenet)
            densenet = tflearn.activation(densenet, activation)
            densenet = conv_2d(densenet, nb_filter=growth,
                               filter_size=3,
                               bias=bias,
                               weights_init=weights_init,
                               bias_init=bias_init,
                               regularizer=regularizer,
                               weight_decay=weight_decay,
                               trainable=trainable,
                               restore=restore)

            # Connections
            densenet = tf.concat([densenet, conn], 3)

        # 1x1 Transition Conv
        if batch_norm:
            densenet = tflearn.batch_normalization(densenet)
        densenet = tflearn.activation(densenet, activation)
        densenet = conv_2d(densenet, nb_filter=growth,
                           filter_size=1,
                           bias=bias,
                           weights_init=weights_init,
                           bias_init=bias_init,
                           regularizer=regularizer,
                           weight_decay=weight_decay,
                           trainable=trainable,
                           restore=restore)
        if dropout:
            densenet = tflearn.dropout(densenet, keep_prob=dropout_keep_prob)

        # Downsampling
        if downsample:
            densenet = tflearn.avg_pool_2d(densenet, kernel_size=2,
                                           strides=downsample_strides)

    return densenet
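
A minimal usage sketch, assuming the densenet_block defined above (and the tflearn/conv_2d imports it relies on) are in scope; the input shape and layer sizes are illustrative, not taken from the original snippet.

# Hedged usage sketch for densenet_block; sizes are illustrative.
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
# Each of the 4 layers concatenates `growth` feature maps, then the 1x1
# transition conv maps back to `growth` channels and avg-pools by 2.
net = densenet_block(net, nb_layers=4, growth=12)
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')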
Esempio n. 39
0
# Real-time data augmentation
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)

net = tflearn.fully_connected(net, 2)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'softmax')

# Regression
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=tensorboardVerbose,
                    tensorboard_dir=tensorboardDir,
                    checkpoint_path=checkpointPath)

model.fit(X, Y, n_epoch=nEpoch, shuffle=True, validation_set=(X_test, Y_test),
Esempio n. 40
0
    def densenet_block(self,incoming, nb_layers, growth, bottleneck=True,
                       downsample=True, downsample_strides=2, activation='relu',
                       batch_norm=True, dropout=False, dropout_keep_prob=0.5,
                       weights_init='variance_scaling', regularizer='L2',
                       weight_decay=0.0001, bias=True, bias_init='zeros',
                       trainable=True, restore=True, reuse=False, scope=None,
                       name="DenseNetBlock"):
        """ DenseNet Block.
        A DenseNet block as described in DenseNet paper.
        Input:
            4-D Tensor [batch, height, width, in_channels].
        Output:
            4-D Tensor [batch, new height, new width, out_channels].
        Arguments:
            incoming: `Tensor`. Incoming 4-D Layer.
            nb_layers: `int`. Number of layers in the block.
            growth: `int`. DenseNet 'growth': The number of convolutional
                filters of each convolution.
            bottleneck: `bool`. If True, add a 1x1 convolution before the 3x3
                convolution to reduce the number of input feature maps.
            downsample: `bool`. If True, apply downsampling using
                'downsample_strides' for strides.
            downsample_strides: `int`. The strides to use when downsampling.
            activation: `str` (name) or `function` (returning a `Tensor`).
                Activation applied to this layer (see tflearn.activations).
                Default: 'relu'.
            batch_norm: `bool`. If True, apply batch normalization.
            dropout: `bool`. If True, apply dropout. Use 'dropout_keep_prob' to
                specify the keep probability.
            dropout_keep_prob: `float`. Keep probability parameter for dropout.
            bias: `bool`. If True, a bias is used.
            weights_init: `str` (name) or `Tensor`. Weights initialization.
                (see tflearn.initializations) Default: 'variance_scaling'.
            bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
                (see tflearn.initializations) Default: 'zeros'.
            regularizer: `str` (name) or `Tensor`. Add a regularizer to this
                layer weights (see tflearn.regularizers). Default: 'L2'.
            weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
            trainable: `bool`. If True, weights will be trainable.
            restore: `bool`. If True, this layer weights will be restored when
                loading a model.
            reuse: `bool`. If True and 'scope' is provided, this layer variables
                will be reused (shared).
            scope: `str`. Define this layer scope (optional). A scope can be
                used to share variables between layers. Note that scope will
                override name.
            name: A name for this layer (optional). Default: 'DenseNetBlock'.
        References:
            Densely Connected Convolutional Networks, G. Huang, Z. Liu,
            K. Q. Weinberger, L. van der Maaten. 2016.
        Links:
            [https://arxiv.org/abs/1608.06993]
            (https://arxiv.org/abs/1608.06993)
        """
        densenet = incoming

        with tf.variable_scope(scope, default_name=name, values=[incoming],
                               reuse=reuse) as scope:

            for i in range(nb_layers):

                # Identity
                conn = densenet

                # 1x1 Conv layer of the bottleneck block
                if bottleneck:
                    if batch_norm:
                        densenet = tflearn.batch_normalization(densenet)
                    densenet = tflearn.activation(densenet, activation)
                    densenet = conv_2d(densenet, nb_filter=growth,
                                       filter_size=1,
                                       bias=bias,
                                       weights_init=weights_init,
                                       bias_init=bias_init,
                                       regularizer=regularizer,
                                       weight_decay=weight_decay,
                                       trainable=trainable,
                                       restore=restore)

                # 3x3 Conv layer
                if batch_norm:
                    densenet = tflearn.batch_normalization(densenet)
                densenet = tflearn.activation(densenet, activation)
                densenet = conv_2d(densenet, nb_filter=growth,
                                   filter_size=3,
                                   bias=bias,
                                   weights_init=weights_init,
                                   bias_init=bias_init,
                                   regularizer=regularizer,
                                   weight_decay=weight_decay,
                                   trainable=trainable,
                                   restore=restore)

                # Connections
                densenet = tf.concat([densenet, conn], 3)

            # 1x1 Transition Conv
            if batch_norm:
                densenet = tflearn.batch_normalization(densenet)
            densenet = tflearn.activation(densenet, activation)
            densenet = conv_2d(densenet, nb_filter=growth,
                               filter_size=1,
                               bias=bias,
                               weights_init=weights_init,
                               bias_init=bias_init,
                               regularizer=regularizer,
                               weight_decay=weight_decay,
                               trainable=trainable,
                               restore=restore)
            if dropout:
                densenet = tflearn.dropout(densenet, keep_prob=dropout_keep_prob)

            # Downsampling
            if downsample:
                densenet = tflearn.avg_pool_2d(densenet, kernel_size=2,
                                               strides=downsample_strides)

        return densenet
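
As a sanity check on the channel bookkeeping described in the docstring, here is a small sketch (illustrative only, NHWC layout assumed) of how the feature-map count evolves inside the block: each iteration concatenates `growth` maps onto the running tensor, and the 1x1 transition conv then reduces the result to `growth` channels.

# Hedged sketch mirroring the tf.concat / transition-conv logic above.
def densenet_block_channel_trace(in_channels, nb_layers, growth):
    trace = []
    channels = in_channels
    for _ in range(nb_layers):
        channels += growth        # tf.concat([densenet, conn], 3)
        trace.append(channels)
    trace.append(growth)          # 1x1 transition conv_2d(..., nb_filter=growth)
    return trace

# e.g. densenet_block_channel_trace(16, nb_layers=4, growth=12) -> [28, 40, 52, 64, 12]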
Esempio n. 41
0
def deep_bottleneck(incoming, nb_layers, nb_filter, bottleneck_size,
                    activation='relu', batch_norm=True, bias=False,
                    weights_init='uniform_scaling', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, name="DeepBottleneck"):
    """ Deep Bottleneck.

    As described in MSRA's Deep Residual Network paper.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_layers: `int`. Number of layer blocks.
        nb_filter: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        bottleneck_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        name: A name for this layer (optional). Default: 'DeepBottleneck'.

    """
    resnet = incoming
    # Build the bottleneck block layers. This block doesn't need a
    # `build inference` step like other layers, because it only composes
    # pre-existing layers and doesn't define any new ops.
    with tf.name_scope(name):
        for i in range(nb_layers):
            with tf.name_scope('ResidualLayer'):
                with tf.name_scope("in"):
                    residual = conv_2d(resnet, bottleneck_size, 1, 1, 'valid',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("bottleneck"):
                    residual = conv_2d(residual, bottleneck_size, 3, 1, 'same',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("out"):
                    residual = conv_2d(residual, nb_filter, 1, 1, 'valid',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)

                resnet = resnet + residual

    return resnet
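
A minimal usage sketch, assuming deep_bottleneck above (and the conv_2d helper it calls) are in scope; the MNIST-like input shape and filter counts are illustrative assumptions.

# Hedged usage sketch: the residual addition inside deep_bottleneck requires
# the incoming tensor to already have nb_filter channels, hence the 64-filter
# conv placed before the block.
net = tflearn.input_data(shape=[None, 28, 28, 1])
net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
net = deep_bottleneck(net, nb_layers=3, nb_filter=64, bottleneck_size=16)
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')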
Esempio n. 42
0
X, mean = du.featurewise_zero_center(X)
testX = du.featurewise_zero_center(testX, mean)

# Building convolutional network_A
netData = input_data(shape=[None, 28, 28, 1], name='input')

network = tflearn.conv_2d(netData, 64, 3, activation='relu', bias=False)
network = tflearn.residual_bottleneck(network, 3, 16, 64)
network = tflearn.residual_bottleneck(network, 1, 32, 128, downsample=True)
network = tflearn.residual_bottleneck(network, 2, 32, 128)
network = tflearn.residual_bottleneck(network, 1, 64, 256, downsample=True)
network = tflearn.residual_bottleneck(network, 2, 64, 256)

network = tflearn.batch_normalization(network)

network = tflearn.activation(network, 'relu')


#network = fully_connected(network, 10, activation='softmax')

#network = regression(network, optimizer='adam', learning_rate=0.01,
#                     loss='categorical_crossentropy', name='target')

# Building Residual Network_B
net = tflearn.conv_2d(netData, 64, 3, activation='relu', bias=False)
net = tflearn.residual_bottleneck(net, 3, 16, 64)
net = tflearn.residual_bottleneck(net, 1, 32, 128, downsample=True)
net = tflearn.residual_bottleneck(net, 2, 32, 128)
net = tflearn.residual_bottleneck(net, 1, 64, 256, downsample=True)
net = tflearn.residual_bottleneck(net, 2, 64, 256)
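
The snippet stops before the two branches are used. Purely as an illustration (not something the original code does), two parallel tflearn branches built on the same input are commonly joined with tflearn.merge before a classification head; the 'concat' mode and channel axis below are assumptions.

# Hedged sketch only: one possible way to combine the two branches above.
merged = tflearn.merge([network, net], mode='concat', axis=3)
merged = tflearn.global_avg_pool(merged)
merged = tflearn.fully_connected(merged, 10, activation='softmax')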
Esempio n. 43
0
def shallow_residual_block(incoming, nb_blocks, out_channels,
                           downsample=False, downsample_strides=2,
                           activation='relu', batch_norm=True, bias=False,
                           weights_init='uniform_scaling', bias_init='zeros',
                           regularizer=None, weight_decay=0.0001,
                           trainable=True, restore=True,
                           name="ShallowResidualBlock"):
    """ Shallow Residual Block.

    A shallow residual block as described in MSRA's Deep Residual Network
    paper.

    Notice: Because TensorFlow doesn't support strides larger than the filter
    size, average pooling or resizing is used as a workaround, which can
    decrease performance.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        out_channels: `int`. The number of convolutional filters of the
            convolution layers.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        name: A name for this layer (optional). Default: 'ShallowResidualBlock'.

    References:
        Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
        Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        [http://arxiv.org/pdf/1512.03385v1.pdf]
        (http://arxiv.org/pdf/1512.03385v1.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):
            with tf.name_scope('ResidualBlock'):

                identity = resnet

                if downsample:
                    resnet = conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)
                else:
                    resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                     'linear', bias, weights_init,
                                     bias_init, regularizer, weight_decay,
                                     trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)
                resnet = tflearn.activation(resnet, activation)

                resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                 'linear', bias, weights_init,
                                 bias_init, regularizer, weight_decay,
                                 trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                # TensorFlow can't accept kernel size < strides, so use
                # average pooling or resizing for downsampling.

                # Downsampling
                if downsample:
                    #identity = avg_pool_2d(identity, downsample_strides,
                    #                       downsample_strides)
                    size = resnet.get_shape().as_list()
                    identity = tf.image.resize_nearest_neighbor(identity,
                                                                [size[1],
                                                                 size[2]])

                # Projection to new dimension
                if in_channels != out_channels:
                    in_channels = out_channels
                    identity = conv_2d(identity, out_channels, 1, 1, 'same',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)

                resnet = resnet + identity
                resnet = tflearn.activation(resnet, activation)

    return resnet
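
A minimal usage sketch, assuming shallow_residual_block above (and the conv_2d import it relies on) are in scope; the CIFAR-style stack sizes mirror the residual_block examples elsewhere in this document but are otherwise illustrative.

# Hedged usage sketch for shallow_residual_block; n controls depth.
n = 3
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = shallow_residual_block(net, n, 16)
net = shallow_residual_block(net, 1, 32, downsample=True)   # projects identity to 32 channels
net = shallow_residual_block(net, n - 1, 32)
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')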
Esempio n. 44
0
def deep_residual_block(
    incoming,
    nb_blocks,
    bottleneck_size,
    out_channels,
    downsample=False,
    downsample_strides=2,
    activation="relu",
    batch_norm=True,
    bias=False,
    weights_init="uniform_scaling",
    bias_init="zeros",
    regularizer=None,
    weight_decay=0.001,
    trainable=True,
    restore=True,
    name="DeepResidualBlock",
):
    """ Deep Residual Block.

    A deep residual block as described in MSRA's Deep Residual Network paper.

    Notice: Because TensorFlow doesn't support strides larger than the filter
    size, average pooling is used as a workaround, which can decrease
    performance.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        bottleneck_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        out_channels: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        name: A name for this layer (optional). Default: 'DeepResidualBlock'.

    References:
        Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
        Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        [http://arxiv.org/pdf/1512.03385v1.pdf]
        (http://arxiv.org/pdf/1512.03385v1.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):
            with tf.name_scope("ResidualBlock"):

                identity = resnet

                if downsample:
                    # Use average pooling, because TensorFlow conv_2d can't
                    # accept kernel size < strides.
                    resnet = avg_pool_2d(resnet, downsample_strides, downsample_strides)
                    resnet = conv_2d(
                        resnet,
                        bottleneck_size,
                        1,
                        1,
                        "valid",
                        activation,
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )
                else:
                    resnet = conv_2d(
                        resnet,
                        bottleneck_size,
                        1,
                        1,
                        "valid",
                        activation,
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                resnet = conv_2d(
                    resnet,
                    bottleneck_size,
                    3,
                    1,
                    "same",
                    activation,
                    bias,
                    weights_init,
                    bias_init,
                    regularizer,
                    weight_decay,
                    trainable,
                    restore,
                )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                resnet = conv_2d(
                    resnet,
                    out_channels,
                    1,
                    1,
                    "valid",
                    activation,
                    bias,
                    weights_init,
                    bias_init,
                    regularizer,
                    weight_decay,
                    trainable,
                    restore,
                )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                if downsample:
                    # Use average pooling, because TensorFlow conv_2d can't
                    # accept kernel size < strides.
                    identity = avg_pool_2d(identity, downsample_strides, downsample_strides)

                # Projection to new dimension
                if in_channels != out_channels:
                    in_channels = out_channels
                    identity = conv_2d(
                        identity,
                        out_channels,
                        1,
                        1,
                        "valid",
                        "linear",
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )

                resnet = resnet + identity
                resnet = tflearn.activation(resnet, activation)

    return resnet
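
A minimal usage sketch, assuming deep_residual_block above (and the conv_2d / avg_pool_2d helpers it calls) are in scope; filter counts and the input shape are illustrative.

# Hedged usage sketch for deep_residual_block: the first stack keeps 64
# channels so the residual addition needs no projection; the second both
# downsamples and projects the identity to 128 channels.
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
net = deep_residual_block(net, 3, bottleneck_size=16, out_channels=64)
net = deep_residual_block(net, 1, bottleneck_size=32, out_channels=128,
                          downsample=True)
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')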