Example #1
def flatten(x):
    """ Wrapper to convert mutli dimensional tensor into a 
		one-dimension tensor. """
    layer_shape = x.get_shape()
    nfeatures = layer_shape[1:].num_elements()
    watch_msg("Layer flatted to {} parameters".format(nfeatures))
    return tf.reshape(x, [-1, nfeatures])
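# A minimal usage sketch (TF 1.x graph mode; the shape is hypothetical and
# watch_msg is assumed to be in scope):
import tensorflow as tf
x = tf.placeholder(tf.float32, shape=[None, 8, 8, 64])
flat = flatten(x)  # -> [None, 4096], i.e. 8 * 8 * 64 features per example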
Example #2
def inceptionC(name, x):
    """ Schema of the inception C module for Inception V4 """
    with tf.variable_scope(name):
        watch_msg("Module {}".format(name))
        pool1 = avgpool('{}_avgpool'.format(name), x=x, fsize=3, stride=1)
        conv1 = conv('{}_conv1_1x1'.format(name),
                     x=pool1,
                     fsize=[1, 1],
                     nfilters=256)

        conv2 = conv('{}_conv2_1x1'.format(name),
                     x=x,
                     fsize=[1, 1],
                     nfilters=256)

        conv3_1 = conv('{}_conv3_1_1x1'.format(name),
                       x=x,
                       fsize=[1, 1],
                       nfilters=384)
        conv3_2_1 = conv('{}_conv3_2_1_1x3'.format(name),
                         x=conv3_1,
                         fsize=[1, 3],
                         nfilters=256)
        conv3_2_2 = conv('{}_conv3_2_2_3x1'.format(name),
                         x=conv3_1,
                         fsize=[3, 1],
                         nfilters=256)

        conv4_1 = conv('{}_conv4_1_1x1'.format(name),
                       x=x,
                       fsize=[1, 1],
                       nfilters=384)
        conv4_2 = conv('{}_conv4_2_1x3'.format(name),
                       x=conv4_1,
                       fsize=[1, 3],
                       nfilters=448)
        conv4_3 = conv('{}_conv4_3_3x1'.format(name),
                       x=conv4_2,
                       fsize=[3, 1],
                       nfilters=512)
        conv4_4_1 = conv('{}_conv4_4_1_1x3'.format(name),
                         x=conv4_3,
                         fsize=[1, 3],
                         nfilters=256)
        conv4_4_2 = conv('{}_conv4_4_2_3x1'.format(name),
                         x=conv4_3,
                         fsize=[3, 1],
                         nfilters=256)

        concat = tf.concat(
            [conv1, conv2, conv3_2_1, conv3_2_2, conv4_4_1, conv4_4_2],
            axis=3,
            name='{}_concat'.format(name))
        watch_msg("Layer {} has shape {}".format(name, concat.shape))
        return concat
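# Note: with stride 1 and 'SAME' padding throughout, each of the six branches
# emits 256 channels, so the concatenation yields 6 * 256 = 1536 output
# channels, as in the Inception-v4 paper's Inception-C block.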
Example #3
def avgpool(name, x, fsize=3, stride=2, padding='SAME'):
    """ Wrapper for Average Pooling layer. """
    with tf.variable_scope(name):
        layer = tf.nn.avg_pool(value=x,
                               ksize=[1, fsize, fsize, 1],
                               strides=[1, stride, stride, 1],
                               padding=padding)

        variable_summary(layer)
        watch_msg("Layer {} has shape {}".format(name, layer.shape))
        return layer
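# Note: ksize and strides follow TensorFlow's NHWC layout, [1, height, width, 1],
# so pooling is applied independently per example and per channel.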
Example #4
def reductionB(name, x):
    """ Schema for the reduction B module for the Inception v4 """
    with tf.variable_scope(name):
        watch_msg("Module {}".format(name))
        pool1 = maxpool('{}_maxpool'.format(name),
                        x=x,
                        fsize=3,
                        stride=2,
                        padding='VALID')

        conv2_1 = conv('{}_conv2_1_1x1'.format(name),
                       x=x,
                       fsize=[1, 1],
                       nfilters=192)
        conv2_2 = conv('{}_conv2_2_3x3'.format(name),
                       x=conv2_1,
                       fsize=[3, 3],
                       nfilters=192,
                       stride=2,
                       padding='VALID')

        conv3_1 = conv('{}_conv3_1_1x1'.format(name),
                       x=x,
                       fsize=[1, 1],
                       nfilters=256)
        conv3_2 = conv('{}_conv3_2_1x7'.format(name),
                       x=conv3_1,
                       fsize=[1, 7],
                       nfilters=256)
        conv3_3 = conv('{}_conv3_3_7x1'.format(name),
                       x=conv3_2,
                       fsize=[7, 1],
                       nfilters=320)
        conv3_4 = conv('{}_conv3_4_3x3'.format(name),
                       x=conv3_3,
                       fsize=[3, 3],
                       nfilters=320,
                       stride=2,
                       padding='VALID')

        concat = tf.concat([pool1, conv2_2, conv3_4],
                           axis=3,
                           name='{}_concat'.format(name))
        watch_msg("Layer {} has shape {}".format(name, concat.shape))
        return concat
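# Note: the stride-2 'VALID' branches halve the spatial resolution, and the
# concatenation carries C_in (from the max-pool) + 192 + 320 channels; for the
# paper's 17x17x1024 input this gives the expected 8x8x1536 output.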
Example #5
    def load_weights(self, sess):
        """ Loads weights from specified layers (cfg) from a pre-trained network. """
        weights = np.load(self.cfg.net_dict["weights"],
                          encoding='bytes',
                          allow_pickle=True).item()
        for layer in weights:
            # Skip layers that will be trained
            if layer in self.cfg.net_dict["train_layers"]:
                continue

            # Load pre-trained weights on layers that won't be trained
            with tf.variable_scope(layer, reuse=True):
                watch_msg("Loading parameters for layer {}".format(layer))
                for data in weights[layer]:
                    if len(data.shape) == 1:
                        var = tf.get_variable('biases', trainable=False)
                    else:
                        var = tf.get_variable('weights', trainable=False)
                    sess.run(var.assign(data))
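# A hedged sketch of the weight file this loader expects: a pickled dict
# mapping layer names to lists of arrays, where 1-D arrays are biases and
# higher-rank arrays are weights (the bvlc AlexNet .npy convention). The file
# name and shapes below are illustrative, not from the original project.
import numpy as np
dummy = {
    'conv1': [np.zeros((11, 11, 3, 96), dtype=np.float32),  # weights
              np.zeros((96,), dtype=np.float32)],           # biases
}
np.save('pretrained.npy', dummy)
# Read back with: np.load('pretrained.npy', encoding='bytes', allow_pickle=True).item()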
Example #6
    def load_weights(self, sess):
        """ Loads weights from specified layers (cfg) from a pre-trained network. """
        weights = np.load(self.cfg.net_dict["weights"])

        for layer_name in weights.keys():
            name_split = layer_name.split('_')  # split w, b from name
            layer = '_'.join(name_split[:-1])

            if layer in self.cfg.net_dict["train_layers"]:
                continue

            with tf.variable_scope(layer, reuse=True):
                watch_msg("Loading parameters for layer {}".format(layer))
                if name_split[-1] == 'W':
                    var = tf.get_variable('weights', trainable=False)
                elif name_split[-1] == 'b':
                    var = tf.get_variable('biases', trainable=False)
                else:
                    continue  # skip arrays that are neither weights nor biases
                sess.run(var.assign(weights[layer_name]))
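# A hedged sketch of the .npz layout this variant expects: array keys of the
# form '<layer>_W' and '<layer>_b', so splitting on '_' recovers the layer
# name and the parameter kind. Names and shapes here are illustrative.
import numpy as np
np.savez('pretrained.npz',
         conv1_W=np.zeros((3, 3, 3, 64), dtype=np.float32),
         conv1_b=np.zeros((64,), dtype=np.float32))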
Example #7
def conv(name,
         x,
         fsize,
         nfilters,
         stride=1,
         groups=1,
         padding='SAME',
         stddev=0.05,
         binit_val=0.05):
    """ Wrapper for convolutional layers """
    ninputs = int(x.get_shape()[-1].value / groups)

    convolve = lambda i, w: tf.nn.conv2d(
        i, w, strides=[1, stride, stride, 1], padding=padding)

    with tf.variable_scope(name) as scope:

        # Create random weights
        w_init = tf.random.truncated_normal(
            shape=[fsize[0], fsize[1], ninputs, nfilters],
            stddev=stddev,
            dtype=tf.float32)
        w = tf.get_variable('weights', initializer=w_init, dtype=tf.float32)

        b_init = tf.constant(binit_val, shape=[nfilters], dtype=tf.float32)
        b = tf.get_variable('biases', initializer=b_init, dtype=tf.float32)

        # Convolution
        if groups == 1:
            layer = convolve(x, w)
        else:
            x_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
            w_groups = tf.split(axis=3, num_or_size_splits=groups, value=w)

            out_groups = [convolve(i, k) for i, k in zip(x_groups, w_groups)]
            layer = tf.concat(axis=3, values=out_groups)

        layer += b

        watch_msg("Layer {} has shape {}".format(name, layer.shape))
        return tf.nn.relu(layer, name=scope.name)
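# A minimal usage sketch (TF 1.x graph mode; shapes are hypothetical and
# watch_msg is assumed to be in scope), showing the grouped path:
x = tf.placeholder(tf.float32, shape=[None, 56, 56, 64])
y = conv('conv_grouped', x, fsize=[3, 3], nfilters=128, groups=2)
# groups=2 splits the 64 input channels into two 32-channel halves and the
# 128 filters into two sets of 64; each half is convolved separately and the
# results are concatenated (AlexNet-style) -> [None, 56, 56, 128].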
Example #8
def fc(name, x, noutputs, relu=True, stddev=0.05, binit_val=0.05):
    """ Wrapper for Fully Connected layer. """

    layer_shape = x.get_shape()
    ninputs = layer_shape[1:].num_elements()

    with tf.variable_scope(name) as scope:
        # Create random weights
        w_init = tf.random.truncated_normal(shape=[ninputs, noutputs],
                                            stddev=stddev,
                                            dtype=tf.float32)
        w = tf.get_variable('weights', initializer=w_init, dtype=tf.float32)

        # Initialize bias
        b_init = tf.constant(binit_val, shape=[noutputs], dtype=tf.float32)
        b = tf.get_variable('biases', initializer=b_init, dtype=tf.float32)

        layer = tf.nn.xw_plus_b(x, w, b, name=scope.name)

        if relu:
            layer = tf.nn.relu(layer)
        watch_msg("Layer {} has shape {}".format(name, layer.shape))
        return layer
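# A minimal usage sketch (TF 1.x graph mode; shapes are hypothetical and
# watch_msg is assumed to be in scope). tf.nn.xw_plus_b expects a 2-D input,
# so flatten() from Example #1 is applied first:
x = tf.placeholder(tf.float32, shape=[None, 8, 8, 64])
logits = fc('fc_out', flatten(x), noutputs=10, relu=False)  # -> [None, 10]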
Example #9
def reductionA(name, x, n=384, m=256, l=224, k=192):
    """ Schema for the reduction A module for the Inception v4 """
    with tf.variable_scope(name):
        watch_msg("Module {}".format(name))
        pool1 = maxpool('{}_maxpool'.format(name),
                        x=x,
                        fsize=3,
                        stride=2,
                        padding='VALID')

        conv2 = conv('{}_conv2_3x3'.format(name),
                     x=x,
                     fsize=[3, 3],
                     nfilters=n,
                     stride=2,
                     padding='VALID')

        conv3_1 = conv('{}_conv3_1_1x1'.format(name),
                       x=x,
                       fsize=[1, 1],
                       nfilters=k)
        conv3_2 = conv('{}_conv3_2_3x3'.format(name),
                       x=conv3_1,
                       fsize=[3, 3],
                       nfilters=l)
        conv3_3 = conv('{}_conv3_3_3x3'.format(name),
                       x=conv3_2,
                       fsize=[3, 3],
                       nfilters=m,
                       stride=2,
                       padding='VALID')

        concat = tf.concat([pool1, conv2, conv3_3],
                           axis=3,
                           name='{}_concat'.format(name))
        watch_msg("Layer {} has shape {}".format(name, concat.shape))
        return concat
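# Note: the defaults (k, l, m, n) = (192, 224, 256, 384) correspond to the
# Inception-v4 row of the paper's Reduction-A table; the output concatenates
# C_in (from the max-pool) + n + m channels at half the spatial resolution.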
Example #10
def stem(name, x):
    """ Schema of the stem module for InceptionV4 """
    with tf.variable_scope(name):
        watch_msg("Module {}".format(name))
        conv1_1 = conv('{}_conv1_1_3x3'.format(name),
                       x=x,
                       fsize=[3, 3],
                       nfilters=32,
                       stride=2,
                       padding='VALID')
        conv1_2 = conv('{}_conv1_2_3x3'.format(name),
                       x=conv1_1,
                       fsize=[3, 3],
                       nfilters=32,
                       stride=1,
                       padding='VALID')
        conv1_3 = conv('{}_conv1_3_3x3'.format(name),
                       x=conv1_2,
                       fsize=[3, 3],
                       nfilters=64,
                       stride=1)

        pool1_4a = maxpool('{}_mxpool1'.format(name),
                           x=conv1_3,
                           fsize=3,
                           stride=2,
                           padding='VALID')
        conv1_4b = conv('{}_conv1_4_3x3'.format(name),
                        x=conv1_3,
                        fsize=[3, 3],
                        nfilters=96,
                        stride=2,
                        padding='VALID')
        concat1 = tf.concat([pool1_4a, conv1_4b],
                            axis=3,
                            name='{}_concat1'.format(name))
        watch_msg("Filter concatenation {}_1 has shape {}".format(
            name, concat1.shape))

        conv2_1a = conv('{}_conv2_1a_1_1x1'.format(name),
                        x=concat1,
                        fsize=[1, 1],
                        nfilters=64,
                        stride=1)
        conv2_2a = conv('{}_conv2_2a_3x3'.format(name),
                        x=conv2_1a,
                        fsize=[3, 3],
                        nfilters=96,
                        stride=1,
                        padding='VALID')

        conv2_1b = conv('{}_conv2_1b_1x1'.format(name),
                        x=concat1,
                        fsize=[1, 1],
                        nfilters=64,
                        stride=1)
        conv2_2b = conv('{}_conv2_2b_7x1'.format(name),
                        x=conv2_1b,
                        fsize=[7, 1],
                        nfilters=64,
                        stride=1)
        conv2_3b = conv('{}_conv2_3b_1x7'.format(name),
                        x=conv2_2b,
                        fsize=[1, 7],
                        nfilters=64,
                        stride=1)
        conv2_4b = conv('{}_conv2_4b_3x3'.format(name),
                        x=conv2_3b,
                        fsize=[3, 3],
                        nfilters=96,
                        stride=1,
                        padding='VALID')
        concat2 = tf.concat([conv2_2a, conv2_4b],
                            axis=3,
                            name='{}_concat2'.format(name))
        watch_msg("Filter concatenation {}_2 has shape {}".format(
            name, concat2.shape))

        conv3_1a = conv('{}_conv3_1_3x3'.format(name),
                        x=concat2,
                        fsize=[3, 3],
                        nfilters=192,
                        stride=2,
                        padding='VALID')
        pool3_1b = maxpool('{}_mxpool3'.format(name),
                           x=concat2,
                           fsize=3,
                           stride=2,
                           padding='VALID')

        concat3 = tf.concat([conv3_1a, pool3_1b],
                            axis=3,
                            name='{}_concat3'.format(name))
        watch_msg("Filter concatenation {}_3 has shape {}".format(
            name, concat3.shape))
        return concat3
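# Note: for the paper's 299x299x3 input this stem produces a 35x35x384 output
# (spatially 299 -> 149 -> 147 -> 73 -> 71 -> 35, with 160, 192, and 384
# channels at the three filter concatenations).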