Example #1
    def _create_conv(self):
        self._create_input()
        arg_scope = tf.contrib.framework.arg_scope
        with arg_scope([conv], nl=tf.nn.relu,
                       trainable=True, mode=self.mode, graph=self.graph):
            conv1 = conv(self.image, 7, 96, 'conv1')
            mean1, var1 = tf.nn.moments(conv1, [0,1,2])
            conv1_bn = tf.nn.batch_normalization(conv1, mean1, var1, 0, 1, 1e-5)
            pool1 = max_pool(conv1_bn, 'pool1', padding='SAME')

            conv2 = conv(pool1, 5, 256, 'conv2')
            mean2, var2 = tf.nn.moments(conv2, [0,1,2])
            conv2_bn = tf.nn.batch_normalization(conv2, mean2, var2, 0, 1, 1e-5)
            pool2 = max_pool(conv2_bn, 'pool2', padding='SAME')

            conv3 = conv(pool2, 3, 512, 'conv3', stride=1)

            conv4 = conv(conv3, 3, 512, 'conv4', stride=1)

            conv5 = conv(conv4, 3, 512, 'conv5', stride=1)
            pool5 = max_pool(conv5, 'pool5', padding='SAME')

            self.layer['conv1'] = conv1
            self.layer['conv2'] = conv2
            self.layer['conv3'] = conv3
            self.layer['conv4'] = conv4
            self.layer['pool5'] = pool5
            self.layer['conv_out'] = self.layer['conv5'] = conv5

        return pool5
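
A side note on the tf.nn.moments / tf.nn.batch_normalization pair used after conv1 and conv2 above: with offset 0 and scale 1 it simply standardizes each channel using the batch statistics, with no learned parameters. A minimal NumPy sketch of the same computation (the 1e-5 epsilon matches the call above; the function name and toy shapes are mine):

import numpy as np

def batch_standardize(x, eps=1e-5):
    """Per-channel standardization of an NHWC batch, mirroring
    tf.nn.moments(x, [0, 1, 2]) followed by tf.nn.batch_normalization(x, mean, var, 0, 1, eps)."""
    mean = x.mean(axis=(0, 1, 2), keepdims=True)  # per-channel mean over N, H, W
    var = x.var(axis=(0, 1, 2), keepdims=True)    # per-channel variance over N, H, W
    return (x - mean) / np.sqrt(var + eps)

x = np.random.randn(4, 8, 8, 96).astype(np.float32)           # toy NHWC batch
y = batch_standardize(x)
print(y.mean(axis=(0, 1, 2))[:3], y.std(axis=(0, 1, 2))[:3])  # roughly 0 and 1 per channel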
Example #2
  def build(self):
    """Create the network graph."""
    # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
    conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
    norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1')
    pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')

    # 2nd Layer: Conv (w ReLu)  -> Lrn -> Pool with 2 groups
    conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
    norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2')
    pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')

    # 3rd Layer: Conv (w ReLu)
    conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')

    # 4th Layer: Conv (w ReLu) split into two groups
    conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')

    # 5th Layer: Conv (w ReLu) -> Pool split into two groups
    conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
    pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')

    # 6th Layer: Flatten -> FC (w ReLu)
    flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
    fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')

    # 7th Layer: FC (w ReLu)
    fc7 = fc(fc6, 4096, 4096, name='fc7')

    # 8th Layer: FC and return unscaled activations
    self.fc8 = fc(fc7, 4096, self.num_classes, relu=False, name='fc8')
Example #3
 def encoder(self, x):
     out = conv2d(x, 20, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = conv2d(out, 50, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = tf.layers.flatten(out)
     out = dense(out, 500, activation=tf.nn.relu)
     return out
Example #4
 def __build_net(self):
     """
     Introduction
     ------------
         构建ONet模型结构
     """
     with tf.variable_scope('onet'):
         self.input = tf.placeholder(shape=[None, 48, 48, 3],
                                     dtype=tf.float32,
                                     name='input_data')
         layer = conv('conv1',
                      self.input,
                      kernel_size=(3, 3),
                      channels_output=32,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu1', layer)
         layer = max_pool('pool1', layer, kernel_size=(3, 3), stride=(2, 2))
         layer = conv('conv2',
                      layer,
                      kernel_size=(3, 3),
                      channels_output=64,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu2', layer)
         layer = max_pool('pool2',
                          layer,
                          kernel_size=(3, 3),
                          stride=(2, 2),
                          padding='VALID')
         layer = conv('conv3',
                      layer,
                      kernel_size=(3, 3),
                      channels_output=64,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu3', layer)
         layer = max_pool('pool3', layer, kernel_size=(2, 2), stride=(2, 2))
         layer = conv('conv4',
                      layer,
                      kernel_size=(2, 2),
                      channels_output=128,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu4', layer)
         layer = fc('fc1', layer, channels_output=256, relu=False)
         layer = prelu('prelu5', layer)
         fc2 = fc('fc2-1', layer, channels_output=2, relu=False)
         self.prob = tf.nn.softmax(fc2, axis=1, name='prob')
         self.loc = fc('fc2-2', layer, channels_output=4, relu=False)
Example #5
    def __init__(self):
        self._epochs = 20
        self._learning_rate = 0.01
        self._batch_size = 20
        self._data = self.getData()
        self._model = layers.Model(lr=self._learning_rate,
                                   blr=self._learning_rate)

        self._model.add_layer(
            layers.conv(ems=1,
                        nodes=20,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=20,
                        nodes=20,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(
            layers.conv(ems=20,
                        nodes=12,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=12,
                        nodes=12,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(
            layers.conv(ems=12,
                        nodes=6,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(
            layers.conv(ems=6,
                        nodes=6,
                        kernel_size=3,
                        padding=1,
                        activation_function_="relu"))
        self._model.add_layer(layers.max_pool(kernel_size=2))

        self._model.add_layer(layers.dense(eis=96, nodes=48, act_func="tanh"))
        self._model.add_layer(layers.dense(eis=48, nodes=3, act_func="none"))
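
For reference, the eis=96 fed to the first dense layer above is consistent with square 32x32 single-channel inputs, an assumption on my part since the snippet does not show the data shape: the padding=1, kernel_size=3 convolutions preserve height and width, and each of the three max_pool(kernel_size=2) layers halves them, leaving 6 channels of 4x4 feature maps. A quick check of that arithmetic:

def flattened_size(height, width, last_channels=6, num_pools=3):
    # 3x3 convs with padding=1 keep H and W; each stride-2 pool halves them
    h, w = height // 2 ** num_pools, width // 2 ** num_pools
    return last_channels * h * w

print(flattened_size(32, 32))  # 96, matching layers.dense(eis=96, ...)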
Example #6
def logit(h, is_training=True, update_batch_stats=True, stochastic=True, seed=1234, dropout_mask=None, return_mask=False, h_before_dropout=None):
    rng = np.random.RandomState(seed)
    if h_before_dropout is None:
        h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=128, seed=rng.randint(123456), name='c1')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c2')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c3')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

        h = L.max_pool(h, ksize=2, stride=2)
        if stochastic:
            h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden)

        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=256, seed=rng.randint(123456), name='c4')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c5')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c6')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

        h_before_dropout = L.max_pool(h, ksize=2, stride=2)

    # Making it possible to change or return a dropout mask
    if stochastic:
        if dropout_mask is None:
            dropout_mask = tf.cast(
                tf.greater_equal(tf.random_uniform(tf.shape(h_before_dropout), 0, 1, seed=rng.randint(123456)), 1.0 - FLAGS.keep_prob_hidden),
                tf.float32)
        else:
            dropout_mask = tf.reshape(dropout_mask, tf.shape(h_before_dropout))
        h = tf.multiply(h_before_dropout, dropout_mask)
        h = (1.0 / FLAGS.keep_prob_hidden) * h
    else:
        h = h_before_dropout
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=512, seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(L.bn(h, 512, is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=512, f_out=256, seed=rng.randint(123456), name='c8')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=256, f_out=128, seed=rng.randint(123456), name='c9')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling
    h = L.fc(h, 128, 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = L.bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')
    if return_mask:
        return h, tf.reshape(dropout_mask, [-1, 8*8*256]), h_before_dropout
    else:
        return h
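
The dropout branch above builds an explicit keep mask so the exact same mask can be reused (or returned and fed back in) for another forward pass. Here is a minimal NumPy sketch of that inverted-dropout logic; the function name and toy shapes are mine, not part of the snippet:

import numpy as np

def dropout_with_mask(h, keep_prob, mask=None, rng=None):
    """Inverted dropout: build (or reuse) a 0/1 keep mask and rescale by 1/keep_prob,
    mirroring the tf.random_uniform / tf.greater_equal / tf.multiply lines above."""
    if rng is None:
        rng = np.random.default_rng(0)
    if mask is None:
        # keep each unit with probability keep_prob
        mask = (rng.uniform(0.0, 1.0, size=h.shape) >= 1.0 - keep_prob).astype(h.dtype)
    return (h * mask) / keep_prob, mask

h = np.ones((2, 8, 8, 256), dtype=np.float32)
out1, mask = dropout_with_mask(h, keep_prob=0.5)
out2, _ = dropout_with_mask(h, keep_prob=0.5, mask=mask)  # reuses the exact same mask
assert np.allclose(out1, out2)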
Example #7
    def build_graph(self):
        self.iterator = tf.data.Iterator.from_structure(
            (tf.float32, tf.int32),
            (tf.TensorShape([None, 224, 224, 3]), tf.TensorShape([None]))
        )
        self.inputs, self.labels = self.iterator.get_next()

        sp, st = [3, 3], [1, 1]
        mp = [2, 2]

        self.conv1_1 = layers.conv(self.inputs, sp, 64, st, name='conv1_1')
        self.conv1_2 = layers.conv(self.conv1_1, sp, 64, st, name='conv1_2')

        pool1 = layers.max_pool(self.conv1_2, mp, mp, name='pool1')

        self.conv2_1 = layers.conv(pool1, sp, 128, st, name='conv2_1')
        self.conv2_2 = layers.conv(self.conv2_1, sp, 128, st, name='conv2_2')

        pool2 = layers.max_pool(self.conv2_2, mp, mp, name='pool2')

        self.conv3_1 = layers.conv(pool2, sp, 256, st, name='conv3_1')
        self.conv3_2 = layers.conv(self.conv3_1, sp, 256, st, name='conv3_2')
        self.conv3_3 = layers.conv(self.conv3_2, sp, 256, st, name='conv3_3')
        self.conv3_4 = layers.conv(self.conv3_3, sp, 256, st, name='conv3_4')

        pool3 = layers.max_pool(self.conv3_4, mp, mp, name='pool3')

        self.conv4_1 = layers.conv(pool3, sp, 512, st, name='conv4_1')
        self.conv4_2 = layers.conv(self.conv4_1, sp, 512, st, name='conv4_2')
        self.conv4_3 = layers.conv(self.conv4_2, sp, 512, st, name='conv4_3')
        self.conv4_4 = layers.conv(self.conv4_3, sp, 512, st, name='conv4_4')

        pool4 = layers.max_pool(self.conv4_4, mp, mp, name='pool4')

        self.conv5_1 = layers.conv(pool4, sp, 512, st, name='conv5_1')
        self.conv5_2 = layers.conv(self.conv5_1, sp, 512, st, name='conv5_2')
        self.conv5_3 = layers.conv(self.conv5_2, sp, 512, st, name='conv5_3')
        self.conv5_4 = layers.conv(self.conv5_3, sp, 512, st, name='conv5_4')

        pool5 = layers.max_pool(self.conv5_4, mp, mp, name='pool5')
        flattened = tf.reshape(pool5, [-1, 25088])

        fc6 = layers.fc(flattened, 4096, name='fc6')
        fc7 = layers.fc(fc6, 4096, name='fc7')

        self.logits = layers.fc(fc7, self.num_classes, relu=False,
                                name='fc8')
        self.probs_op = tf.nn.softmax(self.logits)
        self.pred_op = tf.argmax(input=self.logits, axis=1)
        corrects_op = tf.equal(tf.cast(self.pred_op, tf.int32),
                               self.labels)
        self.acc_op = tf.reduce_mean(tf.cast(corrects_op, tf.float32))
Example #8
 def net(self, X, reuse=None):
     with tf.variable_scope('EyeNet', reuse=reuse):
         conv1 = conv2d(X, output_dims=20, k_h=5, k_w=5, s_h=1, s_w=1, padding='VALID', name='conv1')
         pool1 = max_pool(conv1, k_h=2, k_w=2, s_h=2, s_w=2, padding='SAME', name='pool1')
         conv2 = conv2d(pool1, output_dims=50, k_h=5, k_w=5, s_h=1, s_w=1, padding='VALID', name='conv2')
         pool2 = max_pool(conv2, k_h=2, k_w=2, s_h=2, s_w=2, padding='SAME', name='pool2')
         flatten = tf.reshape(pool2, [-1, pool2.get_shape().as_list()[1]
                                          * pool2.get_shape().as_list()[2]
                                          * pool2.get_shape().as_list()[3]], name='conv_reshape')
         fc1 = fc(flatten, output_dims=500, name='fc1')
         relu1 = relu(fc1, name='relu1')
         out = fc(relu1, output_dims=2, name='output')
         return out
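
The flatten above multiplies the three trailing static dimensions of pool2 by hand; np.prod over get_shape().as_list()[1:] is an equivalent, more compact form. A small TF1-style sketch (the placeholder shape is only a stand-in, not the real EyeNet input size):

import numpy as np
import tensorflow as tf  # TF1-style graph code, as in the snippet

pool2 = tf.placeholder(tf.float32, [None, 13, 13, 50])     # stand-in for the snippet's pool2
flat_dim = int(np.prod(pool2.get_shape().as_list()[1:]))   # H * W * C
flatten = tf.reshape(pool2, [-1, flat_dim], name='conv_reshape')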
Example #9
def logit(x, dropout_mask=None, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):

    rng = numpy.random.RandomState(seed)
    
    h = L.gl(x, std=FLAGS.sigma)
    h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=layer_sizes[0], seed=rng.randint(123456), name='c1')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c2')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c3')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    
    h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
    
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[1], seed=rng.randint(123456), name='c4')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c5')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c6')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    
    h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
    
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[2], seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(bn(h, layer_sizes[2], is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[2], f_out=layer_sizes[3], seed=rng.randint(123456), name='c8')
    h = L.lrelu(bn(h, layer_sizes[3], is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[3], f_out=layer_sizes[4], seed=rng.randint(123456), name='c9')
    h = L.lrelu(bn(h, layer_sizes[4], is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling

    # dropout with mask
    if dropout_mask is None:
        # Base dropout mask is 1 (Fully Connected)
        dropout_mask = tf.ones_like(h)

    h = h * dropout_mask

    h = L.fc(h, layer_sizes[4], 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')
    
    return h, dropout_mask
Example #10
    def build_graph(self):
        self.iterator = tf.data.Iterator.from_structure(
            (tf.float32, tf.int32),
            (tf.TensorShape([None, 227, 227, 3]), tf.TensorShape([None]))
        )
        self.inputs, self.labels = self.iterator.get_next()

        self.conv1 = layers.conv(self.inputs, [11, 11], 96, [4, 4],
                                 padding='VALID', name='conv1', mask=True)
        norm1 = layers.lrn(self.conv1, 2, 1e-05, 0.75, name='norm1')
        pool1 = layers.max_pool(norm1, [3, 3], [2, 2], padding='VALID',
                                name='pool1')

        self.conv2 = layers.conv(pool1, [5, 5], 256, [1, 1], groups=2,
                                 name='conv2', mask=True)
        norm2 = layers.lrn(self.conv2, 2, 1e-05, 0.75, name='norm2')
        pool2 = layers.max_pool(norm2, [3, 3], [2, 2], padding='VALID',
                                name='pool2')

        self.conv3 = layers.conv(pool2, [3, 3], 384, [1, 1], name='conv3',
                                 mask=True)

        self.conv4 = layers.conv(self.conv3, [3, 3], 384, [1, 1], groups=2,
                                 name='conv4', mask=True)

        self.conv5 = layers.conv(self.conv4, [3, 3], 256, [1, 1], groups=2,
                                 name='conv5', mask=True)
        pool5 = layers.max_pool(self.conv5, [3, 3], [2, 2], padding='VALID',
                                name='pool5')

        self.keep_prob = tf.get_variable('keep_prob', shape=(),
                                         trainable=False)

        flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
        fc6 = layers.fc(flattened, 4096, name='fc6')
        dropout6 = layers.dropout(fc6, self.keep_prob)

        fc7 = layers.fc(dropout6, 4096, name='fc7')
        dropout7 = layers.dropout(fc7, self.keep_prob)

        self.logits = layers.fc(dropout7, self.num_classes, relu=False,
                                name='fc8')
        self.probs_op = tf.nn.softmax(self.logits)
        self.pred_op = tf.argmax(input=self.logits, axis=1)
        corrects_op = tf.equal(tf.cast(self.pred_op, tf.int32),
                               self.labels)
        self.acc_op = tf.reduce_mean(tf.cast(corrects_op, tf.float32))
Example #11
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out
Example #12
	def create(self, dropout_keep_prob, is_training=True):
		# Layer 1: the first 3 is filter_height, the second 3 is filter_width; stride defaults to 1.
		# conv1(x, filter_height, filter_width, num_filters, name, stride=1, padding='SAME')
		conv1 = conv_layer(self.X, 3, 3, 16, name='conv1', activation_function=self.activation_function, is_batch_normalization=self.is_batch_normalization)
		self.out = conv1
		""" All residual blocks use zero-padding for shortcut connections """
        # No matter how deep the network it is, just be divided into 4-5 Big Block.
        # Every Block can be divided into Block1_1(Block1_ResUnit1), Block1_2(Block1_ResUnit2), Block1_3(Block1_ResUnit3) again.
		# Then every Block1_1 is already residual unit.
		# Every resiudal unit has 2 conv layer.
		# one for loop has 6 conv layer.
		# residual_block should be changed into residual_unit.
		for i in range(self.NUM_CONV): # i=0,1,2.
		# It seems that every Block has 3 Residual Unit(block with lowercase).
			resBlock2 = residual_block(self.out, 16, name = 'resBlock2_{}'.format(i + 1), block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock2
		# 1 max_pool layer
		pool2 = max_pool(self.out, name='pool2')
		self.out = pool2
		# This differs from the original paper: there, no pooling is used between the middle blocks.
		# Every residual unit has 2 conv layers, and every big block has 3 residual units.
		for i in range(self.NUM_CONV):
			resBlock3 = residual_block(self.out, 32, name = 'resBlock3_{}'.format(i + 1),block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock3
		# 1 max_pool layer
		pool3 = max_pool(self.out, name='pool3')
		self.out = pool3
		# i = 0, 1, 2: every residual unit has 2 conv layers, so one for-loop adds 6 conv layers
		# (3 residual units per big block).
		for i in range(self.NUM_CONV):
			resBlock4 = residual_block(self.out, 64, name = 'resBlock4_{}'.format(i + 1),block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock4
		# 1 global pooling layer
		# Perform global average pooling to reduce the spatial dimensions to 1x1
		global_pool = global_average(self.out, name='gap')
		self.out = global_pool
		# flatten is not a layer
		flatten = tf.contrib.layers.flatten(self.out)
		# 1 fully connected layer.
		# @Hazard
		# dropout_keep_prob: float, the fraction of units to keep before the final layer.
		dpot_net = slim.dropout(flatten, dropout_keep_prob, is_training=is_training, scope='Dropout')
		fc5 = fc_layer(dpot_net, input_size=64, output_size=self.NUM_CLASSES, relu=False, name='fc5')
		self.out = fc5
Example #13
 def __init__(self):
     self.lr = 0.01
     # conv net
     self.c1 = conv(1, 6, kernel=5, learning_rate=self.lr)
     self.relu1 = relu()
     self.s2 = max_pool(kernel=2, stride=2)
     self.c3 = conv(6, 16, kernel=5, learning_rate=self.lr)
     self.relu3 = relu()
     self.s4 = max_pool(kernel=2, stride=2)
     self.c5 = conv(16, 120, kernel=4, learning_rate=self.lr)
     self.relu5 = relu()
     # fc net
     self.f6 = fc(120, 84, learning_rate=self.lr)
     self.relu6 = relu()
     self.f7 = fc(84, 10)
     self.sig7 = softmax()
     # record the shape between the conv net and fc net
     self.conv_out_shape = None
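
As a sanity check on f6 = fc(120, 84): assuming a 28x28 single-channel input and VALID-style convolutions (neither is stated in the snippet), the conv/pool stack above reduces the image to a single 1x1 position with 120 channels, so exactly 120 features reach f6:

def valid_conv(size, kernel):
    # output size of a stride-1 'VALID' convolution
    return size - kernel + 1

def pool(size, kernel=2, stride=2):
    return (size - kernel) // stride + 1

s = 28                # assumed input height/width (e.g. MNIST)
s = valid_conv(s, 5)  # c1: 24
s = pool(s)           # s2: 12
s = valid_conv(s, 5)  # c3: 8
s = pool(s)           # s4: 4
s = valid_conv(s, 4)  # c5: 1 -> 120 channels x 1 x 1 = 120 features into f6
print(s)              # 1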
Example #14
    def formNet(self, img_ph, ann_ph, base_filter_num=8):
        down_layer_list = {}
        curr_layer = img_ph

        # Down sampling
        for i in range(5):
            num_filter = base_filter_num * 2**i
            if i == 0:
                conv1 = layers.conv2d(
                    curr_layer,
                    W=[3, 3,
                       img_ph.get_shape().as_list()[-1], num_filter],
                    b=[num_filter])
            else:
                conv1 = layers.conv2d(curr_layer,
                                      W=[3, 3, num_filter // 2, num_filter],
                                      b=[num_filter])
            relu1 = tf.nn.relu(conv1)
            conv2 = layers.conv2d(relu1,
                                  W=[3, 3, num_filter, num_filter],
                                  b=[num_filter])
            down_layer_list[i] = tf.nn.relu(conv2)
            print('layer: ', i, '\tsize: ',
                  down_layer_list[i].get_shape().as_list())
            if i < 4:
                curr_layer = layers.max_pool(down_layer_list[i])
        curr_layer = down_layer_list[4]

        # Up sampling
        for i in range(3, -1, -1):
            num_filter = base_filter_num * 2**(i + 1)
            deconv_output_shape = tf.shape(down_layer_list[i])
            deconv1 = layers.conv2d_transpose(
                curr_layer,
                W=[3, 3, num_filter // 2, num_filter],
                b=[num_filter // 2],
                stride=2)
            concat1 = layers.crop_and_concat(tf.nn.relu(deconv1),
                                             down_layer_list[i])
            conv1 = layers.conv2d(concat1,
                                  W=[3, 3, num_filter, num_filter // 2],
                                  b=[num_filter // 2],
                                  strides=[1, 1, 1, 1])
            relu1 = tf.nn.relu(conv1)
            conv2 = layers.conv2d(relu1,
                                  W=[3, 3, num_filter // 2, num_filter // 2],
                                  b=[num_filter // 2],
                                  strides=[1, 1, 1, 1])
            relu2 = tf.nn.relu(conv2)
            curr_layer = relu2

        # Output
        conv = layers.conv2d(curr_layer, W=[1, 1, base_filter_num, 3], b=[3])
        relu = tf.nn.relu(conv)
        print('final relu: ', relu.get_shape().as_list())
        return tf.expand_dims(tf.argmax(relu, axis=-1), axis=-1), relu
Example #15
    def create(self):

        conv1 = conv_layer(self.X, 3, 3, 16, name='conv1')
        self.out = conv1
        """ All residual blocks use zer-padding
		for shortcut connections """

        for i in range(self.NUM_CONV):
            resBlock2 = residual_block(self.out,
                                       16,
                                       name='resBlock2_{}'.format(i + 1))
            self.out = resBlock2

        pool2 = max_pool(self.out, name='pool2')
        self.out = pool2

        for i in range(self.NUM_CONV):
            resBlock3 = residual_block(self.out,
                                       32,
                                       name='resBlock3_{}'.format(i + 1))
            self.out = resBlock3

        pool3 = max_pool(self.out, name='pool3')
        self.out = pool3

        for i in range(self.NUM_CONV):
            resBlock4 = residual_block(self.out,
                                       64,
                                       name='resBlock4_{}'.format(i + 1))
            self.out = resBlock4

        # Perform global average pooling to make spatial dimensions as 1x1
        global_pool = global_average(self.out, name='gap')
        self.out = global_pool

        flatten = tf.contrib.layers.flatten(self.out)
        fc5 = fc_layer(flatten,
                       input_size=64,
                       output_size=self.NUM_CLASSES,
                       relu=False,
                       name='fc5')

        self.out = fc5
Example #16
def forward(data, model):
    """
    Input:
        data  : (N, C, H, W) input batch
        model : dict of parameters {w1, b1, w3, b3, w5, b5, w6, b6, wo, bo}
    Output:
        out   : (N, K) unnormalized class scores (apply softmax separately)
    """
    w1 = "w1"
    b1 = "b1"
    w3 = "w3"
    b3 = "b3"
    w5 = "w5"
    b5 = "b5"
    w6 = "w6"
    b6 = "b6"
    wo = "wo"
    bo = "bo"

    #forward pass
    h1_pre = layers.conv_forward(data, model[w1], model[b1])
    h1 = layers.ReLu_forward(h1_pre)
    #print (h1[0][0])

    h2 = layers.max_pool(h1, 2)

    h3_pre = layers.conv_forward(h2, model[w3], model[b3])
    h3 = layers.ReLu_forward(h3_pre)

    h4 = layers.max_pool(h3, 2)

    h5_pre = layers.conv_forward(h4, model[w5], model[b5])
    h5 = layers.ReLu_forward(h5_pre)

    h6 = layers.full_forward(h5, model[w6], model[b6])

    out = layers.full_forward(h6, model[wo],
                              model[bo])  # raw scores; apply softmax afterwards for probabilities
    return out  # softmax is monotonic, so argmax over the raw scores gives the same prediction
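
The comment on the return value relies on softmax being monotonic and order-preserving (not linear): applying it never changes which class scores highest, so it can be skipped when only the predicted class is needed. A quick NumPy check:

import numpy as np

def softmax(scores):
    # numerically stable softmax over the class axis
    shifted = scores - scores.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

scores = np.random.randn(5, 10)  # stand-in for the (N, K) output of full_forward
assert (np.argmax(scores, axis=1) == np.argmax(softmax(scores), axis=1)).all()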
Example #17
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):
    h = x

    rng = numpy.random.RandomState(seed)

    h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=128, seed=rng.randint(123456), name='c1')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c2')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c3')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=256, seed=rng.randint(123456), name='c4')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c5')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c6')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=512, seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(L.bn(h, 512, is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=512, f_out=256, seed=rng.randint(123456), name='c8')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=256, f_out=128, seed=rng.randint(123456), name='c9')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h1 = tf.reduce_mean(h, reduction_indices=[1, 2])  # Features to be aligned
    h = L.fc(h1, 128, 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = L.bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')

    return h, h1
Example #18
    def _build_graph(self):
        self.x = tf.placeholder(tf.float32, [None, 227, 227, 3])
        self.y = tf.placeholder(tf.float32, [None, 2])
        self.keep_prob = tf.placeholder_with_default(1.0,
                                                     shape=[],
                                                     name='dropout_keep_prob')

        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
        conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
        norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1')
        pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')

        # 2nd Layer: Conv (w ReLu)  -> Lrn -> Pool with 2 groups
        conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
        norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2')
        pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')

        # 3rd Layer: Conv (w ReLu)
        conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')

        # 4th Layer: Conv (w ReLu) split into two groups
        conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')

        # 5th Layer: Conv (w ReLu) -> Pool split into two groups
        conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
        pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')

        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout
        flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
        fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')
        dropout6 = dropout(fc6, self.keep_prob)

        # 7th Layer: FC (w ReLu) -> Dropout
        fc7 = fc(dropout6, 4096, 4096, name='fc7')
        dropout7 = dropout(fc7, self.keep_prob)

        # 8th Layer: FC and return unscaled activations
        self.fc8 = fc(dropout7, 4096, self.num_classes, relu=False, name='fc8')
        self.prob = tf.nn.softmax(self.fc8, name='prob')
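
The 6 * 6 * 256 flatten size follows from the 227x227 input: the layers that pass padding='VALID' shrink the spatial size, while the remaining convolutions appear to default to SAME padding and leave it unchanged. A quick check of that arithmetic:

def valid_out(size, kernel, stride):
    # output size of a 'VALID' conv/pool layer
    return (size - kernel) // stride + 1

s = 227
s = valid_out(s, 11, 4)  # conv1: 55
s = valid_out(s, 3, 2)   # pool1: 27
s = valid_out(s, 3, 2)   # pool2: 13 (conv2 keeps the size)
s = valid_out(s, 3, 2)   # pool5: 6  (conv3-conv5 keep the size)
print(s * s * 256)       # 9216 = 6 * 6 * 256, matching the reshape above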
Example #19
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):

    h = x
    rng = numpy.random.RandomState(seed)
    print(h)


    h = L.conv(h, ksize=5, stride=1, f_in=1, f_out=32, padding='VALID', seed=rng.randint(123456), name='c1')
    h = L.max_pool(h, ksize=2, stride=2, padding='VALID')
    # h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

    h = L.conv(h, ksize=5, stride=1, f_in=32, f_out=64, padding='VALID', seed=rng.randint(123456), name='c2')
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h
    h = L.max_pool(h, ksize=2, stride=2, padding='VALID')
#    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

#    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling
    h = tf.layers.flatten(h)
    print(h)
    
    h = L.fc(h, 64*4*4, 512, seed=rng.randint(123456), name='fc1')
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h
    h = L.fc(h, 512, 10, seed=rng.randint(123456), name='fc2')
    print(h)

#    if FLAGS.top_bn:
#        h = L.bn(h, 10, is_training=is_training,
#                 update_batch_stats=update_batch_stats, name='bfc')

    return h
Example #20
  def build(self):
    """Create the network graph."""
    # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
    conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
    norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1')
    pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')

    # 2nd Layer: Conv (w ReLu)  -> Lrn -> Pool with 2 groups
    conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
    norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2')
    pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')

    # 3rd Layer: Conv (w ReLu)
    conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')

    # 4th Layer: Conv (w ReLu) split into two groups
    conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')

    # 5th Layer: Conv (w ReLu) -> Pool split into two groups
    conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
    pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')

    # 6th Layer: Flatten -> FC (w ReLu)
    flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
    fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')

    fc7_factor = fc(fc6, 4096, self.num_factor_units, 'fc7_factor')
    fc7_shared = fc(fc6, 4096, 4096 - self.num_factor_units, 'fc7_shared')

    with tf.variable_scope('fc7_factor', reuse=True):
      self.assign_factor = tf.group(tf.get_variable('weights').assign(self.factor_weights),
                                    tf.get_variable('biases').assign(self.factor_biases))

    fc7_concat = tf.concat([fc7_factor, fc7_shared], axis=1, name='fc7_concat')

    # 8th Layer: FC and return unscaled activations
    self.fc8 = fc(fc7_concat, 4096, self.num_classes, relu=False, name='fc8')
Example #21
 def __build_net(self):
     """
     Introduction
         构建mtcnn模型级联第一层
     """
     with tf.variable_scope('pnet'):
         self.input = tf.placeholder(name='input_data',
                                     shape=[None, None, None, 3],
                                     dtype=tf.float32)
         layer = conv('conv1',
                      self.input,
                      kernel_size=(3, 3),
                      channels_output=10,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu1', layer)
         layer = max_pool('pool1', layer, kernel_size=[2, 2], stride=(2, 2))
         layer = conv('conv2',
                      layer,
                      kernel_size=(3, 3),
                      channels_output=16,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu2', layer)
         layer = conv('conv3',
                      layer,
                      kernel_size=(3, 3),
                      channels_output=32,
                      stride=(1, 1),
                      padding='VALID',
                      relu=False)
         layer = prelu('prelu3', layer)
         conv4_1 = conv('conv4-1',
                        layer,
                        kernel_size=(1, 1),
                        channels_output=2,
                        stride=(1, 1),
                        relu=False)
         self.prob = tf.nn.softmax(conv4_1, axis=3, name='prob')
         self.loc = conv('conv4-2',
                         layer,
                         kernel_size=(1, 1),
                         channels_output=4,
                         stride=(1, 1),
                         relu=False)
Example #22
    def step_down(name, input_, filter_size=3, res_blocks=2, keep_prob=1., training=False):

        with tf.variable_scope(name):
            
            with tf.variable_scope("res_block_0"):
                conv_out, tiled_input = layers.res_block(input_, filter_size, channel_multiplier=2, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
            
            for i in range(1, res_blocks):
                with tf.variable_scope("res_block_" + str(i)):
                    conv_out = layers.res_block(conv_out, filter_size, channel_multiplier=1, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
            
            conv_out = conv_out + tiled_input

            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            
            bottom_out = layers.dropout(pool_out, keep_prob)
            side_out = layers.dropout(conv_out, keep_prob)

        return bottom_out, side_out
Example #23
    def create(self, imageHeight, imageWidth, num_classes, evalFLAG):

        graph = tf.Graph()

        with graph.as_default():

            num_hidden = 256

            training = not evalFLAG
            with tf.name_scope('Inputs'):
                inputs = tf.placeholder(tf.float32,
                                        [None, imageHeight, imageWidth, 1],
                                        name='inputs')
                if evalFLAG:
                    tf.summary.image('inputs', inputs, max_outputs=1)

            seq_len = tf.placeholder(tf.int32, [None], name='seq_len')

            targets = tf.sparse_placeholder(tf.int32, name='targets')

            targets_len = tf.placeholder(tf.int32, name='targets_len')

            conv_keep_prob = 0.8
            lstm_keep_prob = 0.5

            # Layer 1
            with tf.name_scope('Layer_Conv_1'):
                h_conv1 = CNN(x=inputs,
                              filters=16,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              name='conv1',
                              activation=tf.nn.leaky_relu,
                              evalFLAG=evalFLAG,
                              initializer=tf.contrib.layers.xavier_initializer(
                                  uniform=False))
                h_pool1, seq_len_1, imageHeight, imageWidth = max_pool(
                    h_conv1, [2, 2], seq_len, imageHeight, imageWidth,
                    evalFLAG)
                h_pool1 = tf.layers.dropout(h_pool1,
                                            rate=0.0,
                                            training=training)

            # Layer 2
            with tf.name_scope('Layer_Conv_2'):
                h_conv2 = CNN(x=h_pool1,
                              filters=32,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              name='conv2',
                              activation=tf.nn.leaky_relu,
                              evalFLAG=evalFLAG,
                              initializer=tf.contrib.layers.xavier_initializer(
                                  uniform=False))
                h_pool2, seq_len_2, imageHeight, imageWidth = max_pool(
                    h_conv2, [2, 2], seq_len_1, imageHeight, imageWidth,
                    evalFLAG)
                h_pool2 = tf.layers.dropout(h_pool2,
                                            rate=(1 - conv_keep_prob),
                                            training=training)

            # Layer 3
            with tf.name_scope('Layer_Conv_3'):
                h_conv3 = CNN(x=h_pool2,
                              filters=48,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              name='conv3',
                              activation=tf.nn.leaky_relu,
                              evalFLAG=evalFLAG,
                              initializer=tf.contrib.layers.xavier_initializer(
                                  uniform=False))
                h_pool3, seq_len_3, imageHeight, imageWidth = max_pool(
                    h_conv3, [2, 2], seq_len_2, imageHeight, imageWidth,
                    evalFLAG)
                h_pool3 = tf.layers.dropout(h_pool3,
                                            rate=(1 - conv_keep_prob),
                                            training=training)

            # Layer 4
            with tf.name_scope('Layer_Conv_4'):
                h_conv4 = CNN(x=h_pool3,
                              filters=64,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              name='conv4',
                              activation=tf.nn.leaky_relu,
                              evalFLAG=evalFLAG,
                              initializer=tf.contrib.layers.xavier_initializer(
                                  uniform=False))
                h_pool4, seq_len_4, imageHeight, imageWidth = max_pool(
                    h_conv4, [1, 1], seq_len_3, imageHeight, imageWidth,
                    evalFLAG)
                h_pool4 = tf.layers.dropout(h_pool4,
                                            rate=(1 - conv_keep_prob),
                                            training=training)

            # Layer 5
            with tf.name_scope('Layer_Conv_5'):
                h_conv5 = CNN(x=h_pool4,
                              filters=80,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              name='conv5',
                              activation=tf.nn.leaky_relu,
                              evalFLAG=evalFLAG,
                              initializer=tf.contrib.layers.xavier_initializer(
                                  uniform=False))
                h_pool5, seq_len_5, imageHeight, imageWidth = max_pool(
                    h_conv5, [1, 1], seq_len_4, imageHeight, imageWidth,
                    evalFLAG)
                h_pool5 = tf.layers.dropout(h_pool5,
                                            rate=(1 - lstm_keep_prob),
                                            training=training)

            with tf.name_scope('Reshaping_step'):
                h_cw_concat = tf.transpose(h_pool5, (2, 0, 1, 3))
                h_cw_concat = tf.reshape(
                    h_cw_concat, (int(imageWidth), -1, int(imageHeight * 80)))
                h_cw_concat = tf.transpose(h_cw_concat, (1, 0, 2))

            with tf.name_scope('Layer_BLSTM_1'):

                h_bilstm1 = bidirectionalLSTM(h_cw_concat, num_hidden,
                                              seq_len_5, '1', evalFLAG)
                h_bilstm1 = tf.concat(h_bilstm1, 2)
                h_bilstm1 = tf.layers.dropout(h_bilstm1,
                                              rate=(1 - lstm_keep_prob),
                                              training=training)

            with tf.name_scope('Layer_BLSTM_2'):

                h_bilstm2 = bidirectionalLSTM(h_bilstm1, num_hidden, seq_len_5,
                                              '2', evalFLAG)
                h_bilstm2 = tf.concat(h_bilstm2, 2)
                h_bilstm2 = tf.layers.dropout(h_bilstm2,
                                              rate=(1 - lstm_keep_prob),
                                              training=training)

            with tf.name_scope('Layer_BLSTM_3'):

                h_bilstm3 = bidirectionalLSTM(h_bilstm2, num_hidden, seq_len_5,
                                              '3', evalFLAG)
                h_bilstm3 = tf.concat(h_bilstm3, 2)
                h_bilstm3 = tf.layers.dropout(h_bilstm3,
                                              rate=(1 - lstm_keep_prob),
                                              training=training)

            with tf.name_scope('Layer_BLSTM_4'):

                h_bilstm4 = bidirectionalLSTM(h_bilstm3, num_hidden, seq_len_5,
                                              '4', evalFLAG)
                h_bilstm4 = tf.concat(h_bilstm4, 2)
                h_bilstm4 = tf.layers.dropout(h_bilstm4,
                                              rate=(1 - lstm_keep_prob),
                                              training=training)

            with tf.name_scope('Layer_BLSTM_5'):

                h_bilstm5 = bidirectionalLSTM(h_bilstm4, num_hidden, seq_len_5,
                                              '5', evalFLAG)
                h_bilstm5 = tf.concat(h_bilstm5, 2)
                h_bilstm5 = tf.layers.dropout(h_bilstm5,
                                              rate=(1 - lstm_keep_prob),
                                              training=training)

            with tf.name_scope('Layer_Linear') as ns:
                outputs = tf.transpose(h_bilstm5, (1, 0, 2))
                outputs = tf.reshape(outputs, (-1, 2 * num_hidden))
                logits = FNN(outputs, num_classes, ns, None, evalFLAG)

            with tf.name_scope('Logits'):
                logits = tf.reshape(logits, (int(imageWidth), -1, num_classes))

            seq_len_5 = tf.maximum(seq_len_5, targets_len)

            n_batches = tf.placeholder(tf.float32, name='n_batches')
            previousCost = tf.placeholder(tf.float32, name='previous_cost')

            with tf.name_scope('CTC_Loss'):
                loss = tf.nn.ctc_loss(targets,
                                      logits,
                                      seq_len_5,
                                      preprocess_collapse_repeated=False,
                                      ctc_merge_repeated=True)
                with tf.name_scope('total'):
                    batch_cost = tf.reduce_mean(loss)
                    cost = batch_cost / n_batches + previousCost

            tf.summary.scalar('CTC_loss', cost)

            with tf.name_scope('train'):
                learning_rate = tf.placeholder(tf.float32,
                                               name='learning_rate')
                optimizer = tf.train.AdamOptimizer(
                    learning_rate=learning_rate).minimize(batch_cost)

            with tf.name_scope('predictions'):
                predictions, log_prob = tf.nn.ctc_beam_search_decoder(
                    logits, seq_len_5, merge_repeated=False)

            with tf.name_scope('CER'):
                with tf.name_scope('Mean_CER_per_word'):
                    previousEDnorm = tf.placeholder(tf.float32,
                                                    name='previousEDnorm')
                    EDnorm = tf.reduce_mean(
                        tf.edit_distance(
                            tf.cast(predictions[0], tf.int32),
                            targets,
                            normalize=True)) / n_batches + previousEDnorm

                    if evalFLAG:
                        tf.summary.scalar('EDnorm', EDnorm)

                with tf.name_scope('Absolute_CER_total_set'):
                    setTotalChars = tf.placeholder(tf.float32,
                                                   name='setTotalChars')
                    previousEDabs = tf.placeholder(tf.float32,
                                                   name='previousEDabs')
                    errors = tf.edit_distance(tf.cast(predictions[0],
                                                      tf.int32),
                                              targets,
                                              normalize=False)
                    EDabs = tf.reduce_sum(
                        errors) / setTotalChars + previousEDabs
                    if evalFLAG:
                        tf.summary.scalar('EDabs', EDabs)

            ED = [EDnorm, EDabs]

            saver = tf.train.Saver(tf.global_variables(),
                                   max_to_keep=5,
                                   keep_checkpoint_every_n_hours=1)

            merged = tf.summary.merge_all()

            all_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

            num_param = sum([
                np.prod(np.array(var.op.outputs[0].shape.as_list()))
                for var in all_vars
            ])

            print(num_param)

            return (graph, saver, inputs, seq_len, targets, targets_len,
                    learning_rate, n_batches, setTotalChars, previousEDabs,
                    previousEDnorm, previousCost, optimizer, batch_cost, cost,
                    errors, ED, predictions, merged)
Example #24
def inference(inputs, num_classes=34, is_training=False):

    conv1_1 = layers.conv2d(inputs, 3, 64, name='conv1_1')
    conv1_2 = layers.conv2d(conv1_1, 3, 64, name='conv1_2')

    pool1 = layers.max_pool(conv1_2, name='pool1')

    conv2_1 = layers.conv2d(pool1, 3, 128, name='conv2_1')
    conv2_2 = layers.conv2d(conv2_1, 3, 128, name='conv2_2')

    pool2 = layers.max_pool(conv2_2, name='pool2')

    conv3_1 = layers.conv2d(pool2, 3, 256, name='conv3_1')
    conv3_2 = layers.conv2d(conv3_1, 3, 256, name='conv3_2')
    conv3_3 = layers.conv2d(conv3_2, 3, 256, name='conv3_3')

    pool3 = layers.max_pool(conv3_3, name='pool3')

    conv4_1 = layers.conv2d(pool3, 3, 512, name='conv4_1')
    conv4_2 = layers.conv2d(conv4_1, 3, 512, name='conv4_2')
    conv4_3 = layers.conv2d(conv4_2, 3, 512, name='conv4_3')

    pool4 = layers.max_pool(conv4_3, name='pool4')

    conv5_1 = layers.conv2d(pool4, 3, 512, name='conv5_1')
    conv5_2 = layers.conv2d(conv5_1, 3, 512, name='conv5_2')
    conv5_3 = layers.conv2d(conv5_2, 3, 512, name='conv5_3')

    pool5 = layers.max_pool(conv5_3, name='pool5')

    fc6 = layers.conv2d(pool5, 7, 4096, name='fc6')

    if is_training:
        fc6 = layers.dropout(fc6, keep_prob=0.5, name='drop6')

    fc7 = layers.conv2d(fc6, 1, 4096, name='fc7')

    if is_training:
        fc7 = layers.dropout(fc7, keep_prob=0.5, name='drop7')

    score_fr = layers.conv2d(fc7, 1, num_classes, name='score_fr')

    upscore2 = layers.deconv2d(score_fr,
                               4,
                               num_classes,
                               stride=2,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore2')

    score_pool4 = layers.conv2d(pool4,
                                1,
                                num_classes,
                                activation=None,
                                name='score_pool4')
    fuse_pool4 = tf.add(upscore2, score_pool4, name='fuse_pool4')
    upscore4 = layers.deconv2d(fuse_pool4,
                               4,
                               num_classes,
                               stride=2,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore4')

    score_pool3 = layers.conv2d(pool3,
                                1,
                                num_classes,
                                activation=None,
                                name='score_pool3')
    fuse_pool3 = tf.add(upscore4, score_pool3, name='fuse_pool3')
    upscore8 = layers.deconv2d(fuse_pool3,
                               16,
                               num_classes,
                               stride=8,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore8')

    return upscore8
Example #25
def segnet_bayes_vgg(images, labels, batch_size, training_state, keep_prob,
                     training_state_drop):
    """
    images: the training and validation image
    labels: corresponding labels
    batch_size:
    training_state:
    keep_prob: for the training time, it's 0.5, but for the validation time is 1.0, all the units are utlized for the validation time. The rate input in tf.layers.dropout
    represent the dropout rate, so the for the validation time, the dropout rate should be 0, which is the reason that keep_prob = 1.
    output:
    logits 
    """
    # Before feeding the images into the architecture we would ideally apply Local Contrast Normalization.
    # That is a bit involved, so we use Local Response Normalization, which is implemented in TensorFlow.
    # Reference page: https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization
    norm1 = tf.nn.lrn(images,
                      depth_radius=5,
                      bias=1.0,
                      alpha=0.0001,
                      beta=0.75,
                      name='norm1')
    # First block of convolution layers; in each block we convolve twice, giving conv1_1 and conv1_2
    conv1_1 = conv_layer_enc(norm1, "conv1_1", [3, 3, 3, 64], training_state)
    conv1_2 = conv_layer_enc(conv1_1, "conv1_2", [3, 3, 64, 64],
                             training_state)
    pool1, pool1_index, shape_1 = max_pool(conv1_2, 'pool1')

    # Second block of convolution layers (4)
    conv2_1 = conv_layer_enc(pool1, "conv2_1", [3, 3, 64, 128], training_state)
    conv2_2 = conv_layer_enc(conv2_1, "conv2_2", [3, 3, 128, 128],
                             training_state)
    pool2, pool2_index, shape_2 = max_pool(conv2_2, 'pool2')

    # Third block of convolution layers (7)
    conv3_1 = conv_layer_enc(pool2, "conv3_1", [3, 3, 128, 256],
                             training_state)
    conv3_2 = conv_layer_enc(conv3_1, "conv3_2", [3, 3, 256, 256],
                             training_state)
    conv3_3 = conv_layer_enc(conv3_2, "conv3_3", [3, 3, 256, 256],
                             training_state)
    pool3, pool3_index, shape_3 = max_pool(conv3_3, 'pool3')
    dropout1 = tf.layers.dropout(pool3,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout1")

    # Fourth block of convolution layers (10)
    conv4_1 = conv_layer_enc(dropout1, "conv4_1", [3, 3, 256, 512],
                             training_state)
    conv4_2 = conv_layer_enc(conv4_1, "conv4_2", [3, 3, 512, 512],
                             training_state)
    conv4_3 = conv_layer_enc(conv4_2, "conv4_3", [3, 3, 512, 512],
                             training_state)
    pool4, pool4_index, shape_4 = max_pool(conv4_3, 'pool4')
    dropout2 = tf.layers.dropout(pool4,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout2")

    # Fifth block of convolution layers (13)
    conv5_1 = conv_layer_enc(dropout2, "conv5_1", [3, 3, 512, 512],
                             training_state)
    conv5_2 = conv_layer_enc(conv5_1, "conv5_2", [3, 3, 512, 512],
                             training_state)
    conv5_3 = conv_layer_enc(conv5_2, "conv5_3", [3, 3, 512, 512],
                             training_state)
    pool5, pool5_index, shape_5 = max_pool(conv5_3, 'pool5')
    dropout3 = tf.layers.dropout(pool5,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout3")

    #--------------------- The encoder part is now finished ------------------------------------------------#
    #--------------------- Now let's start the decoder part ------------------------------------------------#

    #First box of deconvolution layers (3)
    deconv5_1 = up_sampling(dropout3,
                            pool5_index,
                            shape_5,
                            name="unpool_5",
                            ksize=[1, 2, 2, 1])
    deconv5_2 = conv_layer(deconv5_1, "deconv5_2", [3, 3, 512, 512],
                           training_state)
    deconv5_3 = conv_layer(deconv5_2, "deconv5_3", [3, 3, 512, 512],
                           training_state)
    deconv5_4 = conv_layer(deconv5_3, "deconv5_4", [3, 3, 512, 512],
                           training_state)
    dropout4 = tf.layers.dropout(deconv5_4,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout4")

    #Second box of deconvolution layers(6)
    deconv4_1 = up_sampling(dropout4,
                            pool4_index,
                            shape_4,
                            name="unpool_4",
                            ksize=[1, 2, 2, 1])
    deconv4_2 = conv_layer(deconv4_1, "deconv4_2", [3, 3, 512, 512],
                           training_state)
    deconv4_3 = conv_layer(deconv4_2, "deconv4_3", [3, 3, 512, 512],
                           training_state)
    deconv4_4 = conv_layer(deconv4_3, "deconv4_4", [3, 3, 512, 256],
                           training_state)
    dropout5 = tf.layers.dropout(deconv4_4,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout5")

    #Third box of deconvolution layers(9)
    deconv3_1 = up_sampling(dropout5,
                            pool3_index,
                            shape_3,
                            name="unpool_3",
                            ksize=[1, 2, 2, 1])
    deconv3_2 = conv_layer(deconv3_1, "deconv3_2", [3, 3, 256, 256],
                           training_state)
    deconv3_3 = conv_layer(deconv3_2, "deconv3_3", [3, 3, 256, 256],
                           training_state)
    deconv3_4 = conv_layer(deconv3_3, "deconv3_4", [3, 3, 256, 128],
                           training_state)
    dropout6 = tf.layers.dropout(deconv3_4,
                                 rate=(1 - keep_prob),
                                 training=training_state_drop,
                                 name="dropout6")

    #Fourth box of deconvolution layers(11)
    deconv2_1 = up_sampling(dropout6,
                            pool2_index,
                            shape_2,
                            name="unpool_2",
                            ksize=[1, 2, 2, 1])
    deconv2_2 = conv_layer(deconv2_1, "deconv2_2", [3, 3, 128, 128],
                           training_state)
    deconv2_3 = conv_layer(deconv2_2, "deconv2_3", [3, 3, 128, 64],
                           training_state)
    #Fifth box of deconvolution layers(13)
    deconv1_1 = up_sampling(deconv2_3,
                            pool1_index,
                            shape_1,
                            name="unpool_1",
                            ksize=[1, 2, 2, 1])
    deconv1_2 = conv_layer(deconv1_1, "deconv1_2", [3, 3, 64, 64],
                           training_state)
    deconv1_3 = conv_layer(deconv1_2, "deconv1_3", [3, 3, 64, 64],
                           training_state)

    with tf.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[1, 1, 64, NUM_CLASS],
                                             initializer=_initialization(
                                                 1, 64),
                                             wd=False,
                                             enc=False)
        conv = tf.nn.conv2d(deconv1_3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [NUM_CLASS],
                                  tf.constant_initializer(0.0),
                                  enc=False)
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    return conv_classifier
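
The max_pool helper above returns, besides the pooled tensor, the argmax indices and the pre-pooling shape, and up_sampling scatters activations back to those recorded positions. Below is a minimal sketch of that idea with hypothetical stand-ins, not the helpers used in this example; the simplification assumes the flat argmax indices address the whole batch*height*width*channels volume (always true for batch size 1, and controllable via include_batch_in_index in newer TensorFlow versions).

def max_pool_with_indices_sketch(x, name):
    # Pooled values, flat indices of each maximum, and the pre-pooling shape
    # that the decoder later needs for unpooling.
    pooled, argmax = tf.nn.max_pool_with_argmax(x,
                                                ksize=[1, 2, 2, 1],
                                                strides=[1, 2, 2, 1],
                                                padding='SAME',
                                                name=name)
    return pooled, argmax, tf.shape(x)

def unpool_with_indices_sketch(pooled, argmax, pre_pool_shape, name):
    # Scatter every pooled value back to the position recorded in argmax;
    # all other positions stay zero.
    with tf.name_scope(name):
        flat_size = tf.cast(tf.reduce_prod(pre_pool_shape), tf.int64)
        values = tf.reshape(pooled, [-1])
        indices = tf.reshape(argmax, [-1, 1])
        flat = tf.scatter_nd(indices, values, tf.reshape(flat_size, [1]))
        return tf.reshape(flat, pre_pool_shape)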
示例#26
0
def segnet_scratch(images, labels, batch_size, training_state, keep_prob,
                   training_state_drop):
    """
    images: the input images (training data or test data)
    labels: the corresponding labels for the images
    batch_size
    training_state: indicates whether the parameters should be kept constant or keep updating
    """
    #Before feeding the images into the architecture we would apply Local Contrast Normalization,
    #but that is a bit involved, so we use the Local Response Normalization implemented in TensorFlow.
    #Reference page: https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization
    norm1 = tf.nn.lrn(images,
                      depth_radius=5,
                      bias=1.0,
                      alpha=0.0001,
                      beta=0.75,
                      name='norm1')
    #First box of convolution layers: each box convolves twice, giving conv1_1 and conv1_2
    conv1_1 = conv_layer(norm1, "conv1_1", [3, 3, 3, 64], training_state)
    conv1_2 = conv_layer(conv1_1, "conv1_2", [3, 3, 64, 64], training_state)
    pool1, pool1_index, shape_1 = max_pool(conv1_2, 'pool1')

    #Second box of convolution layers (4)
    conv2_1 = conv_layer(pool1, "conv2_1", [3, 3, 64, 128], training_state)
    conv2_2 = conv_layer(conv2_1, "conv2_2", [3, 3, 128, 128], training_state)
    pool2, pool2_index, shape_2 = max_pool(conv2_2, 'pool2')

    #Third box of convolution layers (7)
    conv3_1 = conv_layer(pool2, "conv3_1", [3, 3, 128, 256], training_state)
    conv3_2 = conv_layer(conv3_1, "conv3_2", [3, 3, 256, 256], training_state)
    conv3_3 = conv_layer(conv3_2, "conv3_3", [3, 3, 256, 256], training_state)
    pool3, pool3_index, shape_3 = max_pool(conv3_3, 'pool3')

    #Fourth box of convolution layers (10)
    conv4_1 = conv_layer(pool3, "conv4_1", [3, 3, 256, 512], training_state)
    conv4_2 = conv_layer(conv4_1, "conv4_2", [3, 3, 512, 512], training_state)
    conv4_3 = conv_layer(conv4_2, "conv4_3", [3, 3, 512, 512], training_state)
    pool4, pool4_index, shape_4 = max_pool(conv4_3, 'pool4')

    #Fifth box of convolution layers (13)
    conv5_1 = conv_layer(pool4, "conv5_1", [3, 3, 512, 512], training_state)
    conv5_2 = conv_layer(conv5_1, "conv5_2", [3, 3, 512, 512], training_state)
    conv5_3 = conv_layer(conv5_2, "conv5_3", [3, 3, 512, 512], training_state)
    pool5, pool5_index, shape_5 = max_pool(conv5_3, 'pool5')

    #---------------------So Now the encoder process has been Finished--------------------------------------#
    #------------------Then Let's start Decoder Process-----------------------------------------------------#

    #First box of deconvolution layers (3)
    deconv5_1 = up_sampling(pool5,
                            pool5_index,
                            shape_5,
                            name="unpool_5",
                            ksize=[1, 2, 2, 1])
    deconv5_2 = conv_layer(deconv5_1, "deconv5_2", [3, 3, 512, 512],
                           training_state)
    deconv5_3 = conv_layer(deconv5_2, "deconv5_3", [3, 3, 512, 512],
                           training_state)
    deconv5_4 = conv_layer(deconv5_3, "deconv5_4", [3, 3, 512, 512],
                           training_state)
    #Second box of deconvolution layers(6)
    deconv4_1 = up_sampling(deconv5_4,
                            pool4_index,
                            shape_4,
                            name="unpool_4",
                            ksize=[1, 2, 2, 1])
    deconv4_2 = conv_layer(deconv4_1, "deconv4_2", [3, 3, 512, 512],
                           training_state)
    deconv4_3 = conv_layer(deconv4_2, "deconv4_3", [3, 3, 512, 512],
                           training_state)
    deconv4_4 = conv_layer(deconv4_3, "deconv4_4", [3, 3, 512, 256],
                           training_state)
    #Third box of deconvolution layers(9)
    deconv3_1 = up_sampling(deconv4_4,
                            pool3_index,
                            shape_3,
                            name="unpool_3",
                            ksize=[1, 2, 2, 1])
    deconv3_2 = conv_layer(deconv3_1, "deconv3_2", [3, 3, 256, 256],
                           training_state)
    deconv3_3 = conv_layer(deconv3_2, "deconv3_3", [3, 3, 256, 256],
                           training_state)
    deconv3_4 = conv_layer(deconv3_3, "deconv3_4", [3, 3, 256, 128],
                           training_state)
    #Fourth box of deconvolution layers(11)
    deconv2_1 = up_sampling(deconv3_4,
                            pool2_index,
                            shape_2,
                            name="unpool_2",
                            ksize=[1, 2, 2, 1])
    deconv2_2 = conv_layer(deconv2_1, "deconv2_2", [3, 3, 128, 128],
                           training_state)
    deconv2_3 = conv_layer(deconv2_2, "deconv2_3", [3, 3, 128, 64],
                           training_state)
    #Fifth box of deconvolution layers(13)
    deconv1_1 = up_sampling(deconv2_3,
                            pool1_index,
                            shape_1,
                            name="unpool_1",
                            ksize=[1, 2, 2, 1])
    deconv1_2 = conv_layer(deconv1_1, "deconv1_2", [3, 3, 64, 64],
                           training_state)
    deconv1_3 = conv_layer(deconv1_2, "deconv1_3", [3, 3, 64, 64],
                           training_state)

    with tf.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[1, 1, 64, NUM_CLASS],
                                             initializer=_initialization(
                                                 1, 64),
                                             wd=False,
                                             enc=False)
        conv = tf.nn.conv2d(deconv1_3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [NUM_CLASS],
                                  tf.constant_initializer(0.0),
                                  enc=False)
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    return conv_classifier
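
A hypothetical usage sketch for segnet_scratch follows; the placeholder names, shapes and batch size are illustrative only. keep_prob would be fed as 0.5 during training and 1.0 during validation (the dropout variant shown earlier turns that into a rate of 1 - keep_prob, i.e. 0 at validation time).

# Hypothetical usage sketch -- placeholder names, shapes and batch size are illustrative.
images_ph = tf.placeholder(tf.float32, [None, 360, 480, 3], name='images')
labels_ph = tf.placeholder(tf.int64, [None, 360, 480, 1], name='labels')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob')
phase_train = tf.placeholder(tf.bool, name='phase_train')  # parameter-update switch
phase_drop = tf.placeholder(tf.bool, name='phase_drop')    # dropout switch

logits = segnet_scratch(images_ph, labels_ph, 4, phase_train, keep_prob_ph, phase_drop)

# Training feed: dropout rate = 1 - 0.5; validation feed: rate = 1 - 1.0 = 0.
# feed_train = {images_ph: x, labels_ph: y, keep_prob_ph: 0.5, phase_train: True,  phase_drop: True}
# feed_valid = {images_ph: x, labels_ph: y, keep_prob_ph: 1.0, phase_train: False, phase_drop: False}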
示例#27
0
train_images = cdl.GetTrainDataByLabel("data")
train_labels = cdl.GetTrainDataByLabel("labels")

test_images = cdl.GetTestDataByLabel("data")
test_labels = cdl.GetTestDataByLabel("labels")

input_images = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))  # bfloat16
input_labels = tf.placeholder(tf.float32, shape=(None, 10))  # one-hot encoding

# ResNet merges branches with add, DenseNet merges them with concatenate (see the sketch after this skeleton)

# section 1
W_conv1 = layers.weight_variable([7, 7, 3, 16])
b_conv1 = layers.bias_variable([16])
h_conv1 = tf.nn.relu(conv2d(input_images, W_conv1) + b_conv1)
h_pool1 = layers.max_pool(h_conv1, 3, 2)

# Dense Block 1

# Transition Layer 1

# Dense Block 2

# Transition Layer 2

# Dense Block 3

# Transition Layer 3

# Dense Block 4
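
As the comment above notes, a ResNet block merges its branch by element-wise addition while a DenseNet block concatenates along the channel axis. Below is a minimal sketch of that difference, plus a hypothetical dense block built from the same helpers used in this example; the function names and the growth rate are illustrative and not part of the original skeleton.

def residual_merge(x, fx):
    # ResNet: element-wise addition, so fx must match x's shape exactly.
    return tf.nn.relu(tf.add(x, fx))

def dense_merge(x, fx):
    # DenseNet: channel-wise concatenation, so the feature maps keep growing.
    return tf.concat([x, fx], axis=3)

def dense_block_sketch(x, num_layers=4, growth_rate=12):
    # Each layer sees the concatenation of all previous feature maps.
    for _ in range(num_layers):
        in_channels = x.get_shape().as_list()[3]
        w = layers.weight_variable([3, 3, in_channels, growth_rate])
        b = layers.bias_variable([growth_rate])
        fx = tf.nn.relu(conv2d(x, w) + b)
        x = tf.concat([x, fx], axis=3)
    return x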
示例#28
0
def create_conv_net(x,
                    keep_prob,
                    channels_in,
                    channels_out,
                    n_class,
                    layers=2,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels_in]
    :param keep_prob: dropout probability tensor
    :param channels_in: number of channels in the input image
    :param channels_out: number of channels in the output image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels_in]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels_in, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:  # no pooling after the last down layer, the bottom of the U
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    # The bottom of the U: range(0, layers) is zero-based, so the last down layer is layers - 1.
    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):  # start one level above the bottom of the U
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        # Weights and bias for up-sampling from one level to the next
        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        # in_node is still the output of the level below (initially the bottom of the U)
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)

        # Skip connection: crop the encoder features from the same level and concatenate
        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class * channels_out],
                             stddev)
    bias = bias_variable([n_class * channels_out])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
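
A hypothetical usage sketch for create_conv_net above; the placeholder names and hyper-parameters are illustrative only. The size bookkeeping in the function (size -= 4 per level) suggests unpadded convolutions, so the returned offset records how much smaller the output is than the input and can be used to crop the labels accordingly.

x_ph = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='x')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob')

output_map, variables, offset = create_conv_net(x_ph,
                                                keep_prob_ph,
                                                channels_in=1,
                                                channels_out=1,
                                                n_class=2,
                                                layers=3,
                                                features_root=16)
# offset: nominal input size minus the tracked output size after all valid
# convolutions and pool/up-sample steps.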
示例#29
0
def inference(inputs, num_classes=34, keep_prob=0.5, is_training=False):

    conv1_1 = layers.conv2d(inputs, ksize=3, depth=64, name='conv1_1')
    conv1_2 = layers.conv2d(conv1_1, ksize=3, depth=64, name='conv1_2')

    pool1 = layers.max_pool(conv1_2, ksize=3, stride=2, name='pool1')

    conv2_1 = layers.conv2d(pool1, ksize=3, depth=128, name='conv2_1')
    conv2_2 = layers.conv2d(conv2_1, ksize=3, depth=128, name='conv2_2')

    pool2 = layers.max_pool(conv2_2, ksize=3, stride=2, name='pool2')

    conv3_1 = layers.conv2d(pool2, ksize=3, depth=256, name='conv3_1')
    conv3_2 = layers.conv2d(conv3_1, ksize=3, depth=256, name='conv3_2')
    conv3_3 = layers.conv2d(conv3_2, ksize=3, depth=256, name='conv3_3')

    pool3 = layers.max_pool(conv3_3, ksize=3, stride=2, name='pool3')

    conv4_1 = layers.conv2d(pool3, ksize=3, depth=512, name='conv4_1')
    conv4_2 = layers.conv2d(conv4_1, ksize=3, depth=512, name='conv4_2')
    conv4_3 = layers.conv2d(conv4_2, ksize=3, depth=512, name='conv4_3')

    pool4 = layers.max_pool(conv4_3, ksize=3, stride=1, name='pool4')

    conv5_1 = layers.conv2d(pool4, ksize=3, depth=512, rate=2, name='conv5_1')
    conv5_2 = layers.conv2d(conv5_1,
                            ksize=3,
                            depth=512,
                            rate=2,
                            name='conv5_2')
    conv5_3 = layers.conv2d(conv5_2,
                            ksize=3,
                            depth=512,
                            rate=2,
                            name='conv5_3')

    pool5 = layers.max_pool(conv5_3, ksize=3, stride=1, name='pool5')

    # Atrous ('hole') branch, rate 6 (see the sketch after this function)
    fc6_1 = layers.conv2d(pool5, ksize=3, depth=1024, rate=6, name='fc6_1')
    if is_training:
        fc6_1 = layers.dropout(fc6_1, keep_prob=keep_prob, name='drop6_1')

    fc7_1 = layers.conv2d(fc6_1, ksize=1, depth=1024, name='fc7_1')
    if is_training:
        fc7_1 = layers.dropout(fc7_1, keep_prob=keep_prob, name='drop7_1')

    fc8_1 = layers.conv2d(fc7_1,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_1')

    # Atrous ('hole') branch, rate 12
    fc6_2 = layers.conv2d(pool5, ksize=3, depth=1024, rate=12, name='fc6_2')
    if is_training:
        fc6_2 = layers.dropout(fc6_2, keep_prob=keep_prob, name='drop6_2')

    fc7_2 = layers.conv2d(fc6_2, ksize=1, depth=1024, name='fc7_2')
    if is_training:
        fc7_2 = layers.dropout(fc7_2, keep_prob=keep_prob, name='drop7_2')

    fc8_2 = layers.conv2d(fc7_2,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_2')

    # Atrous ('hole') branch, rate 18
    fc6_3 = layers.conv2d(pool5, ksize=3, depth=1024, rate=18, name='fc6_3')
    if is_training:
        fc6_3 = layers.dropout(fc6_3, keep_prob=keep_prob, name='drop6_3')

    fc7_3 = layers.conv2d(fc6_3, ksize=1, depth=1024, name='fc7_3')
    if is_training:
        fc7_3 = layers.dropout(fc7_3, keep_prob=keep_prob, name='drop7_3')

    fc8_3 = layers.conv2d(fc7_3,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_3')

    # Atrous ('hole') branch, rate 24
    fc6_4 = layers.conv2d(pool5, ksize=3, depth=1024, rate=24, name='fc6_4')
    if is_training:
        fc6_4 = layers.dropout(fc6_4, keep_prob=keep_prob, name='drop6_4')

    fc7_4 = layers.conv2d(fc6_4, ksize=1, depth=1024, name='fc7_4')
    if is_training:
        fc7_4 = layers.dropout(fc7_4, keep_prob=keep_prob, name='drop7_4')

    fc8_4 = layers.conv2d(fc7_4,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_4')

    fuse = tf.add_n([fc8_1, fc8_2, fc8_3, fc8_4], name='add')

    logits = layers.deconv2d(fuse,
                             16,
                             num_classes,
                             stride=8,
                             bias=False,
                             activation=None,
                             init='bilinear',
                             name='logits')

    return logits
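
The 'hole' branches above are atrous (dilated) convolutions with rates 6, 12, 18 and 24, whose per-class scores are summed before the learned up-sampling. Below is a rough illustration of a single branch written with raw TensorFlow ops; the function and variable names are hypothetical and this is not the layers.conv2d helper used above.

def atrous_branch_sketch(x, rate, depth=1024):
    # A 3x3 dilated convolution: the filter taps are `rate` pixels apart,
    # which enlarges the receptive field without shrinking the feature map.
    in_channels = x.get_shape().as_list()[3]
    w = tf.get_variable('w_rate%d' % rate, [3, 3, in_channels, depth],
                        initializer=tf.truncated_normal_initializer(stddev=0.01))
    b = tf.get_variable('b_rate%d' % rate, [depth],
                        initializer=tf.zeros_initializer())
    out = tf.nn.atrous_conv2d(x, w, rate=rate, padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(out, b))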
示例#30
0
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
        Creates a new convolutional unet for the given parametrization.
        :param x: input tensor, shape [?,nx,ny,channels]
        :param keep_prob: dropout probability tensor
        :param channels: number of channels in the input image
        :param n_class: number of output labels
        :param layers: number of layers in the net
        :param features_root: number of features in the first layer
        :param filter_size: size of the convolution filter
        :param pool_size: size of the max pooling operation
        :param summaries: Flag if summaries should be created
        """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 128
    size = in_size
    # Down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root  # output features number
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Set weights and biases for the two convolutions
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.leaky_relu(conv2)
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            # Do pooling and calculate image processing size
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]
    # Up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Up convolution and skip connection    # shape[kernelx, kernely, out features, in features]
            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.leaky_relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat
            # Set weights and biases for the two convolutions
            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.leaky_relu(conv2)
            up_h_convs[layer] = in_node
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
    # Output map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.leaky_relu(conv)
        up_h_convs["out"] = output_map

    # blur map
    with tf.name_scope("output_blur"):
        weight = weight_variable([1, 1, features_root, 1], stddev)
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_blur = tf.nn.leaky_relu(conv)
        up_h_convs["blur"] = output_blur

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)
    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, output_blur, variables, int(in_size - size)
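
Both U-Net variants above call a crop_and_concat helper that is not shown in these examples. A plausible sketch, assuming NHWC tensors, center-crops the encoder features to the decoder's spatial size before concatenating along the channel axis:

def crop_and_concat_sketch(x1, x2):
    # x1: encoder feature map (possibly larger), x2: up-sampled decoder feature map.
    x1_shape = tf.shape(x1)
    x2_shape = tf.shape(x2)
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
               (x1_shape[2] - x2_shape[2]) // 2, 0]
    size = [-1, x2_shape[1], x2_shape[2], -1]
    x1_crop = tf.slice(x1, offsets, size)
    return tf.concat([x1_crop, x2], axis=3)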