Example #1
def mobilenet_v2_arg_scope(weight_decay=0.00004, use_batch_norm=True):
    """Defines the default MobilenetV2 arg scope.
    Args:
      weight_decay: The weight decay to use for regularizing the model.
      use_batch_norm: Whether or not to apply batch normalization after each layer.
    Returns:
      An `arg_scope` to use for the mobilenet v2 model.
    """
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        # batch_norm_params is assumed to be defined at module level in the
        # source file (a dict of decay/epsilon settings for slim.batch_norm).
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}

    #weights_initializer = tf.truncated_normal_initializer(stddev=0.02)
    weights_initializer = slim.xavier_initializer_conv2d()
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    depthwise_regularizer = None

    with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected], weights_initializer=weights_initializer):
        with slim.arg_scope([slim.conv2d, slim.separable_conv2d], 
                        normalizer_fn=normalizer_fn,
                        normalizer_params=normalizer_params,
                        biases_initializer=None):
            with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as sc:
                    return sc
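A minimal usage sketch (assuming TF 1.x with `slim = tf.contrib.slim`, and some network-building function such as a hypothetical `mobilenet_v2` defined elsewhere); the captured scope is simply re-entered with `slim.arg_scope`:

# Hedged sketch: `mobilenet_v2` is a placeholder for whichever network
# function should pick up these layer defaults.
import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
with slim.arg_scope(mobilenet_v2_arg_scope(weight_decay=4e-5)):
    logits = mobilenet_v2(images)  # defined elsewhere in the source project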
Example #2
def inference(inputs,
              feature_length=128,
              phase_train=True,
              dropout_keep_prob=0.5,
              weight_decay=5e-5,
              scope='vgg_a',
              w_init=slim.xavier_initializer_conv2d(uniform=True)):

    with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                weights_initializer=w_init,
                weights_regularizer=slim.l2_regularizer(weight_decay)):

            net = slim.repeat(inputs,
                              1,
                              slim.conv2d,
                              64, [3, 3],
                              scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.flatten(net)
            net = slim.fully_connected(net,
                                       feature_length,
                                       activation_fn=None,
                                       scope='Bottleneck',
                                       reuse=False)
            return net
Example #3
def inference(inputs,
              phase_train=True,
              keep_probability=0.5,
              weight_decay=0.0005,
              scope='vgg_16',
              w_init=slim.xavier_initializer_conv2d(uniform=True)):
    end_points = {}
    with tf.variable_scope(scope, 'vgg_16', [inputs]):
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                weights_initializer=w_init,
                weights_regularizer=slim.l2_regularizer(weight_decay)):

            net = slim.repeat(inputs,
                              1,
                              slim.conv2d,
                              64, [3, 3],
                              scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            net = slim.conv2d(net, 4096, [1, 1], padding="VALID", scope='fc6')
            net = slim.dropout(net,
                               keep_probability,
                               is_training=phase_train,
                               scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            net = slim.dropout(net,
                               keep_probability,
                               is_training=phase_train,
                               scope='dropout7')

            yaw = slim.conv2d(net,
                              68, [1, 1],
                              activation_fn=None,
                              normalizer_fn=None,
                              scope='yaw_fc8')
            pitch = slim.conv2d(net,
                                68, [1, 1],
                                activation_fn=None,
                                normalizer_fn=None,
                                scope='pitch_fc8')
            roll = slim.conv2d(net,
                               68, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='roll_fc8')

            yaw = tf.squeeze(yaw, [1, 2], name='squeezed_yaw')
            pitch = tf.squeeze(pitch, [1, 2], name='squeezed_pitch')
            roll = tf.squeeze(roll, [1, 2], name='squeezed_roll')

            return yaw, pitch, roll
Example #4
def inference(inputs,
              meanshape=None,
              num_classes=136,
              is_training=True,
              dropout_keep_prob=0.5,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
        'fused': False
    }
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [inputs], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                net = slim.conv2d(inputs, 64, [3, 3], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, dropout_keep_prob)
                net = slim.conv2d(net,
                                  1000, [1, 1],
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='conv10')
                net = slim.avg_pool2d(net,
                                      net.get_shape()[1:3],
                                      scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net,
                                           num_classes,
                                           activation_fn=None,
                                           scope='Bottleneck',
                                           reuse=False)
                if meanshape is not None:
                    meanShape = tf.constant(meanshape)
                    net = net + meanShape
    return net, None
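`fire_module` is not defined in these examples; a minimal sketch in the spirit of the SqueezeNet paper, with the signature inferred from call sites like `fire_module(net, 16, 64, scope='fire2')`, might be:

# Sketch only: squeeze with a 1x1 conv, expand with parallel 1x1 and 3x3 convs,
# then concatenate along the channel axis (standard SqueezeNet fire module).
def fire_module(inputs, squeeze_depth, expand_depth, scope=None):
    with tf.variable_scope(scope, 'fire', [inputs]):
        net = slim.conv2d(inputs, squeeze_depth, [1, 1], scope='squeeze')
        e1x1 = slim.conv2d(net, expand_depth, [1, 1], scope='expand_1x1')
        e3x3 = slim.conv2d(net, expand_depth, [3, 3], scope='expand_3x3')
        return tf.concat([e1x1, e3x3], 3)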
Example #5
def inference(images,
              keep_probability=0.8,
              phase_train=True,
              feature_length=128,
              weight_decay=0.0,
              reuse=None,
              w_init=slim.xavier_initializer_conv2d(uniform=True)):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=w_init,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('inference', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net,
                                  1000, [1, 1],
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='conv10')
                net = slim.avg_pool2d(net,
                                      net.get_shape()[1:3],
                                      scope='avgpool10')
                print(net.shape)
                #net = tf.squeeze(net, [1, 2], name='logits')
                #print (net.shape)
                # flatten spatial dims; -1 keeps the batch dimension dynamic
                net = tf.reshape(
                    net, (-1,
                          net.shape[1] * net.shape[2] * net.shape[3]))
                net = slim.fully_connected(net,
                                           feature_length,
                                           activation_fn=None,
                                           scope='Bottleneck',
                                           reuse=False)
    return net
Example #6
def block4(net):
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(0.1)):
        conv4_x_1 = slim.conv2d(net, 256, [1, 1], scope="conv4_x_1", padding="VALID")
        conv4_x_2 = slim.conv2d(conv4_x_1, 256, [3, 3], scope="conv4_x_2", padding="SAME")
        conv4_x_3 = slim.conv2d(conv4_x_2, 1024, [1, 1], scope="conv4_x_3", padding="SAME")
        # concatenate along the channel axis (axis 3), not the width axis
        net = tf.concat([net, conv4_x_3], 3)
    return net

def squeezenetv1_arg_scope(is_training=True, weight_decay=0.0):
    weights_initializer = slim.xavier_initializer_conv2d(uniform=True)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        trainable=is_training,
                        weights_initializer=weights_initializer,
                        weights_regularizer=regularizer,
                        biases_initializer=tf.constant_initializer(0.0),
                        activation_fn=tf.nn.relu,
                        padding='SAME') as sc:
        return sc
Example #8
def inference(image, phase_train=True, feature_length=128, weight_decay=5e-5):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            activation_fn=None,
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay)):
        #conv1
        net = slim.conv2d(image,
                          96, [9, 9],
                          stride=1,
                          scope="conv1",
                          padding="SAME")
        #MFM1
        net = MFM(net)
        #pool1
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool1")
        #conv2
        net = slim.conv2d(net,
                          192, [5, 5],
                          stride=1,
                          scope="conv2",
                          padding="SAME")
        #MFM2
        net = MFM(net)
        #pool2
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool2")
        #conv3
        net = slim.conv2d(net,
                          256, [5, 5],
                          stride=1,
                          scope="conv3",
                          padding="SAME")
        #MFM3
        net = MFM(net)
        #pool3
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool3")
        #conv4
        net = slim.conv2d(net,
                          384, [4, 4],
                          stride=1,
                          scope="conv4",
                          padding="SAME")
        #MFM4
        net = MFM(net)
        #pool4
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool4")
        #fc1
        net = slim.flatten(net)
        net = slim.fully_connected(net, 512, activation_fn=None)
        #MFM_FC1
        net = MFM(net)

    return net
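`MFM` (max-feature-map, from the Light CNN paper) is used throughout these examples but never shown; a plausible sketch that halves the last (channel or feature) dimension by element-wise max:

# Sketch only: the project's actual MFM implementation is not shown here.
def MFM(net):
    # split the last axis in half and keep the element-wise maximum;
    # works for both 4-D conv feature maps and 2-D fully connected outputs
    a, b = tf.split(net, num_or_size_splits=2, axis=-1)
    return tf.maximum(a, b)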
Example #9
    def model(self, inputs):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                            weights_regularizer=slim.l2_regularizer(5e-5)):

            net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.conv2d(net, 32, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.conv2d(net, 64, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.conv2d(net, 128, [3, 3], padding="VALID", scope='conv4')
            net = slim.flatten(net)
            net = slim.fully_connected(net, self.feature_length, activation_fn=None, scope='fc1', reuse=False)
            return net
Example #10
    def _arg_scope(self, is_training):
        batch_norm_params = {
            'is_training': is_training,
            # Decay for the moving averages.
            'decay': 0.995,
            # epsilon to prevent 0s in variance.
            'epsilon': 0.00001
        }

        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.leaky_relu,
                            weights_regularizer=slim.l2_regularizer(0.0001),
                            weights_initializer=slim.xavier_initializer_conv2d(
                                uniform=True),
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.dropout], is_training=is_training) as sc:
                return sc
Example #11
def build_graph(phase_train=True, weight_decay=0.0001, reuse=None):
  images = tf.placeholder(tf.float32, shape=(None, FLAGS.image_height, FLAGS.image_width, 1 if FLAGS.gray else 3), name='image_batch')
  labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
  with slim.arg_scope([slim.conv2d], 
      weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
      weights_regularizer=slim.l2_regularizer(weight_decay)):
        net = slim.conv2d(images, 32, [3, 3], 1, scope='conv1_1')
        net = slim.conv2d(net, 64, [3, 3], 1, scope='conv1_2')
        net = slim.max_pool2d(net, [2, 2], stride=2)
        net = residual_block(net, 64)
        net = residual_block(net, 64)
        net = slim.conv2d(net, 128, [3, 3], 1)
        net = slim.max_pool2d(net, [2, 2], stride=2)
        net = residual_block(net, 128)
        net = residual_block(net, 128)
        net = residual_block(net, 128)
        net = slim.conv2d(net, 256, [3, 3], 1)
        net = slim.max_pool2d(net, [2, 2], stride=2)
        net = residual_block(net, 256)
        net = residual_block(net, 256)
        net = residual_block(net, 256)
        net = residual_block(net, 256)
  features = slim.fully_connected(slim.flatten(net), 512, activation_fn=None)
  logits = slim.fully_connected(features, FLAGS.classes, activation_fn=None, scope='classification')
  #softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
  softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  loss = tf.add_n([softmax_loss] + regularization_losses, name='total_loss')

  global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
  learning_rate = tf.train.exponential_decay(FLAGS.base_lr, global_step, decay_steps=FLAGS.decay_steps, decay_rate=FLAGS.decay_rate, staircase=True)
  train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step) #  AdamOptimizer

  tf.summary.scalar('loss', loss)
  tf.summary.scalar('learning_rate', learning_rate)
  merged_summary_op = tf.summary.merge_all()

  return {'images': images,
          'labels': labels,
          'features': features,
          'loss': loss,
          'global_step': global_step,
          'train_op': train_op,
          'merged_summary_op': merged_summary_op}
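`residual_block` is assumed to be defined elsewhere in the source file; from the call sites (`residual_block(net, 64)`, same spatial size in and out), a minimal two-conv identity-shortcut sketch could be:

# Sketch only: a plain residual block inferred from the calls above.
def residual_block(net, num_filters):
    shortcut = net
    residual = slim.conv2d(net, num_filters, [3, 3])
    residual = slim.conv2d(residual, num_filters, [3, 3], activation_fn=None)
    return tf.nn.relu(shortcut + residual)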
Example #12
def inference(inputs,
              phase_train=True,
              keep_probability=0.5,
              weight_decay=0.0,
              feature_length=128,
              scope='vgg_19',
              w_init=slim.xavier_initializer_conv2d(uniform=True)):
    end_points = {}
    with tf.variable_scope(scope, 'vgg_19', [inputs]):
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                weights_initializer=w_init,
                weights_regularizer=slim.l2_regularizer(weight_decay)):

            net = slim.repeat(inputs,
                              2,
                              slim.conv2d,
                              64, [3, 3],
                              scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Flatten before the fully connected layers.
            net = slim.flatten(net)
            net = slim.fully_connected(net, 4096, scope='fc6')
            net = slim.dropout(net,
                               keep_probability,
                               is_training=phase_train,
                               scope='dropout6')
            net = slim.fully_connected(net, feature_length, scope='fc7')
            net = slim.dropout(net,
                               keep_probability,
                               is_training=phase_train,
                               scope='dropout7')

            return net, end_points
Example #13
    def _arg_scope(self, is_training, reuse=None):
        weight_decay = 0.0
        keep_probability = 1.0

        batch_norm_params = {
            'is_training': is_training,
            # Decay for the moving averages.
            'decay': 0.995,
            # epsilon to prevent 0s in variance.
            'epsilon': 0.001
        }

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            with tf.variable_scope(self._scope, self._scope, reuse=reuse):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=is_training) as sc:
                    return sc
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, 
                        scope='Bottleneck', reuse=False)
    return net, None
Example #15
def inference(images, keep_probability=0.8, phase_train=True, scope="inference",
              weight_decay=0.0, bottleneck_layer_size=512, num_layers=36, reuse=None):
    units, filters = layer_setup(num_layers)
    end_points = {}
    body = images
    with tf.variable_scope('inference', [images], reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.prelu,
                            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                            weights_regularizer=slim.l2_regularizer(weight_decay)):

            for i in range(len(units)):
                f = filters[i]

                body = slim.conv2d(body, f, [3, 3], stride=2, scope="conv%d_%d" % (i + 1, 1))
                idx = 2
                for j in range(units[i]):
                    _body = slim.conv2d(body, f, [3, 3], stride=1, scope="conv%d_%d" % (i + 1, idx))
                    idx += 1
                    _body = slim.conv2d(_body, f, [3, 3], stride=1, scope="conv%d_%d" % (i + 1, idx))
                    idx += 1
                    body = body + _body
            body = slim.flatten(body)
            body = slim.fully_connected(body, bottleneck_layer_size, scope='Bottleneck', reuse=False)
            return body, end_points
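`layer_setup` is not shown; it evidently maps `num_layers` to per-stage residual-unit counts and filter widths. A hypothetical mapping for the `num_layers=36` default (the real values live in the source project):

# Hypothetical sketch: the unit counts and widths here are illustrative guesses.
def layer_setup(num_layers):
    if num_layers == 36:
        units = [2, 4, 8, 2]           # residual units per stage (assumed)
        filters = [64, 128, 256, 512]  # conv widths per stage (assumed)
    else:
        raise ValueError('unsupported num_layers: %d' % num_layers)
    return units, filters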
Example #16
    def _arg_scope(self, is_training, reuse=None):
        weight_decay = 0.0
        keep_probability = 1.0

        batch_norm_params = {
            'is_training': is_training,
            # Decay for the moving averages.
            'decay': 0.995,
            # epsilon to prevent 0s in variance.
            'epsilon': 0.001
        }

        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                weights_initializer=slim.xavier_initializer_conv2d(
                    uniform=True),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params):
            with tf.variable_scope(self._scope, self._scope, reuse=reuse):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=is_training) as sc:
                    return sc
Example #17
    def _build_network(self, is_training=True):
        # select initializers
        if cfg.TRAIN.TRUNCATED:
            initializer = tf.truncated_normal_initializer(mean=0.0,
                                                          stddev=0.01)
        else:
            initializer = slim.xavier_initializer_conv2d(uniform=True)

        timer = Timer()
        timer.tic()
        net_conv = self._image_to_head(is_training)
        timer.toc()
        print('base_network took {:.3f}s'.format(timer.total_time))
        with tf.variable_scope(self._scope, self._scope):
            fc_flatten = slim.flatten(net_conv, scope='flatten')
            yaw_pred = slim.fully_connected(fc_flatten,
                                            self._num_bins,
                                            weights_initializer=initializer,
                                            trainable=is_training,
                                            activation_fn=None,
                                            scope='yaw_fc')
            pitch_pred = slim.fully_connected(fc_flatten,
                                              self._num_bins,
                                              weights_initializer=initializer,
                                              trainable=is_training,
                                              activation_fn=None,
                                              scope='pitch_fc')
            roll_pred = slim.fully_connected(fc_flatten,
                                             self._num_bins,
                                             weights_initializer=initializer,
                                             trainable=is_training,
                                             activation_fn=None,
                                             scope='roll_fc')
            self._predictions['yaw'] = yaw_pred
            self._predictions['pitch'] = pitch_pred
            self._predictions['roll'] = roll_pred
            self._score_summaries.update(self._predictions)
Example #18
def inference(images, keep_probability=1.0, training=True, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    endpoints = {}
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=training):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], stride=1, scope='avgpool10')
                logits = tf.squeeze(net, [1, 2], name='logits')
    return logits, endpoints
Example #19
def inference(images,
              keep_probability=0.8,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None,
              w_init=slim.xavier_initializer_conv2d(uniform=True)):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=w_init,
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=None):
        #conv1
        net_conv_1_1 = slim.conv2d(images,
                                   64, [3, 3],
                                   stride=2,
                                   scope="conv1_1",
                                   padding="VALID")
        # conv1_2/conv1_3 use SAME padding so their output matches conv1_1
        # spatially, making the channel-axis concat below shape-compatible
        net = slim.conv2d(net_conv_1_1,
                          64, [3, 3],
                          stride=1,
                          scope="conv1_2",
                          padding="SAME")
        net = slim.conv2d(net,
                          64, [3, 3],
                          stride=1,
                          scope="conv1_3",
                          padding="SAME")

        net = tf.concat([net_conv_1_1, net], 3)

        #conv2
        net_conv_2_1 = slim.conv2d(net,
                                   128, [3, 3],
                                   stride=2,
                                   scope="conv2_1",
                                   padding="SAME")
        net = slim.conv2d(net_conv_2_1,
                          128, [3, 3],
                          stride=1,
                          scope="conv2_2",
                          padding="SAME")
        net = slim.conv2d(net,
                          128, [3, 3],
                          stride=1,
                          scope="conv2_3",
                          padding="SAME")
        net_res_3_3 = tf.concat([net_conv_2_1, net], 3)

        net = slim.conv2d(net,
                          128, [3, 3],
                          stride=1,
                          scope="conv2_4",
                          padding="SAME")
        net = slim.conv2d(net,
                          128, [3, 3],
                          stride=1,
                          scope="conv2_5",
                          padding="SAME")
        #MFM2
        net = tf.concat([net_res_3_3, net], 3)

        #conv3
        conv3_1 = slim.conv2d(net,
                              256, [3, 3],
                              stride=2,
                              scope="conv3_1",
                              padding="SAME")
        net = slim.conv2d(conv3_1,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_2",
                          padding="SAME")
        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_3",
                          padding="SAME")

        res3_3 = tf.concat([conv3_1, net], 3)

        net = slim.conv2d(res3_3,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_4",
                          padding="SAME")

        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_5",
                          padding="SAME")
        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_6",
                          padding="SAME")
        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_7",
                          padding="SAME")
        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_8",
                          padding="SAME")
        net = slim.conv2d(net,
                          256, [3, 3],
                          stride=1,
                          scope="conv3_9",
                          padding="SAME")

        #conv4
        net = slim.conv2d(net, 512, [3, 3], stride=2, scope="conv4_1")
        net = slim.conv2d(net, 512, [3, 3], stride=1, scope="conv4_2")
        net = slim.conv2d(net, 512, [3, 3], stride=1, scope="conv4_3")

        #fc1
        net = slim.flatten(net)
        net = slim.fully_connected(net, 512, activation_fn=None)
        #MFM_sfc1
        net = MFM(net)

    return net
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    print("improved mobilenet small")
    width_multiplier = 0.25
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with tf.variable_scope('mobilenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.convolution2d(images,
                                         round(64 * width_multiplier), [3, 3],
                                         stride=2,
                                         padding='SAME',
                                         scope='conv_1')
                net = _depthwise_separable_conv(net,
                                                96,
                                                width_multiplier,
                                                sc='conv_ds_2_1')
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_3')
                net = _squeeze_excitation_layer(net,
                                                int(128 * width_multiplier), 8)
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                sc='conv_ds_4')
                net = _squeeze_excitation_layer(net,
                                                int(128 * width_multiplier), 8)
                net = _depthwise_separable_conv(net,
                                                256,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_5')
                net = _squeeze_excitation_layer(net,
                                                int(256 * width_multiplier), 8)

                net1 = slim.convolution2d(net,
                                          int(256 * width_multiplier),
                                          kernel_size=[1, 1],
                                          scope='conv6-1')
                net2 = _depthwise_separable_conv(net,
                                                 256,
                                                 width_multiplier,
                                                 sc='conv_ds_6_1')
                net3 = _depthwise_separable_conv(net2,
                                                 256,
                                                 width_multiplier,
                                                 sc='conv_ds_6_2')
                net3 = _squeeze_excitation_layer(net3,
                                                 int(256 * width_multiplier),
                                                 8)
                net = tf.add_n([net1, net3])
                net = tf.nn.relu6(net)

                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_7')
                net_512_1 = _squeeze_excitation_layer(
                    net, int(512 * width_multiplier), 8)

                net_1_1 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    128,
                                                    width_multiplier,
                                                    sc='conv_ds_8')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(128 * width_multiplier), 8)

                net_1_2 = slim.convolution2d(net_1_2,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_512_1, net_1_3])
                net_512_1 = tf.nn.relu6(net)

                net_1_1 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    128,
                                                    width_multiplier,
                                                    sc='conv_ds_9')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(128 * width_multiplier), 8)
                net_1_2 = slim.convolution2d(net_1_2,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_512_1, net_1_3])
                net_512_1 = tf.nn.relu6(net)

                net_1_1 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    128,
                                                    width_multiplier,
                                                    sc='conv_ds_10')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(128 * width_multiplier), 8)
                net_1_2 = slim.convolution2d(net_1_2,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_512_1, net_1_3])
                net_512_1 = tf.nn.relu6(net)

                net_1_1 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    128,
                                                    width_multiplier,
                                                    sc='conv_ds_11')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(128 * width_multiplier), 8)
                net_1_2 = slim.convolution2d(net_1_2,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_512_1, net_1_3])
                net_512_1 = tf.nn.relu6(net)

                net_1_1 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_512_1,
                                             round(128 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    128,
                                                    width_multiplier,
                                                    sc='conv_ds_12')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(128 * width_multiplier), 8)
                net_1_2 = slim.convolution2d(net_1_2,
                                             round(512 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_512_1, net_1_3])
                net = tf.nn.relu6(net)

                net = _depthwise_separable_conv(net,
                                                768,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_13')
                net_768_1 = _squeeze_excitation_layer(
                    net, int(768 * width_multiplier), 8)

                net_1_1 = slim.convolution2d(net_768_1,
                                             round(256 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_1 = slim.convolution2d(net_1_1,
                                             round(768 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)

                net_1_2 = slim.convolution2d(net_768_1,
                                             round(256 * width_multiplier),
                                             [1, 1],
                                             padding='SAME')
                net_1_2 = _depthwise_separable_conv(net_1_2,
                                                    256,
                                                    width_multiplier,
                                                    sc='conv_ds_14')
                net_1_2 = _squeeze_excitation_layer(
                    net_1_2, int(256 * width_multiplier), 8)
                net_1_2 = slim.convolution2d(net_1_2,
                                             round(768 * width_multiplier),
                                             [1, 1],
                                             padding='SAME',
                                             activation_fn=None)
                net_1_3 = tf.add_n([net_1_1, net_1_2])
                net = tf.add_n([net_768_1, net_1_3])
                net = tf.nn.relu6(net)

                net = _depthwise_separable_conv(net,
                                                1024,
                                                width_multiplier,
                                                sc='conv_ds_15')
                net = _squeeze_excitation_layer(net,
                                                int(1024 * width_multiplier),
                                                8)
                # net = slim.avg_pool2d(net, [7, 7], scope='avg_pool_15')
                # net = tf.reduce_mean(net, [1, 2], name='avg_pool_15', keep_dims=True)
                kernel_size = _reduced_kernel_size_for_small_input(net, [3, 3])

                # Global depthwise conv2d
                net = slim.separable_conv2d(inputs=net,
                                            num_outputs=None,
                                            kernel_size=kernel_size,
                                            stride=1,
                                            depth_multiplier=1.0,
                                            activation_fn=None,
                                            padding='VALID')
                net = slim.conv2d(inputs=net,
                                  num_outputs=int(1024 * width_multiplier),
                                  kernel_size=[1, 1],
                                  stride=1,
                                  activation_fn=None,
                                  padding='VALID')

                net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                net = slim.fully_connected(net,
                                           bottleneck_layer_size,
                                           activation_fn=None,
                                           scope='fc_16')

                # K is assumed to be the Keras backend (tensorflow.keras backend)
                # imported elsewhere; stats_graph is a FLOPs/params profiling
                # helper defined in the source project.
                sess = K.get_session()
                graph = sess.graph
                stats_graph(graph)

    return net, None
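`_squeeze_excitation_layer` (and `_reduced_kernel_size_for_small_input`) are assumed to be defined elsewhere. Matching the call pattern `_squeeze_excitation_layer(net, out_dim, ratio)`, a standard squeeze-and-excitation sketch:

# Sketch only: global average pool, bottleneck FC, sigmoid gate, rescale.
def _squeeze_excitation_layer(inputs, out_dim, ratio):
    squeeze = tf.reduce_mean(inputs, axis=[1, 2])  # [N, C] channel descriptor
    excite = slim.fully_connected(squeeze, out_dim // ratio,
                                  activation_fn=tf.nn.relu, normalizer_fn=None)
    excite = slim.fully_connected(excite, out_dim,
                                  activation_fn=tf.nn.sigmoid, normalizer_fn=None)
    return inputs * tf.reshape(excite, [-1, 1, 1, out_dim])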
Example #21
def inference(image):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(0.1)):
        #conv1
        net = slim.conv2d(image, 96, [5, 5], stride=1, scope="conv1", padding="SAME")
        #MFM1
        net = MFM(net)
        #pool1
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool1")
        #conv2_x
        for i in range(3):
            net = block2(net)
        #conv2a
        net = slim.conv2d(net, 96, [1, 1], stride=1, scope="conv2a", padding="SAME")
        #MFM2a
        net = MFM(net)
        #conv2
        net = slim.conv2d(net, 192, [3, 3], stride=1, scope="conv2", padding="SAME")
        #MFM2
        net = MFM(net)
        #pool2
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool2")
        #conv3_x
        for i in range(4):
            net = block4(net)
        #conv3a
        net = slim.conv2d(net, 192, [1, 1], stride=1, scope="conv3a", padding="SAME")
        #MFM3a
        net = MFM(net)
        #conv3
        net = slim.conv2d(net, 384, [3, 3], stride=1, scope="conv3", padding="SAME")
        #MFM3
        net = MFM(net)
        #pool3
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool3")
        #conv4_x
        for i in range(6):
            net = block4(net)
        #conv4a
        net = slim.conv2d(net, 384, [1, 1], stride=1, scope="conv4a")
        #MFM4a
        net = MFM(net)
        #conv4
        net = slim.conv2d(net, 256, [3, 3], stride=1, scope="conv4")
        #MFM4
        net = MFM(net)
        #conv5_x
        for i in range(3):
            net = block5(net)
        #conv5a
        net = slim.conv2d(net, 256, [1, 1], stride=1, scope="conv5a")
        #MFM5a
        net = MFM(net)
        #conv5
        net = slim.conv2d(net, 256, [3, 3], stride=1, scope="conv5")
        #MFM5
        net = MFM(net)
        #pool4
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool4")
        #fc1
        net = slim.flatten(net)
        net = slim.fully_connected(net, 512, activation_fn=None)
        #MFM_fc1
        net = MFM(net)

    return net
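`block2` and `block5` are not shown (only `block4` appears above, in Example #6); by analogy they are presumably the same bottleneck-plus-concat pattern at other widths. A hypothetical `block2`:

# Hypothetical: modeled directly on block4 above; the widths are guesses.
def block2(net):
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(0.1)):
        conv2_x_1 = slim.conv2d(net, 48, [1, 1], scope="conv2_x_1", padding="VALID")
        conv2_x_2 = slim.conv2d(conv2_x_1, 48, [3, 3], scope="conv2_x_2", padding="SAME")
        conv2_x_3 = slim.conv2d(conv2_x_2, 192, [1, 1], scope="conv2_x_3", padding="SAME")
        net = tf.concat([net, conv2_x_3], 3)
    return net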
Example #22
def mobilenet(inputs,
              num_classes=1000,
              is_training=True,
              width_multiplier=1,
              reuse=None,
              weight_decay=0.0,
              scope='MobileNet'):
    """ MobileNet
  For more detail, please refer to Google's paper (https://arxiv.org/abs/1704.04861).
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    width_multiplier: multiplier applied to the number of filters in each layer.
    reuse: whether or not the network and its variables should be reused.
    weight_decay: L2 regularization strength applied to the conv weights.
    scope: Optional scope for the variables.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, `num_classes`]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        """ Helper function to build the depth-wise separable convolution layer.
    """
        num_pwc_filters = round(num_pwc_filters * width_multiplier)
        _stride = 2 if downsample else 1

        # skip pointwise by setting num_outputs=None
        depthwise_conv = slim.separable_convolution2d(
            inputs,
            num_outputs=None,
            stride=_stride,
            depth_multiplier=1,
            kernel_size=[3, 3],
            normalizer_fn=slim.batch_norm,
            scope=sc + '/depthwise_conv')

        bn = depthwise_conv  #slim.batch_norm(depthwise_conv, scope=sc+'/dw_batch_norm')
        print(bn)
        pointwise_conv = slim.convolution2d(bn,
                                            num_pwc_filters,
                                            kernel_size=[1, 1],
                                            normalizer_fn=slim.batch_norm,
                                            scope=sc + '/pointwise_conv')
        bn = pointwise_conv  #slim.batch_norm(pointwise_conv, scope=sc+'/pw_batch_norm')
        return bn

    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
        'is_training': is_training
    }
    end_points = {}
    with slim.arg_scope(
        [slim.convolution2d, slim.separable_convolution2d],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            activation_fn=None):  #,
        #outputs_collections=[end_points_collection]):
        with tf.variable_scope(scope, [inputs], reuse=reuse):
            #end_points_collection = sc.name + '_end_points'

            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training,
                                activation_fn=tf.nn.relu):
                print(inputs)  # expected shape: batch_size x 67 x 67 x 3 (input side roughly 65-80)
                net = slim.convolution2d(
                    inputs,
                    round(32 * width_multiplier), [3, 3],
                    stride=1,
                    padding='SAME',
                    scope='conv_1')
                print(net)
                #net = slim.batch_norm(net, scope='conv_1/batch_norm')
                net = _depthwise_separable_conv(net,
                                                48,
                                                width_multiplier,
                                                sc='conv_ds_2')
                print(net)
                net = _depthwise_separable_conv(net,
                                                96,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_3')
                print(net)
                net = _depthwise_separable_conv(net,
                                                96,
                                                width_multiplier,
                                                sc='conv_ds_4')
                print(net)
                net = _depthwise_separable_conv(
                    net,
                    192,
                    width_multiplier,
                    downsample=True,
                    sc='conv_ds_5')  #downsample=True,
                print(net)
                net = _depthwise_separable_conv(net,
                                                192,
                                                width_multiplier,
                                                sc='conv_ds_6')
                print(net)
                net = _depthwise_separable_conv(net,
                                                192,
                                                width_multiplier,
                                                sc='conv_ds_7')
                print(net)
                net = _depthwise_separable_conv(net,
                                                384,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_8')
                print(net)
                net = _depthwise_separable_conv(net,
                                                384,
                                                width_multiplier,
                                                sc='conv_ds_9')
                print(net)
                net = _depthwise_separable_conv(net,
                                                384,
                                                width_multiplier,
                                                sc='conv_ds_10')
                print(net)
                net = _depthwise_separable_conv(net,
                                                384,
                                                width_multiplier,
                                                sc='conv_ds_11')
                print(net)
                net = _depthwise_separable_conv(net,
                                                384,
                                                width_multiplier,
                                                sc='conv_ds_12')
                print(net)
                net = _depthwise_separable_conv(net,
                                                768,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_13')
                print(net)
                end_points['conv_ds_13'] = net
                net = _depthwise_separable_conv(net,
                                                768,
                                                width_multiplier,
                                                sc='conv_ds_14')
                print(net)
                net = slim.avg_pool2d(net, [5, 5], scope='avg_pool_15')
                print(net)
                end_points['avg_pool_15'] = net

        logits = tf.squeeze(net, [1, 2], name='logits')
        print(logits)
        end_points['logits'] = logits
    pre_embeddings = slim.fully_connected(
        logits,
        128,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
        weights_regularizer=slim.l2_regularizer(0.0),
        normalizer_fn=slim.batch_norm,
        normalizer_params=batch_norm_params,
        scope='Bottleneck',
        reuse=False)
    #pre_embeddings = slim.batch_norm(pre_embeddings , scope='Bottleneck/batch_norm')
    end_points['Bottleneck'] = pre_embeddings
    print(pre_embeddings)
    embeddings = tf.nn.l2_normalize(pre_embeddings,
                                    1,
                                    1e-10,
                                    name='embeddings')
    print(embeddings)
    end_points['embeddings'] = embeddings
    return embeddings, end_points
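A minimal usage sketch for the embedding network above; the 67x67 input size follows the shape comment in the code, and everything else here is an assumption rather than part of the original:

import tensorflow as tf

# Hypothetical usage sketch: build the graph and read off the embeddings.
images = tf.placeholder(tf.float32, [None, 67, 67, 3], name='input')
embeddings, end_points = mobilenet(images, is_training=True)
# embeddings is L2-normalized, so squared Euclidean distances lie in [0, 4].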
Example #23
def inference(images,
              num_classes,
              dropout_rate=0.8,
              is_training=True,
              weight_decay=4e-4,
              scope="My_Net"):
    """
    :param images: Input images, tensor for[batch_size, x, x, 3]
    :param num_classes: number of image class
    :param dropout_rate: rate for dropout
    :param is_training: Whether or not training
    :param weight_decay: regularing args for weight
    :return: logits, endpoints of the defined network
    """
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with tf.variable_scope(scope, [images]):
        end_point = {}
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu6,
                weights_regularizer=slim.l2_regularizer(weight_decay),
                weights_initializer=slim.xavier_initializer_conv2d(),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                net = slim.conv2d(images, 64, [3, 3], stride=2, scope='conv1')
                end_point['conv1'] = net
                net = slim.conv2d(net, 64, [3, 3], stride=2, scope='conv2')
                end_point['conv2'] = net
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool2')
                end_point['maxpool2'] = net
                net = slim.conv2d(net, 96, [3, 3], scope='conv3')
                end_point['conv3'] = net
                net = slim.conv2d(net, 96, [3, 3], scope='conv4')
                end_point['conv4'] = net
                net = slim.conv2d(net, 96, [3, 3], scope='conv5')
                end_point['conv5'] = net
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool5')
                end_point['maxpool5'] = net
                net = slim.conv2d(net, 128, [3, 3], stride=2, scope='conv6')
                end_point['conv6'] = net
                net = slim.dropout(net, dropout_rate)
                net = slim.conv2d(net,
                                  num_classes, [1, 1],
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='conv7')
                end_point['conv7'] = net
                net = slim.avg_pool2d(net,
                                      kernel_size=net.get_shape()[1:-1],
                                      stride=1,
                                      scope='global_avg_pool7')
                end_point['avgpool7'] = net
                net = tf.squeeze(net, [1, 2], name='logits')
                end_point['out'] = net
    return net, end_point
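A quick usage sketch for My_Net; the 128x128 input size and the class count are illustrative assumptions:

import tensorflow as tf

# Hypothetical usage sketch for the classifier above.
images = tf.placeholder(tf.float32, [None, 128, 128, 3])
logits, end_points = inference(images, num_classes=10, is_training=True)
probabilities = tf.nn.softmax(logits)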
Example #24
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse) as sc:
            end_points_collection = sc.original_name_scope + '_end_points'
            with slim.arg_scope(
                [slim.conv2d, slim.avg_pool2d, slim.max_pool2d, fire_module],
                    outputs_collections=[end_points_collection]):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=phase_train):
                    net = slim.conv2d(images,
                                      96, [7, 7],
                                      stride=2,
                                      scope='conv1')
                    net = slim.max_pool2d(net, [3, 3],
                                          stride=2,
                                          scope='maxpool1')
                    net = fire_module(net, 16, 64, scope='fire2')
                    net = fire_module(net, 16, 64, scope='fire3')
                    net = fire_module(net, 32, 128, scope='fire4')
                    net = slim.max_pool2d(net, [2, 2],
                                          stride=2,
                                          scope='maxpool4')
                    net = fire_module(net, 32, 128, scope='fire5')
                    net = fire_module(net, 48, 192, scope='fire6')
                    net = fire_module(net, 48, 192, scope='fire7')
                    net = fire_module(net, 64, 256, scope='fire8')
                    net = slim.max_pool2d(net, [3, 3],
                                          stride=2,
                                          scope='maxpool8')
                    net = fire_module(net, 64, 256, scope='fire9')
                    net = slim.dropout(net, keep_probability)
                    net = slim.conv2d(net,
                                      1000, [1, 1],
                                      activation_fn=None,
                                      normalizer_fn=None,
                                      scope='conv10')
                    net = slim.avg_pool2d(net,
                                          net.get_shape()[1:3],
                                          scope='avgpool10')
                    net = tf.squeeze(net, [1, 2], name='logits')
                    net = slim.fully_connected(net,
                                               bottleneck_layer_size,
                                               activation_fn=None,
                                               scope='Bottleneck',
                                               reuse=False)
                    # Convert end_points_collection into a dictionary of end_points.
                    end_points = slim.utils.convert_collection_to_dict(
                        end_points_collection)
    return net, end_points
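fire_module is not defined in this snippet; a minimal slim-style sketch under the assumption that it matches the standard SqueezeNet design (1x1 squeeze, parallel 1x1/3x3 expand, channel concat). The slim.add_arg_scope decorator lets the outputs_collections arg_scope above reach it:

import tensorflow as tf
slim = tf.contrib.slim

@slim.add_arg_scope
def fire_module(inputs, squeeze_depth, expand_depth, scope=None,
                outputs_collections=None):
    # Sketch of the SqueezeNet fire module (Iandola et al., 2016);
    # the details here are assumptions, not the original helper.
    with tf.variable_scope(scope, 'fire', [inputs]) as sc:
        squeezed = slim.conv2d(inputs, squeeze_depth, [1, 1], scope='squeeze')
        e1x1 = slim.conv2d(squeezed, expand_depth, [1, 1], scope='expand_1x1')
        e3x3 = slim.conv2d(squeezed, expand_depth, [3, 3], scope='expand_3x3')
        net = tf.concat([e1x1, e3x3], 3)
        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.name, net)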
Example #25
def P_Net_new(inputs, label=None, bbox_target=None, training=True):
    #define common param
    is_training = training
    #dropout_keep_prob=0.8,
    #bottleneck_layer_size=1000,
    #width_multiplier=1
    weight_decay = 0.0005
    #reuse = None,
    #scope='mtcnn'
    ###########
    #
    '''
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    '''
    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        """ Helper function to build the depth-wise separable convolution layer.
        """
        #num_pwc_filters = round(num_pwc_filters * width_multiplier)
        _stride = 2 if downsample else 1
        # skip pointwise by setting num_outputs=None
        depthwise_conv = slim.separable_convolution2d(
            inputs,
            num_outputs=None,
            stride=_stride,
            depth_multiplier=1,
            kernel_size=[3, 3],
            scope=sc + '/depthwise_conv',
        )

        #bn = slim.batch_norm(depthwise_conv, scope=sc + '/dw_batch_norm')
        pointwise_conv = slim.convolution2d(
            depthwise_conv,
            num_pwc_filters,
            kernel_size=[1, 1],
            scope=sc + '/pointwise_conv',
        )
        #bn = slim.batch_norm(pointwise_conv, scope=sc + '/pw_batch_norm')
        return pointwise_conv

    #with tf.variable_scope(scope) as sc:
    #end_points_collection = sc.name + '_end_points'
    with slim.arg_scope(
        [slim.convolution2d, slim.separable_convolution2d],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            biases_initializer=slim.init_ops.zeros_initializer(),
            weights_regularizer=slim.l2_regularizer(
                weight_decay),  #l1_regularizer
            normalizer_fn=None,
            #normalizer_params=batch_norm_params,
            #activation_fn = prelu,
            #outputs_collections=[end_points_collection],
            padding='SAME'):
        print(inputs.get_shape())
        with slim.arg_scope(
            [slim.batch_norm],
                is_training=is_training,
        ):
            #net = slim.conv2d(inputs, 10, 3, stride=1,scope='conv1')

            net = slim.convolution2d(inputs, 12, 3, stride=1, scope='conv1')
            print(net.get_shape())
            net = _depthwise_separable_conv(net,
                                            12,
                                            1,
                                            downsample=True,
                                            sc='down_1')
            #net = slim.max_pool2d(net, kernel_size=[2,2], stride=2, scope='pool1', padding='SAME')
            print(net.get_shape())
            net = _depthwise_separable_conv(net, 16, 1, sc='conv3')
            net = _depthwise_separable_conv(net,
                                            16,
                                            1,
                                            downsample=True,
                                            sc='down_2')
            print(net.get_shape())
            net = _depthwise_separable_conv(net, 32, 1, sc='conv4')
            net = _depthwise_separable_conv(net,
                                            32,
                                            1,
                                            downsample=True,
                                            sc='down_3')
            net = _depthwise_separable_conv(net,
                                            32,
                                            1,
                                            downsample=True,
                                            sc='down_4')
            print(net.get_shape())

            #net = slim.conv2d(inputs, num_outputs=10, kernel_size=[3,3], stride=1,scope='conv1')
            #print (net.get_shape())
            '''
            net = slim.max_pool2d(net, kernel_size=[2,2], stride=2, scope='pool1', padding='SAME')
            print (net.get_shape())
            net = slim.conv2d(net,num_outputs=16,kernel_size=[3,3],stride=1,scope='conv2')
            print (net.get_shape())
            net = slim.conv2d(net,num_outputs=32,kernel_size=[3,3],stride=1,scope='conv3')
            print (net.get_shape())
            '''
            #batch*H*W*2
            #conv5_1 = slim.convolution2d(net, 2 , 1, stride=1,  scope='conv5_1')
            #print (conv5_1.get_shape())
            conv6 = slim.conv2d(net,
                                num_outputs=2,
                                kernel_size=[1, 1],
                                stride=1,
                                scope='conv4_1',
                                activation_fn=tf.nn.softmax)
            #conv6 = slim.fully_connected(conv5_1, 2, activation_fn=None, scope='fc_1',activation_fn=tf.nn.softmax)
            print(conv6.get_shape())
            #batch*H*W*4
            bbox_pred = slim.conv2d(net,
                                    num_outputs=4,
                                    kernel_size=[1, 1],
                                    stride=1,
                                    scope='conv4_2',
                                    activation_fn=None)
            #conv5_2 =  slim.convolution2d(net, 4 , 1, stride=1,  scope='conv5_2')
            #print(conv5_2.get_shape())
            #bbox_pred = slim.fully_connected(conv5_2, 4, activation_fn=None, scope='fc_2')
            print(bbox_pred.get_shape())

            #cls_prob_original = conv4_1
            #bbox_pred_original = bbox_pred
            if training:
                #batch*2
                cls_prob = tf.squeeze(conv6, [1, 2], name='cls_prob')
                cls_loss = cls_ohem(cls_prob, label)
                #batch
                bbox_pred = tf.squeeze(bbox_pred, [1, 2], name='bbox_pred')
                bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

                accuracy = cal_accuracy(cls_prob, label)
                L2_loss = tf.add_n(slim.losses.get_regularization_losses())
                return cls_loss, bbox_loss, L2_loss, accuracy
            #test
            else:
                #when test,batch_size = 1
                cls_pro_test = tf.squeeze(conv6, axis=0)
                bbox_pred_test = tf.squeeze(bbox_pred, axis=0)
                return cls_pro_test, bbox_pred_test
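cls_ohem, bbox_ohem and cal_accuracy belong to the surrounding MTCNN training code and are not shown; purely as an illustration, a common shape for cls_ohem (online hard example mining: rank per-sample cross-entropy and keep only the hardest fraction, masking out part samples with negative labels) is sketched below:

import tensorflow as tf

def cls_ohem(cls_prob, label, keep_ratio=0.7):
    # Sketch only; the original helper is not part of this snippet.
    zeros = tf.zeros_like(label)
    # Part/landmark samples carry negative labels; map them to class 0
    # for indexing and mask them out of the loss below.
    label_valid = tf.where(tf.less(label, 0), zeros, label)
    num = tf.shape(cls_prob)[0]
    indices = tf.range(num) * 2 + tf.cast(label_valid, tf.int32)
    prob = tf.gather(tf.reshape(cls_prob, [-1]), indices)
    loss = -tf.log(prob + 1e-10)
    valid = tf.cast(tf.greater_equal(label, 0), tf.float32)
    keep_num = tf.cast(tf.reduce_sum(valid) * keep_ratio, tf.int32)
    loss, _ = tf.nn.top_k(loss * valid, k=keep_num)
    return tf.reduce_mean(loss)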
Example #26
def inference(image,
              dropout_keep_prob=0.8,
              phase_train=True,
              scope="inference",
              weight_decay=0.0,
              bottleneck_layer_size=256):
    end_points = {}
    with tf.variable_scope(scope, 'inference', [image]):
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
                activation_fn=None,
                weights_initializer=slim.xavier_initializer_conv2d(
                    uniform=True),
                weights_regularizer=slim.l2_regularizer(weight_decay)):
            #conv1
            net = slim.conv2d(image,
                              96, [5, 5],
                              stride=1,
                              scope="conv1",
                              padding="SAME")
            #MFM1
            net = MFM(net)
            #pool1
            net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool1")
            #conv2a
            net = slim.conv2d(net,
                              96, [1, 1],
                              stride=1,
                              scope="conv2a",
                              padding="SAME")
            #MFM2a
            net = MFM(net)
            #conv2
            net = slim.conv2d(net,
                              192, [3, 3],
                              stride=1,
                              scope="conv2",
                              padding="SAME")
            #MFM2
            net = MFM(net)
            #pool2
            net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool2")
            #conV3a
            net = slim.conv2d(net,
                              192, [1, 1],
                              stride=1,
                              scope="conv3a",
                              padding="SAME")
            #MFM3a
            net = MFM(net)
            #conv3
            net = slim.conv2d(net,
                              384, [3, 3],
                              stride=1,
                              scope="conv3",
                              padding="SAME")
            #MFM3
            net = MFM(net)
            #pool3
            net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool3")
            #conv4a
            net = slim.conv2d(net,
                              384, [1, 1],
                              stride=1,
                              scope="conv4a",
                              padding="SAME")
            #MFM4a
            net = MFM(net)
            #conv4
            net = slim.conv2d(net,
                              256, [3, 3],
                              stride=1,
                              scope="conv4",
                              padding="SAME")
            #MFM4
            net = MFM(net)
            #conv5a
            net = slim.conv2d(net,
                              256, [1, 1],
                              stride=1,
                              scope="conv5a",
                              padding="SAME")
            #MFM5a
            net = MFM(net)
            #conv5
            net = slim.conv2d(net, 256, [3, 3], stride=1, scope="conv5")
            #pool4
            net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool4")
            #fc1
            net = slim.flatten(net)
            #dropout
            net = slim.dropout(net,
                               dropout_keep_prob,
                               is_training=phase_train,
                               scope='Dropout')
            net = slim.fully_connected(net,
                                       bottleneck_layer_size * 2,
                                       activation_fn=None,
                                       scope="fc1")
            #MFM_FC1
            net = MFM(net)

            net = slim.flatten(net)
    return net, end_points
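The MFM helper used throughout is not shown; assuming it is the Max-Feature-Map activation from Light CNN (Wu et al.), a minimal sketch that works for both conv feature maps and flattened fully-connected outputs:

import tensorflow as tf

def MFM(x):
    # Sketch of Max-Feature-Map: split the channel (last) dimension in
    # half and keep the elementwise maximum of the two halves.
    a, b = tf.split(x, num_or_size_splits=2, axis=-1)
    return tf.maximum(a, b)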
Example #27
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    layers = [
        [16, 16, 3, 2, "RE", True, 16],
        [16, 24, 3, 2, "RE", False, 72],
        [24, 24, 3, 1, "RE", False, 88],
        [24, 40, 5, 2, "RE", True, 96],
        [40, 40, 5, 1, "RE", True, 240],
        [40, 40, 5, 1, "RE", True, 240],
        [40, 48, 5, 1, "HS", True, 120],
        [48, 48, 5, 1, "HS", True, 144],
        [48, 96, 5, 2, "HS", True, 288],
        [96, 96, 5, 1, "HS", True, 576],
        [96, 96, 5, 1, "HS", True, 576],
    ]
    multiplier = 1
    end_points = {}
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                x = slim.convolution2d(images,
                                       int(16 * multiplier), [3, 3],
                                       stride=2,
                                       activation_fn=hard_swish)
                for idx, (in_channels, out_channels, kernel_size, stride,
                          activatation, se, exp_size) in enumerate(layers):
                    in_channels = int(in_channels * multiplier)
                    out_channels = int(out_channels * multiplier)
                    exp_size = int(exp_size * multiplier)
                    x = mobilenet_v3_block(
                        x, [kernel_size, kernel_size],
                        batch_norm_params,
                        exp_size,
                        out_channels,
                        stride,
                        "bneck{}".format(idx),
                        shortcut=(in_channels == out_channels),
                        activatation=activatation,
                        se=se)
                    end_points["bneck{}".format(idx)] = x
                x = slim.convolution2d(x,
                                       int(576 * multiplier), [1, 1],
                                       stride=1)
                net = slim.avg_pool2d(x, x.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net,
                                           bottleneck_layer_size,
                                           activation_fn=None,
                                           scope='Bottleneck',
                                           reuse=False)
                sess = K.get_session()
                graph = sess.graph
                stats_graph(graph)

    return net, None
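mobilenet_v3_block and stats_graph come from the surrounding code; hard_swish, however, is standard MobileNetV3, so a sketch is safe to give:

import tensorflow as tf

def hard_swish(x, name='hard_swish'):
    # h-swish activation from MobileNetV3: x * ReLU6(x + 3) / 6.
    with tf.name_scope(name):
        return x * tf.nn.relu6(x + 3.0) / 6.0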
Example #28
def inference(image):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(0.1),
            activation_fn=None):
        #conv1
        net = slim.conv2d(image,
                          96, [5, 5],
                          stride=1,
                          scope="conv1",
                          padding="SAME")
        #MFM1
        net = MFM(net)
        #pool1
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool1")
        #conv2_X
        net_conv2_x = slim.repeat(net,
                                  2,
                                  slim.conv2d,
                                  48, [3, 3],
                                  scope="conv2_x",
                                  padding="SAME")
        net = tf.concat([net, net_conv2_x], 2)
        #conv2a
        net = slim.conv2d(net,
                          96, [1, 1],
                          stride=1,
                          scope="conv2a",
                          padding="SAME")
        #MFM2a
        net = MFM(net)
        #conv2
        net = slim.conv2d(net,
                          192, [3, 3],
                          stride=1,
                          scope="conv2",
                          padding="SAME")
        #MFM2
        net = MFM(net)
        #pool2
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool2")
        #conv3_x
        net_conv3_x_1 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    96, [3, 3],
                                    scope="conv3_x_1",
                                    padding="SAME")
        net = tf.concat([net, net_conv3_x_1], 2)
        net_conv3_x_2 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    96, [3, 3],
                                    scope="conv3_x_2",
                                    padding="SAME")
        net = tf.concat([net, net_conv3_x_2], 2)
        #conv3a
        net = slim.conv2d(net,
                          192, [1, 1],
                          stride=1,
                          scope="conv3a",
                          padding="SAME")
        #MFM3a
        net = MFM(net)
        #conv3
        net = slim.conv2d(net,
                          384, [3, 3],
                          stride=1,
                          scope="conv3",
                          padding="SAME")
        #MFM3
        net = MFM(net)
        #pool3
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool3")
        #conv4_x
        net_conv4_x_1 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    192, [3, 3],
                                    scope="conv4_x_1",
                                    padding="SAME")
        net = tf.concat([net, net_conv4_x_1], 2)
        net_conv4_x_2 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    192, [3, 3],
                                    scope="conv4_x_2",
                                    padding="SAME")
        net = tf.concat([net, net_conv4_x_2], 2)
        net_conv4_x_3 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    192, [3, 3],
                                    scope="conv4_x_3",
                                    padding="SAME")
        net = tf.concat([net, net_conv4_x_3], 2)
        #conv4a
        net = slim.conv2d(net, 384, [1, 1], stride=1, scope="conv4a")
        #MFM4a
        net = MFM(net)
        #conv4
        net = slim.conv2d(net, 256, [3, 3], stride=1, scope="conv4")
        #MFMF4
        net = MFM(net)
        #conv5_x
        net_conv5_x_1 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    128, [3, 3],
                                    scope="conv5_x_1",
                                    padding="SAME")
        net = tf.concat([net, net_conv5_x_1], 2)
        net_conv5_x_2 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    128, [3, 3],
                                    scope="conv5_x_2",
                                    padding="SsAME")
        net = tf.concat([net, net_conv5_x_2], 2)
        net_conv5_x_3 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    128, [3, 3],
                                    scope="conv5_x_3",
                                    padding="SAME")
        net = tf.concat([net, net_conv5_x_3], 2)
        net_conv5_x_4 = slim.repeat(net,
                                    2,
                                    slim.conv2d,
                                    128, [3, 3],
                                    scope="conv5_x_4",
                                    padding="SAME")
        net = tf.concat([net, net_conv5_x_4], 2)
        #conv5a
        net = slim.conv2d(net, 256, [1, 1], stride=1, scope="conv5a")
        #MFM5a
        net = MFM(net)
        #conv5
        net = slim.conv2d(net, 256, [3, 3], stride=1, scope="conv5")
        #MFMF5
        net = MFM(net)
        #pool4
        net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool4")
        #fc1
        net = slim.flatten(net)
        net = slim.fully_connected(net, 512, activation_fn=None)
        #MFM_sfc1
        net = MFM(net)

    return net
Example #29
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    width_multiplier = 0.25
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with tf.variable_scope('mobilenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.convolution2d(images,
                                         round(32 * width_multiplier), [3, 3],
                                         stride=2,
                                         padding='SAME',
                                         scope='conv_1')
                net = _depthwise_separable_conv(net,
                                                64,
                                                width_multiplier,
                                                sc='conv_ds_2')
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_3')
                net = _depthwise_separable_conv(net,
                                                128,
                                                width_multiplier,
                                                sc='conv_ds_4')
                net = _depthwise_separable_conv(net,
                                                256,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_5')
                net = _depthwise_separable_conv(net,
                                                256,
                                                width_multiplier,
                                                sc='conv_ds_6')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_7')

                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_8')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_9')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_10')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_11')
                net = _depthwise_separable_conv(net,
                                                512,
                                                width_multiplier,
                                                sc='conv_ds_12')

                net = _depthwise_separable_conv(net,
                                                1024,
                                                width_multiplier,
                                                downsample=True,
                                                sc='conv_ds_13')
                net = _depthwise_separable_conv(net,
                                                1024,
                                                width_multiplier,
                                                sc='conv_ds_14')
                # net = slim.avg_pool2d(net, [7, 7], scope='avg_pool_15')
                net = tf.reduce_mean(net, [1, 2],
                                     name='avg_pool_15',
                                     keep_dims=True)

                net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                net = slim.fully_connected(net,
                                           bottleneck_layer_size,
                                           activation_fn=None,
                                           scope='fc_16')

                sess = K.get_session()
                graph = sess.graph
                stats_graph(graph)

    return net, None
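_depthwise_separable_conv here is presumably the helper shown in Example #22. A hypothetical usage sketch; the 160x160 input size is an assumption:

import tensorflow as tf

# Hypothetical usage: build the 0.25x-width tower and L2-normalize the
# bottleneck, as is typical for face-embedding training.
images = tf.placeholder(tf.float32, [None, 160, 160, 3])
prelogits, _ = inference(images, keep_probability=0.8, phase_train=True)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')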
Example #30
def O_Net_new1(inputs,
               label=None,
               bbox_target=None,
               landmark_target=None,
               training=True):
    #define common param
    #is_training=training
    #dropout_keep_prob=0.8
    #bottleneck_layer_size=1000,
    width_multiplier = 1
    weight_decay = 0.0005
    #reuse = None,

    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        #scale
        #'scale': True,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }

    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        """ Helper function to build the depth-wise separable convolution layer.
        """
        num_pwc_filters = round(num_pwc_filters * width_multiplier)
        _stride = 2 if downsample else 1

        # skip pointwise by setting num_outputs=None
        depthwise_conv = slim.separable_convolution2d(inputs,
                                                      num_outputs=None,
                                                      stride=_stride,
                                                      depth_multiplier=1,
                                                      kernel_size=[3, 3],
                                                      scope=sc +
                                                      '/depthwise_conv')
        #padding='VALID')
        #print(depthwise_conv.get_shape())
        #bn = slim.batch_norm(depthwise_conv, scope=sc + '/dw_batch_norm')

        pointwise_conv = slim.convolution2d(depthwise_conv,
                                            num_pwc_filters,
                                            kernel_size=[1, 1],
                                            scope=sc + '/pointwise_conv',
                                            padding='SAME')
        #print(pointwise_conv.get_shape())
        #bn = slim.batch_norm(pointwise_conv, scope=sc + '/pw_batch_norm')
        return pointwise_conv

    with slim.arg_scope(
        [slim.convolution2d, slim.separable_convolution2d],
            weights_initializer=slim.xavier_initializer_conv2d(),
            biases_initializer=slim.init_ops.zeros_initializer(),
            weights_regularizer=slim.l2_regularizer(
                weight_decay),  #l1_regularizer?
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            #activation_fn = prelu,
            #outputs_collections=[end_points_collection],
    ):
        #padding = 'VALID'):
        with slim.arg_scope(
            [slim.batch_norm],
                is_training=training,
        ):

            print(inputs.get_shape())
            net = slim.convolution2d(inputs,
                                     round(16 * width_multiplier), [3, 3],
                                     stride=1,
                                     padding='SAME',
                                     scope='conv_1')
            #net = _depthwise_separable_conv(inputs, 32,  width_multiplier, sc='conv_ds_1')
            print(net.get_shape())

            net = _depthwise_separable_conv(net,
                                            32,
                                            width_multiplier,
                                            downsample=True,
                                            sc='conv_ds_2')
            print(net.get_shape())

            net = _depthwise_separable_conv(net,
                                            64,
                                            width_multiplier,
                                            downsample=True,
                                            sc='conv_ds_3')
            print(net.get_shape())
            net = _depthwise_separable_conv(net,
                                            64,
                                            width_multiplier,
                                            downsample=True,
                                            sc='conv_ds_4')

            print(net.get_shape())
            net = _depthwise_separable_conv(net,
                                            128,
                                            width_multiplier,
                                            downsample=True,
                                            sc='conv_ds_5')
            print(net.get_shape())
            #net = _depthwise_separable_conv(net, 64, width_multiplier, sc='conv_ds_4')
            #print(net.get_shape())
            net = slim.avg_pool2d(net, [3, 3], scope='avg_pool_15')
            print(net.get_shape())
            #net = _depthwise_separable_conv(net, 256, width_multiplier, downsample=True, sc='conv_ds_6')
            #print(net.get_shape())

            fc_flatten = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            print(fc_flatten.get_shape())

            #fc_flatten = slim.flatten(net)
            #print (fc_flatten.get_shape())
            fc1 = slim.fully_connected(fc_flatten,
                                       num_outputs=256 * width_multiplier,
                                       scope="fc1")
            print(fc1.get_shape())

            #cls_prob = slim.conv2d(net,num_outputs=2,kernel_size=[1,1],stride=1,scope='conv4_1',activation_fn=tf.nn.softmax)
            cls_prob = slim.fully_connected(fc1,
                                            2,
                                            activation_fn=tf.nn.softmax,
                                            scope='cls_fc')
            print(cls_prob.get_shape())

        #train
        if training:
            cls_loss = cls_ohem(cls_prob, label)
            accuracy = cal_accuracy(cls_prob, label)
            L2_loss = tf.add_n(slim.losses.get_regularization_losses())
            return cls_loss, L2_loss, accuracy
        else:
            return cls_prob  #,bbox_pred,landmark_pred
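As in Example #25, the loss and metric helpers are external; a plausible sketch of cal_accuracy (accuracy over samples whose label is a valid 0/1 class) for illustration:

import tensorflow as tf

def cal_accuracy(cls_prob, label):
    # Sketch only; the original helper is not part of this snippet.
    pred = tf.argmax(cls_prob, axis=1)
    label_int = tf.cast(label, tf.int64)
    valid_idx = tf.squeeze(tf.where(tf.greater_equal(label_int, 0)), axis=1)
    correct = tf.equal(tf.gather(label_int, valid_idx),
                       tf.gather(pred, valid_idx))
    return tf.reduce_mean(tf.cast(correct, tf.float32))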
Example #31
    def siamesenet(self, input, reuse=False, is_training=False):
        with tf.name_scope("model"):
            # Use the original image size
            with tf.variable_scope("conv1") as scope:
                net = slim.layers.conv2d(
                    input,
                    32, [7, 7],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)

                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_1")
            with tf.variable_scope("conv2") as scope:
                net = slim.layers.conv2d(
                    net,
                    64, [5, 5],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)
                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_2")
            with tf.variable_scope("conv3") as scope:
                net = slim.layers.conv2d(
                    net,
                    128, [3, 3],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)
                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_3")
            # with tf.variable_scope("conv4") as scope:
            #     net = slim.layers.conv2d(net, 256, [3, 3], activation_fn=tf.nn.relu, padding='SAME',
            #                                    weights_initializer=slim.xavier_initializer_conv2d(),
            #                                    scope=scope, reuse=reuse)

            with tf.variable_scope("conv4") as scope:
                net = slim.layers.conv2d(
                    net,
                    256, [3, 3],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)
                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_4")
                output_0 = slim.layers.flatten(net)
            # with tf.variable_scope("conv6") as scope:
            #     net = slim.layers.conv2d(net, 512, [3, 3], activation_fn=tf.nn.relu, padding='SAME',
            #                                    weights_initializer=slim.xavier_initializer_conv2d(),
            #                                    scope=scope, reuse=reuse)

            with tf.variable_scope("conv5") as scope:
                net = slim.layers.conv2d(
                    net,
                    512, [3, 3],
                    activation_fn=tf.nn.relu,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)
                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_5")
                output_1 = slim.flatten(net)

            with tf.variable_scope("conv6") as scope:
                net = slim.layers.conv2d(
                    net,
                    32, [3, 3],
                    activation_fn=None,
                    padding='SAME',
                    weights_initializer=slim.xavier_initializer_conv2d(),
                    scope=scope,
                    reuse=reuse)
                # net = slim.layers.max_pool2d(net, [2, 2], padding='SAME')
                net = tf.nn.max_pool(net, [1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME',
                                     name="conv_6")
            output_2 = slim.layers.flatten(net)

            net = tf.concat([output_0, output_1, output_2], 1, name='concat')

            # add hidden layer1
            hidden_Weights = tf.Variable(
                tf.truncated_normal([11136, 2048], stddev=0.1))
            hidden_biases = tf.Variable(tf.constant(0.1, shape=[2048]))
            net = slim.dropout(net, is_training=is_training, keep_prob=0.5)
            net = tf.nn.relu(tf.matmul(net, hidden_Weights) + hidden_biases,
                             name="hidden_layer1")

            with tf.variable_scope("dropout") as scope:
                net = slim.layers.dropout(net, keep_prob=0.7, scope=scope)

            # add hidden layer2
            hidden_Weights = tf.Variable(
                tf.truncated_normal([2048, 128], stddev=0.1))
            hidden_biases = tf.Variable(tf.constant(0.1, shape=[128]))
            net = slim.dropout(net, is_training=is_training, keep_prob=0.5)
            net = tf.nn.relu(tf.matmul(net, hidden_Weights) + hidden_biases,
                             name="final_out")

            net = tf.nn.l2_normalize(net, 1)  # keep the result; the op is not in-place

            output = tf.layers.dense(
                net,
                2048,
                activation=tf.nn.relu,
                kernel_initializer=slim.xavier_initializer_conv2d())
            output = tf.layers.dense(
                output,
                128,
                activation=tf.nn.relu,
                kernel_initializer=slim.xavier_initializer_conv2d())
            # with tf.variable_scope("local") as scope:
            #     output = tf.nn.relu(tf.matmul(flatten, variables_dict["hidden_Weights"]) +
            #                         variables_dict["hidden_biases"], name=scope.name)

        return output
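A usage sketch for the branch above (names are hypothetical; siamesenet is assumed to be a method of some model class). Weight sharing relies on reuse=True for the slim conv layers; the tf.Variable-based hidden layers would each need tf.get_variable to be shared as well:

import tensorflow as tf

# Hypothetical two-branch Siamese usage.
left = tf.placeholder(tf.float32, [None, 128, 128, 1])
right = tf.placeholder(tf.float32, [None, 128, 128, 1])
model = SiameseModel()  # hypothetical class exposing siamesenet()
emb_left = model.siamesenet(left, reuse=False, is_training=True)
emb_right = model.siamesenet(right, reuse=True, is_training=True)
# Contrastive-style squared Euclidean distance between the two embeddings.
distance = tf.reduce_sum(tf.square(emb_left - emb_right), axis=1)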