Example #1
def subsample(inputs, factor, scope=None):
  """Subsamples the input along its spatial (depth, height, width) dimensions.

  Args:
    inputs: A `Tensor` of size [batch, depth_in, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, depth_out, height_out, width_out,
      channels] with the input, either intact (if factor == 1) or subsampled
      (if factor > 1).
  """
  if factor == 1:
    return inputs
  # 2-D variant: layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
  return layers.pool(inputs, [2, 2, 2], stride=factor, scope=scope,
                     data_format='NDHWC', pooling_type='MAX')
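A minimal usage sketch, assuming `layers` is `tf.contrib.layers` and a 5-D NDHWC input; the shapes and factor below are illustrative assumptions, not taken from the original code:

import tensorflow as tf
from tensorflow.contrib import layers

# Illustrative 5-D video batch: [batch, depth, height, width, channels].
clips = tf.placeholder(tf.float32, [None, 16, 112, 112, 64])
unchanged = subsample(clips, factor=1)             # returned as-is
halved = subsample(clips, factor=2, scope='down')  # 2x2x2 max pool, stride 2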
Example #2
def teacher(input_images, 
            keep_prob,
            is_training=True,
            weight_decay=0.00004,
            batch_norm_decay=0.997,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):     
        net, endpoints = mobilenet_v2(inputs=input_images,
                                num_classes=FLAGS.num_class,
                                is_training=True,
                                spatial_squeeze=True,
                                scope='mobilenet_v2')
        
        base_var_list = slim.get_model_variables()
        # Drop the last two model variables (the classifier head) so the
        # list covers only the shared backbone.
        for _ in range(2):
            base_var_list.pop()

        # feature & attention
        t_g0 = endpoints["InvertedResidual_{}_{}".format(64, 3)]
        t_at0 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g0), -1), axis=0, name='t_at0')
        t_g1 = endpoints["InvertedResidual_{}_{}".format(96, 2)]
        t_at1 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g1), -1), axis=0, name='t_at1')
        part_feature = endpoints["InvertedResidual_{}_{}".format(160, 2)]
        t_at2 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(part_feature), -1), axis=0, name='t_at2')
        t_p_o = endpoints["InvertedResidual_{}_{}".format(320, 0)]
        t_at3 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_p_o), -1), axis=0, name='t_at3')
        object_feature = endpoints["Conv2d_8"]
        t_at4 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(object_feature), -1), axis=0, name='t_at4')
       
        t_g = (t_g0, t_g1, part_feature, object_feature)
        t_at = (t_at0, t_at1, t_at2, t_at3, t_at4)
        
        fc_obj = slim.max_pool2d(object_feature, (6, 8), scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }
        
        fc_obj = slim.conv2d(fc_obj,
                            M,
                            [1, 1],
                            activation_fn=None,    
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(part_feature,
                            M * k,          
                            [1, 1],         
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,                               
                            normalizer_params=batch_norm_params,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay)
                            )

        fc_part = slim.max_pool2d(fc_part, (6, 8), scope="GMP2")
        ft_list = tf.split(fc_part,
                        num_or_size_splits=FLAGS.num_class,
                        axis=-1)            
        cls_list = []
        for i in range(M):  # assumes M == FLAGS.num_class
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            # Average over the k (here 10) discriminative filters per class.
            cls = layers_lib.pool(ft, [1, 10], "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1)  # cross-channel pooling, shape (N, M)

        fc_part = slim.conv2d(fc_part,
                            FLAGS.num_class,
                            [1, 1],
                            activation_fn=None,                         
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        t_var_list = slim.get_model_variables()
    return t_g, t_at, fc_obj, fc_part, fc_ccp, base_var_list, t_var_list
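A hedged sketch of building this teacher graph; `M`, `k`, `FLAGS`, and `mobilenet_v2` are assumed module-level definitions as in the snippet above, and the input resolution is illustrative (chosen so the 6x8 pooled map is plausible):

images = tf.placeholder(tf.float32, [None, 192, 256, 3])  # e.g. 192x256 RGB
keep_prob = tf.placeholder(tf.float32, [])
t_g, t_at, fc_obj, fc_part, fc_ccp, base_vars, t_vars = teacher(images, keep_prob)
# fc_obj/fc_part/fc_ccp are per-image logits; t_g/t_at can feed distillation losses.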
Example #3
def student(input_images, 
            keep_prob,
            is_training=True,
            weight_decay=0.00004,
            batch_norm_decay=0.997,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Student_model"):
        net, endpoints = studentnet_v2.studentnet_v2(inputs=input_images,
                                num_classes=FLAGS.num_class,
                                is_training=True,
                                scope='student')
        
        # feature & attention
        s_g0 = endpoints["InvertedResidual_{}_{}".format(64, 1)]
        s_at0 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(s_g0), -1), axis=0, name='s_at0')
        s_g1 = endpoints["InvertedResidual_{}_{}".format(96, 1)]
        s_at1 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(s_g1), -1), axis=0, name='s_at1')
        s_g2 = endpoints["InvertedResidual_{}_{}".format(160, 1)]
        s_at2 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(s_g2), -1), axis=0, name='s_at2')
        s_g3 = endpoints["InvertedResidual_{}_{}".format(256, 0)]
        s_at3 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(s_g3), -1), axis=0, name='s_at3')
        s_g4 = endpoints["Conv2d_8"]
        s_at4 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(s_g4), -1), axis=0, name='s_at4')
        
        s_g = (s_g0, s_g1, s_g2, s_g4)
        s_at = (s_at0, s_at1, s_at2, s_at3, s_at4)
    
        s_part_feature = s_g2
        s_object_feature = s_g4
        
        base_var_list = slim.get_model_variables('Student_model/student')

        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }   
        # obj 
        s_fc_obj = slim.max_pool2d(s_object_feature, (6, 8), scope="s_GMP1")       
        s_fc_obj = slim.conv2d(s_fc_obj,
                            num_outputs=M,
                            kernel_size=[1, 1],
                            activation_fn=None,                       
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope='s_fc_obj')
        s_fc_obj = tf.nn.dropout(s_fc_obj, keep_prob=keep_prob)
        s_fc_obj = layers.flatten(s_fc_obj)

        
        s_fc_part = slim.conv2d(s_part_feature,
                            M * k,                
                            [1, 1],               
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,                               
                            normalizer_params=batch_norm_params,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay)
                            )
        s_fc_part = slim.max_pool2d(s_fc_part, (6, 8), scope="s_GMP2")
        s_ft_list = tf.split(s_fc_part,
                        num_or_size_splits=FLAGS.num_class,
                        axis=-1)                  
        s_cls_list = []
        for i in range(M):  # assumes M == FLAGS.num_class
            s_ft = tf.transpose(s_ft_list[i], [0, 1, 3, 2])
            # Average over the k (here 10) discriminative filters per class.
            s_cls = layers_lib.pool(s_ft, [1, 10], "AVG")
            s_cls = layers.flatten(s_cls)
            s_cls_list.append(s_cls)
        s_fc_ccp = tf.concat(s_cls_list, axis=-1)  # cross-channel pooling, shape (N, M)

        s_fc_part = slim.conv2d(s_fc_part,
                            FLAGS.num_class,
                            [1, 1],
                            activation_fn=None,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope="s_fc_part")
        s_fc_part = tf.nn.dropout(s_fc_part, keep_prob=keep_prob)
        s_fc_part = slim.flatten(s_fc_part)

        s_fc_list = []
        s_var_list = slim.get_model_variables('Student_model')
        # Collect the head-only variables (everything outside the backbone).
        for var in s_var_list:
            if var not in base_var_list:
                s_fc_list.append(var)
    return s_g, s_at, s_fc_obj, s_fc_part, s_fc_ccp, base_var_list, s_var_list
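The teacher and student above return matching tuples of L2-normalized attention maps, which suggests an attention-transfer objective. A minimal sketch of such a loss (an assumption, not taken from the original code), where `t_at` and `s_at` are the tuples returned by the two functions and each pair is presumed shape-compatible:

def attention_transfer_loss(t_at, s_at):
    # Mean squared error between teacher and student attention maps,
    # summed over the (assumed shape-compatible) pairs.
    losses = [tf.reduce_mean(tf.square(t - s)) for t, s in zip(t_at, s_at)]
    return tf.add_n(losses)

at_loss = attention_transfer_loss(t_at, s_at)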
Example #4
def teacher(input_images,
            keep_prob,
            lambda_decay=FLAGS.lambda_decay,
            is_training=True,
            weight_decay=0.00004,
            batch_norm_decay=0.99,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):  
        net, endpoints = resnet.resnet_v2(inputs=input_images,
                                lambda_decay=lambda_decay,
                                num_classes=FLAGS.num_class,
                                is_training=True,                               
                                scope='resnet_v2_50')   
        # co_trained layers
        var_scope = 'Teacher_model/resnet_v2_50/'
        co_list_0 = slim.get_model_variables(var_scope + 'Conv2d_0')
        t_co_list = co_list_0
        
        base_var_list = slim.get_variables()
        lambda_c_list = slim.get_variables_by_name('lambda_c')       
        lambda_b_list = slim.get_variables_by_name('lambda_b')
        t_lambda_list = lambda_c_list + lambda_b_list
        t_net_var_list = []
        for v in base_var_list:
            if v not in t_co_list and v not in t_lambda_list:
                t_net_var_list.append(v)
        # feature & attention
        t_g0 = endpoints["InvertedResidual_{}_{}".format(256, 2)]
        t_at0 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g0), -1), axis=0, name='t_at0')
        t_g1 = endpoints["InvertedResidual_{}_{}".format(512, 3)]
        t_at1 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g1), -1), axis=0, name='t_at1')
        part_feature = endpoints["InvertedResidual_{}_{}".format(1024, 5)]
        t_at2 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(part_feature), -1), axis=0, name='t_at2')
        object_feature = endpoints["InvertedResidual_{}_{}".format(2048, 2)]
        t_at3 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(object_feature), -1), axis=0, name='t_at3')
        t_g = (t_g0, t_g1, part_feature, object_feature)
        t_at = (t_at0, t_at1, t_at2, t_at3)
        
        fc_obj = slim.max_pool2d(object_feature, (6, 8), scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }
        
        fc_obj = slim.conv2d(fc_obj,
                            M,
                            [1, 1],
                            activation_fn=None,    
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(part_feature,
                            M * k,          # number of conv filters
                            [1, 1],         # kernel height and width
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,                               # batch norm as the normalizer
                            normalizer_params=batch_norm_params,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay)
                            )
        fc_part = slim.max_pool2d(fc_part, (12, 16), scope="GMP2")
        ft_list = tf.split(fc_part,
                        num_or_size_splits=FLAGS.num_class,
                        axis=-1)            # split along the last (channel) dimension
        
        cls_list = []
        for i in range(M):  # assumes M == FLAGS.num_class
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            # Average over the k (here 10) discriminative filters per class.
            cls = layers_lib.pool(ft, [1, 10], "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1)  # cross-channel pooling, shape (N, M)

        fc_part = slim.conv2d(fc_part,
                            FLAGS.num_class,
                            [1, 1],
                            activation_fn=None,                         
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        
        t_var_list = slim.get_model_variables()
        t_fc_var_list = []
        for var in t_var_list:
            if var not in base_var_list:
                t_fc_var_list.append(var)
                
    return t_g, t_at, fc_obj, fc_part, fc_ccp, t_co_list, t_net_var_list, t_fc_var_list, t_lambda_list, t_var_list
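A hedged sketch of how the separate variable groups returned here might be trained with different learning rates; the optimizer choice and rates are assumptions, and a task loss is presumed to have been registered via tf.losses beforehand:

images = tf.placeholder(tf.float32, [None, 384, 512, 3])  # illustrative shape
keep_prob = tf.placeholder(tf.float32, [])
(t_g, t_at, fc_obj, fc_part, fc_ccp, t_co_list, t_net_var_list,
 t_fc_var_list, t_lambda_list, t_var_list) = teacher(images, keep_prob)

total_loss = tf.losses.get_total_loss()
opt_net = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9)
opt_fc = tf.train.MomentumOptimizer(learning_rate=1e-2, momentum=0.9)
# Backbone variables get a smaller step than the freshly added heads.
train_op = tf.group(
    opt_net.minimize(total_loss, var_list=t_net_var_list),
    opt_fc.minimize(total_loss, var_list=t_fc_var_list + t_lambda_list))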
Example #5
def dfb(input_images, 
            keep_prob,
            is_training=True,
            weight_decay=5e-5,
            batch_norm_decay=0.99,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):     
        net, endpoints = resnet_v2(inputs=input_images,
                                num_classes=M,
                                is_training=True,
                                scope='resnet_v2')
        
        base_var_list = slim.get_model_variables('Teacher_model/resnet_v2')

        part_feature = endpoints["InvertedResidual_{}_{}".format(1024, 3)]
        object_feature = endpoints["InvertedResidual_{}_{}".format(1024, 5)]

        object_feature_h = object_feature.get_shape().as_list()[1]
        object_feature_w = object_feature.get_shape().as_list()[2]
        fc_obj = slim.max_pool2d(object_feature, (object_feature_h, object_feature_w), scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }

        fc_obj = slim.conv2d(fc_obj,
                            M,
                            [1, 1],
                            activation_fn=None,    
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(part_feature,
                            M * k,          # number of conv filters
                            [1, 1],         # kernel height and width
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,                               # batch norm as the normalizer
                            normalizer_params=batch_norm_params,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay)
                            )
        fc_part_h = fc_part.get_shape().as_list()[1]
        fc_part_w = fc_part.get_shape().as_list()[2]
        fc_part = slim.max_pool2d(fc_part, (fc_part_h, fc_part_w), scope="GMP2")
        ft_list = tf.split(fc_part,
                        num_or_size_splits=M,
                        axis=-1)            # split along the last (channel) dimension
        cls_list = []
        for i in range(M):
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            cls = layers_lib.pool(ft,
                                [1, k],
                                "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1)  # cross-channel pooling, shape (N, M)

        fc_part = slim.conv2d(fc_part,
                            M,
                            [1, 1],
                            activation_fn=None,                         
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        t_var_list = slim.get_model_variables()
    return fc_obj, fc_part, fc_ccp, base_var_list, t_var_list
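A minimal sketch wiring the three heads of dfb() into a combined cross-entropy objective, in the style of DFL-CNN models with object, part, and cross-channel-pooling branches; the placeholders, input size, and the 0.1 weight are assumptions:

images = tf.placeholder(tf.float32, [None, 448, 448, 3])
keep_prob = tf.placeholder(tf.float32, [])
labels = tf.placeholder(tf.int64, [None])
fc_obj, fc_part, fc_ccp, base_vars, t_vars = dfb(images, keep_prob)
# All three heads emit M-way logits, so they share the same label tensor.
loss = (tf.losses.sparse_softmax_cross_entropy(labels, fc_obj)
        + tf.losses.sparse_softmax_cross_entropy(labels, fc_part)
        + 0.1 * tf.losses.sparse_softmax_cross_entropy(labels, fc_ccp))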
Example #6
def teacher(input_images,
            keep_prob,
            is_training=True,
            weight_decay=5e-5,
            batch_norm_decay=0.99,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):
        net, endpoints = resnet_v2(inputs=input_images,
                                   num_classes=M,
                                   is_training=True,
                                   scope='resnet_v2')
        # co_trained layers
        var_scope = 'Teacher_model/resnet_v2/'
        co_list_0 = slim.get_model_variables(var_scope + 'Conv2d_0')
        t_co_list = co_list_0

        base_var_list = slim.get_model_variables('Teacher_model/resnet_v2')

        # feature & attention
        t_g0 = endpoints["InvertedResidual_{}_{}".format(256, 2)]
        t_at0 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g0), -1),
                                   axis=0,
                                   name='t_at0')
        t_g1 = endpoints["InvertedResidual_{}_{}".format(512, 3)]
        t_at1 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g1), -1),
                                   axis=0,
                                   name='t_at1')
        part_feature = endpoints["InvertedResidual_{}_{}".format(1024, 3)]
        t_at2 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(part_feature), -1),
                                   axis=0,
                                   name='t_at2')
        t_g3 = endpoints["InvertedResidual_{}_{}".format(1024, 4)]
        t_at3 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g3), -1),
                                   axis=0,
                                   name='t_at3')
        object_feature = endpoints["InvertedResidual_{}_{}".format(1024, 5)]
        t_at4 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(object_feature),
                                                 -1),
                                   axis=0,
                                   name='t_at4')

        t_g = (t_g0, t_g1, part_feature, object_feature)
        t_at = (t_at0, t_at1, t_at2, t_at3, t_at4)

        object_feature_h = object_feature.get_shape().as_list()[1]
        object_feature_w = object_feature.get_shape().as_list()[2]
        fc_obj = slim.max_pool2d(object_feature,
                                 (object_feature_h, object_feature_w),
                                 scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }

        fc_obj = slim.conv2d(
            fc_obj,
            M, [1, 1],
            activation_fn=None,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(
            part_feature,
            M * k,  # number of conv filters
            [1, 1],  # kernel height and width
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,  # batch norm as the normalizer
            normalizer_params=batch_norm_params,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        fc_part_h = fc_part.get_shape().as_list()[1]
        fc_part_w = fc_part.get_shape().as_list()[2]
        fc_part = slim.max_pool2d(fc_part, (fc_part_h, fc_part_w),
                                  scope="GMP2")
        ft_list = tf.split(fc_part, num_or_size_splits=M, axis=-1)  # split along the last (channel) dimension
        cls_list = []
        for i in range(M):
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            cls = layers_lib.pool(ft, [1, k], "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1)  # cross-channel pooling, shape (N, M)

        fc_part = slim.conv2d(
            fc_part,
            M, [1, 1],
            activation_fn=None,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        t_var_list = slim.get_model_variables()
    return t_co_list, t_g, t_at, fc_obj, fc_part, fc_ccp, base_var_list, t_var_list
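One hedged possibility for the extra t_co_list return is syncing co-trained weights into a student network built elsewhere; the student scope below is hypothetical and assumes identically shaped, identically ordered variables:

# Hypothetical scope; depends on how the student graph names its first conv.
s_co_list = slim.get_model_variables('Student_model/student/Conv2d_0')
copy_ops = [tf.assign(s, t) for s, t in zip(s_co_list, t_co_list)]
copy_op = tf.group(*copy_ops)  # run once after initialization to sync weights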