Example #1
    def resnet_layer(self, x, in_channel, out_channel, stride, dim_match,
                     block_name):
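        # Pre-activation residual unit: BN -> conv3x3 -> BN -> PReLU ->
        # conv3x3 -> BN, summed with an identity (or projected) shortcut.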
        conv_name_base = 'res' + block_name + '_branch'
        bn_name_base = 'bn' + block_name + '_branch'
        prelu_name_base = 'prelu' + block_name + '_branch'

        short_cut = x
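        # When input and output shapes differ, project the shortcut with a
        # strided 1x1 convolution so it can be added to the residual branch.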
        if not dim_match:
            short_cut = common_layers.conv2d(short_cut,
                                             conv_name_base + '1',
                                             filter_size=(1, 1),
                                             in_channels=in_channel,
                                             out_channels=out_channel,
                                             strides=stride,
                                             bias=False)
            short_cut = tf.layers.batch_normalization(short_cut,
                                                      axis=-1,
                                                      momentum=0.9,
                                                      training=self.train,
                                                      name=bn_name_base + '1')

        x = tf.layers.batch_normalization(x,
                                          axis=-1,
                                          momentum=0.9,
                                          training=self.train,
                                          name=bn_name_base + '2a')
        x = common_layers.conv2d(x,
                                 conv_name_base + '2a', (3, 3),
                                 in_channel,
                                 out_channel, [1, 1],
                                 bias=False)
        x = tf.layers.batch_normalization(x,
                                          axis=-1,
                                          momentum=0.9,
                                          training=self.train,
                                          name=bn_name_base + '2b')
        x = self.prelu_layer(x, name=prelu_name_base + '2b')
        x = common_layers.conv2d(x,
                                 conv_name_base + '2b', (3, 3),
                                 out_channel,
                                 out_channel,
                                 stride,
                                 bias=False)
        res = tf.layers.batch_normalization(x,
                                            axis=-1,
                                            momentum=0.9,
                                            training=self.train,
                                            name=bn_name_base + '2c')

        return tf.add(short_cut, res, name='add_' + block_name)
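
This example (and Example #2 below) calls a project-local `common_layers.conv2d` helper that is not shown. A minimal sketch of what such a wrapper might look like, assuming it builds a variable-scoped NHWC convolution from an explicit filter shape; the name, signature, and 'SAME' padding are assumptions, not confirmed by the source:

def conv2d(x, name, filter_size, in_channels, out_channels, strides, bias=True):
    """Hypothetical reconstruction of the common_layers.conv2d helper."""
    with tf.variable_scope(name):
        kernel = tf.get_variable(
            'kernel',
            shape=[filter_size[0], filter_size[1], in_channels, out_channels],
            initializer=tf.glorot_uniform_initializer())
        # Accept both a scalar stride and a [stride_h, stride_w] pair.
        if not isinstance(strides, (list, tuple)):
            strides = [strides, strides]
        x = tf.nn.conv2d(x, kernel,
                         strides=[1, strides[0], strides[1], 1],
                         padding='SAME')
        if bias:
            b = tf.get_variable('bias', shape=[out_channels],
                                initializer=tf.zeros_initializer())
            x = tf.nn.bias_add(x, b)
    return x
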
Example #2
    def se_resnet_layer(self, x, in_channel, out_channel, stride, dim_match,
                        block_name):
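        # Same pre-activation residual unit as above, with a squeeze-and-
        # excitation module applied to the residual branch before the add.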
        conv_name_base = 'res_' + block_name + '_branch'
        bn_name_base = 'bn_' + block_name + '_branch'
        prelu_name_base = 'prelu_' + block_name + '_branch'
        se_name_base = 'se_' + block_name + '_branch'

        short_cut = x
        if not dim_match:
            short_cut = common_layers.conv2d(short_cut, conv_name_base + '1',
                                             (1, 1), in_channel, out_channel,
                                             stride)
            short_cut = tf.layers.batch_normalization(short_cut,
                                                      axis=-1,
                                                      momentum=0.9,
                                                      training=self.train,
                                                      name=bn_name_base + '1')
        x = tf.layers.batch_normalization(x,
                                          axis=-1,
                                          momentum=0.9,
                                          training=self.train,
                                          name=bn_name_base + '2a')
        x = common_layers.conv2d(x, conv_name_base + '2a', (3, 3), in_channel,
                                 out_channel, [1, 1])
        x = tf.layers.batch_normalization(x,
                                          axis=-1,
                                          momentum=0.9,
                                          training=self.train,
                                          name=bn_name_base + '2b')
        x = self.prelu_layer(x, name=prelu_name_base + '2b')
        x = common_layers.conv2d(x, conv_name_base + '2b', (3, 3), out_channel,
                                 out_channel, stride)
        x = tf.layers.batch_normalization(x,
                                          axis=-1,
                                          momentum=0.9,
                                          training=self.train,
                                          name=bn_name_base + '2c')
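        # Squeeze-and-excitation with reduction ratio 16 recalibrates the
        # channel responses of the residual branch before it joins the shortcut.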
        res = self.se_moudle(x, out_channel, 16, name=se_name_base)

        return tf.add(short_cut, res, name='add_' + block_name)
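
`se_moudle` (spelling kept as in the source) is defined elsewhere in the class. A minimal sketch of the standard squeeze-and-excitation block such a method would typically implement; the body below is an assumption, not the author's code:

    def se_moudle(self, x, channels, reduction, name):
        with tf.variable_scope(name):
            # Squeeze: global average pooling over the spatial axes (NHWC).
            w = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
            # Excitation: bottleneck MLP producing a sigmoid gate per channel.
            w = tf.layers.dense(w, channels // reduction,
                                activation=tf.nn.relu, name='fc1')
            w = tf.layers.dense(w, channels,
                                activation=tf.nn.sigmoid, name='fc2')
        # Scale: reweight each channel of the input feature map.
        return x * w
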
Example #3
import math

import tensorflow as tf


def arcface_loss(embedding,
                 labels,
                 out_num,
                 weights=None,
                 s=64.,
                 m=0.5,
                 limit_to_pi=True):
    '''
    https://github.com/auroua/InsightFace_TF/blob/master/losses/face_losses.py
    :param embedding: the input embedding vectors
    :param labels: the input labels, with a shape such as (batch_size, 1)
    :param out_num: the number of output classes
    :param weights: a tf.Variable with shape (embedding.shape[-1], out_num),
                    or None to create one internally. Default: None
    :param s: the scale factor applied to the logits, default is 64
    :param m: the margin value, default is 0.5
    :return: the final calculated output; it is fed directly into tf.nn.softmax
    '''
    cos_m = math.cos(m)
    sin_m = math.sin(m)
    mm = sin_m * m  # issue 1
    threshold = math.cos(math.pi - m)
    with tf.variable_scope('arcface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keepdims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        if weights is None:
            weights = tf.get_variable(
                name='weights',
                shape=[embedding.shape[-1].value, out_num],
                initializer=tf.glorot_uniform_initializer())
        weights_norm = tf.norm(weights, axis=0, keepdims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos(theta+m)
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t2 = tf.square(cos_t, name='cos_2')
        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')
        sin_t = tf.sqrt(sin_t2, name='sin_t')
        cos_mt = s * tf.subtract(tf.multiply(cos_t, cos_m),
                                 tf.multiply(sin_t, sin_m),
                                 name='cos_mt')

        if limit_to_pi:
            # this condition controls the theta+m should in range [0, pi]
            #      0<=theta+m<=pi
            #     -m<=theta<=pi-m
            cond_v = cos_t - threshold
            cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)

            keep_val = s * (cos_t - mm)
            cos_mt_temp = tf.where(cond, cos_mt, keep_val)
        else:
            cos_mt_temp = cos_mt

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        # mask = tf.squeeze(mask, 1)
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        s_cos_t = tf.multiply(s, cos_t, name='scalar_cos_t')

        output = tf.add(tf.multiply(s_cos_t, inv_mask),
                        tf.multiply(cos_mt_temp, mask),
                        name='arcface_loss_output')
    return output
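
Note that despite its name, `arcface_loss` returns scaled logits rather than a scalar loss. A typical (assumed) way to use it, with `labels` as a rank-1 int tensor of shape (batch_size,) so that the tf.one_hot mask stays 2-D:

logits = arcface_loss(embedding, labels, out_num=num_classes)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))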