import math

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
# me_init is the project-local module providing the ReidXavierUniform and
# ReidKaimingUniform initializers used below.


class CombineMarginFCFp16(nn.Cell):
    """Fully connected classification head with a combined angular margin,
    computed in float16."""

    def __init__(self,
                 embedding_size=128,
                 classnum=270762,
                 s=32,
                 a=1.0,
                 m=0.3,
                 b=0.2):
        super(CombineMarginFCFp16, self).__init__()
        weight_shape = [classnum, embedding_size]
        weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
        self.weight = Parameter(weight_init, name='weight')

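        # The margin hyper-parameters are kept both as Python scalars and as
        # float16 Tensor constants for use inside the graph.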
        self.m = m
        self.s = s
        self.a = a
        self.b = b
        self.m_const = Tensor(self.m, dtype=mstype.float16)
        self.a_const = Tensor(self.a, dtype=mstype.float16)
        self.b_const = Tensor(self.b, dtype=mstype.float16)
        self.s_const = Tensor(self.s, dtype=mstype.float16)
        self.m_const_zero = Tensor(0, dtype=mstype.float16)
        self.a_const_one = Tensor(1, dtype=mstype.float16)
        self.normalize = P.L2Normalize(axis=1)
        self.fc = P.MatMul(transpose_b=True)

        self.onehot = P.OneHot()
        self.transpose = P.Transpose()
        self.acos = P.ACos()
        self.cos = P.Cos()
        self.cast = P.Cast()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
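
Only the constructor is shown above. For orientation, here is a minimal sketch of a matching construct method, assuming the usual combined-margin formulation s * (cos(a*theta + m) - b) applied to the target-class logits; the method body and exact margin algebra are assumptions for illustration, not the author's code:

    def construct(self, x, label):
        # cos(theta) logits: L2-normalized embeddings times L2-normalized
        # class weights (MatMul with transpose_b=True), in float16.
        weight = self.normalize(self.cast(self.weight, mstype.float16))
        x = self.normalize(self.cast(x, mstype.float16))
        cosine = self.fc(x, weight)
        # Recover the angle, apply the combined margin everywhere, then keep
        # the margined value only at the target class via one-hot masking.
        theta = self.acos(cosine)
        margin_cosine = self.cos(self.a_const * theta + self.m_const) - self.b_const
        one_hot = self.cast(self.onehot(label, self.weight.shape[0],
                                        self.on_value, self.off_value),
                            mstype.float16)
        output = one_hot * margin_cosine + (self.a_const_one - one_hot) * cosine
        # Scale the logits before the softmax cross-entropy loss.
        return self.s_const * output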
Example #2
class SphereNet_float32(nn.Cell):
    """SphereNet backbone; batch-norm layers are kept in float32."""

    def __init__(self, num_layers=36, feature_dim=128, shape=(96, 64)):
        super(SphereNet_float32, self).__init__()
        assert num_layers in [12, 20, 36, 64], 'SphereNet num_layers should be 12, 20, 36 or 64'
        if num_layers == 12:
            layers = [1, 1, 1, 1]
            filter_list = [3, 16, 32, 64, 128]
            fc_size = 128 * 6 * 4
        elif num_layers == 20:
            layers = [1, 2, 4, 1]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        elif num_layers == 36:
            layers = [2, 4, 4, 2]
            filter_list = [3, 32, 64, 128, 256]
            fc_size = 256 * 6 * 4
        elif num_layers == 64:
            layers = [3, 7, 16, 3]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        else:
            raise ValueError('sphere' + str(num_layers) +
                             ' is not supported (choose from sphere12, sphere20, sphere36, sphere64)')
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        block = BaseBlock

        self.layer1 = MakeLayer(block, filter_list[0], filter_list[1], layers[0], stride=2)
        self.layer2 = MakeLayer(block, filter_list[1], filter_list[2], layers[1], stride=2)
        self.layer3 = MakeLayer(block, filter_list[2], filter_list[3], layers[2], stride=2)
        self.layer4 = MakeLayer(block, filter_list[3], filter_list[4], layers[3], stride=2)

        self.fc = fc_with_initialize(fc_size, feature_dim)
        self.last_bn = nn.BatchNorm1d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.last_bn_sub = nn.BatchNorm2d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.cast = P.Cast()
        self.l2norm = P.L2Normalize(axis=1)

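        # Kaiming-uniform init (fan_out) where a cell carries a bias, with the
        # bias zeroed; Xavier-uniform init for bias-free cells.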
        for _, cell in self.cells_and_names():
            if isinstance(cell, (nn.Conv2d, nn.Dense)):
                if cell.bias is not None:
                    cell.weight.set_data(initializer(me_init.ReidKaimingUniform(a=math.sqrt(5), mode='fan_out'),
                                                     cell.weight.shape))
                    cell.bias.set_data(initializer('zeros', cell.bias.shape))
                else:
                    cell.weight.set_data(initializer(me_init.ReidXavierUniform(), cell.weight.shape))
        self.device_target = context.get_context('device_target')
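
A minimal construction sketch for orientation (hypothetical: the construct method is not shown above, and MakeLayer, BaseBlock, fc_with_initialize and me_init are project-local helpers assumed to be importable):

net = SphereNet_float32(num_layers=20, feature_dim=128, shape=(96, 64))
# With a 96x64 input, four stride-2 stages shrink the feature map to 6x4,
# which is why fc_size is channels * 6 * 4 in every branch above.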
Example #3
class CombineMarginFC(nn.Cell):
    """Combined-margin FC head; hyper-parameters are read from an args namespace."""

    def __init__(self, args):
        super(CombineMarginFC, self).__init__()
        weight_shape = [args.num_classes, args.emb_size]
        weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
        self.weight = Parameter(weight_init, name='weight')
        self.m = args.margin_m
        self.s = args.margin_s
        self.a = args.margin_a
        self.b = args.margin_b
        self.m_const = Tensor(self.m, dtype=mstype.float16)
        self.a_const = Tensor(self.a, dtype=mstype.float16)
        self.b_const = Tensor(self.b, dtype=mstype.float16)
        self.s_const = Tensor(self.s, dtype=mstype.float16)
        self.m_const_zero = Tensor(0, dtype=mstype.float16)
        self.a_const_one = Tensor(1, dtype=mstype.float16)
        self.normalize = P.L2Normalize(axis=1)
        self.fc = P.MatMul(transpose_b=True)
        self.onehot = P.OneHot()
        self.transpose = P.Transpose()
        self.acos = P.ACos()
        self.cos = P.Cos()
        self.cast = P.Cast()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
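
CombineMarginFC reads its hyper-parameters from an args object rather than keyword arguments, so any namespace exposing the attributes accessed above will do. A hypothetical construction sketch (the values mirror the CombineMarginFCFp16 defaults and are illustrative only):

from types import SimpleNamespace

args = SimpleNamespace(num_classes=270762, emb_size=128,
                       margin_m=0.3, margin_s=32.0,
                       margin_a=1.0, margin_b=0.2)
head = CombineMarginFC(args)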