Example #1
    def __init__(self,
                 in_channels=3,
                 classes=1000,
                 k=192,
                 l=224,
                 m=256,
                 n=384,
                 is_train=True):
        super(Inceptionv4, self).__init__()
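        # backbone: Stem -> 4x InceptionA -> ReductionA -> 7x InceptionB -> ReductionB -> 3x InceptionC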
        blocks = []
        blocks.append(Stem(in_channels))
        for _ in range(4):
            blocks.append(InceptionA(384))
        blocks.append(ReductionA(384, k, l, m, n))
        for _ in range(7):
            blocks.append(InceptionB(1024))
        blocks.append(ReductionB(1024))
        for _ in range(3):
            blocks.append(InceptionC(1536))
        self.features = nn.SequentialCell(blocks)

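        # classification head: global average pooling, then a Dense + BatchNorm layer with log-softmax activation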
        self.avgpool = P.ReduceMean(keep_dims=False)
        self.softmax = nn.DenseBnAct(1536,
                                     classes,
                                     weight_init="XavierUniform",
                                     has_bias=True,
                                     has_bn=True,
                                     activation="logsoftmax")

        if is_train:
            self.dropout = nn.Dropout(0.20)
        else:
            self.dropout = nn.Dropout(1)
        self.bn0 = nn.BatchNorm1d(1536, eps=0.001, momentum=0.1)
Example #2
 def __init__(self):
     super(BNReshapeDenseBNNet, self).__init__()
     self.batch_norm = bn_with_initialize(2)
     self.reshape = P.Reshape()
     self.cast = P.Cast()
     self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
     self.fc = fc_with_initialize(2 * 32 * 32, 512)
Example #3
 def __init__(self):
     super(BNReshapeDenseBNNet, self).__init__()
     self.batch_norm = bn_with_initialize(2)
     self.reshape = P.Reshape()
     self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
     self.fc = fc_with_initialize(2 * 32 * 32, 512)
     self.loss = SemiAutoOneHotNet(args=Args(), strategy=StrategyBatch())
Example #4
def test_bn1d():
    """ut of nn.BatchNorm1d"""
    bn = nn.BatchNorm1d(3)
    input_data = Tensor(np.random.randint(0, 1, [1, 3]).astype(np.float32))
    output = bn(input_data)
    output_np = output.asnumpy()
    assert isinstance(output_np[0][0], (np.float32, np.float64))
Example #5
 def __init__(self, num_class=10):  # 10 classes in total; the input images have a single channel
     super(ForwardFashionRegularization, self).__init__()
     self.num_class = num_class
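     # three 3x3 valid-padded conv layers, 2x2 max pooling, dropout, then an FC -> BatchNorm1d -> FC classifier head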
     self.conv1 = nn.Conv2d(1,
                            32,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.conv2 = nn.Conv2d(32,
                            64,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.conv3 = nn.Conv2d(64,
                            128,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.maxpool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.relu = nn.ReLU()
     self.dropout = nn.Dropout()
     self.flatten = nn.Flatten()
     self.fc1 = nn.Dense(3200, 128)
     self.bn = nn.BatchNorm1d(128)
     self.fc2 = nn.Dense(128, self.num_class)
Example #6
def _bn1_kaiming(channel):
    return nn.BatchNorm1d(channel,
                          eps=1e-4,
                          momentum=0.9,
                          gamma_init=1,
                          beta_init=0,
                          moving_mean_init=0,
                          moving_var_init=1,
                          use_batch_statistics=None)
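A minimal usage sketch of the factory above (the batch size and feature width are illustrative assumptions; it also assumes _bn1_kaiming is defined in a module that already imports mindspore.nn as nn):

# hypothetical usage of the _bn1_kaiming factory above; shapes are illustrative
import numpy as np
from mindspore import Tensor

bn = _bn1_kaiming(128)                                    # BatchNorm1d over 128 features
x = Tensor(np.random.randn(4, 128).astype(np.float32))   # (batch, features) float32 input
y = bn(x)                                                 # output keeps the (4, 128) shape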
Example #7
    def __init__(self, flat_dim, fc_dim, attri_num_list):
        super(AttriHead, self).__init__()
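        # three parallel attribute branches, each built from FC, ReLU, BatchNorm1d, an attribute FC, ReLU, BatchNorm1d and Softmax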
        self.fc1 = fc_with_initialize(flat_dim, fc_dim)
        self.fc1_relu = P.ReLU()
        self.fc1_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc1 = fc_with_initialize(fc_dim, attri_num_list[0])
        self.attri_fc1_relu = P.ReLU()
        self.attri_bn1 = nn.BatchNorm1d(attri_num_list[0], affine=False)
        self.softmax1 = P.Softmax()

        self.fc2 = fc_with_initialize(flat_dim, fc_dim)
        self.fc2_relu = P.ReLU()
        self.fc2_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc2 = fc_with_initialize(fc_dim, attri_num_list[1])
        self.attri_fc2_relu = P.ReLU()
        self.attri_bn2 = nn.BatchNorm1d(attri_num_list[1], affine=False)
        self.softmax2 = P.Softmax()

        self.fc3 = fc_with_initialize(flat_dim, fc_dim)
        self.fc3_relu = P.ReLU()
        self.fc3_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc3 = fc_with_initialize(fc_dim, attri_num_list[2])
        self.attri_fc3_relu = P.ReLU()
        self.attri_bn3 = nn.BatchNorm1d(attri_num_list[2], affine=False)
        self.softmax3 = P.Softmax()
Example #8
 def __init__(self, mul_size, test_size, strategy=None, strategy2=None):
     super().__init__()
     mul_np = np.full(mul_size, 0.5, dtype=np.float32)
     floordiv_np = np.full(test_size, 0.1, dtype=np.float32)
     self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
     self.floordiv_weight = Parameter(Tensor(floordiv_np),
                                      name="floordiv_weight")
     self.mul = TwoInputBpropOperator()
     self.floor_div = P.FloorDiv()
     self.bn = nn.BatchNorm1d(num_features=96)
     if strategy is not None:
         self.mul.op.shard(strategy2)
         self.mul.bp.shard(strategy2)
         self.floor_div.shard(strategy)
Example #9
    def __init__(self, num_layers=36, feature_dim=128, shape=(96, 64)):
        super(SphereNet_float32, self).__init__()
        assert num_layers in [12, 20, 36, 64], 'SphereNet num_layers should be 12, 20, 36 or 64'
        if num_layers == 12:
            layers = [1, 1, 1, 1]
            filter_list = [3, 16, 32, 64, 128]
            fc_size = 128 * 6 * 4
        elif num_layers == 20:
            layers = [1, 2, 4, 1]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        elif num_layers == 36:
            layers = [2, 4, 4, 2]
            filter_list = [3, 32, 64, 128, 256]
            fc_size = 256 * 6 * 4
        elif num_layers == 64:
            layers = [3, 7, 16, 3]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        else:
            raise ValueError('sphere' + str(num_layers) + " IS NOT SUPPORTED! (sphere12, sphere20, sphere36 or sphere64)")
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        block = BaseBlock

        self.layer1 = MakeLayer(block, filter_list[0], filter_list[1], layers[0], stride=2)
        self.layer2 = MakeLayer(block, filter_list[1], filter_list[2], layers[1], stride=2)
        self.layer3 = MakeLayer(block, filter_list[2], filter_list[3], layers[2], stride=2)
        self.layer4 = MakeLayer(block, filter_list[3], filter_list[4], layers[3], stride=2)

        self.fc = fc_with_initialize(fc_size, feature_dim)
        self.last_bn = nn.BatchNorm1d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.last_bn_sub = nn.BatchNorm2d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.cast = P.Cast()
        self.l2norm = P.L2Normalize(axis=1)

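        # weight initialization: Kaiming uniform with zero bias for Conv2d/Dense cells that have a bias, Xavier uniform otherwise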
        for _, cell in self.cells_and_names():
            if isinstance(cell, (nn.Conv2d, nn.Dense)):
                if cell.bias is not None:
                    cell.weight.set_data(initializer(me_init.ReidKaimingUniform(a=math.sqrt(5), mode='fan_out'),
                                                     cell.weight.shape))
                    cell.bias.set_data(initializer('zeros', cell.bias.shape))
                else:
                    cell.weight.set_data(initializer(me_init.ReidXavierUniform(), cell.weight.shape))
        self.device_target = context.get_context('device_target')
Example #10
    def __init__(self,
                 low_dim,
                 class_num=200,
                 drop=0.2,
                 part=0,
                 alpha=0.2,
                 nheads=4,
                 arch="resnet50"):
        super(embed_net, self).__init__()
        # print("class_num is :", class_num)
        self.thermal_module = thermal_module(arch=arch)
        self.visible_module = visible_module(arch=arch)
        self.base_resnet = base_resnet(arch=arch)
        pool_dim = 2048
        self.dropout = drop
        self.part = part

        self.l2norm = Normalize(2)
        self.bottleneck = nn.BatchNorm1d(num_features=pool_dim)
        self.bottleneck.requires_grad = False  # possibly problematic: the original PyTorch code only sets bottleneck.bias.requires_grad to False

        self.classifier = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier1 = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier2 = nn.Dense(pool_dim, class_num, has_bias=False)

        # TODO:add weights initialization module
        # self.bottleneck.apply(weights_init_kaiming)
        # self.classifier.apply(weights_init_classifier)
        # self.classifier1.apply(weights_init_classifier)
        # self.classifier2.apply(weights_init_classifier)


        self.classifier.weight.set_data(
           weight_init.initializer(weight_init.Normal(sigma=0.001), \
           self.classifier.weight.shape, self.classifier.weight.dtype))

        self.avgpool = P.ReduceMean(keep_dims=True)
        if self.part > 0:
            self.wpa = IWPA(pool_dim, self.part)
        else:
            self.wpa = IWPA(pool_dim, 3)
Example #11
    def __init__(self, emb_size, args=None):
        super(Head0, self).__init__()
        if args.pre_bn == 1:
            self.bn1 = bn_with_initialize(512, use_inference=args.inference)
        else:
            self.bn1 = Cut()

        if args is not None:
            if args.use_drop == 1:
                self.drop = nn.Dropout(keep_prob=0.4)
            else:
                self.drop = Cut()
        else:
            self.drop = nn.Dropout(keep_prob=0.4)

        self.fc1 = fc_with_initialize(512 * 7 * 7, emb_size)
        if args.inference == 1:
            self.bn2 = Cut()
        else:
            self.bn2 = nn.BatchNorm1d(
                emb_size, affine=False,
                momentum=0.9).add_flags_recursive(fp32=True)
        self.reshape = P.Reshape()
        self.shape = P.Shape()
Example #12
 def __init__(self):
     super(BatchNormReshapeNet, self).__init__()
     self.vd = P._VirtualDataset()
     self.batch_norm = nn.BatchNorm1d(512, affine=False)
     self.reshape = P.Reshape()
     self.prelu = nn.PReLU(channel=256)
Example #13
 def __init__(self):
     super(Blockcell, self).__init__()
     self.bn = nn.BatchNorm1d(64, momentum=0.9)
Example #14
    def __init__(self,
                 batch_size,
                 input_size,
                 hidden_size,
                 num_layers,
                 bidirectional=False,
                 batch_norm=False,
                 rnn_type='LSTM'):
        super(BatchRNN, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.has_bias = True
        self.is_batch_norm = batch_norm
        self.num_directions = 2 if bidirectional else 1
        self.reshape_op = P.Reshape()
        self.shape_op = P.Shape()
        self.sum_op = P.ReduceSum()

        input_size_list = [input_size]
        for i in range(num_layers - 1):
            input_size_list.append(hidden_size)
        layers = []

        for i in range(num_layers):
            layers.append(
                nn.LSTMCell(input_size=input_size_list[i],
                            hidden_size=hidden_size,
                            bidirectional=bidirectional,
                            has_bias=self.has_bias))

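        # flatten each layer's LSTM weights (4 gates per direction, plus biases) into a single parameter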
        weights = []
        for i in range(num_layers):
            weight_size = (input_size_list[i] +
                           hidden_size) * hidden_size * self.num_directions * 4
            if self.has_bias:
                bias_size = self.num_directions * hidden_size * 4 * 2
                weight_size = weight_size + bias_size

            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)

            weights.append(
                Parameter(initializer(Tensor(w_np), w_np.shape),
                          name="weight" + str(i)))

        self.h, self.c = self.stack_lstm_default_state(
            batch_size,
            hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional)
        self.lstms = layers
        self.weight = ParameterTuple(tuple(weights))

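        # when batch_norm is set, create num_layers - 1 BatchNorm1d(hidden_size) layers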
        if batch_norm:
            batch_norm_layer = []
            for i in range(num_layers - 1):
                batch_norm_layer.append(nn.BatchNorm1d(hidden_size))
            self.batch_norm_list = batch_norm_layer
Example #15
def bn_with_initialize(out_channels):
    bn = nn.BatchNorm1d(out_channels, momentum=0.1, eps=1e-5)
    return bn
Example #16
    def __init__(self,
                 in_channels,
                 part=3,
                 inter_channels=None,
                 out_channels=None):
        super(IWPA, self).__init__()

        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.out_channels = out_channels
        self.part = part
        self.l2norm = L2Normalize()
        self.softmax = nn.Softmax(axis=-1)

        if self.inter_channels is None:
            self.inter_channels = in_channels

        if self.out_channels is None:
            self.out_channels = in_channels

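        # three parallel 1x1 convolutions projecting in_channels to inter_channels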
        self.fc1 = nn.Conv2d(in_channels=self.in_channels,
                             out_channels=self.inter_channels,
                             kernel_size=1,
                             stride=1,
                             padding=0)

        self.fc2 = nn.Conv2d(in_channels=self.in_channels,
                             out_channels=self.inter_channels,
                             kernel_size=1,
                             stride=1,
                             padding=0)

        self.fc3 = nn.Conv2d(in_channels=self.in_channels,
                             out_channels=self.inter_channels,
                             kernel_size=1,
                             stride=1,
                             padding=0)

        self.W = nn.SequentialCell(
            nn.Conv2d(in_channels=self.inter_channels,
                      out_channels=self.out_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            nn.BatchNorm2d(self.out_channels),
        )
        # self.W[1].weight.set_data(Constant(0.0))
        # self.w[2].bias.set_data(Constant(0.0))

        self.bottleneck = nn.BatchNorm1d(in_channels)
        self.bottleneck.requires_grad = False  # no shift

        # self.bottleneck.weight.set_data(Normal(sigma=0.01))
        # In the original PyTorch code: nn.init.normal_(self.bottleneck.weight.data, 1.0, 0.01)

        # weighting vector of the part features
        self.gate = ms.Parameter(ms.Tensor(np.ones(self.part),
                                           dtype=ms.float32),
                                 name="w",
                                 requires_grad=True)
        self.gate.set_data(
            weight_init.initializer(Constant(1 / self.part), self.gate.shape,
                                    self.gate.dtype))