Example #1
    def __init__(self, block, num_classes=100, batch_size=32):
        """init"""
        super(ResNet, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes

        self.conv1 = conv7x7(3, 64, stride=2, padding=0)

        self.bn1 = bn_with_initialize(64)
        self.relu = ops.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = MakeLayer0(block,
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer1(block,
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer2(block,
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.pool = ops.ReduceMean(keep_dims=True)
        self.squeeze = ops.Squeeze(axis=(2, 3))
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)
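This init only declares the layers. For context, a minimal construct sketch in the style of the MindSpore model zoo might look as follows; the forward pass is not part of the original snippet, so treat the axis tuple (2, 3) (average over H and W) as an illustrative assumption:

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.pool(x, (2, 3))  # global average pooling: ReduceMean over the H and W axes
        x = self.squeeze(x)       # drop the kept singleton spatial axes -> (N, C)
        x = self.fc(x)
        return x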
Example #2
    def __init__(self, block, num_classes=100, batch_size=32):
        """init"""
        super(ResNet, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.head = Head()

        self.layer1 = MakeLayer0(block,
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer1(block,
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer2(block,
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.pool = ops.ReduceMean(keep_dims=True)
        self.squeeze = ops.Squeeze(axis=(2, 3))
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)

        # pipeline parallel config
        self.head.pipeline_stage = 0
        self.layer1.pipeline_stage = 0
        self.layer2.pipeline_stage = 0
        self.layer3.pipeline_stage = 1
        self.layer4.pipeline_stage = 1
        self.fc.pipeline_stage = 1
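The pipeline_stage assignments above place the head and the first two layers on stage 0 and the rest on stage 1. A hedged sketch of how such a two-stage network is typically wired up in MindSpore (network and loss_fn are placeholder names, not from the original):

from mindspore import context, nn
from mindspore.context import ParallelMode

context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
                                  pipeline_stages=2)
# micro_size controls how each mini-batch is split into micro-batches
net = nn.PipelineCell(nn.WithLossCell(network, loss_fn), micro_size=4)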
Example #3
 def __init__(self):
     super(CrossEntropyLoss, self).__init__()
     self.sm_scalar = ops.ScalarSummary()
     self.cross_entropy = ops.SoftmaxCrossEntropyWithLogits()
     self.mean = ops.ReduceMean()
     self.one_hot = ops.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
Example #4
 def __init__(self, reduction="mean"):
     super(CrossEntropyLoss, self).__init__()
     self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
     if reduction == "sum":
         self.reduction = P.ReduceSum()
     if reduction == "mean":
         self.reduction = P.ReduceMean()
     self.one_hot = P.OneHot()
     self.one = Tensor(1.0, mstype.float32)
     self.zero = Tensor(0.0, mstype.float32)
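A plausible construct to pair with this init, assuming the usual one-hot encoding followed by softmax cross-entropy (F here stands for mindspore.ops.functional; the forward pass is an assumption, not part of the original):

 def construct(self, logits, label):
     label = self.one_hot(label, F.shape(logits)[1], self.one, self.zero)
     loss = self.cross_entropy(logits, label)[0]  # the primitive returns (loss, dlogits)
     return self.reduction(loss, -1)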
Example #5
 def __init__(self, config):
     super(BertPretrainingLoss, self).__init__()
     self.vocab_size = config.vocab_size
     self.onehot = ops.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.reduce_sum = ops.ReduceSum()
     self.reduce_mean = ops.ReduceMean()
     self.reshape = ops.Reshape()
     self.last_idx = (-1,)
     self.neg = ops.Neg()
     self.cast = ops.Cast()
Example #6
 def __init__(self, model, config, is_training, dropout_prob=0.0, use_one_hot_embeddings=False):
     super(BertPoetry, self).__init__(auto_prefix=False)
     self.num_tokens = 3191
     self.poetry = model
     self.onehot = ops.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.reduce_sum = ops.ReduceSum()
     self.reduce_mean = ops.ReduceMean()
     self.reshape = ops.Reshape()
     self.neg = ops.Neg()
     self.cast = ops.Cast()
     self.last_idx = (-1,)
     self.log = ops.Log()
     self.max = ops.ArgMaxWithValue(axis=-1)
Example #7
 def __init__(self, sparse=False):
     super(SoftmaxCrossEntropyExpand, self).__init__()
     self.exp = ops.Exp()
     self.sum = ops.ReduceSum(keep_dims=True)
     self.onehot = ops.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.div = ops.RealDiv()
     self.log = ops.Log()
     self.sum_cross_entropy = ops.ReduceSum(keep_dims=False)
     self.mul = ops.Mul()
     self.mul2 = ops.Mul()
     self.mean = ops.ReduceMean(keep_dims=False)
     self.sparse = sparse
     self.max = ops.ReduceMax(keep_dims=True)
     self.sub = ops.Sub()
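This init matches the MindSpore tutorial on expanding softmax cross-entropy from primitive ops. The corresponding construct, reproduced here as a sketch (F stands for mindspore.ops.functional), subtracts the per-row max before exponentiating for numerical stability:

 def construct(self, logit, label):
     logit_max = self.max(logit, -1)
     exp = self.exp(self.sub(logit, logit_max))  # subtract the row max for numerical stability
     exp_sum = self.sum(exp, -1)
     softmax_result = self.div(exp, exp_sum)
     if self.sparse:
         label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
     softmax_result_log = self.log(softmax_result)
     loss = self.sum_cross_entropy(self.mul(softmax_result_log, label), -1)
     loss = self.mul2(loss, -1.0)  # cross-entropy is -sum(label * log softmax)
     loss = self.mean(loss, -1)
     return loss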
Example #8
    def __init__(self, reduction='mean'):
        super(BCEWithLogits, self).__init__()
        if reduction is None:
            reduction = 'none'
        if reduction not in ('mean', 'sum', 'none'):
            raise ValueError(
                f"reduction method '{reduction}' is not supported")

        self.loss = ops.SigmoidCrossEntropyWithLogits()
        self.reduce = False
        if reduction == 'sum':
            self.reduce_mode = ops.ReduceSum()
            self.reduce = True
        elif reduction == 'mean':
            self.reduce_mode = ops.ReduceMean()
            self.reduce = True
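A minimal construct to go with this init might be (an assumption; note that ReduceSum and ReduceMean called without an axis reduce over all dimensions):

    def construct(self, predict, target):
        loss = self.loss(predict, target)
        if self.reduce:
            loss = self.reduce_mode(loss)  # no axis given: reduce over all dims
        return loss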
Example #9
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides=(1, 2, 2, 2),
                 num_classes=100):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of "
                             "layer_num, inchannel, outchannel list must be 4!")

        self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _fused_bn(64)
        self.relu = ops.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.mean = ops.ReduceMean(keep_dims=True)
        self.end_point = nn.Dense(out_channels[3], num_classes, has_bias=True,
                                  weight_init=dense_weight_variable())
        self.squeeze = ops.Squeeze()
        self.cast = ops.Cast()
Example #10
    def __init__(self,
                 low_dim,
                 class_num=200,
                 drop=0.2,
                 part=0,
                 alpha=0.2,
                 nheads=4,
                 arch="resnet50"):
        super(embed_net, self).__init__()
        # print("class_num is :", class_num)
        self.thermal_module = thermal_module(arch=arch)
        self.visible_module = visible_module(arch=arch)
        self.base_resnet = base_resnet(arch=arch)
        pool_dim = 2048
        self.dropout = drop
        self.part = part

        self.l2norm = Normalize(2)
        self.bottleneck = nn.BatchNorm1d(num_features=pool_dim)
        self.bottleneck.requires_grad = False  # NOTE: the original PyTorch code freezes only the bias: bottleneck.bias.requires_grad_(False)

        self.classifier = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier1 = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier2 = nn.Dense(pool_dim, class_num, has_bias=False)

        # TODO:add weights initialization module
        # self.bottleneck.apply(weights_init_kaiming)
        # self.classifier.apply(weights_init_classifier)
        # self.classifier1.apply(weights_init_classifier)
        # self.classifier2.apply(weights_init_classifier)


        self.classifier.weight.set_data(
            weight_init.initializer(weight_init.Normal(sigma=0.001),
                                    self.classifier.weight.shape,
                                    self.classifier.weight.dtype))

        self.avgpool = P.ReduceMean(keep_dims=True)
        if self.part > 0:
            self.wpa = IWPA(pool_dim, self.part)
        else:
            self.wpa = IWPA(pool_dim, 3)
Example #11
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")

        self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = ops.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.mean = ops.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes)
Example #12
    def __init__(
            self, 
            hidden_size,
            vocab_size, 
            sample_softmax, 
            num_sampled, 
            num_true=1,
            seed=0,
            training=True):
        super().__init__()
        self.training = training
        self.sample_softmax = sample_softmax
        self.hidden_size = hidden_size

        self.weight = Parameter(initializer(Normal(1.0 / np.sqrt(hidden_size)), (vocab_size, hidden_size), mindspore.float32))
        self.bias = Parameter(initializer(Zero(), (vocab_size,), mindspore.float32))

        self.sampled_softmax_loss = SampledSoftmaxLoss(num_sampled, vocab_size, num_true, seed=seed)
        self.sparse_softmax_cross_entropy_with_logits = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
        self.matmul = nn.MatMul(False, True)
        self.reduce_mean = P.ReduceMean()
Example #13
 def __init__(self, in_c, out_c):
     super().__init__()
     self.relu = nn.ReLU()
     self.bn1 = nn.BatchNorm2d(num_features=in_c,
                               gamma_init='ones',
                               beta_init='zeros',
                               moving_mean_init='zeros',
                               moving_var_init='ones')
     self.bn2 = nn.BatchNorm2d(num_features=out_c,
                               gamma_init='ones',
                               beta_init='zeros',
                               moving_mean_init='zeros',
                               moving_var_init='ones')
     self.conv = nn.Conv2d(in_channels=in_c,
                           out_channels=out_c,
                           kernel_size=3,
                           stride=1,
                           has_bias=True,
                           pad_mode='same',
                           weight_init='ones',
                           bias_init='ones')
     self.mean = ops.ReduceMean(keep_dims=False)
Example #14
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        g0, v0 = grad_and_vars[0]
        if g0 is None:
            average_grads.append((g0, v0))
            continue
        # TODO: gradients of type IndexedSlices need special handling;
        # a dense tensor can simply be averaged across towers
        grads = []
        for g, _ in grad_and_vars:
            expand_g = P.ExpandDims()(g, 0)
            grads.append(expand_g)

        # Average over the 'tower' dimension
        grad = P.Concat(0)(grads)
        grad = P.ReduceMean()(grad, 0)

        v = grad_and_vars[0][1]
        average_grads.append((grad, v))
    assert len(average_grads) == len(list(zip(*tower_grads)))
    return average_grads
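A hypothetical call, just to make the expected input shape concrete: tower_grads is a list over towers, each a list of (gradient, parameter) pairs. All names below are illustrative, not from the original:

import numpy as np
import mindspore as ms

w = ms.Parameter(ms.Tensor(np.zeros((2, 2)), ms.float32), name="w")
g1 = ms.Tensor(np.ones((2, 2)), ms.float32)
g2 = ms.Tensor(np.ones((2, 2)) * 3, ms.float32)
tower_grads = [[(g1, w)], [(g2, w)]]
averaged = average_gradients(tower_grads)  # -> [(tensor of 2.0s, w)]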
Example #15
 def __init__(self):
     """edsr"""
     super().__init__()
     self.ReduceMean = ops.ReduceMean(keep_dims=True)
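Since this last init only instantiates the operator, here is a self-contained usage sketch of ops.ReduceMean itself, the op all of these examples share:

import numpy as np
import mindspore as ms
from mindspore import ops

reduce_mean = ops.ReduceMean(keep_dims=True)
x = ms.Tensor(np.random.rand(1, 64, 32, 32), ms.float32)
y = reduce_mean(x, (2, 3))  # average over H and W; keep_dims=True -> shape (1, 64, 1, 1)
print(y.shape)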