Example #1
    def __init__(self, config, loc_cls_shared_addition=False):
        super(WeightSharedMultiBox, self).__init__()
        num_classes = config.num_classes
        out_channels = config.extras_out_channels[0]
        num_default = config.num_default[0]
        num_features = len(config.feature_size)
        num_addition_layers = config.num_addition_layers
        self.loc_cls_shared_addition = loc_cls_shared_addition

        if not loc_cls_shared_addition:
            loc_convs = [
                _conv2d(out_channels, out_channels, 3, 1)
                for x in range(num_addition_layers)
            ]
            cls_convs = [
                _conv2d(out_channels, out_channels, 3, 1)
                for x in range(num_addition_layers)
            ]
            addition_loc_layer_list = []
            addition_cls_layer_list = []
            for _ in range(num_features):
                addition_loc_layer = [
                    ConvBNReLU(out_channels, out_channels, 3, 1, 1,
                               loc_convs[x])
                    for x in range(num_addition_layers)
                ]
                addition_cls_layer = [
                    ConvBNReLU(out_channels, out_channels, 3, 1, 1,
                               cls_convs[x])
                    for x in range(num_addition_layers)
                ]
                addition_loc_layer_list.append(
                    nn.SequentialCell(addition_loc_layer))
                addition_cls_layer_list.append(
                    nn.SequentialCell(addition_cls_layer))
            self.addition_layer_loc = nn.CellList(addition_loc_layer_list)
            self.addition_layer_cls = nn.CellList(addition_cls_layer_list)
        else:
            convs = [
                _conv2d(out_channels, out_channels, 3, 1)
                for x in range(num_addition_layers)
            ]
            addition_layer_list = []
            for _ in range(num_features):
                addition_layers = [
                    ConvBNReLU(out_channels, out_channels, 3, 1, 1, convs[x])
                    for x in range(num_addition_layers)
                ]
                addition_layer_list.append(nn.SequentialCell(addition_layers))
            self.addition_layer = nn.SequentialCell(addition_layer_list)

        loc_layers = [
            _conv2d(out_channels,
                    4 * num_default,
                    kernel_size=3,
                    stride=1,
                    pad_mod='same')
        ]
        cls_layers = [
            _conv2d(out_channels,
                    num_classes * num_default,
                    kernel_size=3,
                    stride=1,
                    pad_mod='same')
        ]

        self.loc_layers = nn.SequentialCell(loc_layers)
        self.cls_layers = nn.SequentialCell(cls_layers)
        self.flatten_concat = FlattenConcat(config)
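
The weight sharing in this head comes from reusing the same conv cells across all feature levels. A minimal standalone sketch (plain MindSpore with made-up channel counts, not the helpers above) showing that placing one Conv2d instance in two containers shares its parameters:

import mindspore.nn as nn

# One conv instance reused in two heads -> both heads train the same kernel.
shared = nn.Conv2d(8, 8, 3)
head_a = nn.SequentialCell([shared, nn.ReLU()])
head_b = nn.SequentialCell([shared, nn.ReLU()])
print(head_a[0] is head_b[0])  # True
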
Example #2
 def test_setitem1(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     m[1] = conv2
     assert m[1] == m[0]
Example #3
 def test_delitem1(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     del m[0]
     assert len(m) == 1
Example #4
 def test_SequentialCell_init2(self):
     m = nn.SequentialCell([conv2])
     assert len(m) == 1
Example #5
 def test_SequentialCell_init4(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     assert len(m) == 2
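
As the small tests above illustrate, SequentialCell behaves like a mutable sequence: it supports len(), indexing, slicing, item assignment, deletion, and append. A quick self-contained sketch:

from collections import OrderedDict
import mindspore.nn as nn

m = nn.SequentialCell(OrderedDict([
    ('conv', nn.Conv2d(3, 8, 3)),
    ('relu', nn.ReLU()),
]))
print(len(m))  # 2
print(m[0])    # the cell registered under 'conv'
m.append(nn.Flatten())
print(len(m))  # 3
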
Example #6
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp

        outputs = oup - inp

        branch_main = [
            # pw
            nn.Conv2d(in_channels=inp,
                      out_channels=mid_channels,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      padding=0,
                      has_bias=False),
            nn.BatchNorm2d(num_features=mid_channels, momentum=0.9),
            nn.ReLU(),
            # dw
            nn.Conv2d(in_channels=mid_channels,
                      out_channels=mid_channels,
                      kernel_size=ksize,
                      stride=stride,
                      pad_mode='pad',
                      padding=pad,
                      group=mid_channels,
                      has_bias=False),
            nn.BatchNorm2d(num_features=mid_channels, momentum=0.9),
            # pw-linear
            nn.Conv2d(in_channels=mid_channels,
                      out_channels=outputs,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      padding=0,
                      has_bias=False),
            nn.BatchNorm2d(num_features=outputs, momentum=0.9),
            nn.ReLU(),
        ]
        self.branch_main = nn.SequentialCell(branch_main)

        if stride == 2:
            branch_proj = [
                # dw
                nn.Conv2d(in_channels=inp,
                          out_channels=inp,
                          kernel_size=ksize,
                          stride=stride,
                          pad_mode='pad',
                          padding=pad,
                          group=inp,
                          has_bias=False),
                nn.BatchNorm2d(num_features=inp, momentum=0.9),
                # pw-linear
                nn.Conv2d(in_channels=inp,
                          out_channels=inp,
                          kernel_size=1,
                          stride=1,
                          pad_mode='pad',
                          padding=0,
                          has_bias=False),
                nn.BatchNorm2d(num_features=inp, momentum=0.9),
                nn.ReLU(),
            ]
            self.branch_proj = nn.SequentialCell(branch_proj)
        else:
            self.branch_proj = None
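
The main branch above is the usual pointwise -> depthwise -> pointwise-linear stack. A self-contained sketch of that pattern with made-up channel counts, to show the shape bookkeeping:

import numpy as np
import mindspore as ms
import mindspore.nn as nn

mid = 16
stack = nn.SequentialCell([
    # pw
    nn.Conv2d(8, mid, 1, has_bias=False), nn.BatchNorm2d(mid), nn.ReLU(),
    # dw (stride 2 halves H and W)
    nn.Conv2d(mid, mid, 3, stride=2, pad_mode='pad', padding=1,
              group=mid, has_bias=False), nn.BatchNorm2d(mid),
    # pw-linear
    nn.Conv2d(mid, 24, 1, has_bias=False), nn.BatchNorm2d(24), nn.ReLU(),
])
x = ms.Tensor(np.ones((1, 8, 32, 32)), ms.float32)
print(stack(x).shape)  # (1, 24, 16, 16)
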
Example #7
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.,
                 has_dropout=False,
                 inverted_residual_setting=None,
                 round_nearest=8):
        super(mobilenetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # setting of inverted residual blocks
        self.cfgs = inverted_residual_setting
        if inverted_residual_setting is None:
            self.cfgs = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.out_channels = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)

        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in self.cfgs:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel,
                          output_channel,
                          stride,
                          expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(
            ConvBNReLU(input_channel, self.out_channels, kernel_size=1))
        # wrap the feature layers in a single nn.SequentialCell
        self.features = nn.SequentialCell(features)
        # mobilenet head
        head = ([
            GlobalAvgPooling(),
            nn.DenseBnAct(
                self.out_channels, num_classes, has_bias=True, has_bn=False)
        ] if not has_dropout else [
            GlobalAvgPooling(),
            nn.Dropout(0.2),
            nn.DenseBnAct(
                self.out_channels, num_classes, has_bias=True, has_bn=False)
        ])
        self.head = nn.SequentialCell(head)

        # init weights
        self._initialize_weights()
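
`_make_divisible` is used but not shown here. A sketch of the usual MobileNetV2 helper (an assumption, since the snippet does not include it), which rounds a channel count to the nearest multiple of `round_nearest` while never dropping more than 10%:

def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of divisor.
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not remove more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
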
Example #8
 def __init__(self):
     super().__init__()
     self.seq = nn.SequentialCell(
         [nn.AvgPool2d(3, 1), nn.ReLU(),
          nn.Flatten()])
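
Feeding a dummy input through this pipeline shows the shape flow (nn.AvgPool2d defaults to 'valid' padding):

import numpy as np
import mindspore as ms
import mindspore.nn as nn

seq = nn.SequentialCell([nn.AvgPool2d(3, 1), nn.ReLU(), nn.Flatten()])
out = seq(ms.Tensor(np.ones((1, 3, 16, 16)), ms.float32))
print(out.shape)  # (1, 588): 16 -> 14 after the 3x3 'valid' pool, then 3*14*14 flattened
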
Example #9
    def __init__(self, n_class=1000, model_size='2.0x', group=3):
        super(ShuffleNetV1, self).__init__()
        print('model size is ', model_size)

        self.stage_repeats = [4, 8, 4]
        self.model_size = model_size
        if group == 3:
            if model_size == '0.5x':
                self.stage_out_channels = [-1, 12, 120, 240, 480]
            elif model_size == '1.0x':
                self.stage_out_channels = [-1, 24, 240, 480, 960]
            elif model_size == '1.5x':
                self.stage_out_channels = [-1, 24, 360, 720, 1440]
            elif model_size == '2.0x':
                self.stage_out_channels = [-1, 48, 480, 960, 1920]
            else:
                raise NotImplementedError
        elif group == 8:
            if model_size == '0.5x':
                self.stage_out_channels = [-1, 16, 192, 384, 768]
            elif model_size == '1.0x':
                self.stage_out_channels = [-1, 24, 384, 768, 1536]
            elif model_size == '1.5x':
                self.stage_out_channels = [-1, 24, 576, 1152, 2304]
            elif model_size == '2.0x':
                self.stage_out_channels = [-1, 48, 768, 1536, 3072]
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

        # building first layer
        input_channel = self.stage_out_channels[1]
        self.first_conv = nn.SequentialCell(
            nn.Conv2d(3,
                      input_channel,
                      3,
                      2,
                      'pad',
                      1,
                      weight_init='xavier_uniform',
                      has_bias=False),
            nn.BatchNorm2d(input_channel),
            nn.ReLU(),
        )
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')

        features = []
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]

            for i in range(numrepeat):
                stride = 2 if i == 0 else 1
                first_group = idxstage == 0 and i == 0
                features.append(
                    ShuffleV1Block(input_channel,
                                   output_channel,
                                   group=group,
                                   first_group=first_group,
                                   mid_channels=output_channel // 4,
                                   ksize=3,
                                   stride=stride))
                input_channel = output_channel

        self.features = nn.SequentialCell(features)
        self.globalpool = nn.AvgPool2d(7)
        self.classifier = nn.Dense(self.stage_out_channels[-1], n_class)
        self.reshape = P.Reshape()
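
The Reshape/Transpose/Shape operators registered above are the building blocks of ShuffleNet's channel shuffle. A standalone sketch of that step (not part of the snippet; group count assumed):

import numpy as np
import mindspore as ms
from mindspore.ops import operations as P

reshape, transpose = P.Reshape(), P.Transpose()
x = ms.Tensor(np.ones((1, 6, 2, 2)), ms.float32)
n, c, h, w = x.shape
g = 3                                 # number of groups
y = reshape(x, (n, g, c // g, h, w))  # split channels into groups
y = transpose(y, (0, 2, 1, 3, 4))     # interleave the groups
y = reshape(y, (n, c, h, w))          # back to NCHW
print(y.shape)  # (1, 6, 2, 2)
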
Example #10
    def __init__(self, in_channels):
        super(Stem, self).__init__()
        self.conv2d_1a_3x3 = Conv2d(in_channels,
                                    32,
                                    3,
                                    stride=2,
                                    padding=0,
                                    has_bias=False)

        self.conv2d_2a_3x3 = Conv2d(32,
                                    32,
                                    3,
                                    stride=1,
                                    padding=0,
                                    has_bias=False)
        self.conv2d_2b_3x3 = Conv2d(32,
                                    64,
                                    3,
                                    stride=1,
                                    pad_mode='pad',
                                    padding=1,
                                    has_bias=False)

        self.mixed_3a_branch_0 = nn.MaxPool2d(3, stride=2)
        self.mixed_3a_branch_1 = Conv2d(64,
                                        96,
                                        3,
                                        stride=2,
                                        padding=0,
                                        has_bias=False)

        self.mixed_4a_branch_0 = nn.SequentialCell([
            Conv2d(160, 64, 1, stride=1, padding=0, has_bias=False),
            Conv2d(64,
                   96,
                   3,
                   stride=1,
                   padding=0,
                   pad_mode='valid',
                   has_bias=False)
        ])

        self.mixed_4a_branch_1 = nn.SequentialCell([
            Conv2d(160, 64, 1, stride=1, padding=0, has_bias=False),
            Conv2d(64, 64, (1, 7), pad_mode='same', stride=1, has_bias=False),
            Conv2d(64, 64, (7, 1), pad_mode='same', stride=1, has_bias=False),
            Conv2d(64,
                   96,
                   3,
                   stride=1,
                   padding=0,
                   pad_mode='valid',
                   has_bias=False)
        ])

        self.mixed_5a_branch_0 = Conv2d(192,
                                        192,
                                        3,
                                        stride=2,
                                        padding=0,
                                        has_bias=False)
        self.mixed_5a_branch_1 = nn.MaxPool2d(3, stride=2)
        self.concat0 = P.Concat(1)
        self.concat1 = P.Concat(1)
        self.concat2 = P.Concat(1)
Example #11
 def _make_block(self, cell):
     layers = []
     for _ in range(3):
         layers.append(cell())
     return nn.SequentialCell(layers)
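
Note that `cell` is a zero-argument factory (a class or lambda), so each of the three layers is a fresh instance rather than a shared one (contrast with the shared convs in Example #1). A usage sketch with a free-function variant:

import mindspore.nn as nn

def make_block(cell):
    return nn.SequentialCell([cell() for _ in range(3)])

block = make_block(nn.ReLU)
print(len(block))            # 3
print(block[0] is block[1])  # False: each cell() call builds a new layer
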
Example #12
def Conv2d(in_channels: int,
           out_channels: int,
           kernel_size: Union[int, Tuple[int, int]],
           stride: Union[int, Tuple[int, int]] = 1,
           padding: Union[str, int, Tuple[int, int]] = 'same',
           groups: int = 1,
           dilation: int = 1,
           bias: Optional[bool] = None,
           norm: Optional[str] = None,
           act: Optional[str] = None):
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(stride, int):
        stride = (stride, stride)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(padding, str):
        assert padding == 'same'
    if padding == 'same':
        padding = calc_same_padding(kernel_size, dilation)

    # Init
    init_cfg = DEFAULTS['init']
    if init_cfg['type'] == 'msra':
        mode = init_cfg['mode']
        distribution = init_cfg['distribution']
        if 'uniform' in distribution:
            weight_init = HeUniform(mode=mode)
        else:
            weight_init = HeNormal(mode=mode)
    else:
        raise ValueError("Unsupported init type: %s" % init_cfg['type'])

    scale = math.sqrt(1 / (kernel_size[0] * kernel_size[1] *
                           (in_channels // groups)))
    bias_init = Uniform(scale)

    if bias is None:
        use_bias = norm is None
    else:
        use_bias = bias

    conv = nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     pad_mode='pad',
                     has_bias=use_bias,
                     dilation=dilation,
                     group=groups,
                     weight_init=weight_init,
                     bias_init=bias_init)

    layers = [conv]

    if norm:
        layers.append(Norm(out_channels, norm))
    if act:
        layers.append(Act(act))

    if len(layers) == 1:
        return layers[0]
    else:
        return nn.SequentialCell(layers)
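
`calc_same_padding` is assumed by this wrapper but not shown. Since the conv is built with `pad_mode='pad'`, a plausible definition (an assumption) returns explicit per-side padding that preserves spatial size at stride 1:

def calc_same_padding(kernel_size, dilation):
    # For stride 1, 'same' needs (k - 1) * d // 2 pixels on each side
    # (exact for odd effective kernel sizes).
    ph = (kernel_size[0] - 1) * dilation[0] // 2
    pw = (kernel_size[1] - 1) * dilation[1] // 2
    return (ph, ph, pw, pw)  # (top, bottom, left, right)
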
Example #13
    def __init__(self,
                 in_filters,
                 out_filters,
                 reps,
                 strides=1,
                 start_with_relu=True,
                 grow_first=True):
        super(Block, self).__init__()

        if out_filters != in_filters or strides != 1:
            self.skip = nn.Conv2d(in_filters,
                                  out_filters,
                                  1,
                                  stride=strides,
                                  pad_mode='valid',
                                  has_bias=False,
                                  weight_init='xavier_uniform')
            self.skipbn = nn.BatchNorm2d(out_filters, momentum=0.9)
        else:
            self.skip = None

        self.relu = nn.ReLU()
        rep = []
        filters = in_filters
        if grow_first:
            rep.append(nn.ReLU())
            rep.append(
                SeparableConv2d(in_filters,
                                out_filters,
                                kernel_size=3,
                                stride=1,
                                padding=1))
            rep.append(nn.BatchNorm2d(out_filters, momentum=0.9))
            filters = out_filters

        for _ in range(reps - 1):
            rep.append(nn.ReLU())
            rep.append(
                SeparableConv2d(filters,
                                filters,
                                kernel_size=3,
                                stride=1,
                                padding=1))
            rep.append(nn.BatchNorm2d(filters, momentum=0.9))

        if not grow_first:
            rep.append(nn.ReLU())
            rep.append(
                SeparableConv2d(in_filters,
                                out_filters,
                                kernel_size=3,
                                stride=1,
                                padding=1))
            rep.append(nn.BatchNorm2d(out_filters, momentum=0.9))

        if not start_with_relu:
            rep = rep[1:]
        else:
            rep[0] = nn.ReLU()

        if strides != 1:
            rep.append(nn.MaxPool2d(3, strides, pad_mode="same"))
        self.rep = nn.SequentialCell(*rep)
        self.add = P.Add()
Example #14
 def make_layer(self, kernel_height):
     return nn.SequentialCell([
         make_conv_layer((kernel_height, self.vec_length)),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=(self.word_len - kernel_height + 1, 1)),
     ])
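
The pool kernel `(word_len - kernel_height + 1, 1)` exactly matches the conv output's time axis, so every branch collapses to one feature per filter regardless of kernel_height. A self-contained sketch with made-up sizes (the snippet's make_conv_layer is replaced here by a plain valid-padding conv):

import numpy as np
import mindspore as ms
import mindspore.nn as nn

word_len, vec_length, kh = 50, 300, 3
branch = nn.SequentialCell([
    nn.Conv2d(1, 96, (kh, vec_length), pad_mode='valid'),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=(word_len - kh + 1, 1)),
])
x = ms.Tensor(np.ones((2, 1, word_len, vec_length)), ms.float32)
print(branch(x).shape)  # (2, 96, 1, 1): the pool collapses the time axis
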
Example #15
    def __init__(self, fine_tune_batch_norm=False):
        super(ResNetV1, self).__init__()
        self.layer_root = nn.SequentialCell([
            RootBlockBeta(fine_tune_batch_norm),
            nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), pad_mode='same')
        ])
        self.layer1_1 = BottleneckV1(128,
                                     256,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer1_2 = BottleneckV2(256,
                                     256,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer1_3 = BottleneckV3(256,
                                     256,
                                     stride=2,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer2_1 = BottleneckV1(256,
                                     512,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer2_2 = BottleneckV2(512,
                                     512,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer2_3 = BottleneckV2(512,
                                     512,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer2_4 = BottleneckV3(512,
                                     512,
                                     stride=2,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_1 = BottleneckV1(512,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_2 = BottleneckV2(1024,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_3 = BottleneckV2(1024,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_4 = BottleneckV2(1024,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_5 = BottleneckV2(1024,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer3_6 = BottleneckV2(1024,
                                     1024,
                                     stride=1,
                                     use_batch_statistics=fine_tune_batch_norm)

        self.layer4_1 = BottleneckV1(1024,
                                     2048,
                                     stride=1,
                                     use_batch_to_stob_and_btos=True,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer4_2 = BottleneckV2(2048,
                                     2048,
                                     stride=1,
                                     use_batch_to_stob_and_btos=True,
                                     use_batch_statistics=fine_tune_batch_norm)
        self.layer4_3 = BottleneckV2(2048,
                                     2048,
                                     stride=1,
                                     use_batch_to_stob_and_btos=True,
                                     use_batch_statistics=fine_tune_batch_norm)
Example #16
    def __init__(self, inp, oup, group, first_group, mid_channels, ksize,
                 stride):
        super(ShuffleV1Block, self).__init__()
        self.stride = stride

        pad = ksize // 2
        self.group = group

        if stride == 2:
            outputs = oup - inp
        else:
            outputs = oup

        self.relu = nn.ReLU()
        self.add = P.Add()
        self.concat = P.Concat(1)
        self.shape = P.Shape()
        self.transpose = P.Transpose()
        self.reshape = P.Reshape()

        branch_main_1 = [
            # pw
            GroupConv(in_channels=inp,
                      out_channels=mid_channels,
                      kernel_size=1,
                      stride=1,
                      pad_mode="pad",
                      pad=0,
                      groups=1 if first_group else group),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(),
        ]

        branch_main_2 = [
            # dw
            nn.Conv2d(mid_channels,
                      mid_channels,
                      kernel_size=ksize,
                      stride=stride,
                      pad_mode='pad',
                      padding=pad,
                      group=mid_channels,
                      weight_init='xavier_uniform',
                      has_bias=False),
            nn.BatchNorm2d(mid_channels),
            # pw
            GroupConv(in_channels=mid_channels,
                      out_channels=outputs,
                      kernel_size=1,
                      stride=1,
                      pad_mode="pad",
                      pad=0,
                      groups=group),
            nn.BatchNorm2d(outputs),
        ]
        self.branch_main_1 = nn.SequentialCell(branch_main_1)
        self.branch_main_2 = nn.SequentialCell(branch_main_2)
        if stride == 2:
            self.branch_proj = nn.AvgPool2d(kernel_size=3,
                                            stride=2,
                                            pad_mode='same')
Example #17
    def evaluate(self, explainer, inputs, targets, saliency=None):
        """
        Evaluate robustness on a single sample.

        Note:
            Currently only a single sample (:math:`N=1`) per call is supported.

        Args:
            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
                If `targets` is a 1D tensor, its length should be the same as `inputs`.
            saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
                If it is None, the passed `explainer` will generate the saliency map with `inputs` and `targets` and
                continue the evaluation. Default: None.

        Returns:
            numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`.

        Raises:
            ValueError: If batch_size is larger than 1.

        Examples:
            >>> import numpy as np
            >>> import mindspore as ms
            >>> from mindspore import nn
            >>> from mindspore.explainer.explanation import Gradient
            >>> from mindspore.explainer.benchmark import Robustness
            >>>
            >>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
            >>> num_labels = 10
            >>> activation_fn = nn.Softmax()
            >>> robustness = Robustness(num_labels, activation_fn)
            >>>
            >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
            >>> net = LeNet5(10, num_channel=3)
            >>> # prepare your explainer to be evaluated, e.g., Gradient.
            >>> gradient = Gradient(net)
            >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
            >>> target_label = ms.Tensor([0], ms.int32)
            >>> # robustness is a Robustness instance
            >>> res = robustness.evaluate(gradient, input_x, target_label)
            >>> print(res.shape)
            (1,)
        """

        self._check_evaluate_param(explainer, inputs, targets, saliency)
        if inputs.shape[0] > 1:
            raise ValueError(
                'Robustness only supports one sample at a time, '
                'but received {}'.format(inputs.shape[0]))

        if isinstance(targets, int):
            targets = ms.Tensor([targets], ms.int32)
        if saliency is None:
            saliency = explainer(inputs, targets)
        saliency = saliency.asnumpy()

        norm = np.sqrt(
            np.sum(np.square(saliency),
                   axis=tuple(range(1, len(saliency.shape)))))
        if (norm == 0).any():
            log.warning(
                'Saliency norm equals 0; robustness currently returns NaN '
                'for zero-norm saliency.')
            norm[norm == 0] = np.nan

        full_network = nn.SequentialCell(
            [explainer.network, self._activation_fn])
        original_outputs = full_network(inputs).asnumpy()
        sensitivities = []
        inputs = inputs.asnumpy()
        for _ in range(self._num_perturbations):
            perturbations = []
            for j, sample in enumerate(inputs):
                perturbation_on_single_sample = self._perturb_with_threshold(
                    full_network, np.expand_dims(sample, axis=0),
                    original_outputs[j])
                perturbations.append(perturbation_on_single_sample)
            perturbations = np.vstack(perturbations)
            perturbations = explainer(ms.Tensor(perturbations, ms.float32),
                                      targets).asnumpy()
            sensitivity = np.sqrt(
                np.sum((perturbations - saliency)**2,
                       axis=tuple(range(1, len(saliency.shape)))))
            sensitivities.append(sensitivity)
        sensitivities = np.stack(sensitivities, axis=-1)
        sensitivity = np.max(sensitivities, axis=1) / norm
        return 1 / np.exp(sensitivity)
Example #18
 def __init__(self, model_settings, model_size_info):
     super(DSCNN, self).__init__()
     # N C H W
     label_count = model_settings['label_count']
     input_frequency_size = model_settings['dct_coefficient_count']
     input_time_size = model_settings['spectrogram_length']
     t_dim = input_time_size
     f_dim = input_frequency_size
     num_layers = model_size_info[0]
     conv_feat = [None] * num_layers
     conv_kt = [None] * num_layers
     conv_kf = [None] * num_layers
     conv_st = [None] * num_layers
     conv_sf = [None] * num_layers
     i = 1
     for layer_no in range(0, num_layers):
         conv_feat[layer_no] = model_size_info[i]
         i += 1
         conv_kt[layer_no] = model_size_info[i]
         i += 1
         conv_kf[layer_no] = model_size_info[i]
         i += 1
         conv_st[layer_no] = model_size_info[i]
         i += 1
         conv_sf[layer_no] = model_size_info[i]
         i += 1
     seq_cell = []
     in_channel = 1
     for layer_no in range(0, num_layers):
         if layer_no == 0:
             seq_cell.append(
                 nn.Conv2d(in_channels=in_channel,
                           out_channels=conv_feat[layer_no],
                           kernel_size=(conv_kt[layer_no],
                                        conv_kf[layer_no]),
                           stride=(conv_st[layer_no], conv_sf[layer_no]),
                           pad_mode="same",
                           padding=0,
                           has_bias=False))
             seq_cell.append(
                 nn.BatchNorm2d(num_features=conv_feat[layer_no],
                                momentum=0.98))
             in_channel = conv_feat[layer_no]
         else:
             seq_cell.append(
                 DepthWiseConv(in_planes=in_channel,
                               kernel_size=(conv_kt[layer_no],
                                            conv_kf[layer_no]),
                               stride=(conv_st[layer_no],
                                       conv_sf[layer_no]),
                               pad_mode='same',
                               pad=0))
             seq_cell.append(
                 nn.BatchNorm2d(num_features=in_channel, momentum=0.98))
             seq_cell.append(nn.ReLU())
             seq_cell.append(
                 nn.Conv2d(in_channels=in_channel,
                           out_channels=conv_feat[layer_no],
                           kernel_size=(1, 1),
                           pad_mode="same"))
             seq_cell.append(
                 nn.BatchNorm2d(num_features=conv_feat[layer_no],
                                momentum=0.98))
             seq_cell.append(nn.ReLU())
             in_channel = conv_feat[layer_no]
         t_dim = math.ceil(t_dim / float(conv_st[layer_no]))
         f_dim = math.ceil(f_dim / float(conv_sf[layer_no]))
     seq_cell.append(nn.AvgPool2d(kernel_size=(t_dim, f_dim)))  # to fix ?
     seq_cell.append(nn.Flatten())
     seq_cell.append(nn.Dropout(model_settings['dropout1']))
     seq_cell.append(nn.Dense(in_channel, label_count))
     self.model = nn.SequentialCell(seq_cell)
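
The running `t_dim`/`f_dim` bookkeeping relies on 'same' padding producing out_dim == ceil(in_dim / stride), which is why `math.ceil` is used above. A quick standalone check:

import numpy as np
import mindspore as ms
import mindspore.nn as nn

conv = nn.Conv2d(1, 4, kernel_size=3, stride=2, pad_mode='same')
x = ms.Tensor(np.ones((1, 1, 49, 10)), ms.float32)
print(conv(x).shape)  # (1, 4, 25, 5): ceil(49/2)=25, ceil(10/2)=5
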
Example #19
    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 use_se=False,
                 se_block=False):
        super(ResidualBlock, self).__init__()
        self.stride = stride
        self.use_se = use_se
        self.se_block = se_block
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel,
                              channel,
                              stride=1,
                              use_se=self.use_se)
        self.bn1 = _bn(channel)
        if self.use_se and self.stride != 1:
            self.e2 = nn.SequentialCell([
                _conv3x3(channel, channel, stride=1, use_se=True),
                _bn(channel),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')
            ])
        else:
            self.conv2 = _conv3x3(channel,
                                  channel,
                                  stride=stride,
                                  use_se=self.use_se)
            self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel,
                              out_channel,
                              stride=1,
                              use_se=self.use_se)
        self.bn3 = _bn_last(out_channel)
        if self.se_block:
            self.se_global_pool = P.ReduceMean(keep_dims=False)
            self.se_dense_0 = _fc(out_channel,
                                  int(out_channel / 4),
                                  use_se=self.use_se)
            self.se_dense_1 = _fc(int(out_channel / 4),
                                  out_channel,
                                  use_se=self.use_se)
            self.se_sigmoid = nn.Sigmoid()
            self.se_mul = P.Mul()
        self.relu = nn.ReLU()

        self.down_sample = False

        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            if self.use_se:
                if stride == 1:
                    self.down_sample_layer = nn.SequentialCell([
                        _conv1x1(in_channel,
                                 out_channel,
                                 stride,
                                 use_se=self.use_se),
                        _bn(out_channel)
                    ])
                else:
                    self.down_sample_layer = nn.SequentialCell([
                        nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
                        _conv1x1(in_channel,
                                 out_channel,
                                 1,
                                 use_se=self.use_se),
                        _bn(out_channel)
                    ])
            else:
                self.down_sample_layer = nn.SequentialCell([
                    _conv1x1(in_channel,
                             out_channel,
                             stride,
                             use_se=self.use_se),
                    _bn(out_channel)
                ])
        self.add = P.Add()
Example #20
 def test_SequentialCell_init(self):
     m = nn.SequentialCell()
     assert not m
Example #21
 def test_SequentialCell_init(self):
     m = nn.SequentialCell()
     assert type(m).__name__ == 'SequentialCell'
Example #22
 def test_delitem2(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     del m[:]
     assert not m
Example #23
 def test_SequentialCell_init3(self):
     m = nn.SequentialCell([conv2, avg_pool])
     assert len(m) == 2
Example #24
 def test_construct(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     m.construct(Tensor(np.ones([1, 3, 16, 50], dtype=np.float32)))
Example #25
 def test_getitem2(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     assert len(m[0:2]) == 2
     assert m[:2][1] == avg_pool
Example #26
    def __init__(self,
                 num_in,
                 num_mid,
                 num_out,
                 kernel_size,
                 stride=1,
                 act_type='relu',
                 use_se=False,
                 use_res_connect=True,
                 last_relu=False):
        super(GhostBottleneck, self).__init__()
        self.ghost1 = GhostModule(num_in,
                                  num_mid,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  act_type=act_type)

        self.use_res_connect = use_res_connect
        self.last_relu = last_relu
        self.use_dw = stride > 1
        self.dw = None
        if self.use_dw:
            self.dw = ConvBNReLU(num_mid,
                                 num_mid,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 act_type=act_type,
                                 groups=num_mid,
                                 use_act=False)

        self.use_se = use_se
        if use_se:
            self.se = SE(num_mid)

        self.ghost2 = GhostModule(num_mid,
                                  num_out,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  act_type=act_type,
                                  use_act=False)
        self.relu = nn.ReLU()
        if self.use_res_connect:
            self.down_sample = False
            if num_in != num_out or stride != 1:
                self.down_sample = True
            self.shortcut = None
            if self.down_sample:
                self.shortcut = nn.SequentialCell([
                    ConvBNReLU(num_in,
                               num_in,
                               kernel_size=kernel_size,
                               stride=stride,
                               groups=num_in,
                               use_act=False),
                    ConvBNReLU(num_in,
                               num_out,
                               kernel_size=1,
                               stride=1,
                               groups=1,
                               use_act=False),
                ])
            self.add = P.TensorAdd()
Example #27
 def test_setitem2(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     with pytest.raises(TypeError):
         m[1.0] = conv2
Example #28
    def evaluate(self, explainer, inputs, targets, saliency=None):
        """
        Evaluate robustness on a single sample.

        Note:
            Currently only a single sample (:math:`N=1`) per call is supported.

        Args:
            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
                If `targets` is a 1D tensor, its length should be the same as `inputs`.
            saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
                If it is None, the passed `explainer` will generate the saliency map with `inputs` and `targets` and
                continue the evaluation. Default: None.

        Returns:
            numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`.

        Raises:
            ValueError: If batch_size is larger than 1.

        Examples:
            >>> import numpy as np
            >>> import mindspore as ms
            >>> from mindspore.explainer.explanation import Gradient
            >>> from mindspore.explainer.benchmark import Robustness
            >>> from mindspore.train.serialization import load_checkpoint, load_param_into_net
            >>> # prepare your network and load the trained checkpoint file, e.g., resnet50.
            >>> network = resnet50(10)
            >>> param_dict = load_checkpoint("resnet50.ckpt")
            >>> load_param_into_net(network, param_dict)
            >>> # prepare your explainer to be evaluated, e.g., Gradient.
            >>> gradient = Gradient(network)
            >>> input_x = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
            >>> target_label = ms.Tensor([0], ms.int32)
            >>> robustness = Robustness(num_labels=10)
            >>> res = robustness.evaluate(gradient, input_x, target_label)
        """

        self._check_evaluate_param(explainer, inputs, targets, saliency)
        if inputs.shape[0] > 1:
            raise ValueError('Robustness only supports one sample at a time, but received {}'.format(inputs.shape[0]))

        inputs_np = inputs.asnumpy()
        if isinstance(targets, int):
            targets = ms.Tensor([targets], ms.int32)
        if saliency is None:
            saliency = explainer(inputs, targets)
        saliency_np = saliency.asnumpy()

        norm = np.sqrt(np.sum(np.square(saliency_np), axis=tuple(range(1, len(saliency_np.shape)))))
        if (norm == 0).any():
            log.warning('Saliency norm equals 0; robustness currently returns NaN for zero-norm saliency.')
            norm[norm == 0] = np.nan

        model = nn.SequentialCell([explainer.model, self._activation_fn])
        original_outputs = model(inputs).asnumpy()
        sensitivities = []
        for _ in range(self._num_perturbations):
            perturbations = []
            for j, sample in enumerate(inputs_np):
                perturbation_on_single_sample = self._perturb_with_threshold(model,
                                                                             np.expand_dims(sample, axis=0),
                                                                             original_outputs[j])
                perturbations.append(perturbation_on_single_sample)
            perturbations = np.vstack(perturbations)
            perturbations_saliency = explainer(ms.Tensor(perturbations, ms.float32), targets).asnumpy()
            sensitivity = np.sum((perturbations_saliency - saliency_np) ** 2,
                                 axis=tuple(range(1, len(saliency_np.shape))))
            sensitivities.append(sensitivity)
        sensitivities = np.stack(sensitivities, axis=-1)
        max_sensitivity = np.max(sensitivities, axis=1) / norm
        robustness_res = 1 / np.exp(max_sensitivity)
        return robustness_res
Example #29
 def test_delitem2(self):
     m = nn.SequentialCell(
         OrderedDict([('cov2d', conv2), ('avg_pool', avg_pool)]))
     del m[:]
     assert type(m).__name__ == 'SequentialCell'
Example #30
def _conv_bn(in_channel, out_channel, ksize, stride=1):
    """Get a conv2d batchnorm and relu layer."""
    return nn.SequentialCell([
        nn.Conv2d(in_channel, out_channel, kernel_size=ksize, stride=stride),
        nn.BatchNorm2d(out_channel)
    ])
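
A quick usage sketch for this helper (input shape assumed):

import numpy as np
import mindspore as ms

block = _conv_bn(3, 16, ksize=3, stride=2)
x = ms.Tensor(np.ones((1, 3, 32, 32)), ms.float32)
print(block(x).shape)  # (1, 16, 16, 16): nn.Conv2d defaults to pad_mode='same'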