Example #1
0
def compute_gradients(model, inputs, targets=None, weights=None):
    r"""
    Compute the gradient of output w.r.t input.

    Args:
        model (`ms.nn.Cell`): Differentiable black-box model.
        inputs (`ms.Tensor`): Input to calculate gradient and explanation.
        targets (int, optional): Target label id specifying which category to compute gradient. Default: None.
        weights (`ms.Tensor`, optional): Custom weights for computing gradients. The shape of weights should match the
            model outputs. If None is provided, an one-hot weights with one in targets positions will be used instead.
            Default: None.

    Returns:
        saliency map (ms.Tensor): Gradient back-propagated to the input.

    Raises:
        ValueError: If both `targets` and `weights` are None.
    """
    # Fail fast on bad arguments before doing any work on the inputs
    # (original code called unify_inputs first, wasting work on invalid calls).
    if targets is None and weights is None:
        raise ValueError('Must provide one of targets or weights')
    inputs = unify_inputs(inputs)
    if weights is None:
        # Derive one-hot sensitivity weights from the requested target ids,
        # sized by the model's category dimension (last axis of the output).
        targets = unify_targets(targets)
        output = model(*inputs).asnumpy()
        num_categories = output.shape[-1]
        weights = generate_one_hot(targets, num_categories)

    # sens_param=True lets the back-propagation be seeded with `weights`.
    grad_op = GradOperation(get_all=True, get_by_list=False,
                            sens_param=True)(model)
    gradients = grad_op(*inputs, weights)
    # get_all=True yields one gradient per input; return the first input's.
    return gradients[0]
 def __init__(self, network, sens_param=True, real_inputs_count=None):
     """Wrap `network` with a gradient op over both inputs and parameters.

     Args:
         network: The cell whose gradients will be taken.
         sens_param (bool): Whether a sensitivity tensor seeds the backward
             pass. Default: True.
         real_inputs_count: Forwarded to the base class unchanged.
     """
     # Build the gradient operation up front so the super() call stays flat.
     grad_fn = GradOperation(get_all=True, get_by_list=True,
                             sens_param=sens_param)
     super().__init__(grad=grad_fn, network=network, wrt_params=True,
                      real_inputs_count=real_inputs_count)
Example #3
0
 def __init__(self,
              fn: Callable,
              args: List[Any],
              delta: float = 1e-3,
              max_error: float = 1e-3,
              input_selector=None,
              output_selector=None,
              sampling_times=-1,
              reduce_output=False) -> None:
     """Gradient checker for a single operation.

     Args:
         fn (Callable): Operation under test.
         args (List[Any]): Positional arguments fed to `fn`.
         delta (float): Finite-difference step size. Default: 1e-3.
         max_error (float): Maximum tolerated gradient error. Default: 1e-3.
         input_selector: Optional selector of inputs to perturb.
         output_selector: Optional selector of outputs to check.
         sampling_times (int): Number of sampled positions; -1 means all.
         reduce_output (bool): Whether to reduce the output before checking.
     """
     # Gradients w.r.t. all inputs, seeded through the sens parameter.
     grad_op = GradOperation('grad', get_all=True, sens_param=True)
     # Backslash continuations inside parentheses were redundant and are
     # dropped; zero-arg super() matches the file's other Py3-style classes.
     super().__init__(fn, grad_op, args, delta, max_error, input_selector,
                      output_selector, sampling_times, reduce_output)
Example #4
0
 def __init__(self,
              fn: Callable,
              args: List[Any],
              delta: float = 1e-3,
              max_error: float = 1e-3,
              input_selector=None,
              output_selector=None,
              sampling_times=-1,
              reduce_output=False) -> None:
     """Gradient checker for a network's trainable parameters.

     Args:
         fn (Callable): Network (cell) under test; must expose
             `trainable_params()`.
         args (List[Any]): Positional arguments fed to `fn`.
         delta (float): Finite-difference step size. Default: 1e-3.
         max_error (float): Maximum tolerated gradient error. Default: 1e-3.
         input_selector: Optional selector of inputs to perturb.
         output_selector: Optional selector of outputs to check.
         sampling_times (int): Number of sampled positions; -1 means all.
         reduce_output (bool): Whether to reduce the output before checking.
     """
     # Gradients w.r.t. the parameter list, seeded through the sens parameter.
     grad_op = GradOperation(get_by_list=True, sens_param=True)
     self.params = ParameterTuple(fn.trainable_params())
     # Backslash continuations inside parentheses were redundant and are
     # dropped; zero-arg super() matches the file's other Py3-style classes.
     super().__init__(fn, grad_op, args, delta, max_error, input_selector,
                      output_selector, sampling_times, reduce_output)
Example #5
0
def test_sit_parser_input_parameter():
    """Feeding Parameter objects to a free function under grad raises TypeError."""
    def tensor_add(a, b):
        return P.Add()(a, b)

    param_x = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="x")
    param_y = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="y")
    grad_fn = GradOperation(get_all=True, get_by_list=False, sens_param=False)

    # Parameters are not valid positional inputs for a plain function graph.
    with pytest.raises(TypeError):
        grad_fn(tensor_add)(param_x, param_y)
Example #6
0
def test_gather_grad_graph_int32_fp16():
    """GatherD backward on GPU graph mode: float16 values, int32 indices.

    The expected gradient is the sens tensor scattered back along dim 0
    according to `index`.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    x = Tensor(np.array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]), ms.float16)
    dim = 0
    index = Tensor(np.array([[0, 1, 1, 0, 0], [1, 0, 0, 1, 1]]), ms.int32)
    grad = Tensor(
        np.array([[0.9031, 0.0890, 0.2779, 0.3198, 0.5710],
                  [0.6949, 0.8439, 0.2003, 0.6868, 0.4437]]), ms.float16)
    expect = np.array([[0.9031, 0.8439, 0.2003, 0.3198, 0.5710],
                       [0.6949, 0.0890, 0.2779, 0.6868, 0.4437]], np.float16)
    net = GatherDNet(dim)
    grad_net = GradOperation(get_all=True, sens_param=True)(net)
    output = grad_net(x, index, grad)
    error = 1e-4
    # Compare on the absolute difference: the original `diff < error` check
    # passed trivially for arbitrarily large negative deviations.
    diff = np.abs(output[0].asnumpy() - expect)
    assert np.all(diff < error)
Example #7
0
 def function(*inputs):
     """Build and run a test block for the given inputs.

     When the enclosing `grad_op` is set, a gradient net is generated and
     run; the handling of the trailing sens inputs depends on the enclosing
     `num_outputs` / `split_outputs` flags. Otherwise a plain forward net is
     built and run. All configuration (`block`, `grad_op`, `num_outputs`,
     `desc_const`, `const_first`, `add_fake_input`, `rand_func`,
     `split_outputs`, `block_generator`, `block_runner`) is captured from the
     enclosing scope.
     """
     # gradient
     if grad_op:
         if num_outputs == 0:
             # No outputs to seed: rebuild the grad op without a sens param
             # (keeping the caller's get_all / get_by_list settings).
             grad_op_ = GradOperation('grad', get_all=grad_op.get_all,
                                      get_by_list=grad_op.get_by_list, sens_param=False)
             b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const,
                                 const_first=const_first, add_fake_input=add_fake_input)
             return block_runner(b, *inputs, rand_func=rand_func)
         if num_outputs == 1:
             # Single output: the last element of `inputs` is the sens value.
             b = block_generator(block, grad_op, len(inputs) - 1, inputs[-1], desc_const=desc_const,
                                 const_first=const_first, add_fake_input=add_fake_input)
             return block_runner(b, *(inputs[:-1]), rand_func=rand_func)
         if split_outputs:
             # Multiple outputs, checked one at a time: wrap the block so each
             # output is selected individually and paired with its sens input.
             block_inputs = inputs[0:len(inputs) - num_outputs]
             sens_inputs = inputs[len(inputs) - num_outputs:]
             ret = []
             for i in range(num_outputs):
                 bi_inputs = list(block_inputs)
                 bi = get_output_cell(block, len(block_inputs), i)
                 bi = block_generator(bi, grad_op, len(bi_inputs), sens_inputs[i], desc_const=desc_const,
                                      const_first=const_first, add_fake_input=add_fake_input)
                 grads_i = block_runner(bi, *bi_inputs, rand_func=rand_func)
                 # Flatten tuple-valued gradients into the result list.
                 if isinstance(grads_i, tuple):
                     ret.extend(grads_i)
                 else:
                     ret.append(grads_i)
             return ret
         # Multiple outputs, seeded together: the trailing inputs form one
         # sens tuple passed to the generated gradient net.
         block_inputs = inputs[0:len(inputs) - num_outputs]
         sens_inputs = tuple(inputs[len(inputs) - num_outputs:])
         b = block_generator(block, grad_op, len(block_inputs), sens_inputs, desc_const=desc_const,
                             const_first=const_first, add_fake_input=add_fake_input)
         return block_runner(b, *block_inputs, rand_func=rand_func)
     # forward
     inputs_num = len(inputs)
     if add_fake_input and inputs_num == 1:
         # input is faked
         inputs_num = 0
     b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
                         add_fake_input=add_fake_input)
     return block_runner(b, *inputs, rand_func=rand_func)
Example #8
0
def test_parser_construct():
    """Calling a grandparent's construct via super(UncleNet, self) follows the MRO."""
    class ParentNet(Cell):
        def __init__(self):
            super().__init__()
            self.relu = P.ReLU()

        def construct(self, x):
            return self.relu(x)

    class UncleNet(Cell):
        def __init__(self):
            super(UncleNet, self).__init__()
            self.sigmoid = P.Sigmoid()

        def construct(self, x):
            return self.sigmoid(x)

    class Net(UncleNet, ParentNet):
        def __init__(self):
            super().__init__()
            super(UncleNet, self).__init__()

        def construct(self, x):
            # Skips UncleNet in the MRO, so ParentNet.construct (ReLU) runs.
            return super(UncleNet, self).construct(x)

    ones = np.ones([2, 3, 4, 5]).astype(np.float32)
    in_tensor = Tensor(ones)
    sens = Tensor(np.ones([2, 3, 4, 5]).astype(np.float32))

    forward_out = Net()(in_tensor)

    backward_net = GradOperation(sens_param=True)(Net())
    backward_out = backward_net(in_tensor, sens)

    # ReLU(1) == 1, and its gradient against an all-ones sens is all ones.
    assert np.allclose(ones, forward_out.asnumpy(), 0.001, 0.001)
    assert np.allclose(ones, backward_out.asnumpy(), 0.001, 0.001)
 def build_sut(self, verification_set):
     """Create compiled-block functions wired with an all-inputs gradient op."""
     op = GradOperation('grad', get_all=True, sens_param=True)
     return create_funcs(verification_set, gen_grad_net, compile_block, op)
Example #10
0
 def __init__(self, network):
     """Hold `network` and an all-inputs gradient op seeded by a sens tensor."""
     super(Grad, self).__init__()
     self.network = network
     self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
Example #11
0
 def __init__(self, network):
     """Wrap `network` for backward runs over all inputs with a sens value."""
     super(NetGatherDGrad, self).__init__()
     self.network = network
     self.grad = GradOperation(get_all=True, sens_param=True)
 def __init__(self, network):
     """Gradient w.r.t. the first input only, seeded with a sens tensor."""
     super(GradWithSens, self).__init__()
     self.network = network
     self.grad = GradOperation(name="grad", get_all=False, sens_param=True)
Example #13
0
 def __init__(self, network):
     """Wrap `network` to return gradients for every one of its inputs."""
     super(Grad, self).__init__()
     self.network = network
     self.grad = GradOperation(get_all=True)
 def build_sut(self, verification_set):
     """Build runnable blocks taking parameter gradients with uniform sens."""
     op = GradOperation('grad', get_by_list=True, sens_param=True)
     return create_funcs(verification_set, gen_grad_net, run_block, op,
                         get_uniform_with_shape)
 def __call__(self, verification_set):
     """Compile parameter-gradient nets for every case in the instance's set.

     NOTE(review): the `verification_set` argument is ignored — the body
     reads `self.verification_set` instead. Confirm whether callers rely on
     passing it before removing the parameter.
     """
     grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
     return create_funcs(self.verification_set, gen_grad_net, compile_block,
                         grad_op)
 def __call__(self):
     """Run parameter-gradient nets over self.verification_set."""
     op = GradOperation(get_by_list=True, sens_param=True)
     return create_funcs(self.verification_set, gen_grad_net, run_block, op)
 def __init__(self, network, sens_param=True, real_inputs_count=None):
     """Wrap `network` with a first-input gradient op.

     Args:
         network: The cell whose gradient will be taken.
         sens_param (bool): Whether a sensitivity tensor seeds the backward
             pass. Default: True.
         real_inputs_count: Forwarded to the base class unchanged.
     """
     grad_fn = GradOperation(sens_param=sens_param)
     super().__init__(grad=grad_fn, network=network,
                      real_inputs_count=real_inputs_count)
 def __init__(self, network):
     """Store `network` and an all-inputs gradient op with no sens input."""
     super(GradWrapWithLoss, self).__init__()
     self._network = network
     self._grad_all = GradOperation(get_all=True, sens_param=False)
Example #19
0
 def __init__(self, network):
     """Keep `network` plus an all-inputs, sens-seeded gradient op."""
     super(GradData, self).__init__()
     self.network = network
     self.grad = GradOperation(get_all=True, sens_param=True)
 def __call__(self):
     """Run all-inputs gradient blocks with uniformly-shaped sens values."""
     op = GradOperation(get_all=True, sens_param=True)
     return create_funcs(self.verification_set, gen_grad_net, run_block,
                         op, get_uniform_with_shape)