Example #1
def vm_impl(x):
    x = x.asnumpy()
    out = vm.Concat(x, self.axis)
    return Tensor(out)
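The vm_impl fragments in this example and in Examples #8 through #14 and #30 are closures returned by getter functions registered per primitive; self is the primitive instance whose attributes (such as self.axis) the closure reads. Below is a minimal sketch of the assumed surrounding context; the decorator pattern and the import paths for vm_impl_getters and the numpy-backed vm helpers are assumptions and may differ between MindSpore versions.

from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
# Assumed locations of the registry and of the numpy-backed helpers.
from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
from tests.vm_impl.vm_interface import vm


@vm_impl_getters.register(P.Concat)
def vm_impl_concat(self):
    """Return a numpy-backed implementation for Concat (sketch)."""
    def vm_impl(x):
        x = x.asnumpy()
        out = vm.Concat(x, self.axis)
        return Tensor(out)
    return vm_impl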
Example #2
def test_simple_if():
    # With c1 = 2 and c2 = 14 (defined in Example #25), the x < y branch
    # runs: 2 + 1 + 3 = 6.
    output = simple_if(c1, c2, c3)
    expect = Tensor([6], mstype.int32)
    assert output == expect
Example #3
def test_simple_while():
    output = simple_while(c1, c2, c3)
    expect = Tensor([21], mstype.int32)
    assert output == expect
Example #4
def __init__(self):
    super(MulAddWithParam, self).__init__()
    self.mul_add = MulAdd()
    self.param = Parameter(Tensor(np.array([[3, 2]], np.float32)), 'param')
Example #5
def weight_variable_1(shape):
    ones = np.ones(shape).astype(np.float32)
    return Tensor(ones)
Example #6
def test_tensor_sub(x, y):
    x = Tensor(x)
    y = Tensor(y)
    z = x - y
    return z
Example #7
    def _init_group_params(self, parameters, learning_rate, weight_decay):
        """Init learning rate or weight decay in group params."""
        origin_dynamic_lr = self.dynamic_lr
        self._parse_group_params(parameters, learning_rate)
        if self.dynamic_lr and not origin_dynamic_lr:
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')

        params_store = []
        for group_param in parameters:
            if 'order_params' in group_param.keys():
                ordered_parameters = group_param['order_params']
                continue

            self.group_params += group_param['params']
            if 'lr' in group_param.keys():
                params_dynamic_lr = isinstance(group_param['lr'],
                                               (Iterable, Tensor))
                if self.dynamic_lr and not params_dynamic_lr:
                    lr = Tensor(
                        np.array([group_param['lr']] *
                                 self.dynamic_lr_length).astype(np.float32))
                else:
                    lr = self._get_single_lr(group_param['lr'])
            else:
                if self.dynamic_lr and not origin_dynamic_lr:
                    lr = Tensor(
                        np.array([self.scalar_lr] *
                                 self.dynamic_lr_length).astype(np.float32))
                else:
                    lr = learning_rate

            if 'weight_decay' in group_param.keys():
                validator.check_float_legal_value('weight_decay',
                                                  group_param['weight_decay'],
                                                  None)
                validator.check_number_range('weight_decay',
                                             group_param['weight_decay'], 0.0,
                                             float("inf"), Rel.INC_LEFT,
                                             self.cls_name)
                weight_decay_ = group_param['weight_decay'] * self.loss_scale
            else:
                weight_decay_ = weight_decay * self.loss_scale

            for key in group_param.keys():
                if key not in ('params', 'lr', 'weight_decay'):
                    logger.warning(
                        f"The optimizer cannot parse '{key}' when setting parameter groups."
                    )

            for param in group_param['params']:
                validator.check_value_type("parameter", param, [Parameter],
                                           self.cls_name)
                if param.name in params_store:
                    raise RuntimeError(
                        f"The {param.name} parameter has appeared in parameter groups."
                    )

                params_store.append(param.name)
                self.group_lr.append(Parameter(lr, name="lr_" + param.name))
                self.group_weight_decay.append(weight_decay_)

        if self.is_group_params_ordered:
            self._order_and_adjust_group_params(ordered_parameters,
                                                learning_rate, weight_decay)
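For context, _init_group_params runs when an optimizer is constructed with a list of parameter-group dictionaries rather than a flat parameter list. A minimal usage sketch follows; the network net and the conv_params/no_conv_params split are placeholders, not part of the code above.

import mindspore.nn as nn

# Hypothetical grouping of a network's trainable parameters.
conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
no_conv_params = [p for p in net.trainable_params() if 'conv' not in p.name]

group_params = [{'params': conv_params, 'lr': 0.05},
                {'params': no_conv_params, 'weight_decay': 0.01},
                {'order_params': net.trainable_params()}]
# Each group may override 'lr' and 'weight_decay'; any other key triggers the
# warning in the code above, and 'order_params' fixes the parameter order.
optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9)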
Example #8
def vm_impl(x):
    x = x.asnumpy()
    out = vm.squeeze(x, self.axis)
    return Tensor(out)
Example #9
def vm_impl(x, perm=None):
    x = x.asnumpy()
    if perm is None:
        perm = [i for i in reversed(range(len(x.shape)))]
    out = vm.transpose(x, perm)
    return Tensor(out)
Example #10
def vm_impl(x, axis):
    if isinstance(x, float):
        x = Tensor(np.array([x]))
    x = x.asnumpy()
    out = vm.expand_dims(x, axis)
    return Tensor(out)
Example #11
def vm_impl(x, shp):
    x = x.asnumpy()
    out = vm.reshape(x, shp)
    return Tensor(out)
Example #12
def vm_impl(x):
    return Tensor(np.zeros_like(x.asnumpy()))
Example #13
def vm_impl(x):
    x = x.asnumpy()
    return Tensor(x * x)
Example #14
def vm_impl(x, begin, size):
    x = x.asnumpy()
    begin = begin.asnumpy()
    size = size.asnumpy()
    out = vm.Slice(x, begin, size)
    return Tensor(out)
Example #15
def test_tensor_set_type(x):
    t = Tensor(x)
    t.set_dtype(ms.float32)
    return t
Example #16
def _weight_variable(shape, factor=0.01):
    init_value = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(init_value)
Example #17
def test_tensor_mul(x, y):
    x = Tensor(x)
    y = Tensor(y)
    z = x * y

    return z
Example #18
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(int(224 * 224 * 64 / 16), num_classes)

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x


_input_x = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
_cur_dir = os.path.dirname(os.path.realpath(__file__))


def setup_module():
    import shutil
    if os.path.exists('./test_files'):
        shutil.rmtree('./test_files')


def test_save_graph():
    """ test_exec_save_graph """

    class Net1(nn.Cell):
        def __init__(self):
            super(Net1, self).__init__()
Example #19
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        Validator.check_value_type("kernel_size", kernel_size, [int],
                                   self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE,
                                self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape',
                                    len(weight_init_shape), 3, Rel.EQ,
                                    self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv1dTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              stride,
                                              pad_mode,
                                              padding,
                                              dilation,
                                              group,
                                              has_bias,
                                              weight_init,
                                              bias_init,
                                              transposed=True)
        self.padding = (0, 0, padding, padding)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed ' +
                str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name='bias')

        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=self.padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
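A short usage sketch for the layer above; the input shape is arbitrary, and with the default pad_mode='same' and stride=1 the length dimension is expected to be preserved.

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore.common.tensor import Tensor

net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
x = Tensor(np.ones([1, 3, 50]), mindspore.float32)
# Expected output shape: (1, 64, 50), since pad_mode defaults to 'same'.
print(net(x).shape)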
Example #20
def test_export():
    net = MYNET()
    input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
    with pytest.raises(ValueError):
        export(net, input_data, file_name="./me_export.pb", file_format="AIR")
Example #21
def test_grad_one_input_bprop():
    net = OneInputBprop()
    input = Tensor(np.ones([2, 2]).astype(np.float32))
    grad = C.grad_all(net)(input)
    assert (grad[0].asnumpy() == np.array([5, 5]).astype(np.float32)).all()
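OneInputBprop is not shown here; a plausible definition consistent with the asserted gradient of 5 is a Cell with a hand-written bprop, sketched below. This is an assumption for illustration, not the verbatim test class.

import mindspore.nn as nn
from mindspore.ops import operations as P

class OneInputBprop(nn.Cell):
    def __init__(self):
        super(OneInputBprop, self).__init__()
        self.op = P.ReLU()

    def construct(self, x):
        return self.op(x)

    def bprop(self, x, out, dout):
        # Custom backward rule: return 5 * x, so an all-ones input
        # produces a gradient of 5 everywhere.
        return (5 * x,)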
Example #22
def test_mindir_export():
    net = MYNET()
    input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
    export(net, input_data, file_name="./me_binary_export.mindir", file_format="MINDIR")
Example #23
def weight_variable_0(shape):
    zeros = np.zeros(shape).astype(np.float32)
    return Tensor(zeros)
Example #24
def test_print():
    print_net = PrintNet()
    int8 = Tensor(np.random.randint(100, size=(10, 10), dtype="int8"))
    uint8 = Tensor(np.random.randint(100, size=(10, 10), dtype="uint8"))
    int16 = Tensor(np.random.randint(100, size=(10, 10), dtype="int16"))
    uint16 = Tensor(np.random.randint(100, size=(10, 10), dtype="uint16"))
    int32 = Tensor(np.random.randint(100, size=(10, 10), dtype="int32"))
    uint32 = Tensor(np.random.randint(100, size=(10, 10), dtype="uint32"))
    int64 = Tensor(np.random.randint(100, size=(10, 10), dtype="int64"))
    uint64 = Tensor(np.random.randint(100, size=(10, 10), dtype="uint64"))
    float16 = Tensor(np.random.rand(224, 224).astype(np.float16))
    float32 = Tensor(np.random.rand(224, 224).astype(np.float32))
    float64 = Tensor(np.random.rand(224, 224).astype(np.float64))
    bool_ = Tensor(np.arange(-10, 10, 2).astype(np.bool_))
    scale1 = Tensor(np.array(1))
    scale2 = Tensor(np.array(0.1))
    print_net(int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32, float64, bool_, scale1,
              scale2)
Example #25
""" test_multigraph_sink """
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common import ms_function
from mindspore.ops import operations as P


def setup_module(module):
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


c1 = Tensor([2], mstype.int32)
c2 = Tensor([14], mstype.int32)
c3 = Tensor([1], mstype.int32)
c4 = Tensor([0], mstype.int32)
c5 = Tensor([14], mstype.int32)


@ms_function
def simple_if(x, y, z):
    if x < y:
        x = x + 1
    else:
        x = x + 2
    x = x + 3
    return x
Example #26
def _range_op(start, limit, delta, dtype):
    """helper function for Grad TopK"""
    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
    return output_tensor
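For instance, assuming mstype refers to mindspore.common.dtype, _range_op(0, 6, 2, mstype.int32) would return Tensor([0, 2, 4], mstype.int32).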
Example #27
def test_if_in_if():
    output = if_in_if(c1, c2, c3)
    expect = Tensor([7], mstype.int32)
    assert output == expect
Example #28
def test_tensor_add(x, y):
    t1 = Tensor(np.ones(x))
    t2 = Tensor(np.zeros(y), ms.float32)
    return t1 + t2
Example #29
def test_while_by_while():
    output = while_by_while(c1, c2, c3)
    expect = Tensor([28], mstype.int32)
    assert output == expect
Example #30
def vm_impl(x, axis):
    x = x.asnumpy()
    out = vm.any(x, axis, self.keep_dims)
    return Tensor(out)