Code example #1
""" test_lr_schedule """
import numpy as np

from mindspore import Parameter, ParameterTuple, Tensor
from mindspore.nn import Cell
from mindspore.nn.optim import Optimizer
from mindspore.ops.operations import BiasAdd, MatMul
import mindspore.ops.composite as C

grad_by_list = C.GradOperation('get_by_list', get_by_list=True)


class Net(Cell):
    """ Net definition """
    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight")
        self.bias = Parameter(Tensor(np.ones([10])), name="bias")
        self.matmul = MatMul()
        self.biasAdd = BiasAdd()

    def construct(self, x):
        x = self.biasAdd(self.matmul(x, self.weight), self.bias)
        return x
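grad_by_list is defined above but the excerpt ends before it is used. Below is a minimal, self-contained sketch of the usual pattern, reusing only the imports already shown; TinyNet and GradByList are illustrative names, not part of the original file.

class TinyNet(Cell):
    def __init__(self):
        super(TinyNet, self).__init__()
        self.w = Parameter(Tensor(np.array([2.0], np.float32)), name="w")

    def construct(self, x):
        return x * self.w


class GradByList(Cell):
    def __init__(self, net):
        super(GradByList, self).__init__()
        self.net = net
        self.weights = ParameterTuple(net.trainable_params())

    def construct(self, x):
        # gradients w.r.t. every parameter in self.weights
        return grad_by_list(self.net, self.weights)(x)


x = Tensor(np.array([3.0], np.float32))
dw = GradByList(TinyNet())(x)[0]  # d(x * w)/dw == x == 3.0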
Code example #2
""" test implicit conversion """
import numpy as np
import pytest

from mindspore import Tensor, nn, context, Parameter
from mindspore import dtype as mstype
from mindspore.ops import composite as C

grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


def test_user_define_bprop_check_ok():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.grad = Tensor(
                np.array([[1.1, 2.2, 3.3], [2.0, 3.0, 4.0]], dtype=np.float32))

        def construct(self, x):
            ret = x * 2
            return ret

        def bprop(self, x, out, dout):
            return (self.grad * 3, )
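The test function is truncated after the bprop definition. A plausible remainder that drives the custom bprop through the grad_all_with_sens defined above (input values are illustrative):

    net = Net()
    x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32))
    sens = Tensor(np.ones((2, 3), dtype=np.float32))
    grads = grad_all_with_sens(net)(x, sens)
    # the user-defined bprop ignores dout and returns (self.grad * 3,)
    assert np.allclose(grads[0].asnumpy(), net.grad.asnumpy() * 3)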
Code example #3
    def __init__(self, net):
        super(NetGrad, self).__init__()
        self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
        self.net = net
        self.weights = ParameterTuple(self.net.trainable_params())
Code example #4
# np, Tensor, context and C are used below; their imports were cut off in the excerpt
import numpy as np

from mindspore import Tensor, context
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config
context.set_context(mode=context.GRAPH_MODE)

# pylint: disable=W0613
# pylint: disable=W0231
# W0613: unused-argument
# W0231: super-init-not-called

grad = C.GradOperation('grad')


def test_multiply():
    """ test_multiply """
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    input_y = Tensor(np.array([[0.1, 0.3, -3.6], [0.4, 0.5, -3.2]]))

    mul = P.Mul()
    result = mul(input_x, input_y)
    expect = np.array([[-0.01, 0.09, -12.96], [0.16, 0.25, 10.24]])
    diff = result.asnumpy() - expect
    error = np.ones(shape=[2, 3]) * 1.0e-6
    assert np.all(diff < error)
    assert np.all(-diff < error)
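grad is created above with the default options but never exercised in the excerpt. A short sketch of the default behavior, a gradient w.r.t. the first input only (MulNet is an illustrative name; nn was not imported in the excerpt):

import mindspore.nn as nn

class MulNet(nn.Cell):
    def __init__(self):
        super(MulNet, self).__init__()
        self.mul = P.Mul()

    def construct(self, x, y):
        return self.mul(x, y)

x = Tensor(np.array([[1.0, 2.0]], np.float32))
y = Tensor(np.array([[3.0, 4.0]], np.float32))
dx = grad(MulNet())(x, y)  # default GradOperation: dz/dx == y; dy is not returned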
Code example #5
File: test_batchnorm_op.py  Project: yrpang/mindspore
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.network = network
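Only the __init__ of this wrapper survives in the excerpt. A self-contained sketch of the complete get_all + sens_param pattern, assuming the MindSpore 1.x API; SquareNet and the values are illustrative:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P

class SquareNet(nn.Cell):
    def __init__(self):
        super(SquareNet, self).__init__()
        self.square = P.Square()

    def construct(self, x):
        return self.square(x)

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.network = network

    def construct(self, x, sens):
        # one gradient per network input, scaled by the sens seed
        return self.grad(self.network)(x, sens)

x = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
sens = Tensor(np.ones(3, np.float32))
dx = Grad(SquareNet())(x, sens)[0]  # d(x^2)/dx * sens == 2 * x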
Code example #6
    def step_end(self, run_context):
        step_msseconds = (time.time() - self.step_time) * 1000
        if step_msseconds < 275:
            self.total = self.total + 1
        print(f"step time:{step_msseconds}", flush=True)

    def good_step(self):
        return self.total


random.seed(1)
np.random.seed(1)
ds.config.set_seed(1)

grad_by_list = CP.GradOperation(get_by_list=True)


def weight_variable_0(shape):
    zeros = np.zeros(shape).astype(np.float32)
    return Tensor(zeros)


def weight_variable_1(shape):
    ones = np.ones(shape).astype(np.float32)
    return Tensor(ones)


def conv3x3(in_channels, out_channels, stride=1, padding=0):
    """3x3 convolution """
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=3,
                     stride=stride,
                     padding=padding,
                     pad_mode="pad")  # call truncated in the source; tail reconstructed from the helper's name and signature
Code example #7
import mindspore as ms
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Parameter, ParameterTuple
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import ms_function
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops.functional import stop_gradient
from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.utils.bprop_util import bprop

grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
grad_all = C.GradOperation('get_all', get_all=True)


def setup_module(module):
    context.set_context(mode=context.PYNATIVE_MODE)


def stop_func(x, y):
    """ stop_func"""
    c = x * y
    c_s = x + y
    return c_s, c


def stop_test1(x, y):
    # body truncated in the source; a plausible body for a stop_gradient test
    c = x * y
    c_s = stop_gradient(c)
    return c_s
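A minimal sketch of what these tests check, continuing the names above: stop_gradient cuts a branch out of the backward pass (stop_branch and the values are illustrative; numpy was not in the excerpt's imports):

import numpy as np

def stop_branch(x, y):
    c = stop_gradient(x * y)  # this branch contributes nothing to the backward pass
    return x + y + c

dx, dy = grad_all(stop_branch)(Tensor(np.array([2.0], np.float32)),
                               Tensor(np.array([3.0], np.float32)))
# dx == dy == 1.0: only the x + y path is differentiated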
Code example #8
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.forward_net = net
        self.sens = Tensor(np.ones((2, 2), np.float32) * 5)
        self.grad_all = C.GradOperation(get_all=True)
Code example #9
    def __init__(self, network, sens_param=True, real_inputs_count=None):
        super().__init__(grad=C.GradOperation(get_all=True,
                                              sens_param=sens_param),
                         network=network,
                         real_inputs_count=real_inputs_count)
Code example #10
import numpy as np
import pytest

import mindspore as ms
import mindspore.ops.composite as C
from mindspore import context
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore import Tensor
from mindspore.common.parameter import Parameter, ParameterTuple

grad_all_list = C.GradOperation(get_all=True, get_by_list=True)
grad_by_list = C.GradOperation(get_by_list=True)

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)


def test_load_grad():
    class LoadNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')

        def construct(self, x, y):
            x = x * y * self.z
            return x

    x = Tensor(np.array([2.0], np.float32))
    y = Tensor(np.array([3.0], np.float32))
    load_net = LoadNet()
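The test body is cut off after building the net. A plausible completion with the grad_all_list defined above; with get_all and get_by_list combined, the call returns input gradients first, then parameter gradients:

    weights = ParameterTuple(load_net.trainable_params())
    input_grads, param_grads = grad_all_list(load_net, weights)(x, y)
    # input_grads == (y * z, x * z) == (3.0, 2.0); param_grads == (x * y,) == (6.0,)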
Code example #11
    def __init__(self, network):
        super(GetParamGrad, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
Code example #12
    def __init__(self, forward_net):
        super(NetSqrtGradGrad, self).__init__()
        self.forward_net = forward_net
        self.gradOps = C.GradOperation(get_all=True, sens_param=True)
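Only the wrapper's __init__ survives here. A self-contained sketch of the grad-of-grad pattern it belongs to, assuming the MindSpore 1.x API (class names and values are illustrative, not the original test's):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P

class SqrtNet(nn.Cell):
    def __init__(self):
        super(SqrtNet, self).__init__()
        self.sqrt = P.Sqrt()

    def construct(self, x):
        return self.sqrt(x)

class SqrtGrad(nn.Cell):
    """First order: d(sqrt(x))/dx, seeded with sens."""
    def __init__(self, net):
        super(SqrtGrad, self).__init__()
        self.net = net
        self.grad = C.GradOperation(sens_param=True)

    def construct(self, x, sens):
        return self.grad(self.net)(x, sens)

class SqrtGradGrad(nn.Cell):
    """Second order: differentiates the first-order net again."""
    def __init__(self, forward_net):
        super(SqrtGradGrad, self).__init__()
        self.forward_net = forward_net
        self.gradOps = C.GradOperation(get_all=True, sens_param=True)

    def construct(self, x, sens, dout):
        return self.gradOps(self.forward_net)(x, sens, dout)

x = Tensor(np.array([4.0], np.float32))
one = Tensor(np.array([1.0], np.float32))
dxx, dsens = SqrtGradGrad(SqrtGrad(SqrtNet()))(x, one, one)
# d/dx of (0.5 * x**-0.5) == -0.25 * x**-1.5 == -1/32 at x == 4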
Code example #13
    def __init__(self, net):
        super(BackwardNet, self).__init__(auto_prefix=False)
        self.forward_net = net
        self.grad = C.GradOperation(get_all=True)
Code example #14

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss


grad_all = C.GradOperation('get_all', get_all=True)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
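The excerpt stops inside GradWrap. A sketch of its usual completion in these tests, matching the three-input signature of NetWithLoss above and the grad_all defined at the top:

        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return grad_all(self.network)(x, y, b)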
Code example #15
import numpy as np

import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
from mindspore.ops import composite as C
from mindspore.ops.operations import Minimum

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad = C.GradOperation(get_all=True, sens_param=True)


class MinNetMe(Cell):
    def __init__(self):
        super(MinNetMe, self).__init__()
        self.min = Minimum()

    def construct(self, inputA, inputB):
        x = self.min(inputA, inputB)
        return x


class GradWrap(Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
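The class is truncated here. A sketch of the usual remainder, wired to the grad(get_all=True, sens_param=True) defined above and MinNetMe's two-input signature:

        self.network = network

    def construct(self, inputA, inputB, sens):
        return grad(self.network)(inputA, inputB, sens)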
Code example #16
    def __init__(self, network):
        super(Grad, self).__init__()
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
Code example #17
File: test_math_ops.py  Project: zuoshou030/mindspore
# np, Tensor, context and C are used below; their imports were cut off in the excerpt
import numpy as np

from mindspore import Tensor, context
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config
context.set_context(mode=context.GRAPH_MODE)

# pylint: disable=W0613
# pylint: disable=W0231
# W0613: unused-argument
# W0231: super-init-not-called

grad = C.GradOperation()


def test_multiply():
    """ test_multiply """
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    input_y = Tensor(np.array([[0.1, 0.3, -3.6], [0.4, 0.5, -3.2]]))

    mul = P.Mul()
    result = mul(input_x, input_y)
    expect = np.array([[-0.01, 0.09, -12.96], [0.16, 0.25, 10.24]])
    diff = result.asnumpy() - expect
    error = np.ones(shape=[2, 3]) * 1.0e-6
    assert np.all(diff < error)
    assert np.all(-diff < error)
Code example #18
File: test_nn_ops.py  Project: xjg23/mindspore
    def __init__(self, network):
        super(FusedBatchNormGrad, self).__init__()
        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
        self.network = network
Code example #19
    def __init__(self, network):
        super(GradData, self).__init__()
        self.grad = C.GradOperation(name="get_all",
                                    get_all=True,
                                    sens_param=False)
        self.network = network
Code example #20
    def __init__(self, net):
        super(BackwardNetReplaceBreak, self).__init__(auto_prefix=False)
        self.forward_net = net
        self.grad = C.GradOperation()
Code example #21
    def construct(self, x, label):
        weights = self.weights
        return C.GradOperation('get_by_list', get_by_list=True)(self.network,
                                                                weights)(x, label)
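Creating the GradOperation inline on every construct call works, but most examples in this collection cache it in __init__. A hedged equivalent (the surrounding class is not shown, so the names here are illustrative):

    def __init__(self, network, weights):
        super().__init__()
        self.network = network
        self.weights = weights
        self.grad_by_list = C.GradOperation('get_by_list', get_by_list=True)

    def construct(self, x, label):
        return self.grad_by_list(self.network, self.weights)(x, label)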
Code example #22
import pytest

import mindspore as ms
from mindspore import Tensor
from mindspore import context
from mindspore import nn
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common import ms_function

context.set_context(mode=context.GRAPH_MODE)

grad_by_list = C.GradOperation(get_by_list=True)
grad_all = C.GradOperation(get_all=True)
grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


def cond_data_test(x_init, y_init):
    class Net(nn.Cell):
        def __init__(self):
            """"""
            super(Net, self).__init__()
            self.square = P.Square()
            self.add = P.TensorAdd()
            self.value = Tensor(3, dtype=ms.float32)
            self.switch = P.GeSwitch()
            self.merge = P.Merge()
            self.less = P.Less()
Code example #23
    def __init__(self,
                 network,
                 sens=1024.0,
                 host_device_mix=False,
                 parameter_server=False,
                 sparse=False):
        super(TrainStepWrap, self).__init__()
        parallel_mode = context.get_auto_parallel_context("parallel_mode")
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        self.network = network
        self.network.set_train()
        self.trainable_params = network.trainable_params()
        weights_w = []
        weights_d = []
        for params in self.trainable_params:
            if 'wide' in params.name:
                weights_w.append(params)
            else:
                weights_d.append(params)
        self.weights_w = ParameterTuple(weights_w)
        self.weights_d = ParameterTuple(weights_d)

        if (sparse and is_auto_parallel) or parameter_server:
            self.optimizer_d = LazyAdam(self.weights_d,
                                        learning_rate=3.5e-4,
                                        eps=1e-8,
                                        loss_scale=sens)
            self.optimizer_w = FTRL(learning_rate=5e-2,
                                    params=self.weights_w,
                                    l1=1e-8,
                                    l2=1e-8,
                                    initial_accum=1.0,
                                    loss_scale=sens)
            if host_device_mix or parameter_server:
                self.optimizer_w.target = "CPU"
                self.optimizer_d.target = "CPU"
        else:
            self.optimizer_d = Adam(self.weights_d,
                                    learning_rate=3.5e-4,
                                    eps=1e-8,
                                    loss_scale=sens)
            self.optimizer_w = FTRL(learning_rate=5e-2,
                                    params=self.weights_w,
                                    l1=1e-8,
                                    l2=1e-8,
                                    initial_accum=1.0,
                                    loss_scale=sens)
        self.hyper_map = C.HyperMap()
        self.grad_w = C.GradOperation(get_by_list=True, sens_param=True)
        self.grad_d = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.loss_net_w = IthOutputCell(network, output_index=0)
        self.loss_net_d = IthOutputCell(network, output_index=1)
        self.loss_net_w.set_grad()
        self.loss_net_d.set_grad()

        self.reducer_flag = False
        self.grad_reducer_w = None
        self.grad_reducer_d = None
        self.reducer_flag = parallel_mode in (ParallelMode.DATA_PARALLEL,
                                              ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            degree = context.get_auto_parallel_context("device_num")
            self.grad_reducer_w = DistributedGradReducer(
                self.optimizer_w.parameters, mean, degree)
            self.grad_reducer_d = DistributedGradReducer(
                self.optimizer_d.parameters, mean, degree)
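The matching construct is not part of the excerpt. In this wide-and-deep style wrapper it typically fills one sens tensor per loss head from the loss scale and runs one backward pass per parameter group. The sketch below assumes P and F are mindspore.ops operations/functional, and the input names (batch_ids, batch_wts, label) are illustrative:

    def construct(self, batch_ids, batch_wts, label):
        loss_w, loss_d = self.network(batch_ids, batch_wts, label)
        sens_w = P.Fill()(P.DType()(loss_w), P.Shape()(loss_w), self.sens)
        sens_d = P.Fill()(P.DType()(loss_d), P.Shape()(loss_d), self.sens)
        grads_w = self.grad_w(self.loss_net_w, self.weights_w)(
            batch_ids, batch_wts, label, sens_w)
        grads_d = self.grad_d(self.loss_net_d, self.weights_d)(
            batch_ids, batch_wts, label, sens_d)
        if self.reducer_flag:
            grads_w = self.grad_reducer_w(grads_w)
            grads_d = self.grad_reducer_d(grads_d)
        return (F.depend(loss_w, self.optimizer_w(grads_w)),
                F.depend(loss_d, self.optimizer_d(grads_d)))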
Code example #24

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss

grad_all = C.GradOperation(get_all=True)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
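The excerpt is cut off inside GradWrap. A sketch of the standard remainder, using the grad_all defined above and the two-input signature of NetWithLoss:

        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y):
        return grad_all(self.network)(x, y)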
Code example #25
""" test_tensor_slice """
import numpy as np
import pytest

from mindspore import Tensor, Parameter
from mindspore import context
from mindspore import dtype as mstype
from mindspore.nn import Cell
from mindspore.common.parameter import ParameterTuple
from mindspore.ops import composite as C

grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens',
                                         get_by_list=True,
                                         sens_param=True)


def setup_module():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class NetWorkSlicePositive(Cell):
    def __init__(self):
        super(NetWorkSlicePositive, self).__init__()
        self.tensor_ret0 = Tensor(np.ones([1, 2, 3], np.int32))
        self.tensor_ret1 = Tensor(np.ones([4, 8, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))
        self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))
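The class above is truncated, but the grad_by_list_with_sens defined at the top of this example can be shown on its own. A minimal sketch of the get_by_list + sens_param combination, reusing the example's imports; ScaleNet and the values are illustrative:

class ScaleNet(Cell):
    def __init__(self):
        super(ScaleNet, self).__init__()
        self.w = Parameter(Tensor(np.array([2.0], np.float32)), name="w")

    def construct(self, x):
        return x * self.w


net = ScaleNet()
weights = ParameterTuple(net.trainable_params())
x = Tensor(np.array([3.0], np.float32))
sens = Tensor(np.array([10.0], np.float32))  # scales every parameter gradient
dw = grad_by_list_with_sens(net, weights)(x, sens)[0]  # d(x * w)/dw * sens == 30.0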