def __init__(self):
    super(ParameterNet, self).__init__()
    self.para_xavier_uniform = Parameter(init.initializer(
        'xavier_uniform', parameter_shape), name="xavier_uniform")
    self.para_he_uniform = Parameter(init.initializer(
        'he_uniform', parameter_shape), name="he_uniform")
    self.para_xavier_uniform2 = Parameter(init.initializer(
        init.XavierUniform(), parameter_shape), name="xavier_uniform2")
    self.para_he_uniform2 = Parameter(init.initializer(
        init.HeUniform(), parameter_shape), name="he_uniform2")
    self.para_truncated_normal = Parameter(init.initializer(
        init.TruncatedNormal(), parameter_shape), name="truncated_normal")
    self.para_normal = Parameter(init.initializer(init.Normal(), parameter_shape),
                                 name="normal")
    self.para_uniform = Parameter(init.initializer(init.Uniform(), parameter_shape),
                                  name="uniform")
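# A minimal, self-contained sketch (the shape below is an assumption for
# illustration; the real test defines its own `parameter_shape`) of the two ways
# the snippet above drives mindspore.common.initializer: by string alias and by
# initializer class instance.
import mindspore.common.initializer as init
from mindspore import Parameter

parameter_shape = (2, 3)  # hypothetical shape, for illustration only
p_by_alias = Parameter(init.initializer('xavier_uniform', parameter_shape), name="alias")
p_by_class = Parameter(init.initializer(init.XavierUniform(), parameter_shape), name="cls")
print(p_by_alias, p_by_class)  # both register a (2, 3) Xavier-uniform-initialized parameter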
def test_collect_histogram_from_regular(self, mock_add_value, histogram_regular, expected_names):
    """Test that histogram collection honors the histogram_regular filter."""
    mock_add_value.side_effect = add_value
    cb_params = _InternalCallbackParam()
    parameters = [
        Parameter(Tensor(1), 'conv1.weight1'),
        Parameter(Tensor(2), 'conv2.weight2'),
        Parameter(Tensor(3), 'conv1.bias1'),
        Parameter(Tensor(4), 'conv3.bias'),
        Parameter(Tensor(5), 'conv5.bias'),
        Parameter(Tensor(6), 'conv6.bias'),
    ]
    cb_params.optimizer = Optimizer(learning_rate=0.1, parameters=parameters)
    with SummaryCollector(tempfile.mkdtemp(dir=self.base_summary_dir)) as summary_collector:
        summary_collector._collect_specified_data['histogram_regular'] = histogram_regular
        summary_collector._collect_histogram(cb_params)
    result = get_value()
    assert PluginEnum.HISTOGRAM.value == result[0][0]
    assert expected_names == [data[1] for data in result]
def __init__(self, mul_weight, strategy1=None, strategy2=None):
    super().__init__()
    self.mul = P.Mul().set_strategy(strategy1)
    self.neg = P.Neg().set_strategy(strategy2)
    self.mul_weight = Parameter(mul_weight, "w1")
def __init__(self, strategy1, weight):
    super().__init__()
    self.weight = Parameter(weight, "w1")
    self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)
def __init__(self):
    super(Net, self).__init__()
    self.AssignAdd = P.AssignAdd()
    self.inputdata = Parameter(initializer(1, [1], ms.int64), name="global_step")
    print("inputdata: ", self.inputdata)
def __init__(self, strategy1, strategy2, weight):
    super().__init__()
    self.weight = Parameter(weight, "w1")
    self.matmul = P.MatMul(transpose_a=False, transpose_b=True).set_strategy(strategy1)
    self.relu = P.ReLU().set_strategy(strategy2)
def __init__(self, mul_weight, strategy1=None, strategy2=None):
    super().__init__()
    self.expand_dims = P.ExpandDims().shard(strategy1)
    self.mul = P.Mul().shard(strategy2)
    self.mul_weight = Parameter(mul_weight, "w1")
def __init__(self):
    super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()
    self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
    self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32),
                           name="x")
def __init__(self, mul_weight, strategy1=None, strategy2=None):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.repeat = P.RepeatElements(rep=2, axis=1).shard(strategy2)
    self.mul_weight = Parameter(mul_weight, "w1")
def __init__(self):
    super(Net, self).__init__()
    self.apply_adagrad = P.ApplyAdagrad()
    self.var = Parameter(Tensor(var_np), name="var")
    self.accum = Parameter(Tensor(accum_np), name="accum")
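# The Net above reads the module-level arrays `var_np` and `accum_np`; the
# definitions below are plausible stand-ins (shapes and values are assumptions,
# not taken from the source). Per step, ApplyAdagrad performs:
#   accum += grad * grad;  var -= lr * grad / sqrt(accum)
import numpy as np

var_np = np.random.rand(3, 3).astype(np.float32)    # variable state to be updated
accum_np = np.random.rand(3, 3).astype(np.float32)  # Adagrad accumulator state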
def __init__(self, weight, strategy1=None, strategy2=None, axis=0):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.concat = P.Concat(axis=axis).shard(strategy2)
    self.weight = Parameter(weight, "w")
def __init__(self):
    super(Net, self).__init__()
    self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight")
    self.bias = Parameter(Tensor(np.ones([10])), name="bias")
    self.matmul = MatMul()
    self.biasAdd = BiasAdd()
def __init__(self, lock, inputx, indices, updates):
    super(TestScatterAddNet, self).__init__()
    self.scatter_add = P.ScatterAdd(use_locking=lock)
    self.inputx = Parameter(inputx, name="inputx")
    self.indices = Parameter(indices, name="indices")
    self.updates = Parameter(updates, name="updates")
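# Self-contained sketch (fixtures are illustrative, not the test's actual inputs)
# of what the ScatterAdd op in the snippet above computes: each row of `updates`
# is added into `inputx` at the row selected by `indices`.
import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor, Parameter, nn

class ScatterAddDemo(nn.Cell):
    def __init__(self):
        super(ScatterAddDemo, self).__init__()
        self.scatter_add = P.ScatterAdd(use_locking=False)
        self.inputx = Parameter(Tensor(np.zeros((2, 3), np.float32)), name="inputx")

    def construct(self, indices, updates):
        return self.scatter_add(self.inputx, indices, updates)

indices = Tensor(np.array([[0, 1], [0, 1]], np.int32))
updates = Tensor(np.ones((2, 2, 3), np.float32))
print(ScatterAddDemo()(indices, updates))  # each row receives two unit updates -> all 2.0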
def __init__(self):
    super(ScatterNdUpdate3, self).__init__()
    self.scatter_nd_update = P.ScatterNdUpdate()
    self.x = Parameter(Tensor(np.zeros((4, 4, 4)), mstype.float32), name="x")
def __init__(self):
    super(ScatterNdUpdate2, self).__init__()
    self.scatter_nd_update = P.ScatterNdUpdate()
    self.x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mstype.float32), name="x")
def test_parameter_div():
    x = Parameter(Tensor(np.ones((3, 3)).astype(np.float32) * 8), name="ref")
    y = Tensor(np.ones((3, 3)).astype(np.float32) * 2)
    expect = np.ones((3, 3)).astype(np.float32) * 4
    z = x / y
    assert np.allclose(z.asnumpy(), expect)
def __init__(self):
    super(ParameterNet, self).__init__()
    self.weight = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], np.float32)),
                            name="ref")
def __init__(self, strategy1, strategy2, weight, weight2):
    super().__init__()
    self.weight = Parameter(weight, "w1", requires_grad=True)
    self.weight2 = Parameter(weight2, "w2", requires_grad=True)
    self.matmul = P.MatMul().set_strategy(strategy1)
    self.matmul2 = P.MatMul().set_strategy(strategy2)
def __init__(self):
    super(Net, self).__init__()
    self.weight = Parameter(Tensor(np.ones([4, 4, 5]), dtype=mstype.float32), "b1",
                            requires_grad=True)
def test_parameter_init_illegal():
    dat = np.array([[1, 2, 3], [2, 3, 4]])
    tensor = Tensor(dat)
    data_none = None
    data_bool = True
    data_str = "nicai"
    data_int = 3
    data_list = [1, "2", True]
    data_tuple = (1, 2, 3)

    # test data
    Parameter(tensor, name=data_str)
    Parameter(data_int, name=data_str)
    Parameter(dat, name=data_str)
    with pytest.raises(ValueError):
        Parameter(data_bool, name=data_str)

    # test name
    Parameter(tensor, name=data_none)
    with pytest.raises(ValueError):
        Parameter(tensor, name=dat)
    with pytest.raises(ValueError):
        Parameter(tensor, name=tensor)
    with pytest.raises(ValueError):
        Parameter(tensor, name=data_bool)
    with pytest.raises(ValueError):
        Parameter(tensor, name=data_int)
    with pytest.raises(ValueError):
        Parameter(tensor, name=data_list)
    with pytest.raises(ValueError):
        Parameter(tensor, name=data_tuple)

    # test requires_grad
    Parameter(tensor, name=data_str, requires_grad=data_bool)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_none)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=dat)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=tensor)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_str)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_int)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_list)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_tuple)

    # test layerwise_parallel
    Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_bool)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=dat)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=tensor)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_none)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_str)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_int)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_list)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_tuple)
def __init__(self):
    super(CustomNet, self).__init__()
    self.add = TensorAdd
    self.optimizer = Optimizer(learning_rate=1, parameters=[Parameter(Tensor(1), 'weight')])
def __init__(self, net):
    super(GradNet, self).__init__()
    self.weights = ParameterTuple(net.trainable_params())
    self.net = net
    self.sens = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), name='sens',
                          requires_grad=False)
    self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
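# Hedged sketch (the forward signature of the wrapped `net` is an assumption, shown
# here with a single Tensor input): with get_by_list=True and sens_param=True, the
# wrapper's construct typically applies the GradOperation to the net, the
# ParameterTuple, and the fixed sens, returning one gradient per trainable parameter:
#
#     def construct(self, x):
#         return self.grad(self.net, self.weights)(x, self.sens)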
def __init__(self, tensor):
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
    self.bn1 = nn.BatchNorm2d(64)
    self.weight = Parameter(tensor, name='w')
def __init__(self, net):
    super(VarNet, self).__init__()
    self.b = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b",
                       requires_grad=True)
    self.net = net
def __init__(self):
    super(AssignAddNet, self).__init__()
    self.AssignAdd = P.AssignAdd()
    self.inputdata = Parameter(initializer(1, [1], ms.float16),
                               name="KIND_AUTOCAST_SCALAR_TO_TENSOR")
    self.one = 1
def __init__(self):
    super(AddNet, self).__init__()
    self.w = Parameter(Tensor(np.ones((3, 4, 5), np.float32)), "w2", requires_grad=True)
def __init__(self, mul_weight):
    super().__init__()
    self.reshape1 = P.Reshape()
    self.reshape2 = P.Reshape()
    self.mul_weight = Parameter(mul_weight, "w1")
def __init__(self):
    super(SecondNet, self).__init__()
    self.b2 = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2",
                        requires_grad=True)
def __init__(self, mul_weight, strategy1=None, strategy2=None):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.loss = P.SigmoidCrossEntropyWithLogits().shard(strategy2)
    self.mul_weight = Parameter(mul_weight, "w1")
def __init__(self, weight2, strategy1=None, strategy2=None):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.tile = P.Tile().shard(strategy2)
    self.weight2 = Parameter(weight2, "w2")