Example #1
    def __init__(self,
                 input_dim,
                 hidden_num,
                 hidden_dim,
                 output_dim,
                 mu,
                 lamb,
                 nonlinear="leaky-relu",
                 norm_prod='paths',
                 square_prod=False):

        super(BaseModel, self).__init__()
        self.input_dim = input_dim
        self.hidden_num = hidden_num
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.mu = mu
        self.lamb = lamb
        self.nonlinear = nonlinear
        self.norm_prod = norm_prod
        self.square_prod = square_prod

        self.normal = msd.Normal(dtype=mstype.float32)
        self.extra_params = []

        # initialize current adjacency matrix
        self.adjacency = msnp.ones(
            (self.input_dim, self.input_dim), dtype=mstype.float32) - msnp.eye(
                self.input_dim, dtype=mstype.float32)

        # Generate layer_list
        layer_list = [self.hidden_dim] * self.hidden_num
        layer_list.insert(0, self.input_dim)
        layer_list.append(self.output_dim)

        # Instantiate the parameters of each layer of each variable's sub-network
        tmp_weights = list()
        tmp_biases = list()
        for i, item in enumerate(layer_list[:-1]):
            in_dim = item
            out_dim = layer_list[i + 1]
            tmp_weights.append(
                Parameter(msnp.zeros((self.input_dim, out_dim, in_dim),
                                     dtype=mstype.float32),
                          requires_grad=True,
                          name='w' + str(i)))
            tmp_biases.append(
                Parameter(msnp.zeros((self.input_dim, out_dim),
                                     dtype=mstype.float32),
                          requires_grad=True,
                          name='b' + str(i)))

        self.weights = ParameterTuple(tmp_weights)
        self.biases = ParameterTuple(tmp_biases)

        # re-initialize the parameters of the network
        self.reset_params()
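
For concreteness, a small sketch of the layer sizes and per-layer parameter shapes this constructor builds (the dimensions below are illustrative, not from the source):

# Hypothetical sizes: input_dim=3, hidden_num=2, hidden_dim=16, output_dim=1
input_dim, hidden_num, hidden_dim, output_dim = 3, 2, 16, 1
layer_list = [hidden_dim] * hidden_num   # [16, 16]
layer_list.insert(0, input_dim)          # [3, 16, 16]
layer_list.append(output_dim)            # [3, 16, 16, 1]
# Each weight i has shape (input_dim, out_dim, in_dim), each bias i (input_dim, out_dim):
#   w0: (3, 16, 3)   b0: (3, 16)
#   w1: (3, 16, 16)  b1: (3, 16)
#   w2: (3, 1, 16)   b2: (3, 1)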
Example #2
def test_zeros():
    test_case = Cases()
    for shape in test_case.all_shapes:
        for i in range(len(test_case.onp_dtypes)):
            actual = onp.zeros(shape, test_case.onp_dtypes[i])
            expected = mnp.zeros(shape, test_case.mnp_dtypes[i]).asnumpy()
            match_array(actual, expected)
        actual = onp.zeros(shape)
        expected = mnp.zeros(shape).asnumpy()
        match_array(actual, expected)
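
The `Cases` fixture and the `match_array` helper are not shown in this snippet; a minimal illustrative stand-in (only the names are kept, the contents are assumptions) could look like:

import numpy as onp
import mindspore.common.dtype as mstype

class Cases:
    def __init__(self):
        # illustrative shapes and paired dtype lists used to exercise zeros()
        self.all_shapes = [(2,), (2, 3), (1, 2, 3)]
        self.onp_dtypes = [onp.float32, onp.int32]
        self.mnp_dtypes = [mstype.float32, mstype.int32]

def match_array(actual, expected):
    # illustrative helper: assert the two arrays agree elementwise
    onp.testing.assert_allclose(actual, expected)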
Example #3
def compute_jacobian_avg(model, data_manager, batch_size):
    """
    compute the average Jacobian of learned model
    """
    jac_avg = msnp.zeros((model.input_dim, model.input_dim),
                         dtype=mstype.float32)

    # sample a batch of data from the data manager
    x, _ = data_manager.sample(batch_size)
    model.set_train(False)

    # compute the Jacobian row by row: the one-hot grad_output selects output dimension k
    for k in range(model.input_dim):
        grad_output = msnp.zeros(shape=x.shape, dtype=mstype.float32)
        grad_output[:, k] = 1
        tmp_grad = GradNetWrtX(model)(x, grad_output)
        jac_avg[k, :] = ops.reduce_mean(ops.absolute(tmp_grad), 0)

    return jac_avg
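
`GradNetWrtX` is referenced but not defined here; a minimal sketch of the standard MindSpore gradient-wrapper pattern it presumably follows (an assumption, not the source's definition):

import mindspore.nn as nn
import mindspore.ops as ops

class GradNetWrtX(nn.Cell):
    """Return the gradient of net(x) w.r.t. x, weighted by a caller-supplied sensitivity."""
    def __init__(self, net):
        super(GradNetWrtX, self).__init__()
        self.net = net
        # sens_param=True lets the caller pass grad_output as the sensitivity tensor
        self.grad_op = ops.GradOperation(sens_param=True)

    def construct(self, x, grad_output):
        gradient_function = self.grad_op(self.net)
        return gradient_function(x, grad_output)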
Example #4
    def reset_params(self):
        """Re-initialize the parameters of the network."""
        for node in range(self.input_dim):
            for i, w in enumerate(self.weights):
                # re-draw this variable's weight slice with Xavier-uniform init
                tmp_w = initializer(XavierUniform(),
                                    shape=w[node].shape,
                                    dtype=mstype.float32)
                self.weights[i][node] = tmp_w
            for i, b in enumerate(self.biases):
                # reset this variable's bias slice to zero
                tmp_b = msnp.zeros((b[node].shape[0]), dtype=mstype.float32)
                self.biases[i][node] = tmp_b
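
The per-slice re-initialization relies on MindSpore's `initializer` helper; an isolated, illustrative call (shapes are hypothetical) showing what one iteration of the inner loop produces:

from mindspore.common.initializer import initializer, XavierUniform
import mindspore.common.dtype as mstype

# one variable's weight slice of shape (out_dim, in_dim); sizes are illustrative
w_slice_shape = (16, 3)
new_slice = initializer(XavierUniform(), shape=w_slice_shape, dtype=mstype.float32)
print(new_slice.shape)  # (16, 3)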
Example #5
    def get_matrix_exp(matrix):
        """
        compute matrix exponent

        Parameters
        ----------
        matrix: mindspore.Tensor

        Returns
        -------
        expm: matrix exponent value of A
        """
        # Taylor series: expm(A) = I + A + A^2/2! + A^3/3! + ...
        expm_val = msnp.zeros(matrix.shape, dtype=mstype.float32)
        eye_mat = msnp.eye(matrix.shape[0], dtype=mstype.float32)  # current term, starts at A^0 = I
        k = 1.0

        # accumulate terms until the next one underflows to zero in float32
        while msnp.norm(eye_mat, 1) > 0:
            expm_val = expm_val + eye_mat
            eye_mat = msnp.matmul(matrix, eye_mat) / k
            k += 1.0
        return expm_val
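
Since the loop implements the Taylor series expm(A) = I + A + A^2/2! + ..., one way to sanity-check it (an illustrative comparison, not part of the source) is against scipy.linalg.expm, assuming get_matrix_exp is reachable as a plain function or static method:

import numpy as np
from scipy.linalg import expm
from mindspore import Tensor
import mindspore.common.dtype as mstype

a = np.array([[0.0, 1.0], [-1.0, 0.0]], dtype=np.float32)
ms_val = get_matrix_exp(Tensor(a, mstype.float32)).asnumpy()
print(np.allclose(ms_val, expm(a), atol=1e-5))  # expected: True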
Example #6
def zeros(shape, dtype=mstype.float32):
    """Create an all-zero mindspore.numpy array, defaulting to float32."""
    return mnp.zeros(shape, dtype)
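
A minimal usage sketch for this wrapper (the call sites are illustrative):

import mindspore.numpy as mnp
import mindspore.common.dtype as mstype

buf = zeros((2, 3))                       # float32 by default
mask = zeros((4, 4), dtype=mstype.int32)  # explicit dtype
print(buf.dtype, mask.shape)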