Example #1
def test_eye():
    """Compare mnp.eye against onp.eye across dtypes, sizes, and diagonal offsets."""
    test_case = Cases()
    for i in range(len(test_case.onp_dtypes)):
        for m in range(1, 5):
            actual = onp.eye(m, dtype=test_case.onp_dtypes[i])
            expected = mnp.eye(m, dtype=test_case.mnp_dtypes[i]).asnumpy()
            match_array(actual, expected)
            for n in range(1, 5):
                for k in range(0, 5):
                    actual = onp.eye(m, n, k, dtype=test_case.onp_dtypes[i])
                    expected = mnp.eye(
                        m, n, k, dtype=test_case.mnp_dtypes[i]).asnumpy()
                    match_array(actual, expected)
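For context, a minimal sketch of the fixtures this test assumes; the names Cases, onp, mnp, and match_array come from the snippet, but the bodies below are assumptions, not the actual test helpers:

import numpy as onp
import mindspore.numpy as mnp
from mindspore import dtype as mstype

class Cases:
    # hypothetical fixture: pairs each NumPy dtype with its MindSpore counterpart
    def __init__(self):
        self.onp_dtypes = [onp.int32, onp.float32, onp.float64]
        self.mnp_dtypes = [mstype.int32, mstype.float32, mstype.float64]

def match_array(actual, expected):
    # hypothetical helper: elementwise equality check between the two results
    onp.testing.assert_array_equal(actual, expected)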
Example #2
def compute_A_phi(model, norm="none", square=False):
    """
    compute matrix A consisting of products of NN weights

    Parameters
    ----------
    model: instantiating model objects
        'NonLinGaussANM' or 'NonLinGauss'
    norm: str, default 'none'
        use norm of product of paths, 'none' or 'paths'
        'paths': use norm, 'none': with no norm
    square: bool, default False
        use squared product of paths
    """
    weights = model.get_parameters(mode='w')[0]
    prod = msnp.eye(model.input_dim, dtype=mstype.float32)
    if norm == "paths":
        prod_norm = msnp.eye(model.input_dim)
    for i, w in enumerate(weights):
        if square:
            w = w**2
        else:
            w = ops.absolute(w)
        if i == 0:
            # mask the first layer with the current adjacency (no self-loops)
            tmp_adj = ops.expand_dims(model.adjacency.transpose(), 1)
            ein_one = ops.mul(w, tmp_adj)
            prod = ops.matmul(ein_one, prod)
            if norm == "paths":
                tmp_adj = 1. - msnp.eye(model.input_dim, dtype=mstype.float32)
                tmp_adj = ops.expand_dims(tmp_adj.transpose(), 1)
                ein_two = ops.mul(ops.ones_like(w), tmp_adj)
                prod_norm = ops.matmul(ein_two, prod_norm)
        else:
            prod = ops.matmul(w, prod)
            if norm == "paths":
                prod_norm = ops.matmul(ops.ones_like(w), prod_norm)

    # sum over density parameter axis
    prod = ops.reduce_sum(prod, 1)
    if norm == "paths":
        prod_norm = ops.reduce_sum(prod_norm, 1)
        # avoid / 0 on diagonal
        denominator = prod_norm + msnp.eye(model.input_dim,
                                           dtype=mstype.float32)
        return (prod / denominator).transpose()
    elif norm == "none":
        return prod.transpose()
    else:
        raise NotImplementedError
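At its core the function chains matrix products of absolute (or squared) weights, so entry (i, j) of the result aggregates the strength of every path from input j to output i. A simplified 2-D NumPy sketch of that idea, ignoring the per-variable axis and the adjacency masking handled above:

import numpy as np

rng = np.random.default_rng(0)
w0 = rng.standard_normal((3, 2))  # layer 0: 2 inputs -> 3 hidden units
w1 = rng.standard_normal((2, 3))  # layer 1: 3 hidden units -> 2 outputs

# entry (i, j) sums |w1[i, h] * w0[h, j]| over all hidden units h,
# i.e. the total absolute path weight from input j to output i
path_strength = np.abs(w1) @ np.abs(w0)
print(path_strength)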
Example #3
    def __init__(self,
                 input_dim,
                 hidden_num,
                 hidden_dim,
                 output_dim,
                 mu,
                 lamb,
                 nonlinear="leaky-relu",
                 norm_prod='paths',
                 square_prod=False):

        super(BaseModel, self).__init__()
        self.input_dim = input_dim
        self.hidden_num = hidden_num
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.mu = mu
        self.lamb = lamb
        self.nonlinear = nonlinear
        self.norm_prod = norm_prod
        self.square_prod = square_prod

        self.normal = msd.Normal(dtype=mstype.float32)
        self.extra_params = []

        # initialize current adjacency matrix
        self.adjacency = msnp.ones(
            (self.input_dim, self.input_dim), dtype=mstype.float32) - msnp.eye(
                self.input_dim, dtype=mstype.float32)

        # Generate layer_list
        layer_list = [self.hidden_dim] * self.hidden_num
        layer_list.insert(0, self.input_dim)
        layer_list.append(self.output_dim)

        # Instantiate each layer's weights and biases, with one slice per input variable
        tmp_weights = list()
        tmp_biases = list()
        for i, item in enumerate(layer_list[:-1]):
            in_dim = item
            out_dim = layer_list[i + 1]
            tmp_weights.append(
                Parameter(msnp.zeros((self.input_dim, out_dim, in_dim),
                                     dtype=mstype.float32),
                          requires_grad=True,
                          name='w' + str(i)))
            tmp_biases.append(
                Parameter(msnp.zeros((self.input_dim, out_dim),
                                     dtype=mstype.float32),
                          requires_grad=True,
                          name='b' + str(i)))

        self.weights = ParameterTuple(tmp_weights)
        self.biases = ParameterTuple(tmp_biases)

        # initialize the parameters (the zeros above are placeholders)
        self.reset_params()
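The adjacency initialization above is ones minus the identity: a fully connected graph with self-loops removed. A quick NumPy illustration of the same expression:

import numpy as np

d = 4
adjacency = np.ones((d, d), dtype=np.float32) - np.eye(d, dtype=np.float32)
print(adjacency)  # ones everywhere except zeros on the diagonal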
Example #4
def MeanShift(x,
              rgb_range,
              rgb_mean=(0.4488, 0.4371, 0.4040),
              rgb_std=(1.0, 1.0, 1.0),
              sign=-1):

    # per-channel affine shift implemented as a 1x1 convolution
    std = Tensor(rgb_std)
    conv2d = ops.Conv2D(out_channel=3, kernel_size=1)
    biasadd = ops.BiasAdd()
    # 'numpy' is presumably mindspore.numpy here: Tensor.view reshapes
    weight = numpy.eye(3, 3).view((3, 3, 1, 1)) / std.view(3, 1, 1, 1)
    bias = sign * rgb_range * Tensor(rgb_mean) / std
    weight = weight.astype(numpy.float32)
    bias = bias.astype(numpy.float32)

    x = conv2d(x, weight)
    x = biasadd(x, bias)
    return x
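Since the weight is a scaled identity and the bias a scaled mean, the 1x1 convolution reduces per channel to x -> (x + sign * rgb_range * mean) / std. A plain NumPy check of that arithmetic, a sketch of the math rather than the MindSpore call:

import numpy as np

rgb_range = 255.0
rgb_mean = np.array([0.4488, 0.4371, 0.4040], dtype=np.float32)
rgb_std = np.array([1.0, 1.0, 1.0], dtype=np.float32)
sign = -1

x = np.full(3, 128.0, dtype=np.float32)  # one pixel, three channels
shifted = (x + sign * rgb_range * rgb_mean) / rgb_std
print(shifted)  # pixel values with the dataset mean removed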
Example #5
    def get_matrix_exp(matrix):
        """
        compute matrix exponent

        Parameters
        ----------
        matrix: mindspore.Tensor

        Returns
        -------
        expm: matrix exponent value of A
        """
        expm_val = msnp.zeros(matrix.shape, dtype=mstype.float32)
        # running Taylor term, starting at A^0 / 0! = I
        eye_mat = msnp.eye(matrix.shape[0], dtype=mstype.float32)
        k = 1.0

        # accumulate terms until the next one underflows to zero
        while msnp.norm(eye_mat, 1) > 0:
            expm_val = expm_val + eye_mat
            eye_mat = msnp.matmul(matrix, eye_mat) / k
            k += 1.0
        return expm_val
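The loop implements the Taylor series exp(A) = I + A + A^2/2! + ..., with eye_mat holding the running term; iteration stops once that term underflows to zero in float32. The same logic in plain NumPy, checked on a nilpotent matrix (a sketch for comparison, not the MindSpore implementation):

import numpy as np

def matrix_exp_taylor(a):
    term = np.eye(a.shape[0], dtype=np.float32)  # A^0 / 0! = I
    result = np.zeros_like(term)
    k = 1.0
    while np.linalg.norm(term, 1) > 0:  # stop once the term underflows
        result += term
        term = (a @ term) / k  # next Taylor term A^k / k!
        k += 1.0
    return result

a = np.array([[0.0, 1.0], [0.0, 0.0]], dtype=np.float32)
print(matrix_exp_taylor(a))  # A is nilpotent, so exp(A) = I + A = [[1, 1], [0, 1]]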
Example #6
def eye(n, m=None, dtype=mstype.float32):
    """Thin wrapper around mnp.eye that defaults the dtype to float32."""
    return mnp.eye(n, m, dtype=dtype)
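A quick usage note: the wrapper simply forwards to mnp.eye with a float32 default dtype, so (assuming a working MindSpore install where this module is importable) its output should match NumPy's:

import numpy as onp

print(eye(3))      # 3x3 float32 identity
print(eye(2, 4))   # 2x4 with ones on the main diagonal
assert (eye(2, 4).asnumpy() == onp.eye(2, 4)).all()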