Example #1
 def __init__(self,
              feature_in_dim,
              feature_out_dim,
              use_fc=True,
              weight_init="normal",
              bias_init="zeros",
              has_bias=True,
              dropout_ratio=None,
              activation=None):
     super(_BaseAggregator, self).__init__()
     self.in_dim = feature_in_dim
     self.out_dim = feature_out_dim
     self.use_fc = use_fc
     if self.use_fc:
         self.weight_init = weight_init
         self.bias_init = bias_init
         self.has_bias = has_bias
         self.fc = GNNFeatureTransform(self.in_dim,
                                       self.out_dim,
                                       weight_init=self.weight_init,
                                       bias_init=self.bias_init,
                                       has_bias=self.has_bias)
     self.dropout_ratio = dropout_ratio
     if self.dropout_ratio is not None:
          # dropout_ratio is a drop probability, but nn.Dropout takes keep_prob (cf. Example #9)
          self.dropout = nn.Dropout(keep_prob=1 - self.dropout_ratio)
     self.dropout_flag = self.dropout_ratio is not None
     self.activation = get_activation(activation)
     self.activation_flag = self.activation is not None
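
Note: the construct() below is not part of the original listing; it is a minimal sketch of the forward pass that the use_fc/dropout_flag/activation_flag attributes above imply, using only names defined in this __init__.

 def construct(self, input_feature):
     output = input_feature
     if self.dropout_flag:        # dropout only when a ratio was supplied
         output = self.dropout(output)
     if self.use_fc:              # optional learned projection in_dim -> out_dim
         output = self.fc(output)
     if self.activation_flag:     # activation only when get_activation resolved one
         output = self.activation(output)
     return output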
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor_GPU, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]))

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]))

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        split_dim = 128
        matrix_A_shape, matrix_G_shape = caculate_matmul_shape(self.in_channels, self.out_channels, split_dim)
        self.matrix_A_inv = Parameter(Tensor(np.zeros(matrix_A_shape).astype(np.float32)), requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros(matrix_G_shape).astype(np.float32)), requires_grad=False)
        self.broadcast_to = P.BroadcastTo(matrix_A_shape)
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.cube_matmul = P.MatMul(transpose_a=True)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.damping = Parameter(Tensor(damping), requires_grad=False)
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.cast = P.Cast()
        self.gather = P.Gather()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.add = P.Add()
        self.sqrt = P.Sqrt()
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
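
Side note (not from the listing): self.getG wraps P.InsertGradientOf, which is an identity in the forward pass and calls the given function on the incoming gradient during the backward pass; that is how save_gradient captures G. A self-contained mini-demo of the mechanism, with every name other than the MindSpore APIs invented for illustration:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C

def halve_gradient(dx):
    # backward-pass hook: receives the gradient and must return it
    return dx * 0.5

halve = P.InsertGradientOf(halve_gradient)

class TinyNet(nn.Cell):
    def construct(self, x):
        return halve(x) * 2.0    # forward value is unchanged by the hook

grad_fn = C.GradOperation()      # gradient w.r.t. the first input
x = Tensor(np.array([1.0, 2.0]).astype(np.float32))
print(grad_fn(TinyNet())(x))     # ~[1. 1.]: d(2x)/dx = 2, halved by the hook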
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(DenseLayer, self).__init__()
        self.in_channels = validator.check_positive_int(in_channels)
        self.out_channels = validator.check_positive_int(out_channels)
        self.has_bias = validator.check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(
            initializer(weight_init, [out_channels, in_channels]))

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]))

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.cast = P.Cast()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
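
For comparison, the same matmul -> bias_add -> activation pipeline ships as mindspore.nn.Dense; a small usage sketch (shapes chosen arbitrarily):

import numpy as np
from mindspore import Tensor
import mindspore.nn as nn

# nn.Dense bundles the init arguments and optional activation seen above
dense = nn.Dense(3, 4, weight_init='normal', bias_init='zeros',
                 has_bias=True, activation='relu')
x = Tensor(np.ones((2, 3)).astype(np.float32))
y = dense(x)   # shape (2, 4); ReLU applied after bias_add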
Example #4
    def __init__(self, weights=None, ignore_indiex=None, activation="softmax"):
        super(MultiClassDiceLoss, self).__init__()
        activation_list = [
            'softmax', 'logsoftmax', 'relu', 'relu6', 'tanh', 'sigmoid'
        ]

        self.binarydiceloss = DiceLoss(smooth=1e-5)
        self.weights = weights if weights is None else validator.check_value_type(
            "weights", weights, [Tensor])
        if isinstance(self.weights, Tensor) and self.weights.ndim != 2:
            raise ValueError("The weight dim should be 2, but got {}.".format(
                self.weights.ndim))
        self.ignore_indiex = ignore_indiex if ignore_indiex is None else \
            validator.check_value_type("ignore_indiex", ignore_indiex, [int])
        if isinstance(activation, str) and activation not in activation_list:
            raise ValueError(
                "The activation must be in {}, but got {}.".format(
                    activation_list, activation))

        self.activation = get_activation(activation) if isinstance(
            activation, str) else activation
        if self.activation is not None and not isinstance(
                self.activation, Cell):
            raise TypeError(
                "The activation must be str or Cell, but got {}.".format(
                    type(self.activation)))
        self.reshape = P.Reshape()
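
Every example on this page funnels its activation argument through get_activation; as the None checks above imply, it maps a name to the matching nn cell and passes None through. A quick illustration:

from mindspore.nn import get_activation

act = get_activation('softmax')      # an nn.Softmax cell
print(type(act).__name__)            # Softmax
assert get_activation(None) is None  # None stays None, hence the *_flag checks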
Example #5
File: quant.py Project: opendlf/mindspore
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              batchnorm=None,
              activation=None):
     super(Conv2dBnAct, self).__init__()
     self.conv = conv.Conv2d(in_channels, out_channels, kernel_size, stride,
                             pad_mode, padding, dilation, group, has_bias,
                             weight_init, bias_init)
     self.has_bn = batchnorm is not None
     self.has_act = activation is not None
     self.batchnorm = batchnorm
     if batchnorm is True:
         self.batchnorm = BatchNorm2d(out_channels)
     elif batchnorm is not None:
         validator.check_isinstance('batchnorm', batchnorm, (BatchNorm2d, ))
     self.activation = get_activation(activation)
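
A usage sketch for the class above (channel counts and input shape are arbitrary):

import numpy as np
from mindspore import Tensor
from mindspore.nn import Conv2dBnAct

net = Conv2dBnAct(120, 240, 4, batchnorm=True, activation='relu')
x = Tensor(np.ones([1, 120, 32, 32]).astype(np.float32))
y = net(x)   # conv -> BatchNorm2d -> ReLU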
Example #6
File: loss.py Project: chncwang/mindspore
    def __init__(self, weights=None, ignore_indiex=None, activation="softmax"):
        super(MultiClassDiceLoss, self).__init__()

        self.binarydiceloss = DiceLoss(smooth=1e-5)
        self.weights = weights if weights is None else validator.check_value_type("weights", weights, [Tensor])
        self.ignore_indiex = ignore_indiex if ignore_indiex is None else \
            validator.check_value_type("ignore_indiex", ignore_indiex, [int])
        self.activation = get_activation(activation) if isinstance(activation, str) else activation
        if self.activation is not None and not isinstance(self.activation, Cell):
            raise TypeError("The activation must be str or Cell, but got {}.".format(activation))
        self.reshape = P.Reshape()
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None,
                 num_bits=8,
                 quant_delay=0,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(DenseQuant, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        self.fake_quant_weight = nn.FakeQuantWithMinMax(
            min_init=-6,
            max_init=6,
            ema=False,
            num_bits=num_bits,
            quant_delay=quant_delay,
            per_channel=per_channel,
            channel_size=out_channels,
            symmetric=symmetric,
            narrow_range=narrow_range)
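
A sketch (assumed, not quoted from the project) of the construct() this __init__ supports: it differs from a plain dense layer only in that the weight passes through fake_quant_weight before the matmul.

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)  # simulate num_bits quantization
        output = self.matmul(x, weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        if self.activation_flag:
            output = self.activation(output)
        return output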
Example #8
File: quant.py Project: opendlf/mindspore
 def __init__(self,
              in_channels,
              out_channels,
              weight_init='normal',
              bias_init='zeros',
              has_bias=True,
              batchnorm=None,
              activation=None):
     super(DenseBnAct, self).__init__()
     self.dense = basic.Dense(in_channels, out_channels, weight_init,
                              bias_init, has_bias)
     self.has_bn = batchnorm is not None
     self.has_act = activation is not None
     if batchnorm is True:
         self.batchnorm = BatchNorm2d(out_channels)
     elif batchnorm is not None:
          validator.check_isinstance('batchnorm', batchnorm, (BatchNorm2d, ))
          self.batchnorm = batchnorm  # keep the user-supplied cell; otherwise construct would hit an unset attribute
     self.activation = get_activation(activation)
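
Usage sketch mirroring Example #5 (numbers arbitrary):

import numpy as np
from mindspore import Tensor
from mindspore.nn import DenseBnAct

net = DenseBnAct(3, 4, activation='relu')
x = Tensor(np.ones((2, 3)).astype(np.float32))
y = net(x)   # dense -> optional batchnorm -> ReLU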
Example #9
 def __init__(self,
              feature_in_dim,
              feature_out_dim,
              dropout_ratio=None,
              activation=None):
     super(GraphConvolution, self).__init__()
     self.in_dim = feature_in_dim
     self.out_dim = feature_out_dim
     self.weight_init = glorot([self.out_dim, self.in_dim])
     self.fc = nn.Dense(self.in_dim,
                        self.out_dim,
                        weight_init=self.weight_init,
                        has_bias=False)
     self.dropout_ratio = dropout_ratio
     if self.dropout_ratio is not None:
         self.dropout = nn.Dropout(keep_prob=1 - self.dropout_ratio)
     self.dropout_flag = self.dropout_ratio is not None
     self.activation = get_activation(activation)
     self.activation_flag = self.activation is not None
     self.matmul = P.MatMul()
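
A sketch (assumed) of the matching construct(): the dense projection computes XW, and self.matmul then applies the adjacency matrix, giving the usual A(XW) graph convolution.

 def construct(self, adj, input_feature):
     output = input_feature
     if self.dropout_flag:
         output = self.dropout(output)
     output = self.fc(output)           # XW (no bias, glorot-initialized)
     output = self.matmul(adj, output)  # A(XW): aggregate over neighbors
     if self.activation_flag:
         output = self.activation(output)
     return output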
Example #10
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A_inv = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)), name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)), name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.pad = P.Pad(((0, 24), (0, 24)))
        self.pad1 = P.Pad(((0, 8), (0, 8)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(2048), mstype.float32)
        self.dampingG = Tensor(np.identity(1024), mstype.float32)
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #11
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.thor = True
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float32)),
                                  name='matrix_A',
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            if out_channels == 1001:
                self.matrix_G = Parameter(Tensor(
                    np.zeros([1024, 1024]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
                self.pad = P.Pad(((0, 23), (0, 23)))
                self.pad1 = P.Pad(((0, 7), (0, 7)))
                self.slice = P.Slice()
                self.add = P.TensorAdd()
            else:
                self.matrix_G = Parameter(Tensor(
                    np.eye(out_channels).astype(np.float32)),
                                          name="matrix_G",
                                          requires_grad=False)
                self.abs = P.Abs()
                self.reduce_max = P.ReduceMax(keep_dims=False)
                self.neg = P.Neg()
                self.reduce_sum = P.ReduceSum()
            self.matmul = P.MatMul(transpose_b=True)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.cast = P.Cast()
            self.is_nsp_layer = (out_channels == 2)
        else:
            self.is_Ascend = False
            self.matrix_G = Parameter(Tensor(
                np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G",
                                      requires_grad=False)
            self.cube_matmul = P.MatMul(transpose_a=True)
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=100,
                 has_bias=False,
                 activation=None,
                 batch_size=12):
        super(Dense_Thor, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        self.matrix_A_inv = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float16)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros([out_channels, out_channels]).astype(np.float16)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(
            np.zeros([out_channels, out_channels]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = damping
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.abs = P.Abs()
        self.reduce_max = P.ReduceMax(keep_dims=False)
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.batch_size = batch_size
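
Common to all the Dense_Thor variants above: the forward pass is an ordinary dense layer with self.getG spliced in after the matmul, so save_gradient can cache the output-gradient statistics G for the periodic THOR update. A simplified sketch (the real construct also accumulates the input statistics matrix A, omitted here):

    def construct(self, x):
        output = self.matmul(x, self.weight)
        output = self.getG(output)  # identity forward; save_gradient runs on backward
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        if self.activation_flag:
            output = self.activation(output)
        return output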