Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init=None,
                 bias_init=None,
                 quant_delay=0,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dQuant, self).__init__()
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = has_bias
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group
        self.quant_delay = quant_delay

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if bias_init is None:
            bias_init = initializer('zeros', [out_channels])
        if has_bias:
            self.bias = Parameter(bias_init, name='bias')
            self.bias_add = P.BiasAdd()

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
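FakeQuantWithMinMax above simulates 8-bit quantization of the weights over a fixed [-6, 6] range during training. A minimal numpy sketch of that quantize-dequantize step (an illustration of uniform affine fake quantization, not the exact kernel):

    import numpy as np

    def fake_quant(x, min_init=-6.0, max_init=6.0, num_bits=8, narrow_range=False):
        # Uniform affine quantize-dequantize used in quantization-aware training.
        qmin = 1 if narrow_range else 0
        qmax = 2 ** num_bits - 1
        scale = (max_init - min_init) / (qmax - qmin)
        zero_point = int(round(qmin - min_init / scale))
        q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)
        return (q - zero_point) * scale

    w = np.random.randn(8, 4, 3, 3)
    w_q = fake_quant(w)  # weights snapped to the 8-bit grid on roughly [-6, 6]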
Example #2
 def __init__(self, model, train_dataset, task_type, num_classes=None, epochs=1,
              epi_uncer_model_path=None, ale_uncer_model_path=None, save_model=False):
     self.epi_model = model
     self.ale_model = deepcopy(model)
     self.epi_train_dataset = train_dataset
     self.ale_train_dataset = deepcopy(train_dataset)
     self.task_type = task_type
     self.epochs = check_int_positive(epochs)
     self.epi_uncer_model_path = epi_uncer_model_path
     self.ale_uncer_model_path = ale_uncer_model_path
     self.save_model = check_bool(save_model)
     self.epi_uncer_model = None
     self.ale_uncer_model = None
     self.concat = P.Concat(axis=0)
     self.sum = P.ReduceSum()
     self.pow = P.Pow()
     if not isinstance(model, Cell):
          raise TypeError('The model should be of Cell type.')
     if task_type not in ('regression', 'classification'):
         raise ValueError('The task should be regression or classification.')
     if task_type == 'classification':
         self.num_classes = check_int_positive(num_classes)
     else:
         self.num_classes = num_classes
     if save_model:
         if epi_uncer_model_path is None or ale_uncer_model_path is None:
             raise ValueError("If save_model is True, the epi_uncer_model_path and "
                              "ale_uncer_model_path should not be None.")
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True):
        super(GNNFeatureTransform, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
               weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
Example #4
 def __init__(self, encoder, decoder, hidden_size, latent_size,
              num_classes):
     super(ConditionalVAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
          raise TypeError('The encoder and decoder should be of Cell type.')
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     if hidden_size < latent_size:
         raise ValueError(
             'The latent_size should be less than or equal to the hidden_size.'
         )
     self.num_classes = check_int_positive(num_classes)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.concat = P.Concat(axis=1)
     self.to_tensor = P.ScalarToArray()
     self.one_hot = OneHot(depth=num_classes)
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size + self.num_classes,
                         self.hidden_size)
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
               weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = _selected_ops.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
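The layer above reduces to a matrix multiply against the transposed weight, a bias add, and an optional activation. A self-contained numpy sketch of the same forward computation:

    import numpy as np

    def dense_forward(x, weight, bias=None, activation=None):
        # MatMul(transpose_b=True): x is (N, in_channels), weight is (out_channels, in_channels).
        y = x @ weight.T
        if bias is not None:
            y = y + bias  # BiasAdd broadcasts over the batch dimension
        return activation(y) if activation is not None else y

    x = np.random.randn(4, 3)
    w = np.random.randn(5, 3)
    b = np.zeros(5)
    out = dense_forward(x, w, b, activation=np.tanh)  # shape (4, 5)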
Example #6
    def __init__(self,
                 num_groups,
                 num_channels,
                 eps=1e-05,
                 affine=True,
                 gamma_init='ones',
                 beta_init='zeros'):
        super(GroupNorm, self).__init__()
        self.num_groups = check_int_positive(num_groups)
        self.num_channels = check_int_positive(num_channels)
        if num_channels % num_groups != 0:
            raise ValueError("num_channels should be divided by num_groups")
        self.eps = check_typename('eps', eps, (float, ))
        self.affine = check_bool(affine)

        gamma = initializer(gamma_init, [num_channels, 1, 1])
        beta = initializer(beta_init, [num_channels, 1, 1])
        if self.affine:
            self.gamma = Parameter(gamma, name='gamma')
            self.beta = Parameter(beta, name='beta')
        else:
            self.gamma = gamma
            self.beta = beta
        self.shape = F.shape
        self.reshape = F.reshape
        self.reduce_mean = P.ReduceMean(keep_dims=True)
        self.square = F.square
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.sqrt = P.Sqrt()
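GroupNorm normalizes each of the num_groups channel groups over its channels and spatial positions, then applies the per-channel gamma/beta of shape (C, 1, 1). A numpy sketch of the math the reshape/reduce ops above implement:

    import numpy as np

    def group_norm(x, num_groups, gamma, beta, eps=1e-5):
        # x: (N, C, H, W); gamma and beta: (C, 1, 1)
        n, c, h, w = x.shape
        g = x.reshape(n, num_groups, (c // num_groups) * h * w)
        mean = g.mean(axis=2, keepdims=True)
        var = g.var(axis=2, keepdims=True)
        g = (g - mean) / np.sqrt(var + eps)
        return g.reshape(n, c, h, w) * gamma + beta

    x = np.random.randn(2, 8, 4, 4)
    y = group_norm(x, num_groups=4, gamma=np.ones((8, 1, 1)), beta=np.zeros((8, 1, 1)))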
Example #7
    def __init__(
            self,
            in_channels,
            out_channels,
            activation=None,
            has_bias=True,
            weight_prior_fn=NormalPrior,
            weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
            bias_prior_fn=NormalPrior,
            bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        super(_DenseVariational, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_prior_fn, Cell):
            if weight_prior_fn.__class__.__name__ != 'NormalPrior':
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn
        else:
            if weight_prior_fn.__name__ != 'NormalPrior':
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn()

        try:
            self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
        except TypeError:
            raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')

        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
                if bias_prior_fn.__class__.__name__ != 'NormalPrior':
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn
            else:
                if bias_prior_fn.__name__ != 'NormalPrior':
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn()

            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')

        self.activation = activation
        if not self.activation:
            self.activation_flag = False
        else:
            self.activation_flag = True
            if isinstance(self.activation, str):
                self.activation = get_activation(activation)
            elif isinstance(self.activation, Cell):
                self.activation = activation
            else:
                raise ValueError('The type of `activation` is wrong.')

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.sum = P.ReduceSum()
Example #8
 def __init__(self,
              features,
              biases,
              ftr_dims,
              num_class,
              num_nodes,
              hidden_units,
              num_heads,
              attn_drop=0.0,
              ftr_drop=0.0,
              activation=nn.ELU(),
              residual=False):
     super(GAT, self).__init__()
     self.features = Tensor(features)
     self.biases = Tensor(biases)
     self.ftr_dims = check_int_positive(ftr_dims)
     self.num_class = check_int_positive(num_class)
     self.num_nodes = check_int_positive(num_nodes)
     self.hidden_units = hidden_units
     self.num_heads = num_heads
     self.attn_drop = attn_drop
     self.ftr_drop = ftr_drop
     self.activation = activation
     self.residual = check_bool(residual)
     self.layers = []
     # first layer
     self.layers.append(
         AttentionAggregator(self.ftr_dims,
                             self.hidden_units[0],
                             self.num_heads[0],
                             self.ftr_drop,
                             self.attn_drop,
                             self.activation,
                             residual=False))
     # intermediate layer
     for i in range(1, len(self.hidden_units)):
         self.layers.append(
             AttentionAggregator(self.hidden_units[i - 1] *
                                 self.num_heads[i - 1],
                                 self.hidden_units[i],
                                 self.num_heads[i],
                                 self.ftr_drop,
                                 self.attn_drop,
                                 self.activation,
                                 residual=self.residual))
     # output layer
     self.layers.append(
         AttentionAggregator(self.hidden_units[-1] * self.num_heads[-2],
                             self.num_class,
                             self.num_heads[-1],
                             self.ftr_drop,
                             self.attn_drop,
                             activation=None,
                             residual=False,
                             output_transform='sum'))
     self.layers = nn.layer.CellList(self.layers)
Example #9
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     validator.check_integer('group', group, in_channels, Rel.EQ)
     validator.check_integer('group', group, out_channels, Rel.EQ)
     validator.check_integer('group', group, 1, Rel.GE)
     self.pad_mode = pad_mode
     self.group = group
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     Validator.check_value_type('padding', padding, (int, tuple),
                                self.cls_name)
     if isinstance(padding, tuple):
         Validator.check_integer('padding size', len(padding), 4, Rel.EQ,
                                 self.cls_name)
     self.padding = padding
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape),
                             name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]),
                               name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning(
                 "value of `has_bias` is False, value of `bias_init` will be ignore."
             )
         self.bias = None
Example #10
    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 pad_mode, padding, dilation, group, has_bias, weight_init,
                 bias_init):
        super(_Conv, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.padding = check_int_non_negative(padding)
        self.dilation = dilation
        self.group = check_int_positive(group)
        self.has_bias = has_bias
        if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
            kernel_size[0] < 1 or kernel_size[1] < 1:
            raise ValueError(
                "Attr 'kernel_size' of 'Conv2D' Op passed " +
                str(self.kernel_size) +
                ", should be an int or a tuple of ints, each equal to or greater than 1.")
        if (not isinstance(stride[0], int)) or (not isinstance(
                stride[1], int)) or stride[0] < 1 or stride[1] < 1:
            raise ValueError(
                "Attr 'stride' of 'Conv2D' Op passed " + str(self.stride) +
                ", should be an int or a tuple of ints, each equal to or greater than 1.")
        if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
            dilation[0] < 1 or dilation[1] < 1:
            raise ValueError("Attr 'dilation' of 'Conv2D' Op passed " +
                             str(self.dilation) +
                             ", should equal to or greater than 1.")
        if in_channels % group != 0:
            raise ValueError(
                "Attr 'in_channels' of 'Conv2D' Op must be divisible by "
                "attr 'group' of 'Conv2D' Op.")
        if out_channels % group != 0:
            raise ValueError(
                "Attr 'out_channels' of 'Conv2D' Op must be divisible by "
                "attr 'group' of 'Conv2D' Op.")

        self.weight = Parameter(initializer(
            weight_init, [out_channels, in_channels // group, *kernel_size]),
                                name='weight')

        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name='bias')
        else:
            if bias_init != 'zeros':
                logger.warning(
                    "Value of 'has_bias' is False, value of 'bias_init' will be ignored."
                )
            self.bias = None
Example #11
 def __init__(self, encoder, decoder, hidden_size, latent_size):
     super(VAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.to_tensor = P.ScalarToArray()
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size, self.hidden_size)
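Here dense1 and dense2 project the encoder output to the two parameters of the latent Gaussian, and normal draws the latent sample. A numpy sketch of the reparameterization trick this enables, assuming dense2 produces the log-variance (a common convention, suggested by the Exp op above):

    import numpy as np

    def reparameterize(mu, log_var, rng=None):
        # z = mu + sigma * eps, eps ~ N(0, I); keeps the sample differentiable w.r.t. mu and sigma.
        if rng is None:
            rng = np.random.default_rng(0)
        eps = rng.standard_normal(mu.shape)
        return mu + np.exp(0.5 * log_var) * eps

    mu = np.zeros((4, 16))       # dense1 output: latent mean
    log_var = np.zeros((4, 16))  # dense2 output: latent log-variance (assumed)
    z = reparameterize(mu, log_var)  # (4, 16), fed to dense3 and the decoder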
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None,
                 num_bits=8,
                 quant_delay=0,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(DenseQuant, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                    weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        self.fake_quant_weight = nn.FakeQuantWithMinMax(
            min_init=-6,
            max_init=6,
            ema=False,
            num_bits=num_bits,
            quant_delay=quant_delay,
            per_channel=per_channel,
            channel_size=out_channels,
            symmetric=symmetric,
            narrow_range=narrow_range)
Example #13
    def generate_sample(self, sample_y, generate_nums, shape):
        """
        Randomly sample from the latent space to generate samples.

        Args:
            sample_y (Tensor): Labels of the samples. Tensor of shape (generate_nums,) and type mindspore.int32.
            generate_nums (int): The number of samples to generate.
            shape (tuple): The shape of the samples, which must be (generate_nums, C, H, W) or (-1, C, H, W).

        Returns:
            Tensor, the generated samples.
        """
        generate_nums = check_int_positive(generate_nums)
        if not isinstance(shape, tuple) or len(shape) != 4 or (
                shape[0] != -1 and shape[0] != generate_nums):
            raise ValueError(
                'The shape should be (generate_nums, C, H, W) or (-1, C, H, W).'
            )
        sample_z = self.normal((generate_nums, self.latent_size),
                               self.to_tensor(0.0),
                               self.to_tensor(1.0),
                               seed=0)
        sample_y = self.one_hot(sample_y)
        sample_c = self.concat((sample_z, sample_y))
        sample = self._decode(sample_c)
        sample = self.reshape(sample, shape)
        return sample
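The conditioning happens in the concat step: the sampled latent is joined with one-hot class labels before decoding. A numpy sketch of that step, with illustrative sizes:

    import numpy as np

    generate_nums, latent_size, num_classes = 6, 16, 10
    rng = np.random.default_rng(0)
    sample_z = rng.standard_normal((generate_nums, latent_size))  # normal(..., seed=0)
    labels = np.arange(generate_nums)                 # sample_y: one class label per sample
    sample_y = np.eye(num_classes)[labels]            # OneHot(depth=num_classes)
    sample_c = np.concatenate([sample_z, sample_y], axis=1)  # Concat(axis=1) -> (6, 26)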
Example #14
 def __init__(self,
              num_features,
              eps=1e-5,
              momentum=0.9,
              affine=True,
              gamma_init='ones',
              beta_init='zeros',
              moving_mean_init='zeros',
              moving_var_init='ones',
              use_batch_statistics=None,
              device_num_each_group=1):
     super(GlobalBatchNorm, self).__init__(num_features,
                                           eps,
                                           momentum,
                                           affine,
                                           gamma_init,
                                           beta_init,
                                           moving_mean_init,
                                           moving_var_init,
                                           use_batch_statistics,
                                           device_num_each_group,
                                           input_dims='both')
     self.group = check_int_positive(device_num_each_group)
     if self.group <= 1:
         raise ValueError("the number of group must be greater than 1.")
Example #15
    def run(self, train_dataset, epochs=10):
        """
        Optimize the parameters by training the probability network, and return the trained network.

        Args:
            train_dataset (Dataset): A training dataset iterator.
            epochs (int): Total number of iterations on the data. Default: 10.

        Outputs:
            Cell, the trained probability network.
        """
        epochs = check_int_positive(epochs)
        train_net = TrainOneStepCell(self.net_with_loss, self.optimizer)
        train_net.set_train()
        for _ in range(1, epochs+1):
            train_loss = 0
            dataset_size = 0
            for data in train_dataset.create_dict_iterator():
                x = Tensor(data['image'], dtype=mstype.float32)
                y = Tensor(data['label'], dtype=mstype.int32)
                dataset_size += len(x)
                loss = train_net(x, y).asnumpy()
                train_loss += loss
            self._loss = train_loss / dataset_size
        model = self.net_with_loss.backbone_network
        return model
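The loop accumulates the loss returned for each batch and divides by the number of samples seen, so the stored loss is the per-sample loss of the final epoch. A plain-Python sketch of that bookkeeping, where train_step and batches are hypothetical stand-ins for the TrainOneStepCell call and the dataset iterator:

    def run_epochs(train_step, batches, epochs=10):
        avg_loss = None
        for _ in range(epochs):
            total_loss, num_samples = 0.0, 0
            for x, y in batches:
                total_loss += float(train_step(x, y))  # loss for this batch
                num_samples += len(x)
            avg_loss = total_loss / num_samples  # per-sample loss of the last epoch
        return avg_loss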
Example #16
 def __init__(self,
              model,
              train_dataset,
              task_type,
              num_classes=None,
              epochs=1,
              epi_uncer_model_path=None,
              ale_uncer_model_path=None,
              save_model=False):
     self.model = model
     self.train_dataset = train_dataset
     self.task_type = task_type
      self.num_classes = check_int_positive(num_classes) if num_classes is not None else None
     self.epochs = epochs
     self.epi_uncer_model_path = epi_uncer_model_path
     self.ale_uncer_model_path = ale_uncer_model_path
     self.save_model = save_model
     self.epi_uncer_model = None
     self.ale_uncer_model = None
     self.concat = P.Concat(axis=0)
     self.sum = P.ReduceSum()
     self.pow = P.Pow()
     if self.task_type not in ('regression', 'classification'):
         raise ValueError(
             'The task should be regression or classification.')
     if self.task_type == 'classification':
         if self.num_classes is None:
             raise ValueError("Classification task needs to input labels.")
     if self.save_model:
         if self.epi_uncer_model_path is None or self.ale_uncer_model_path is None:
             raise ValueError(
                 "If save_model is True, the epi_uncer_model_path and "
                 "ale_uncer_model_path should not be None.")
Example #17
    def __init__(self,
                 in_channel,
                 out_channel,
                 in_drop_ratio=0.0,
                 coef_drop_ratio=0.0,
                 residual=False,
                 coef_activation=nn.LeakyReLU(),
                 activation=nn.ELU()):
        super(AttentionHead, self).__init__()
        self.in_channel = check_int_positive(in_channel)
        self.out_channel = check_int_positive(out_channel)
        self.in_drop_ratio = in_drop_ratio
        self.in_drop = nn.Dropout(keep_prob=1 - in_drop_ratio)
        self.in_drop_2 = nn.Dropout(keep_prob=1 - in_drop_ratio)
        self.feature_transform = GNNFeatureTransform(
            in_channels=self.in_channel,
            out_channels=self.out_channel,
            has_bias=False,
            weight_init='XavierUniform')

        self.f_1_transform = GNNFeatureTransform(
            in_channels=self.out_channel,
            out_channels=1,
            weight_init='XavierUniform')
        self.f_2_transform = GNNFeatureTransform(
            in_channels=self.out_channel,
            out_channels=1,
            weight_init='XavierUniform')
        self.softmax = nn.Softmax()

        self.coef_drop = nn.Dropout(keep_prob=1 - coef_drop_ratio)
        self.matmul = P.MatMul()
        self.bias_add = P.BiasAdd()
        self.bias = Parameter(initializer('zeros', self.out_channel), name='bias')
        self.residual = check_bool(residual)
        if self.residual:
            if in_channel != out_channel:
                self.residual_transform_flag = True
                self.residual_transform = GNNFeatureTransform(
                    in_channels=self.in_channel,
                    out_channels=self.out_channel)
            else:
                self.residual_transform_flag = False
                self.residual_transform = None
        self.coef_activation = coef_activation
        self.activation = activation
Example #18
 def __init__(self, encoder, decoder, hidden_size, latent_size,
              num_classes):
     super(ConditionalVAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     self.num_classes = check_int_positive(num_classes)
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.concat = P.Concat(axis=1)
     self.to_tensor = P.ScalarToArray()
     self.one_hot = OneHot(depth=num_classes)
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size + self.num_classes,
                         self.hidden_size)
Example #19
 def __init__(self, encoder, decoder, hidden_size, latent_size):
     super(VAE, self).__init__()
     self.encoder = encoder
     self.decoder = decoder
     if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
          raise TypeError('The encoder and decoder should be of Cell type.')
     self.hidden_size = check_int_positive(hidden_size)
     self.latent_size = check_int_positive(latent_size)
     if hidden_size < latent_size:
         raise ValueError(
             'The latent_size should be less than or equal to the hidden_size.'
         )
     self.normal = C.normal
     self.exp = P.Exp()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.to_tensor = P.ScalarToArray()
     self.dense1 = Dense(self.hidden_size, self.latent_size)
     self.dense2 = Dense(self.hidden_size, self.latent_size)
     self.dense3 = Dense(self.latent_size, self.hidden_size)
Example #20
    def _train(self,
               epoch,
               train_dataset,
               callbacks=None,
               dataset_sink_mode=True):
        """
        Training.

        Args:
            epoch (int): Total number of iterations on the data.
            train_dataset (Dataset): A training dataset iterator. If there is no
                                     loss_fn, a tuple with multiple data items (data1, data2, data3, ...) will be
                                     returned and passed to the network. Otherwise, a tuple (data, label) will
                                     be returned, and the data and label are passed to the network and loss
                                     function respectively.
            callbacks (list): List of callback objects to be executed while training. Default: None.
            dataset_sink_mode (bool): Determines whether to pass the data through the dataset channel. Default: True.
                                      In pynative mode, the training process will be performed with the dataset
                                      not sinking.
        """
        epoch = check_int_positive(epoch)
        self._train_network.set_train()

        if self._parameter_broadcast:
            self._train_network.set_broadcast_flag()

        # build callback list
        list_callback = _build_callbacks(callbacks)
        cb_params = _InternalCallbackParam()
        cb_params.train_network = self._train_network
        cb_params.epoch_num = epoch
        cb_params.batch_num = train_dataset.get_dataset_size()
        cb_params.mode = "train"
        cb_params.loss_fn = self._loss_fn
        cb_params.optimizer = self._optimizer
        cb_params.parallel_mode = self._parallel_mode
        cb_params.device_number = self._device_number
        cb_params.train_dataset = train_dataset
        cb_params.list_callback = list_callback

        if dataset_sink_mode:
            if context.get_context("mode") == context.PYNATIVE_MODE:
                logger.warning(
                    "The pynative mode cannot support dataset sink mode currently."
                    "So the training process will be performed with dataset not sink."
                )
                self._train_process(epoch, train_dataset, list_callback,
                                    cb_params)
            else:
                self._train_dataset_sink_process(epoch, train_dataset,
                                                 list_callback, cb_params)
        else:
            self._train_process(epoch, train_dataset, list_callback, cb_params)
Example #21
    def __init__(
            self,
            in_channels,
            out_channels,
            activation=None,
            has_bias=True,
            weight_prior_fn=NormalPrior,
            weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
            bias_prior_fn=NormalPrior,
            bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        super(_DenseVariational, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()

        self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')

        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()

            self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')

        self.activation = activation
        if isinstance(self.activation, str):
            self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.sum = P.ReduceSum()
Example #22
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     self.pad_mode = pad_mode
     self.padding = padding
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning("value of `has_bias` is False, value of `bias_init` will be ignore.")
         self.bias = None
Example #23
 def __init__(self, model, train_dataset, task_type, num_classes=None, epochs=None,
              uncertainty_model_path=None):
     self.model = model
     self.train_dataset = train_dataset
     self.task_type = task_type
      self.num_classes = check_int_positive(num_classes) if num_classes is not None else None
     self.epochs = epochs
     self.uncer_model_path = uncertainty_model_path
     self.uncer_model = None
     self.concat = P.Concat(axis=0)
     self.sum = P.ReduceSum()
     self.pow = P.Pow()
     if self.task_type not in ('regression', 'classification'):
         raise ValueError('The task should be regression or classification.')
     if self.task_type == 'classification':
         if self.num_classes is None:
             raise ValueError("Classification task needs to input labels.")
Example #24
    def generate_sample(self, generate_nums, shape):
        """
        Randomly sample from the latent space to generate samples.

        Args:
            generate_nums (int): The number of samples to generate.
            shape (tuple): The shape of the samples; it should be (generate_nums, C, H, W) or (-1, C, H, W).

        Returns:
            Tensor, the generated sample.
        """
        generate_nums = check_int_positive(generate_nums)
        if not isinstance(shape, tuple) or len(shape) != 4 or (
                shape[0] != -1 and shape[0] != generate_nums):
            raise ValueError(
                'The shape should be (generate_nums, C, H, W) or (-1, C, H, W).'
            )
        sample_z = self.normal((generate_nums, self.latent_size),
                               self.to_tensor(0.0),
                               self.to_tensor(1.0),
                               seed=0)
        sample = self._decode(sample_z)
        sample = self.reshape(sample, shape)
        return sample
Example #25
    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.9,
                 affine=True,
                 gamma_init='ones',
                 beta_init='zeros',
                 moving_mean_init='zeros',
                 moving_var_init='ones',
                 use_batch_statistics=True,
                 device_num_each_group=1):
        super(_BatchNorm, self).__init__()
        if num_features < 1:
            raise ValueError("num_features must be at least 1")

        if momentum < 0 or momentum > 1:
            raise ValueError(
                "momentum should be a number in range [0, 1], but got {}".
                format(momentum))

        self.use_batch_statistics = use_batch_statistics
        self.num_features = num_features
        self.eps = eps
        self.moving_mean = Parameter(initializer(moving_mean_init,
                                                 num_features),
                                     name="mean",
                                     requires_grad=False)
        self.moving_variance = Parameter(initializer(moving_var_init,
                                                     num_features),
                                         name="variance",
                                         requires_grad=False)
        self.gamma = Parameter(initializer(gamma_init, num_features),
                               name="gamma",
                               requires_grad=affine)
        self.beta = Parameter(initializer(beta_init, num_features),
                              name="beta",
                              requires_grad=affine)
        self.group = check_int_positive(device_num_each_group)
        self.is_global = False
        if self.group != 1:
            self.rank_id = get_rank()
            self.rank_size = get_group_size()
            self.device_list = [i for i in range(0, self.rank_size)]
            self.rank_list = self.list_group(self.device_list, self.group)
            self.rank_list_idx = len(self.rank_list)
            for i in range(self.rank_list_idx):
                if self.rank_id in self.rank_list[i] and self.group != 1:
                    self.is_global = True
                    management.create_group('group' + str(i),
                                            self.rank_list[i])
                    self.all_reduce = P.AllReduce(
                        P.ReduceOp.SUM,
                        'group' + str(i)).add_prim_attr('fusion', 1)
        self.shape = P.Shape()
        self.reduce_mean = P.ReduceMean(keep_dims=True)
        self.square = P.Square()
        self.sqrt = P.Sqrt()
        self.cast = P.Cast()
        self.dtype = P.DType()
        self.reshape = P.Reshape()
        self.is_ascend = context.get_context("device_target") == "Ascend"

        if context.get_context("enable_ge"):
            self.is_ge_backend = True
            self.momentum = Tensor(1.0 - momentum, mstype.float32)
        else:
            self.is_ge_backend = False
            self.momentum = 1.0 - momentum
        if self.is_ge_backend or self.is_ascend:
            self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps)
        else:
            self.bn_train = P.FusedBatchNorm(mode=1,
                                             epsilon=self.eps,
                                             momentum=self.momentum)
        self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps)

        data_parallel_strategy = ((1, ), (1, ))
        data_parallel_strategy_one = ((1, ), ())
        self.sub_mean = P.Sub().set_strategy(data_parallel_strategy)
        self.sub_var = P.Sub().set_strategy(data_parallel_strategy)
        self.mul_mean = P.Mul().set_strategy(data_parallel_strategy_one)
        self.mul_var = P.Mul().set_strategy(data_parallel_strategy_one)
        self.assign_sub_mean = P.AssignSub().set_strategy(
            data_parallel_strategy)
        self.assign_sub_var = P.AssignSub().set_strategy(
            data_parallel_strategy)
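On the non-fused path, the sub/mul/assign-sub ops above implement an exponential moving average of the batch statistics. Since self.momentum stores 1 - momentum, the update is moving <- moving - (1 - momentum) * (moving - batch), i.e. moving <- momentum * moving + (1 - momentum) * batch. A numpy sketch:

    import numpy as np

    def update_moving_stats(moving_mean, moving_var, batch_mean, batch_var, momentum=0.9):
        # Same subtract / scale / assign-sub decomposition as the ops above.
        k = 1.0 - momentum
        moving_mean -= k * (moving_mean - batch_mean)
        moving_var -= k * (moving_var - batch_var)
        return moving_mean, moving_var

    mm, mv = np.zeros(8), np.ones(8)
    bm, bv = np.full(8, 0.5), np.full(8, 2.0)
    mm, mv = update_moving_stats(mm, mv, bm, bv)  # mm -> 0.05, mv -> 1.1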
Example #26
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 pad_mode,
                 padding,
                 dilation,
                 group,
                 has_bias,
                 weight_init,
                 bias_init,
                 transposed=False):
        super(_Conv, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.weight_init = weight_init
        self.bias_init = bias_init
        if isinstance(padding, int):
            Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
            self.padding = padding
        elif isinstance(padding, tuple):
            for pad in padding:
                Validator.check_integer('padding item', pad, 0, Rel.GE, self.cls_name)
            self.padding = padding
        else:
            raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))

        self.dilation = dilation
        self.group = check_int_positive(group)
        self.has_bias = has_bias
        if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
            kernel_size[0] < 1 or kernel_size[1] < 1:
            raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
                             + str(self.kernel_size) + ", should be a int or tuple and equal to or greater than 1.")
        if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or stride[0] < 1 or stride[1] < 1:
            raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
                             + str(self.stride) + ", should be a int or tuple and equal to or greater than 1.")
        if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
            dilation[0] < 1 or dilation[1] < 1:
            raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
                             + str(self.dilation) + ", should equal to or greater than 1.")
        if in_channels % group != 0:
            raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")
        if out_channels % group != 0:
            raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")
        if transposed:
            shape = [in_channels, out_channels // group, *kernel_size]
        else:
            shape = [out_channels, in_channels // group, *kernel_size]
        self.weight = Parameter(initializer(self.weight_init, shape), name='weight')

        if check_bool(has_bias):
            self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
        else:
            if self.bias_init != 'zeros':
                logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
            self.bias = None
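The transposed flag only switches the weight layout: a regular convolution stores [out_channels, in_channels // group, kH, kW], while a transposed one stores [in_channels, out_channels // group, kH, kW]. A small illustration with made-up sizes:

    # Illustrative values only.
    in_channels, out_channels, group, kernel_size = 16, 32, 2, (3, 3)
    normal_shape = [out_channels, in_channels // group, *kernel_size]      # [32, 8, 3, 3]
    transposed_shape = [in_channels, out_channels // group, *kernel_size]  # [16, 16, 3, 3]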
Example #27
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor_GPU, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        split_dim = 128
        matrix_A_shape, matrix_G_shape = caculate_matmul_shape(
            self.in_channels, self.out_channels, split_dim)
        self.matrix_A_inv = Parameter(Tensor(
            np.zeros(matrix_A_shape).astype(np.float32)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros(matrix_G_shape).astype(np.float32)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.broadcast_to = P.BroadcastTo(matrix_A_shape)
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.cube_matmul = P.MatMul(transpose_a=True)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.damping = Parameter(Tensor(damping),
                                 name="damping_value",
                                 requires_grad=False)
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.cast = P.Cast()
        self.gather = P.GatherV2()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.cholesky = P.Cholesky(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
Example #28
def test_check_int_positive_1():  # prefixed with test_ so pytest collects it
    with pytest.raises(ValueError):
        check_int_positive(-1)
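For reference, a hypothetical pure-Python sketch of what a validator like check_int_positive typically does (the real MindSpore helper also reports the argument name in its message):

    def check_int_positive(value):
        # Reject bools explicitly: isinstance(True, int) is True in Python.
        if isinstance(value, bool) or not isinstance(value, int):
            raise TypeError("input must be an int, but got {}".format(type(value)))
        if value <= 0:
            raise ValueError("input must be a positive int, but got {}".format(value))
        return value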
Example #29
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation=None,
                 has_bias=True,
                 weight_prior_fn=NormalPrior,
                 weight_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape),
                 bias_prior_fn=NormalPrior,
                 bias_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape)):
        super(_DenseVariational, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()
        for prior_name, prior_dist in self.weight_prior.name_cells().items():
            if prior_name != 'normal':
                raise TypeError(
                    "The type of distribution of `weight_prior_fn` should be `normal`"
                )
            if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor)
                    and isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                raise TypeError(
                    "The input form of `weight_prior_fn` is incorrect")

        try:
            self.weight_posterior = weight_posterior_fn(
                shape=[self.out_channels, self.in_channels], name='bnn_weight')
        except TypeError:
            raise TypeError(
                'The type of `weight_posterior_fn` should be `NormalPosterior`'
            )
        for posterior_name, _ in self.weight_posterior.name_cells().items():
            if posterior_name != 'normal':
                raise TypeError(
                    "The type of distribution of `weight_posterior_fn` should be `normal`"
                )

        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()
            for prior_name, prior_dist in self.bias_prior.name_cells().items():
                if prior_name != 'normal':
                    raise TypeError(
                        "The type of distribution of `bias_prior_fn` should be `normal`"
                    )
                if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor)
                        and isinstance(getattr(prior_dist, '_sd_value'),
                                       Tensor)):
                    raise TypeError(
                        "The input form of `bias_prior_fn` is incorrect")

            try:
                self.bias_posterior = bias_posterior_fn(
                    shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError(
                    'The type of `bias_posterior_fn` should be `NormalPosterior`'
                )
            for posterior_name, _ in self.bias_posterior.name_cells().items():
                if posterior_name != 'normal':
                    raise TypeError(
                        "The type of distribution of `bias_posterior_fn` should be `normal`"
                    )

        self.activation = activation
        if not self.activation:
            self.activation_flag = False
        else:
            self.activation_flag = True
            if isinstance(self.activation, str):
                self.activation = get_activation(activation)
            elif isinstance(self.activation, Cell):
                self.activation = activation
            else:
                raise ValueError('The type of `activation` is wrong.')

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.sum = P.ReduceSum()
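A variational dense layer samples its weights and bias from the learned posterior before the usual matmul. A numpy sketch of one stochastic forward pass, assuming Gaussian posteriors parameterized by mean and standard deviation (the KL penalty against the prior is omitted):

    import numpy as np

    def variational_dense(x, w_mu, w_sigma, b_mu, b_sigma, rng=None):
        # Draw one weight/bias realization, then apply MatMul(transpose_b=True) + BiasAdd.
        if rng is None:
            rng = np.random.default_rng(0)
        w = w_mu + w_sigma * rng.standard_normal(w_mu.shape)
        b = b_mu + b_sigma * rng.standard_normal(b_mu.shape)
        return x @ w.T + b

    x = np.random.randn(4, 3)
    out = variational_dense(x, np.zeros((5, 3)), np.full((5, 3), 0.1),
                            np.zeros(5), np.full(5, 0.1))  # shape (4, 5)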
Example #30
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A_inv = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)), name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)), name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.pad = P.Pad(((0, 24), (0, 24)))
        self.pad1 = P.Pad(((0, 8), (0, 8)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(2048), mstype.float32)
        self.dampingG = Tensor(np.identity(1024), mstype.float32)
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)