# log_sum_exp: numerically stable log-sum-exp as a Cell
def __init__(self):
    super(log_sum_exp, self).__init__()
    self.maxi = P.ReduceMax()
    self.maxi_dim = P.ReduceMax(keep_dims=True)
    self.log = P.Log()
    self.sums = P.ReduceSum()
    self.exp = P.Exp()
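
# Only the __init__ is shown above. A minimal sketch of the matching
# construct, assuming the reduction runs over the last axis (the axis
# choice is an assumption, not confirmed by the source):
def construct(self, x):
    c = self.maxi_dim(x, -1)                        # per-row max, dims kept for broadcast
    lse = self.log(self.sums(self.exp(x - c), -1))  # sum over last axis, dims dropped
    return self.maxi(x, -1) + lse                   # logsumexp = max + log(sum(exp(x - max)))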

# log_softmax: hand-rolled fallback assembled from primitives
def __init__(self):
    super(log_softmax, self).__init__()
    self.maxi = P.ReduceMax()
    self.log = P.Log()
    self.sums = P.ReduceSum()
    self.exp = P.Exp()
    self.axis = -1
    self.concat = P.Concat(-1)
    self.expanddims = P.ExpandDims()
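
# Sketch of the likely construct: log_softmax(x) = x - logsumexp(x).
# This version relies on broadcasting; the Concat declared above suggests
# the original may instead tile the reduced value explicitly (assumption):
def construct(self, x):
    c = self.expanddims(self.maxi(x, self.axis), -1)   # row max, reduced axis restored
    lse = c + self.expanddims(
        self.log(self.sums(self.exp(x - c), self.axis)), -1)
    return x - lse                                     # log_softmax = x - logsumexp(x)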

# Train-one-step wrapper: forward pass, gradients, clipping, parameter update.
def construct(self, inputs, inputs_back, targets, targets_back):
    weights = self.weights
    loss = self.network(inputs, inputs_back, targets, targets_back)
    grads = self.grad(self.network, weights)(inputs, inputs_back, targets, targets_back)
    # Reduce gradients across devices (identity in standalone mode).
    grads = self.grad_reducer(grads)
    # grads = clip.average_gradients([grads])
    # grads = clip.clip_grads(grads)
    # clip_by_global_norm lives in mindspore.ops.composite (C), not in
    # mindspore.ops.operations, so it cannot be reached through the P alias.
    grads = C.clip_by_global_norm(grads)
    train_perplexity = P.Exp()(loss)
    succ = self.optimizer(grads)
    return F.depend(train_perplexity, succ)
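
# For context, a plausible __init__ for the train-step cell above, modeled on
# mindspore.nn.TrainOneStepCell. The class name and the identity grad_reducer
# (replaced by DistributedGradReducer under data parallelism) are assumptions.
import mindspore.nn as nn
from mindspore import ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import functional as F

class TrainOneStepWithClip(nn.Cell):                  # hypothetical name
    def __init__(self, network, optimizer):
        super(TrainOneStepWithClip, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True)
        self.grad_reducer = F.identity                # DistributedGradReducer in parallel mode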

# SoftmaxCrossEntropyExpand: softmax cross-entropy composed from primitives
def __init__(self, sparse=False):
    super(SoftmaxCrossEntropyExpand, self).__init__()
    self.exp = ops.Exp()
    self.sum = ops.ReduceSum(keep_dims=True)
    self.onehot = ops.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.div = ops.RealDiv()
    self.log = ops.Log()
    self.sum_cross_entropy = ops.ReduceSum(keep_dims=False)
    self.mul = ops.Mul()
    self.mul2 = ops.Mul()
    self.mean = ops.ReduceMean(keep_dims=False)
    self.sparse = sparse
    self.max = ops.ReduceMax(keep_dims=True)
    self.sub = ops.Sub()
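
# This __init__ matches the SoftmaxCrossEntropyExpand example from the
# MindSpore documentation; a construct consistent with that example
# (functional helpers ops.shape / ops.scalar_to_array as in the docs):
def construct(self, logit, label):
    logit_max = self.max(logit, -1)                    # stabilize: subtract row max
    exp = self.exp(self.sub(logit, logit_max))
    exp_sum = self.sum(exp, -1)
    softmax_result = self.div(exp, exp_sum)
    if self.sparse:
        # Expand integer labels to one-hot vectors.
        label = self.onehot(label, ops.shape(logit)[1], self.on_value, self.off_value)
    softmax_result_log = self.log(softmax_result)
    loss = self.sum_cross_entropy(self.mul(softmax_result_log, label), -1)
    loss = self.mul2(ops.scalar_to_array(-1.0), loss)  # negate the log-likelihood
    loss = self.mean(loss, -1)
    return loss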

# mix_gaussian_loss: mixture-of-Gaussians NLL (a CPU-aware variant follows below)
def __init__(self, log_scale_min=-7.0, reduce=True):
    super(mix_gaussian_loss, self).__init__()
    self.log_scale_min = log_scale_min
    self.reduce = reduce
    self.transpose_op = P.Transpose()
    self.maximum = P.Maximum()
    self.tile = P.Tile()
    self.exp = P.Exp()
    self.logsoftmax = P.LogSoftmax(-1)
    self.expand_dims = P.ExpandDims()
    self.sums = P.ReduceSum()
    self.lse = log_sum_exp()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.const = P.ScalarToArray()
    self.log = P.Log()

# discretized_mix_logistic_loss: PixelCNN++-style discretized logistic mixture NLL
def __init__(self, num_classes=256, log_scale_min=-7.0, reduce=True):
    super(discretized_mix_logistic_loss, self).__init__()
    self.num_classes = num_classes
    self.log_scale_min = log_scale_min
    self.reduce = reduce
    self.transpose_op = P.Transpose()
    self.exp = P.Exp()
    self.sigmoid = P.Sigmoid()
    self.softplus = Stable_softplus()
    self.log = P.Log()
    self.cast = P.Cast()
    self.logsoftmax = P.LogSoftmax(-1)
    self.expand_dims = P.ExpandDims()
    self.tile = P.Tile()
    self.maximum = P.Maximum()
    self.sums = P.ReduceSum()
    self.lse = log_sum_exp()
    self.reshape = P.Reshape()
    # log((num_classes - 1) / 2), used to rescale log-scales to the bin width
    self.factor = self.log(Tensor((self.num_classes - 1) / 2, ms.float32))
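
# The loss body is not shown. The core of a discretized logistic mixture
# (as in PixelCNN++/WaveNet) evaluates the probability mass one component
# assigns to a quantization bin; a sketch with all names hypothetical, and
# the y = ±1 tail cases (log-sigmoid / softplus branches) omitted:
def bin_log_prob(self, y, means, log_scales):
    inv_std = self.exp(-log_scales)
    half_bin = 1.0 / (self.num_classes - 1)
    plus_in = inv_std * (y - means + half_bin)     # upper bin edge, standardized
    min_in = inv_std * (y - means - half_bin)      # lower bin edge, standardized
    cdf_plus = self.sigmoid(plus_in)
    cdf_min = self.sigmoid(min_in)
    # Mass in the bin; clamp before the log for numerical safety.
    return self.log(self.maximum(cdf_plus - cdf_min, 1e-12))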

# mix_gaussian_loss (variant with a CPU fallback for LogSoftmax)
def __init__(self, log_scale_min=-7.0, reduce=True):
    super(mix_gaussian_loss, self).__init__()
    self.log_scale_min = log_scale_min
    self.reduce = reduce
    self.transpose_op = P.Transpose()
    self.maximum = P.Maximum()
    self.tile = P.Tile()
    self.exp = P.Exp()
    self.expand_dims = P.ExpandDims()
    self.sums = P.ReduceSum()
    self.lse = log_sum_exp()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.const = P.ScalarToArray()
    self.log = P.Log()
    self.tensor_one = Tensor(1., ms.float32)
    # P.LogSoftmax is unavailable on CPU here, so fall back to the
    # hand-rolled log_softmax Cell defined above.
    if context.get_context("device_target") == "CPU":
        self.logsoftmax = log_softmax()
    else:
        self.logsoftmax = P.LogSoftmax(-1)
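
# Neither copy of mix_gaussian_loss shows the loss computation. The mixture
# NLL combines log mixture weights with per-component Gaussian log-densities
# via log-sum-exp; a minimal sketch using the ops declared above (the
# parameter layout and helper name are assumptions):
def mixture_log_prob(self, y, logit_probs, means, log_scales):
    log_scales = self.maximum(log_scales, self.log_scale_min)   # clamp for stability
    # Per-component log-density:
    # -0.5 * (y - mu)^2 / sigma^2 - log(sigma) - 0.5 * log(2 * pi)
    centered = y - means
    log_pdf = (-0.5 * self.sq(centered) * self.exp(-2.0 * log_scales)
               - log_scales - 0.9189385332046727)   # 0.5 * log(2 * pi)
    # Weight each component in log space, then reduce with log-sum-exp.
    return self.lse(self.logsoftmax(logit_probs) + log_pdf)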

# Stable_softplus: overflow-safe softplus
def __init__(self):
    super(Stable_softplus, self).__init__()
    self.log_op = P.Log()
    self.abs_op = P.Abs()
    self.relu_op = P.ReLU()
    self.exp_op = P.Exp()
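
# Hypothetical construct for Stable_softplus, using only the ops declared above:
def construct(self, x):
    # softplus(x) = log(1 + exp(x)), rewritten as relu(x) + log(1 + exp(-|x|))
    # so that a large positive x is never exponentiated.
    return self.relu_op(x) + self.log_op(self.exp_op(-self.abs_op(x)) + 1.0)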

# Softmax assembled from Exp and ReduceSum
def __init__(self):
    super().__init__()
    self.exp = ops.Exp()
    self.reduce_sum = ops.ReduceSum(keep_dims=True)
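
# Hypothetical construct for the Exp/ReduceSum softmax above. Note that,
# unlike SoftmaxCrossEntropyExpand, it does not subtract the row max first,
# so it can overflow for large logits:
def construct(self, x):
    e = self.exp(x)
    return e / self.reduce_sum(e, -1)   # keep_dims=True makes the division broadcast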

# ELU-style activation built from Exp, Maximum, and Minimum
def __init__(self, alpha=1.0):
    super().__init__()
    self.alpha = alpha
    self.exp = ops.Exp()
    self.max = ops.Maximum()
    self.min = ops.Minimum()
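
# The ops collected here (exp, max, min, alpha) match the usual ELU
# decomposition; a sketch of the construct under that assumption:
def construct(self, x):
    # elu(x) = max(x, 0) + min(alpha * (exp(x) - 1), 0)
    return self.max(x, 0.0) + self.min(self.alpha * (self.exp(x) - 1.0), 0.0)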