def __init__(self, is_grad=True, sparse=False, reduction=None, smooth_factor=0, num_classes=2):
    super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
    self.is_grad = is_grad
    self.sparse = sparse
    validator.check_integer("num_classes", num_classes, 1, Rel.GT, self.cls_name)
    validator.check_number_range("smooth_factor", smooth_factor, 0, 1, Rel.INC_BOTH, self.cls_name)
    self.smooth_factor = smooth_factor
    self.num_classes = num_classes
    self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0 - self.smooth_factor, mstype.float32)
    self.off_value = Tensor(1.0 * self.smooth_factor / (self.num_classes - 1), mstype.float32)
    self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]
    if self.is_cpugpu:
        self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=self.is_grad)
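# A hedged sketch (not from the source) of the forward pass these fields imply:
# sparse integer labels are one-hot encoded with the smoothed on/off values, so
# the true class receives 1 - smooth_factor and the remaining probability mass
# is spread evenly over the other num_classes - 1 classes. The method name
# `construct` follows the MindSpore nn.Cell convention.
def construct(self, logits, labels):
    if self.is_cpugpu and self.sparse:
        # fused CPU/GPU path that consumes integer labels directly
        return self.sparse_softmax_cross_entropy(logits, labels)
    if self.sparse:
        labels = self.one_hot(labels, self.num_classes, self.on_value, self.off_value)
    # the op returns a (loss, dlogits) pair; only the loss is needed here
    loss, _ = self.softmax_cross_entropy(logits, labels)
    return loss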
def __init__(self):
    super(CrossEntropyLoss, self).__init__()
    self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.mean = P.ReduceMean()
    self.one_hot = P.OneHot()
    self.one = Tensor(1.0, mstype.float32)
    self.zero = Tensor(0.0, mstype.float32)
def __init__(self, network, strategy3, strategy4, axis):
    super(NetWithLoss, self).__init__()
    self.one_hot = P.OneHot(axis=axis).shard(strategy3)
    self.on_value = Tensor(2.0, ms.float32)
    self.off_value = Tensor(1.0, ms.float32)
    self.loss = P.SoftmaxCrossEntropyWithLogits().shard(strategy4)
    self.network = network
def __init__(self, network, types, shapes, output_num, strategy3=None, strategy4=None, axis=-1):
    super(NetWithLoss, self).__init__()
    self.get_next = P.GetNext(types, shapes, output_num, "")
    self.one_hot = P.OneHot(axis=axis).shard(strategy3)
    self.on_value = Tensor(1.0, ms.float32)
    self.off_value = Tensor(0.0, ms.float32)
    self.loss = P.SoftmaxCrossEntropyWithLogits().shard(strategy4)
    self.network = network
def __init__(self):
    super(CrossEntropyLoss, self).__init__()
    self.sm_scalar = P.ScalarSummary()
    self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.mean = P.ReduceMean()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
def __init__(self, sparse=False, reduction='none'):
    super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
    self.sparse = validator.check_bool(sparse, "sparse")
    self.reduction = reduction
    self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]
    self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits()
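# Hedged usage sketch, assuming the cell above is nn.SoftmaxCrossEntropyWithLogits:
# with sparse=True the labels are int32 class indices; with sparse=False they must
# be one-hot float32 rows matching the shape of the logits.
import numpy as np
from mindspore import Tensor, nn

loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
logits = Tensor(np.random.randn(4, 10).astype(np.float32))
labels = Tensor(np.array([1, 0, 7, 3], dtype=np.int32))
loss = loss_fn(logits, labels)  # scalar: mean cross-entropy over the batch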
def __init__(self, num_class, label, mask, l2_coeff, params):
    super(MaskedSoftMaxLoss, self).__init__()
    self.num_class = num_class
    self.label = label
    self.mask = mask
    self.softmax = P.SoftmaxCrossEntropyWithLogits()
    self.reduce_mean = P.ReduceMean()
    self.cast = P.Cast()
    self.l2_coeff = l2_coeff
    # Keep every parameter except biases; only these enter the L2 penalty.
    self.params = ParameterTuple(list(param for param in params if param.name[-4:] != 'bias'))
    self.reduce_sum = P.ReduceSum()
    self.num_params = len(self.params)
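# Hedged sketch (invented here, not the source's construct) of how the pieces
# above compose into a masked, L2-regularized loss: the mask zeroes out entries
# that should not contribute and is renormalized so the mean stays unbiased.
def construct(self, logits):
    loss, _ = self.softmax(logits, self.label)
    mask = self.cast(self.mask, mstype.float32)
    mask = mask / self.reduce_mean(mask)      # renormalize so the mask averages to 1
    loss = self.reduce_mean(loss * mask)      # masked mean cross-entropy
    l2 = 0.0
    for i in range(self.num_params):          # L2 penalty over the non-bias params
        l2 = l2 + self.reduce_sum(self.params[i] * self.params[i])
    return loss + self.l2_coeff * 0.5 * l2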
def __init__(self, label, mask, weight_decay, param):
    super(Loss, self).__init__(auto_prefix=False)
    self.label = Tensor(label)
    self.mask = Tensor(mask)
    self.loss = P.SoftmaxCrossEntropyWithLogits()
    self.one = Tensor(1.0, mstype.float32)
    self.zero = Tensor(0.0, mstype.float32)
    self.mean = P.ReduceMean()
    self.cast = P.Cast()
    self.l2_loss = P.L2Loss()
    self.reduce_sum = P.ReduceSum()
    self.weight_decay = weight_decay
    self.param = param
def __init__(self, is_grad=True, sparse=False, reduction=None):
    super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
    self.is_grad = is_grad
    self.sparse = sparse
    self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]
    if self.is_cpugpu:
        self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=self.is_grad)
def __init__(self, config, batch_size, num_classes,
             target_means=(0., 0., 0., 0.), target_stds=(0.1, 0.1, 0.2, 0.2)):
    super(RcnnCls, self).__init__()
    cfg = config
    self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
    self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
    self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
    self.target_means = target_means
    self.target_stds = target_stds
    self.num_classes = num_classes
    self.in_channels = cfg.rcnn_in_channels
    self.train_batch_size = batch_size
    self.test_batch_size = cfg.test_batch_size
    self.fpn_cls = FpnCls(self.in_channels, self.rcnn_fc_out_channels,
                          self.num_classes, cfg.roi_layer["out_size"])
    self.relu = P.ReLU()
    self.logicaland = P.LogicalAnd()
    self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0)
    self.loss_mask = P.SigmoidCrossEntropyWithLogits()
    self.reshape = P.Reshape()
    self.onehot = P.OneHot()
    self.greater = P.Greater()
    self.cast = P.Cast()
    self.sum_loss = P.ReduceSum()
    self.tile = P.Tile()
    self.expandims = P.ExpandDims()
    self.gather = P.GatherNd()
    self.argmax = P.ArgMaxWithValue(axis=1)
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.value = Tensor(1.0, mstype.float16)
    self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size
    # Mask of ones with the background column (class 0) zeroed, used to drop
    # the background class from per-class loss terms.
    rmv_first = np.ones((self.num_bboxes, self.num_classes))
    rmv_first[:, 0] = np.zeros((self.num_bboxes,))
    self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))
    self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size
def __init__(self, num, ignore_label):
    super(OhemLoss, self).__init__()
    self.mul = P.Mul()
    self.shape = P.Shape()
    self.one_hot = nn.OneHot(-1, num, 1.0, 0.0)
    self.squeeze = P.Squeeze()
    self.num = num
    self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
    self.mean = P.ReduceMean()
    self.select = P.Select()
    self.reshape = P.Reshape()
    self.cast = P.Cast()
    self.not_equal = P.NotEqual()
    self.equal = P.Equal()
    self.reduce_sum = P.ReduceSum(keep_dims=False)
    self.fill = P.Fill()
    self.transpose = P.Transpose()
    self.ignore_label = ignore_label
    self.loss_weight = 1.0
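# Hedged sketch (invented here, not the source's construct): the core of a masked
# per-pixel loss using the operators above. Pixels whose label equals
# `ignore_label` are excluded, and the loss is averaged over the remaining pixels.
def construct(self, logits, labels):
    labels_flat = self.reshape(labels, (-1,))
    valid = self.cast(self.not_equal(labels_flat, self.ignore_label), mstype.float32)
    one_hot_labels = self.one_hot(labels_flat)
    loss, _ = self.cross_entropy(self.reshape(logits, (-1, self.num)), one_hot_labels)
    num_valid = self.reduce_sum(valid)
    return self.loss_weight * self.reduce_sum(loss * valid) / num_valid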
def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0,
             coord_scale=1.0, no_object_scale=1.0, object_scale=1.0,
             class_scale=1.0, thresh=0.5, head_idx=0.0):
    super(YoloLoss, self).__init__()
    self.num_classes = num_classes
    self.num_anchors = len(anchors_mask)
    self.anchor_step = len(anchors[0])  # number of values per anchor (w, h)
    # Scale every anchor down to feature-map units for this head's stride.
    self.anchors = np.array(anchors, dtype=np.float32) / reduction
    self.tensor_anchors = Tensor(self.anchors, mstype.float32)
    self.anchors_mask = anchors_mask
    anchors_w = []
    anchors_h = []
    for i in range(len(anchors_mask)):
        anchors_w.append(self.anchors[self.anchors_mask[i]][0])
        anchors_h.append(self.anchors[self.anchors_mask[i]][1])
    self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
    self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))
    self.reduction = reduction
    self.seen = seen
    self.head_idx = head_idx
    self.zero = Tensor(0)
    self.coord_scale = coord_scale
    self.no_object_scale = no_object_scale
    self.object_scale = object_scale
    self.class_scale = class_scale
    self.thresh = thresh
    self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
                 'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
                 'coord_xy': 0, 'coord_wh': 0}
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.sigmoid = P.Sigmoid()
    self.zeros_like = P.ZerosLike()
    self.concat0 = P.Concat(0)
    self.concat0_2 = P.Concat(0)
    self.concat0_3 = P.Concat(0)
    self.concat0_4 = P.Concat(0)
    self.concat1 = P.Concat(1)
    self.concat1_2 = P.Concat(1)
    self.concat1_3 = P.Concat(1)
    self.concat1_4 = P.Concat(1)
    self.concat2 = P.Concat(2)
    self.concat2_2 = P.Concat(2)
    self.concat2_3 = P.Concat(2)
    self.concat2_4 = P.Concat(2)
    self.tile = P.Tile()
    self.transpose = P.Transpose()
    self.cast = P.Cast()
    self.exp = P.Exp()
    self.sum = P.ReduceSum()
    self.smooth_l1_loss = P.SmoothL1Loss()
    self.bce = P.SigmoidCrossEntropyWithLogits()
    self.ce = P.SoftmaxCrossEntropyWithLogits()
    self.pt_linspace = PtLinspace()
    self.one_hot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
    self.squeeze_2 = P.Squeeze(2)
    self.reduce_sum = P.ReduceSum()
    self.select = P.Select()
    self.iou = P.IOU()
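# Hedged illustration (values invented): anchors given in input-image pixels are
# divided by `reduction` (the stride of this detection head) so they live in
# feature-map cells, matching the units of the predicted width/height offsets.
anchors = [(116, 90), (156, 198), (373, 326)]   # hypothetical input-space anchors
reduction = 32                                  # stride of the coarsest head
scaled = [(w / reduction, h / reduction) for w, h in anchors]
# scaled == [(3.625, 2.8125), (4.875, 6.1875), (11.65625, 10.1875)]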
def __init__(self, config, representation_size, batch_size, num_classes,
             target_means=(0., 0., 0., 0.), target_stds=(0.1, 0.1, 0.2, 0.2)):
    super(Rcnn, self).__init__()
    cfg = config
    self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
    self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
    self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
    self.target_means = target_means
    self.target_stds = target_stds
    self.num_classes = num_classes
    self.in_channels = cfg.rcnn_in_channels
    self.train_batch_size = batch_size
    self.test_batch_size = cfg.test_batch_size
    self.use_ambigous_sample = cfg.use_ambigous_sample

    shape_0 = (self.rcnn_fc_out_channels, representation_size)
    weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=mstype.float16).to_tensor()
    shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
    weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=mstype.float16).to_tensor()
    self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
    self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)

    cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
                             dtype=mstype.float16).to_tensor()
    reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
                             dtype=mstype.float16).to_tensor()
    self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
    self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

    self.flatten = P.Flatten()
    self.relu = P.ReLU()
    self.logicaland = P.LogicalAnd()
    self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0)
    self.reshape = P.Reshape()
    self.onehot = P.OneHot()
    self.greater = P.Greater()
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.sum_loss = P.ReduceSum()
    self.tile = P.Tile()
    self.expandims = P.ExpandDims()
    self.gather = P.GatherNd()
    self.argmax = P.ArgMaxWithValue(axis=1)
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.value = Tensor(1.0, mstype.float16)

    self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size
    if self.use_ambigous_sample:
        self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_amb_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size

    rmv_first = np.ones((self.num_bboxes, self.num_classes))
    rmv_first[:, 0] = np.zeros((self.num_bboxes,))
    self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

    self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size
    range_max = np.arange(self.num_bboxes_test).astype(np.int32)
    self.range_max = Tensor(range_max)
def __init__(self, is_grad=False):
    super(Net, self).__init__()
    self.SoftmaxCrossEntropyWithLogits = P.SoftmaxCrossEntropyWithLogits()
def get_add(a, b):
    return a + b


def get_f(v):
    return v + 1


relu = nn.ReLU()


def get_relu(x):
    return relu(x)


softmax_cross_entropy_with_logits = P.SoftmaxCrossEntropyWithLogits()


def get_softmax_cross_entropy_with_logits(logits, labels):
    return softmax_cross_entropy_with_logits(logits, labels)


class TensorToScalar(PrimitiveWithInfer):
    """A test primitive for cases that have tensor inputs but only one scalar output."""

    @prim_attr_register
    def __init__(self):
        """init"""

    def __call__(self, logits, labels):
        raise NotImplementedError
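# Hedged usage sketch: calling the free function defined above. The underlying op
# returns a (loss, dlogits) pair, and the labels must already be one-hot float32.
import numpy as np
from mindspore import Tensor

logits = Tensor(np.array([[2.0, 1.0, 0.1]], dtype=np.float32))
labels = Tensor(np.array([[1.0, 0.0, 0.0]], dtype=np.float32))
loss, dlogits = get_softmax_cross_entropy_with_logits(logits, labels)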
def __init__(self):
    super(ConstDepend, self).__init__()
    self.value = (Tensor(np.zeros((2, 3)).astype(np.float32)),
                  Tensor(np.ones((2, 3)).astype(np.float32)))
    self.soft = P.SoftmaxCrossEntropyWithLogits()
    self.depend = depend
def __init__(self):
    super(SoftmaxCrossEntropyWithLogitsNet, self).__init__()
    self.soft = P.SoftmaxCrossEntropyWithLogits()
    self.value = (Tensor(np.zeros((2,)).astype(np.float32)),
                  Tensor(np.ones((2,)).astype(np.float32)))
def __init__(self):
    super(NetSoftmaxCrossEntropyWithLogits, self).__init__()
    self.loss = P.SoftmaxCrossEntropyWithLogits()
def __init__(self, network, strategy2=None):
    super(NetWithLoss, self).__init__()
    # The primitive takes no `strategy` constructor argument; the parallel
    # strategy is attached via .shard(), as in the other NetWithLoss variants.
    self.loss = P.SoftmaxCrossEntropyWithLogits().shard(strategy2)
    self.network = network
def __init__(self, network):
    super(NetWithLoss, self).__init__()
    self.loss = P.SoftmaxCrossEntropyWithLogits()
    self.network = network
def __init__(self, network, strategy3):
    super(NetWithLoss, self).__init__()
    self.loss = P.SoftmaxCrossEntropyWithLogits().shard(strategy3)
    self.network = network
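# Hedged illustration (values invented): a shard strategy is a tuple with one
# partition layout per op input. For (N, C) logits and one-hot labels,
# ((8, 1), (8, 1)) splits both inputs 8 ways along the batch axis, assuming an
# 8-device semi-auto-parallel context. `backbone` is a placeholder network.
from mindspore import context

context.set_auto_parallel_context(device_num=8, parallel_mode="semi_auto_parallel")
net = NetWithLoss(backbone, strategy3=((8, 1), (8, 1)))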
('AvgPool', {
    'block': P.AvgPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
    'desc_inputs': [[100, 3, 28, 28]],
    'desc_bprop': [[100, 3, 14, 14]]}),
('AvgPoolGrad', {
    'block': G.AvgPoolGrad(ksize=(2, 2), strides=(2, 2), padding="VALID"),
    'desc_const': [(3, 4, 6, 6)],
    'const_first': True,
    'desc_inputs': [[3, 4, 6, 6]],
    'desc_bprop': [[3, 4, 6, 6]],
    'skip': ['backward']}),
('MaxPoolWithArgmax', {
    'block': P.MaxPoolWithArgmax(ksize=2, strides=2),
    'desc_inputs': [[128, 32, 32, 64]],
    'desc_bprop': [[128, 32, 8, 16], [128, 32, 8, 16]]}),
('SoftmaxCrossEntropyWithLogits', {
    'block': P.SoftmaxCrossEntropyWithLogits(),
    'desc_inputs': [[1, 10], [1, 10]],
    'desc_bprop': [[1], [1, 10]],
    'skip': ['backward_exec']}),
('Flatten', {
    'block': P.Flatten(),
    'desc_inputs': [[128, 32, 32, 64]],
    'desc_bprop': [[128 * 32 * 8 * 16]]}),
('LogSoftmax', {
    'block': P.LogSoftmax(),
    'desc_inputs': [[64, 2]],
    'desc_bprop': [[160, 30522]]}),
('LogSoftmaxGrad', {
    'block': G.LogSoftmaxGrad(),
    'desc_inputs': [[16, 1234], [16, 1234]],
    'desc_bprop': [[64, 2]],