def construct(self):
    return (P.ReduceMin(self.keep_dims0)(self.x0, self.axis0),
            P.ReduceMin(self.keep_dims1)(self.x1, self.axis1),
            P.ReduceMin(self.keep_dims2)(self.x2, self.axis2),
            P.ReduceMin(self.keep_dims3)(self.x3, self.axis3),
            P.ReduceMin(self.keep_dims4)(self.x4, self.axis4),
            P.ReduceMin(self.keep_dims5)(self.x5, self.axis5),
            P.ReduceMin(self.keep_dims6)(self.x6, self.axis6),
            P.ReduceMin(self.keep_dims7)(self.x7, self.axis7),
            P.ReduceMin(self.keep_dims8)(self.x8, self.axis8))
def construct(self, box_xy, box_wh, box_confidence, box_probs, image_shape):
    batch_size = F.shape(box_xy)[0]
    x = box_xy[:, :, :, :, 0:1]
    y = box_xy[:, :, :, :, 1:2]
    box_yx = P.Concat(-1)((y, x))
    w = box_wh[:, :, :, :, 0:1]
    h = box_wh[:, :, :, :, 1:2]
    box_hw = P.Concat(-1)((h, w))

    # Undo the letterbox transform: ReduceMin picks the limiting scale that
    # fit the image inside the network input while keeping its aspect ratio.
    new_shape = P.Round()(image_shape * P.ReduceMin()(self.input_shape / image_shape))
    offset = (self.input_shape - new_shape) / 2.0 / self.input_shape
    scale = self.input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw = box_hw * scale

    # Convert center/size form to corner coordinates in original-image pixels.
    box_min = box_yx - box_hw / 2.0
    box_max = box_yx + box_hw / 2.0
    boxes = P.Concat(-1)((box_min[:, :, :, :, 0:1],
                          box_min[:, :, :, :, 1:2],
                          box_max[:, :, :, :, 0:1],
                          box_max[:, :, :, :, 1:2]))
    image_scale = P.Tile()(image_shape, (1, 2))
    boxes = boxes * image_scale
    boxes = F.reshape(boxes, (batch_size, -1, 4))
    boxes_scores = box_confidence * box_probs
    boxes_scores = F.reshape(boxes_scores, (batch_size, -1, self.num_classes))
    return boxes, boxes_scores
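For intuition, a small NumPy sketch of the letterbox math above (added for illustration, not from the source); the shapes are example values:

import numpy as np

input_shape = np.array([416.0, 416.0])   # network input (h, w)
image_shape = np.array([640.0, 480.0])   # original image (h, w)
scale = np.min(input_shape / image_shape)               # ReduceMin picks 416/640 = 0.65
new_shape = np.round(image_shape * scale)               # [416. 312.]
offset = (input_shape - new_shape) / 2.0 / input_shape  # normalized padding
print(scale, new_shape, offset)                         # 0.65 [416. 312.] [0. 0.125]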
def minimum(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor:
    """Reduces a dimension of a tensor by the minimum value in the dimension."""
    # _Axis is the source module's alias for an int or tuple-of-int axis spec.
    min_op = op.ReduceMin(keep_dims)
    outputs = min_op(inputs, axis)
    return outputs
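A minimal usage sketch for the minimum helper above (added here; it assumes the helper is in scope and that op aliases mindspore.ops.operations):

import numpy as np
from mindspore import Tensor

x = Tensor(np.array([[3.0, 1.0, 2.0],
                     [6.0, 5.0, 4.0]], dtype=np.float32))
print(minimum(x, axis=1))                  # [1. 4.], shape (2,)
print(minimum(x, axis=1, keep_dims=True))  # [[1.] [4.]], shape (2, 1)
print(minimum(x))                          # 1.0 -- the default axis () reduces all dims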
def __init__(self, batch_size=4):
    super(DiceLoss, self).__init__()
    self.threshold0 = Tensor(0.5, mstype.float32)
    self.zero_float32 = Tensor(0.0, mstype.float32)
    self.k = int(640 * 640)
    self.negative_one_int32 = Tensor(-1, mstype.int32)
    self.batch_size = batch_size
    self.concat = P.Concat()
    self.less_equal = P.LessEqual()
    self.greater = P.Greater()
    self.reduce_sum = P.ReduceSum()
    self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
    self.reduce_mean = P.ReduceMean()
    self.reduce_min = P.ReduceMin()
    self.cast = P.Cast()
    self.minimum = P.Minimum()
    self.expand_dims = P.ExpandDims()
    self.select = P.Select()
    self.fill = P.Fill()
    self.topk = P.TopK(sorted=True)
    self.shape = P.Shape()
    self.sigmoid = P.Sigmoid()
    self.reshape = P.Reshape()
    self.slice = P.Slice()
    self.logical_and = P.LogicalAnd()
    self.logical_or = P.LogicalOr()
    self.equal = P.Equal()
    self.zeros_like = P.ZerosLike()
    self.add = P.TensorAdd()
    self.gather = P.Gather()
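The op roster above (sigmoid, topk, select, reduce ops) suggests a dice loss with hard-example mining; for reference, a generic NumPy dice coefficient, which is an assumption about the intended math rather than this cell's exact implementation:

import numpy as np

def dice_coefficient(pred, target, eps=1e-6):
    # Dice = 2*|P intersect T| / (|P| + |T|); pred is a sigmoid probability map.
    inter = np.sum(pred * target)
    union = np.sum(pred * pred) + np.sum(target * target)
    return (2.0 * inter + eps) / (union + eps)

pred = np.array([[0.9, 0.1], [0.8, 0.2]], dtype=np.float32)
target = np.array([[1.0, 0.0], [1.0, 0.0]], dtype=np.float32)
print(1.0 - dice_coefficient(pred, target))  # dice loss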
def __init__(self):
    super(AxisListNet, self).__init__()
    self.reduce_sum = P.ReduceSum()
    self.reduce_mean = P.ReduceMean()
    self.reduce_max = P.ReduceMax()
    self.reduce_min = P.ReduceMin()
    self.add_n = P.AddN()
    self.axis = [0, 1, 2]
def __init__(self):
    super(Net, self).__init__()
    self.add = P.Add()
    self.sub = P.Sub()
    self.mul = P.Mul()
    self.div = P.RealDiv()
    self.sqrt = P.Sqrt()
    self.pow = P.Pow()
    self.neg = P.Neg()
    self.reducemin = P.ReduceMin()
    self.reshape = P.Reshape()
def __init__(self):
    super(NetReduce, self).__init__()
    self.axis0 = 0
    self.axis1 = 1
    self.axis2 = -1
    self.axis3 = (0, 1)
    self.axis4 = (0, 1, 2)
    self.axis5 = (-1,)
    self.axis6 = ()
    self.reduce_mean = P.ReduceMean(False)
    self.reduce_sum = P.ReduceSum(False)
    self.reduce_max = P.ReduceMax(False)
    self.reduce_min = P.ReduceMin(False)
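A quick shape check (added for illustration) of ReduceMin with the axis variants declared above, on a (3, 4, 5) input:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.random.rand(3, 4, 5).astype(np.float32))
reduce_min = P.ReduceMin(False)       # keep_dims=False, as in the cell above
print(reduce_min(x, 0).shape)         # (4, 5)
print(reduce_min(x, (0, 1)).shape)    # (5,)
print(reduce_min(x, (-1,)).shape)     # (3, 4)
print(reduce_min(x, ()).shape)        # () -- the empty axis reduces every dimension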
def __init__(self, num_bits=2, compute_type=mstype.float32, clip_value=1.0, per_channel=False):
    self.num_bits = num_bits
    self.compute_type = compute_type
    self.clip_value = clip_value
    self.per_channel = per_channel
    self.clamp = C.clip_by_value
    self.abs = P.Abs()
    self.sum = P.ReduceSum()
    self.nelement = F.size
    self.div = P.Div()
    self.cast = P.Cast()
    self.max = P.ReduceMax()
    self.min = P.ReduceMin()
    self.floor = P.Floor()
def __init__(self, num_bits=8, compute_type=mstype.float32, clip_value=1.0, per_channel=False):
    super(QuantizeWeightCell, self).__init__()
    self.num_bits = num_bits
    self.compute_type = compute_type
    self.clip_value = clip_value
    self.per_channel = per_channel
    self.clamp = C.clip_by_value
    self.abs = P.Abs()
    self.sum = P.ReduceSum()
    self.nelement = F.size
    self.div = P.Div()
    self.cast = P.Cast()
    self.max = P.ReduceMax()
    self.min = P.ReduceMin()
    self.round = P.Round()
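For reference, a NumPy sketch of one common symmetric weight-quantization formulation that cells like the two above typically build from clip, ReduceMax/ReduceMin, Div, and Round; the exact formula here is an assumption, not the cells' verified math:

import numpy as np

def quantize_weight_sketch(w, num_bits=8, clip_value=1.0):
    # Clamp weights, derive a per-tensor scale from the max magnitude,
    # then round onto the quantization grid (fake quantization).
    w = np.clip(w, -clip_value, clip_value)
    levels = 2 ** (num_bits - 1) - 1      # e.g. 127 for 8 bits
    scale = np.max(np.abs(w)) / levels
    return np.round(w / scale) * scale

w = np.random.uniform(-1.5, 1.5, (4, 4)).astype(np.float32)
print(quantize_weight_sketch(w))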
def __init__(self, strategy1, strategy2, strategy3):
    super().__init__()
    self.mul1 = P.Mul().set_strategy(strategy1)
    self.reduce_min = P.ReduceMin(keep_dims=False).set_strategy(strategy2)
    self.mul2 = P.Mul().set_strategy(strategy3)
def __init__(self):
    super(ReduceMinDynamic, self).__init__()
    self.reducemin = P.ReduceMin(False)
    self.test_dynamic = inner.GpuConvertToDynamicShape()
        self.bn = nn.BatchNorm2d(out_ch)
        self.relu = P.ReLU()

    def construct(self, input_x):
        return self.relu(self.bn(self.conv(input_x)))


test_case_reid_ops = [
    ('ReduceMax', {
        'block': P.ReduceMax(keep_dims=False),
        'desc_const': [(1,)],
        'desc_inputs': [convert([32, 32], np.float16)],
        'desc_bprop': [convert([32], np.float16)],
        'skip': []}),
    ('ReduceMin', {
        'block': P.ReduceMin(),
        'desc_const': [(1,)],
        'desc_inputs': [[32, 32]],
        'desc_bprop': [[32]],
        'skip': []}),
    ('ReduceMean', {
        'block': P.ReduceMean(keep_dims=True),
        'desc_const': [(1, 2)],
        'desc_inputs': [[32, 4, 4]],
        'desc_bprop': [[32, 1, 1]]}),
    ('Log', {
        'block': P.Log(),
        'desc_inputs': [[4, 128, 1024]],
        'desc_bprop': [[4, 128, 1024]],
        'skip': ['backward']}),  # check backward error
    ('Reciprocal', {
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P

make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')
reduce_min = P.ReduceMin(keep_dims=False)
reduce_min1 = Primitive('ReduceMin')
reduce_min2 = Primitive('ReduceMin')


class FnDict:
    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        self.fnDict[fn.__name__] = fn

    def __getitem__(self, name):
        return self.fnDict[name]
def __init__(self, keep_dims):
    super(ReduceMin, self).__init__()
    self.reduce_min = P.ReduceMin(keep_dims)
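A hedged sketch of how a wrapper cell like this is typically completed and invoked; the construct body and the call below are assumptions, since the source only shows __init__:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

class ReduceMin(nn.Cell):
    def __init__(self, keep_dims):
        super(ReduceMin, self).__init__()
        self.reduce_min = P.ReduceMin(keep_dims)

    def construct(self, x, axis):  # assumed construct, not from the source
        return self.reduce_min(x, axis)

net = ReduceMin(keep_dims=True)
out = net(Tensor(np.ones((2, 3), np.float32)), 1)
print(out.shape)  # (2, 1)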
def __init__(self,
             min_init=-6,
             max_init=6,
             num_bits=8,
             ema=False,
             ema_decay=0.999,
             per_channel=False,
             channel_size=1,
             quant_delay=0,
             symmetric=False,
             narrow_range=False,
             training=True):
    """init FakeQuantWithMinMax ascend layer"""
    super(FakeQuantWithMinMaxD, self).__init__()
    self.min_init = min_init
    self.num_bits = num_bits
    self.max_init = max_init
    self.ema = ema
    self.ema_decay = ema_decay
    self.per_channel = per_channel
    self.channel_size = channel_size
    self.quant_delay = quant_delay
    self.symmetric = symmetric
    self.narrow_range = narrow_range
    self.training = training
    if not per_channel:
        self.fake_quant = P.FakeQuantWithMinMax(num_bits=self.num_bits,
                                                ema=self.ema,
                                                ema_decay=self.ema_decay,
                                                quant_delay=self.quant_delay,
                                                symmetric=self.symmetric,
                                                narrow_range=self.narrow_range,
                                                training=training)
        self.ema_update = P.FakeQuantWithMinMaxUpdate(num_bits=self.num_bits,
                                                      ema=self.ema,
                                                      ema_decay=self.ema_decay,
                                                      quant_delay=self.quant_delay,
                                                      symmetric=self.symmetric,
                                                      narrow_range=self.narrow_range,
                                                      training=training)
    else:
        raise RuntimeError("per-channel quantization is not supported")
    if isinstance(min_init, Parameter):
        self.minq = min_init
        self.maxq = max_init
    else:
        self.minq = Parameter(Tensor(np.array([min_init]).astype(np.float32)),
                              name='quant_min',
                              requires_grad=False)
        self.maxq = Parameter(Tensor(np.array([max_init]).astype(np.float32)),
                              name='quant_max',
                              requires_grad=False)
    self.reduce_min = P.ReduceMin()
    self.reduce_max = P.ReduceMax()