def construct(self, img1, img2):
    _check_input_4d(F.shape(img1), "img1", self.cls_name)
    _check_input_4d(F.shape(img2), "img2", self.cls_name)
    P.SameTypeShape()(img1, img2)
    max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
    img1 = _convert_img_dtype_to_float32(img1, self.max_val)
    img2 = _convert_img_dtype_to_float32(img2, self.max_val)

    c1 = (self.k1 * max_val)**2
    c2 = (self.k2 * max_val)**2

    sim = ()
    mcs = ()

    # at each scale, keep the contrast-sensitivity term and halve the resolution
    for i in range(self.level):
        sim, cs = _compute_multi_channel_loss(c1, c2, img1, img2,
                                              self.multi_convs_list[i], self.concat, self.reduce_mean)
        mcs += (self.relu(cs),)
        img1, img2 = _downsample(img1, img2, self.avg_pool)

    # drop the last cs term, combine the coarser-scale cs terms with the final ssim,
    # weight each scale, and reduce to one value per image
    mcs = mcs[0:-1:1]
    mcs_and_ssim = self.pack(mcs + (self.relu(sim),))
    mcs_and_ssim = self.pow(mcs_and_ssim, self.weight_tensor)
    ms_ssim = self.prod(mcs_and_ssim, -1)
    loss = self.reduce_mean(ms_ssim, -1)

    return loss
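# Usage sketch for the construct above (an assumption, not part of the original file:
# it reads as the forward pass of mindspore.nn.MSSSIM, so the defaults assumed below
# may differ across versions).
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

net = nn.MSSSIM(max_val=1.0)  # assumed default power factors / filter size
img1 = Tensor(np.random.rand(1, 3, 192, 192).astype(np.float32))  # large enough for 5 downsampling levels
img2 = Tensor(np.random.rand(1, 3, 192, 192).astype(np.float32))
score = net(img1, img2)  # one MS-SSIM value per image in the batch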
def __init__(self, seed, dtype, name, param):
    """
    Constructor of distribution class.
    """
    super(Distribution, self).__init__()
    if seed is None:
        seed = 0
    validator.check_value_type('name', name, [str], type(self).__name__)
    validator.check_non_negative_int(seed, 'seed', name)

    self._name = name
    self._seed = seed
    self._dtype = cast_type_for_device(dtype)
    self._parameters = {}

    # parsing parameters
    for k in param.keys():
        if not(k == 'self' or k.startswith('_')):
            self._parameters[k] = param[k]

    # some attributes
    if 'distribution' in self.parameters.keys():
        self.parameter_type = self.parameters['distribution'].parameter_type
    else:
        self.parameter_type = set_param_type(self.parameters['param_dict'], dtype)
    self._broadcast_shape = self._calc_broadcast_shape()
    self._is_scalar_batch = self._check_is_scalar_batch()

    # set the function to call according to the derived class's attributes
    self._set_prob()
    self._set_log_prob()
    self._set_sd()
    self._set_var()
    self._set_cdf()
    self._set_survival()
    self._set_log_cdf()
    self._set_log_survival()
    self._set_cross_entropy()

    self.context_mode = context.get_context('mode')
    self.device_target = context.get_context('device_target')
    self.checktuple = CheckTuple()
    self.checktensor = CheckTensor()
    self.broadcast = broadcast_to

    # ops needed for the base class
    self.cast_base = P.Cast()
    self.dtype_base = P.DType()
    self.exp_base = exp_generic
    self.fill_base = P.Fill()
    self.log_base = log_generic
    self.sametypeshape_base = P.SameTypeShape()
    self.sq_base = P.Square()
    self.sqrt_base = P.Sqrt()
    self.shape_base = P.Shape()
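# Illustrative sketch (not from the original file) of the contract the base constructor
# above expects from a subclass: the subclass captures its own arguments with
# dict(locals()) and forwards them as 'param', which the parsing loop copies into
# self._parameters. 'MyDistribution' and 'loc' are hypothetical names; the 'param_dict'
# entry is included because set_param_type reads self.parameters['param_dict'] when no
# nested 'distribution' is supplied.
class MyDistribution(Distribution):
    def __init__(self, loc=None, seed=None, dtype=mstype.float32, name="MyDistribution"):
        param = dict(locals())
        param['param_dict'] = {'loc': loc}
        super(MyDistribution, self).__init__(seed, dtype, name, param)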
def construct(self, img1, img2):
    _check_input_4d(F.shape(img1), "img1", self.cls_name)
    _check_input_4d(F.shape(img2), "img2", self.cls_name)
    P.SameTypeShape()(img1, img2)
    max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
    img1 = _convert_img_dtype_to_float32(img1, self.max_val)
    img2 = _convert_img_dtype_to_float32(img2, self.max_val)

    mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
    # PSNR = 10 * log10(max_val**2 / MSE); P.Log is the natural log, hence the scalar_log(10.0) divisor
    psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)

    return psnr
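# Usage sketch (assumption: the construct above is the forward pass of mindspore.nn.PSNR).
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

net = nn.PSNR(max_val=1.0)
img1 = Tensor(np.random.rand(8, 3, 16, 16).astype(np.float32))
img2 = Tensor(np.random.rand(8, 3, 16, 16).astype(np.float32))
psnr = net(img1, img2)  # shape (8,): 10 * log10(max_val**2 / MSE) for each image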
def construct(self, img1, img2):
    _check_input_4d(F.shape(img1), "img1", "SSIM")
    _check_input_4d(F.shape(img2), "img2", "SSIM")
    P.SameTypeShape()(img1, img2)
    max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
    img1 = _convert_img_dtype_to_float32(img1, self.max_val)
    img2 = _convert_img_dtype_to_float32(img2, self.max_val)

    kernel = self._fspecial_gauss(self.filter_size, self.filter_sigma)
    kernel = P.Tile()(kernel, (1, P.Shape()(img1)[1], 1, 1))

    mean_ssim = self._calculate_mean_ssim(img1, img2, kernel, max_val, self.k1, self.k2)

    return mean_ssim
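# Usage sketch (assumption: the construct above is the forward pass of mindspore.nn.SSIM;
# the keyword names mirror the filter_*/k1/k2 attributes it reads).
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

net = nn.SSIM(max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = Tensor(np.random.rand(4, 3, 32, 32).astype(np.float32))
img2 = Tensor(np.random.rand(4, 3, 32, 32).astype(np.float32))
ssim = net(img1, img2)  # mean SSIM per image, computed with a Gaussian window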
def __init__(self,
             low=None,
             high=None,
             seed=None,
             dtype=mstype.float32,
             name="Uniform"):
    """
    Constructor of Uniform distribution.
    """
    param = dict(locals())
    valid_dtype = mstype.float_type
    check_type(dtype, valid_dtype, type(self).__name__)
    super(Uniform, self).__init__(seed, dtype, name, param)
    self.parameter_type = set_param_type({'low': low, 'high': high}, self.dtype)
    if low is not None and high is not None:
        self._low = cast_to_tensor(low, self.parameter_type)
        self._high = cast_to_tensor(high, self.parameter_type)
        check_greater(self.low, self.high, "low value", "high value")
    else:
        self._low = low if low is None else cast_to_tensor(low, self.parameter_type)
        self._high = high if high is None else cast_to_tensor(high, self.parameter_type)

    self.default_parameters = [self.low, self.high]
    self.parameter_names = ['low', 'high']

    # ops needed for the class
    self.exp = exp_generic
    self.log = log_generic
    self.squeeze = P.Squeeze(0)
    self.cast = P.Cast()
    self.const = P.ScalarToArray()
    self.dtypeop = P.DType()
    self.fill = P.Fill()
    self.less = P.Less()
    self.lessequal = P.LessEqual()
    self.logicaland = P.LogicalAnd()
    self.select = P.Select()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.zeroslike = P.ZerosLike()
    self.uniform = C.uniform
    self.sametypeshape = P.SameTypeShape()
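# Usage sketch (assumption: this is the constructor of
# mindspore.nn.probability.distribution.Uniform). 'low' and 'high' can be fixed here,
# as below, or left as None and passed to each call instead.
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype as mstype

u = msd.Uniform(0.0, 1.0, seed=0, dtype=mstype.float32)
prob = u.prob(Tensor(0.5, mstype.float32))  # density is 1 / (high - low) inside [low, high]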
def construct(self, img1, img2):
    _check_input_dtype(F.dtype(img1), "img1", [mstype.float32, mstype.float16], self.cls_name)
    _check_input_filter_size(F.shape(img1), "img1", self.filter_size, self.cls_name)
    P.SameTypeShape()(img1, img2)
    max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val)
    img1 = _convert_img_dtype_to_float32(img1, self.max_val)
    img2 = _convert_img_dtype_to_float32(img2, self.max_val)

    c1 = (self.k1 * max_val) ** 2
    c2 = (self.k2 * max_val) ** 2

    ssim_ave_channel, _ = _compute_multi_channel_loss(c1, c2, img1, img2,
                                                      self.conv, self.concat, self.reduce_mean)
    loss = self.reduce_mean(ssim_ave_channel, -1)

    return loss
def __init__(self,
             is_constant_jacobian=False,
             is_injective=True,
             name=None,
             dtype=None,
             param=None):
    """
    Constructor of Bijector class.
    """
    super(Bijector, self).__init__()
    validator.check_value_type('name', name, [str], type(self).__name__)
    validator.check_value_type('is_constant_jacobian', is_constant_jacobian, [bool], name)
    validator.check_value_type('is_injective', is_injective, [bool], name)
    if dtype is not None:
        validator.check_type_name("dtype", dtype, mstype.float_type, type(self).__name__)
    self._name = name
    self._dtype = dtype
    self._parameters = {}

    # parsing parameters
    for k in param.keys():
        if k == 'param':
            continue
        if not (k == 'self' or k.startswith('_')):
            self._parameters[k] = param[k]

    # if no bijector is used as an argument during initialization
    if 'bijector' not in param.keys():
        self._batch_shape = self._calc_batch_shape()
        self._is_scalar_batch = self._check_is_scalar_batch()

    self._is_constant_jacobian = is_constant_jacobian
    self._is_injective = is_injective

    self.context_mode = context.get_context('mode')
    self.checktensor = CheckTensor()

    # ops needed for the base class
    self.cast_base = P.Cast()
    self.dtype_base = P.DType()
    self.shape_base = P.Shape()
    self.fill_base = P.Fill()
    self.sametypeshape_base = P.SameTypeShape()
    self.issubclass_base = P.IsSubClass()
def __init__(self,
             mean=None,
             sd=None,
             seed=None,
             dtype=mstype.float32,
             name="Normal"):
    """
    Constructor of Normal.
    """
    param = dict(locals())
    valid_dtype = mstype.float_type
    check_type(dtype, valid_dtype, type(self).__name__)
    super(Normal, self).__init__(seed, dtype, name, param)
    self.parameter_type = set_param_type({'mean': mean, 'sd': sd}, self.dtype)
    if mean is not None and sd is not None:
        self._mean_value = cast_to_tensor(mean, self.parameter_type)
        self._sd_value = cast_to_tensor(sd, self.parameter_type)
        check_greater_zero(self._sd_value, "Standard deviation")
    else:
        self._mean_value = mean if mean is None else cast_to_tensor(mean, self.parameter_type)
        self._sd_value = sd if sd is None else cast_to_tensor(sd, self.parameter_type)

    self.default_parameters = [self._mean_value, self._sd_value]
    self.parameter_names = ['mean', 'sd']

    # ops needed for the class
    self.exp = exp_generic
    self.expm1 = expm1_generic
    self.log = log_generic
    self.erf = P.Erf()
    self.squeeze = P.Squeeze(0)
    self.cast = P.Cast()
    self.const = P.ScalarToArray()
    self.fill = P.Fill()
    self.shape = P.Shape()
    self.sq = P.Square()
    self.sqrt = P.Sqrt()
    self.zeroslike = P.ZerosLike()
    self.dtypeop = P.DType()
    self.sametypeshape = P.SameTypeShape()
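# Usage sketch (assumption: this is the constructor of
# mindspore.nn.probability.distribution.Normal). With 'mean'/'sd' omitted at
# construction, they would instead come from default_parameters or be passed per call.
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype as mstype

n = msd.Normal(3.0, 4.0, seed=0, dtype=mstype.float32)
log_p = n.log_prob(Tensor(2.0, mstype.float32))  # log density under N(mean=3.0, sd=4.0)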
    'skip': ['backward']
}),
# input is scalar, not Tensor
('DType0', {
    'block': (P.DType(), {
        'exception': TypeError,
        'error_keywords': ['DType']
    }),
    'desc_inputs': [5.0],
    'skip': ['backward']
}),
# input x is scalar, not Tensor
('SameTypeShape0', {
    'block': (P.SameTypeShape(), {
        'exception': TypeError,
        'error_keywords': ['SameTypeShape']
    }),
    'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']
}),
# input y is scalar, not Tensor
('SameTypeShape1', {
    'block': (P.SameTypeShape(), {
        'exception': TypeError,
        'error_keywords': ['SameTypeShape']
    }),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), 5.0],
    'skip': ['backward']
}),
    'skip': ['backward']}),
# axis as an attribute, but greater than upper limit
('ExpandDims3', {
    'block': (ExpandDimsNet(3), {'exception': ValueError, 'error_keywords': ['ExpandDims']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# input is scalar, not Tensor
('DType0', {
    'block': (P.DType(), {'exception': TypeError, 'error_keywords': ['DType']}),
    'desc_inputs': [5.0],
    'skip': ['backward']}),
# input x is scalar, not Tensor
('SameTypeShape0', {
    'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),
    'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# input y is scalar, not Tensor
('SameTypeShape1', {
    'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), 5.0],
    'skip': ['backward']}),
# types of x and y do not match
('SameTypeShape2', {
    'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.int32))],
    'skip': ['backward']}),
# shapes of x and y do not match
('SameTypeShape3', {
    'block': (P.SameTypeShape(), {'exception': ValueError, 'error_keywords': ['SameTypeShape']}),