def __init__(self, config):
    super(Faster_Rcnn_Resnet50, self).__init__()
    self.dtype = np.float32
    self.ms_type = mstype.float32
    self.train_batch_size = config.batch_size
    self.num_classes = config.num_classes
    self.anchor_scales = config.anchor_scales
    self.anchor_ratios = config.anchor_ratios
    self.anchor_strides = config.anchor_strides
    self.target_means = tuple(config.rcnn_target_means)
    self.target_stds = tuple(config.rcnn_target_stds)

    # Anchor generator
    anchor_base_sizes = None
    self.anchor_base_sizes = list(
        self.anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
    self.anchor_generators = []
    for anchor_base in self.anchor_base_sizes:
        self.anchor_generators.append(
            AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))
    self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
    featmap_sizes = config.feature_shapes
    assert len(featmap_sizes) == len(self.anchor_generators)
    self.anchor_list = self.get_anchors(featmap_sizes)

    # Backbone resnet50
    self.backbone = ResNetFea(ResidualBlockUsing, config.resnet_block,
                              config.resnet_in_channels,
                              config.resnet_out_channels, False)

    # Fpn (attribute renamed from the original's misspelled `fpn_ncek`)
    self.fpn_neck = FeatPyramidNeck(config.fpn_in_channels,
                                    config.fpn_out_channels,
                                    config.fpn_num_outs)

    # Rpn and rpn loss
    self.gt_labels_stage1 = Tensor(
        np.ones((self.train_batch_size, config.num_gts)).astype(np.uint8))
    self.rpn_with_loss = RPN(config, self.train_batch_size,
                             config.rpn_in_channels, config.rpn_feat_channels,
                             config.num_anchors, config.rpn_cls_out_channels)

    # Proposal
    self.proposal_generator = Proposal(config, self.train_batch_size,
                                       config.activate_num_classes,
                                       config.use_sigmoid_cls)
    self.proposal_generator.set_train_local(config, True)
    self.proposal_generator_test = Proposal(config, config.test_batch_size,
                                            config.activate_num_classes,
                                            config.use_sigmoid_cls)
    self.proposal_generator_test.set_train_local(config, False)

    # Assign and sampler stage two
    self.bbox_assigner_sampler_for_rcnn = BboxAssignSampleForRcnn(
        config, self.train_batch_size, config.num_bboxes_stage2, True)
    self.decode = P.BoundingBoxDecode(max_shape=(config.img_height, config.img_width),
                                      means=self.target_means,
                                      stds=self.target_stds)

    # Roi
    self.roi_init(config)

    # Rcnn
    self.rcnn = Rcnn(
        config,
        config.rcnn_in_channels * config.roi_layer['out_size'] * config.roi_layer['out_size'],
        self.train_batch_size, self.num_classes)

    # Op declare
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=0)
    self.concat_1 = P.Concat(axis=1)
    self.concat_2 = P.Concat(axis=2)
    self.reshape = P.Reshape()
    self.select = P.Select()
    self.greater = P.Greater()
    self.transpose = P.Transpose()

    # Improve speed
    self.concat_start = min(self.num_classes - 2, 55)
    self.concat_end = (self.num_classes - 1)

    # Test mode
    self.test_mode_init(config)

    # Init tensor
    self.init_tensor(config)
    self.device_type = "Ascend" if context.get_context("device_target") == "Ascend" else "Others"
def __init__(self,
             in_channels,
             out_channels,
             weight_init='normal',
             bias_init='zeros',
             damping=0.03,
             loss_scale=1,
             frequency=278,
             has_bias=True,
             activation=None):
    super(Dense_Thor, self).__init__()
    self.in_channels = Validator.check_positive_int(in_channels)
    self.out_channels = Validator.check_positive_int(out_channels)
    self.has_bias = Validator.check_bool(has_bias)
    self.thor = True
    if isinstance(weight_init, Tensor):
        if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                weight_init.shape[1] != in_channels:
            raise ValueError("weight_init shape error")
    self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

    if self.has_bias:
        if isinstance(bias_init, Tensor):
            if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                raise ValueError("bias_init shape error")
        self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

    self.matmul = P.MatMul(transpose_b=True)
    self.bias_add = P.BiasAdd()
    self.activation = get_activation(activation)
    self.activation_flag = self.activation is not None

    self.matrix_A_inv = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)),
                                  name='matrix_A_inv', requires_grad=False)
    self.matrix_G_inv = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)),
                                  name="matrix_G_inv", requires_grad=False)
    self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

    self.cube_matmul = P.CusMatMulCube(transpose_a=True)
    self.matrix_combine = P.CusMatrixCombine()
    self.cholesky = P.CusCholeskyTrsm()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.transpose = P.Transpose()
    self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
    self.mul = P.Mul()
    self.cast = P.Cast()
    self.damping = Tensor(damping)
    self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
    self.vector_matmul = P.CusBatchMatMul()
    self.pad = P.Pad(((0, 24), (0, 24)))
    self.pad1 = P.Pad(((0, 8), (0, 8)))
    self.slice = P.Slice()
    self.gather = P.GatherV2()
    self.assignadd = P.AssignAdd()
    self.freq = Tensor(frequency, mstype.int32)
    self.axis = 0
    self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
    self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
    self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
    self.fused_abs_max2 = P.CusFusedAbsMax1()
    self.log = P.Log()
    self.exp = P.Exp()
    self.dampingA = Tensor(np.identity(2048), mstype.float32)
    self.dampingG = Tensor(np.identity(1024), mstype.float32)
    self.add = P.TensorAdd()
    self.sqrt = P.Sqrt()
    self.getG = P.InsertGradientOf(self.save_gradient)
def __init__(self,
             num_features,
             eps=1e-5,
             momentum=0.9,
             affine=True,
             gamma_init='ones',
             beta_init='zeros',
             moving_mean_init='zeros',
             moving_var_init='ones',
             use_batch_statistics=None,
             device_num_each_group=1):
    super(_BatchNorm, self).__init__()
    if num_features < 1:
        raise ValueError("num_features must be at least 1")
    if momentum < 0 or momentum > 1:
        raise ValueError("momentum should be a number in range [0, 1], but got {}".format(momentum))

    self.use_batch_statistics = use_batch_statistics
    self.num_features = num_features
    self.eps = eps
    self.moving_mean = Parameter(initializer(moving_mean_init, num_features),
                                 name="mean", requires_grad=False)
    self.moving_variance = Parameter(initializer(moving_var_init, num_features),
                                     name="variance", requires_grad=False)
    self.gamma = Parameter(initializer(gamma_init, num_features),
                           name="gamma", requires_grad=affine)
    self.beta = Parameter(initializer(beta_init, num_features),
                          name="beta", requires_grad=affine)
    self.group = check_int_positive(device_num_each_group)
    self.is_global = False
    if self.group != 1:
        self.rank_id = get_rank()
        self.rank_size = get_group_size()
        self.device_list = [i for i in range(0, self.rank_size)]
        self.rank_list = self.list_group(self.device_list, self.group)
        self.rank_list_idx = len(self.rank_list)
        for i in range(self.rank_list_idx):
            if self.rank_id in self.rank_list[i] and self.group != 1:
                self.is_global = True
                management.create_group('group' + str(i), self.rank_list[i])
                self.all_reduce = P.AllReduce(P.ReduceOp.SUM,
                                              'group' + str(i)).add_prim_attr('fusion', 1)
    self.shape = P.Shape()
    self.reduce_mean = P.ReduceMean(keep_dims=True)
    self.square = P.Square()
    self.sqrt = P.Sqrt()
    self.cast = P.Cast()
    self.dtype = P.DType()
    self.reshape = P.Reshape()
    self.is_ascend = context.get_context("device_target") == "Ascend"
    self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE
    # The underlying batch-norm ops expect the update fraction, i.e. the
    # complement of the user-facing momentum.
    self.momentum = 1.0 - momentum
    if context.get_context("enable_ge"):
        self.is_ge_backend = True
    else:
        self.is_ge_backend = False
    if self.is_graph_mode and (self.is_ge_backend or self.is_ascend):
        self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps)
    else:
        self.bn_train = P.FusedBatchNorm(mode=1, epsilon=self.eps, momentum=self.momentum)
    self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps)

    data_parallel_strategy = ((1,), (1,))
    data_parallel_strategy_one = ((1,), ())
    self.sub_mean = P.Sub().set_strategy(data_parallel_strategy)
    self.sub_var = P.Sub().set_strategy(data_parallel_strategy)
    self.mul_mean = P.Mul().set_strategy(data_parallel_strategy_one)
    self.mul_var = P.Mul().set_strategy(data_parallel_strategy_one)
    self.assign_sub_mean = P.AssignSub().set_strategy(data_parallel_strategy)
    self.assign_sub_var = P.AssignSub().set_strategy(data_parallel_strategy)
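# The `self.momentum = 1.0 - momentum` flip above reflects the convention that
# the user-facing momentum weights the old running statistic while the fused
# op receives the update fraction. A self-contained NumPy sketch of one
# running-stat update under that convention (values are illustrative only):
import numpy as np

momentum = 0.9                  # user-facing value, as in the signature above
update_frac = 1.0 - momentum    # what the underlying op receives
moving_mean, batch_mean = np.float32(0.0), np.float32(10.0)
moving_mean = (1.0 - update_frac) * moving_mean + update_frac * batch_mean
print(moving_mean)              # 1.0 -- the running stat moves 10% toward the batch stat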
def __init__(self, network):
    super(PredictWithSigmoid, self).__init__()
    self.network = network
    self.sigmoid = P.Sigmoid()
    self.reshape = P.Reshape()
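# A hedged sketch of the construct() this wrapper implies (its body is not
# shown above): the wrapped network is assumed to return raw logits, which
# are squashed to probabilities with the Sigmoid op declared in __init__.
def construct(self, *inputs):
    logits = self.network(*inputs)
    return self.sigmoid(logits)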
def __init__(self, smooth=1e-5):
    super(DiceLoss, self).__init__()
    self.smooth = validator.check_positive_float(smooth, "smooth")
    self.reshape = P.Reshape()
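# A minimal NumPy sketch of the smoothed Dice loss this constructor sets up.
# The construct() body is not shown in the snippet above, so the flattening
# via reshape and the exact reduction here are illustrative assumptions only.
import numpy as np

def dice_loss_sketch(logits, label, smooth=1e-5):
    # Flatten both inputs, mirroring what self.reshape would do in construct().
    logits, label = logits.reshape(-1), label.reshape(-1)
    intersection = np.sum(logits * label)
    union = np.sum(logits) + np.sum(label)
    # Dice coefficient with additive smoothing; the loss is its complement.
    return 1.0 - (2.0 * intersection + smooth) / (union + smooth)

print(dice_loss_sketch(np.array([0.9, 0.1]), np.array([1.0, 0.0])))  # ~0.1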
def _update_run_op(beta1, beta2, eps, lr, weight_decay_tensor, global_step,
                   param, m, v, gradient, decay_flag):
    """
    Update parameters.

    Args:
        beta1 (Tensor): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0).
        beta2 (Tensor): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0).
        eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
        lr (Tensor): Learning rate.
        weight_decay_tensor (Tensor): Weight decay. Should be equal to or greater than 0.
        global_step (Tensor): Global step.
        param (Tensor): Parameters.
        m (Tensor): m value of parameters.
        v (Tensor): v value of parameters.
        gradient (Tensor): Gradient of parameters.
        decay_flag (bool): Specifies whether the param updates with weight decay.

    Returns:
        Tensor, the new value of v after updating.
    """
    op_mul = P.Mul()
    op_sqrt = P.Sqrt()
    op_rsqrt = P.Rsqrt()
    op_square = P.Square()
    op_cast = P.Cast()
    op_reshape = P.Reshape()
    op_shape = P.Shape()
    op_pow = P.Pow()
    op_norm = layer.Norm()
    op_select = P.Select()
    op_greater = P.Greater()
    op_fill = P.Fill()
    op_dtype = P.DType()

    param_fp32 = op_cast(param, mstype.float32)
    m_fp32 = op_cast(m, mstype.float32)
    v_fp32 = op_cast(v, mstype.float32)
    gradient_fp32 = op_cast(gradient, mstype.float32)

    # `num_one` is a module-level constant Tensor holding 1.0, defined next to
    # this function in the original source.
    next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta1, gradient_fp32)
    next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta2,
                                            op_square(gradient_fp32))

    next_mm = next_m / (op_cast(num_one, mstype.float32)
                        - op_pow(beta1, op_cast(global_step + num_one, mstype.float32)))
    next_vv = next_v / (op_cast(num_one, mstype.float32)
                        - op_pow(beta2, op_cast(global_step + num_one, mstype.float32)))
    w_norm = op_norm(param_fp32)
    g_norm = op_norm(gradient_fp32)

    g_norm_hat = op_norm(op_mul(next_mm, op_rsqrt(next_vv + eps)) + weight_decay_tensor * param_fp32)
    zeros = F.zeros_like_tensor(w_norm)
    ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
    trust_ratio = op_select(
        op_greater(w_norm, zeros),
        op_select(op_greater(g_norm, zeros), w_norm / g_norm_hat, ones),
        ones)
    tens = op_fill(op_dtype(trust_ratio), op_shape(trust_ratio), 10.0)
    trust_ratio = C.clip_by_value(trust_ratio, zeros, tens)
    update = next_mm / (op_sqrt(next_vv) + eps)

    if decay_flag:
        update = update + op_mul(weight_decay_tensor, param_fp32)

    update_with_lr = op_mul(op_mul(trust_ratio, lr), update)
    next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))

    next_v = F.depend(next_v, F.assign(param, next_param))
    next_v = F.depend(next_v, F.assign(m, next_m))
    next_v = F.depend(next_v, F.assign(v, next_v))
    return next_v
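# A self-contained NumPy re-derivation of the LAMB update above for a single
# parameter tensor, useful for checking the math outside the graph. This is a
# sketch of the update rule only, not the MindSpore graph implementation;
# `num_one` from the graph code is just the literal 1.0 here.
import numpy as np

def lamb_update_sketch(param, m, v, grad, beta1, beta2, eps, lr,
                       weight_decay, step, decay_flag=True):
    m = beta1 * m + (1.0 - beta1) * grad           # first-moment estimate
    v = beta2 * v + (1.0 - beta2) * grad ** 2      # second-moment estimate
    m_hat = m / (1.0 - beta1 ** (step + 1))        # bias correction
    v_hat = v / (1.0 - beta2 ** (step + 1))
    update = m_hat / (np.sqrt(v_hat) + eps)
    if decay_flag:
        update = update + weight_decay * param
    w_norm = np.linalg.norm(param)
    g_norm = np.linalg.norm(grad)
    g_norm_hat = np.linalg.norm(m_hat / np.sqrt(v_hat + eps) + weight_decay * param)
    trust_ratio = 1.0
    if w_norm > 0 and g_norm > 0:
        # layer-wise trust ratio, clipped to [0, 10] as in the graph code
        trust_ratio = np.clip(w_norm / g_norm_hat, 0.0, 10.0)
    return param - trust_ratio * lr * update, m, v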
def __init__(self, config):
    super(CreateAttentionMaskFromInputMask, self).__init__()
    self.input_mask = None
    self.cast = P.Cast()
    self.reshape = P.Reshape()
    self.shape = (-1, 1, config.seq_length)
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import operations as P
from mindspore.ops import Primitive
import mindspore as ms

addn = P.AddN()
add = P.TensorAdd()
reshape = P.Reshape()
cast = P.Cast()
tuple_getitem = Primitive('tuple_getitem')
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)


def test_addn_cast(x, y, z):
    res_sum = addn((x, y))
    res = cast(res_sum, ms.float16)
    return res


def test_addn_with_max_pool(x, y):
    res_sum = addn((x, y))
    output = max_pool(res_sum)
    res = tuple_getitem(output, 0)
    return res
def __init__(self, config):
    super(PredLogProbs, self).__init__()
    self.reshape = P.Reshape()
    self.log_softmax = nn.LogSoftmax(axis=-1)
    self.get_shape = P.Shape()
def __init__(self, network):
    super(SolveOutput, self).__init__()
    self._network = network
    self._reshape = P.Reshape()
def __init__(self):
    super(GlobalAvgPooling, self).__init__()
    self.mean = P.ReduceMean(True)
    self.shape = P.Shape()
    self.reshape = P.Reshape()
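# A hedged sketch of the construct() that typically pairs with the cell above:
# reduce over the spatial axes of an NCHW input, then flatten to (N, C). The
# axis pair (2, 3) is an assumption; the snippet does not show it.
def construct(self, x):
    x = self.mean(x, (2, 3))        # (N, C, 1, 1) since keep_dims=True
    b, c, _, _ = self.shape(x)
    x = self.reshape(x, (b, c))     # flatten to (N, C)
    return x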
def test_reshape():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()
    output = reshape(input_tensor, shp)
    assert output.asnumpy().shape == (3, 2)
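# A companion check (assuming the same harness and imports as the test above):
# P.Reshape also accepts -1 for one dimension, which is inferred from the
# total element count.
def test_reshape_infer_dim():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    reshape = P.Reshape()
    output = reshape(input_tensor, (-1, 2))   # 6 elements -> (3, 2)
    assert output.asnumpy().shape == (3, 2)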
def __init__(self,
             batch_size,
             seq_length,
             vocab_size,
             decoder,
             beam_width=4,
             decoder_layers_nums=4,
             length_penalty_weight=0.6,
             cov_penalty_factor=0.1,
             hidden_size=1024,
             max_decode_length=64,
             sos_id=2,
             eos_id=3,
             compute_type=mstype.float32):
    super(BeamSearchDecoder, self).__init__()
    self.encoder_length = seq_length
    self.hidden_size = hidden_size
    self.batch_size = batch_size
    self.vocab_size = vocab_size
    self.beam_width = beam_width
    self.decoder_layers_nums = decoder_layers_nums
    self.length_penalty_weight = length_penalty_weight
    self.cov_penalty_factor = cov_penalty_factor
    self.max_decode_length = max_decode_length
    self.decoder = decoder

    self.add = P.TensorAdd()
    self.expand = P.ExpandDims()
    self.reshape = P.Reshape()
    self.shape_flat = (-1,)
    self.shape = P.Shape()

    self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]), mstype.float32)
    self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF), mstype.float32)

    self.select = P.Select()
    self.flat_shape = (batch_size, beam_width * vocab_size)
    self.topk = P.TopK(sorted=True)
    self.floor_div = P.FloorDiv()
    self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
    self.real_div = P.RealDiv()
    self.mod = Mod()
    self.equal = P.Equal()
    self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id), mstype.int32)

    beam_ids = np.tile(np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
    self.beam_ids = Tensor(beam_ids, mstype.int32)
    batch_ids = np.arange(batch_size * beam_width).reshape((batch_size, beam_width)) // beam_width
    self.batch_ids = Tensor(batch_ids, mstype.int32)

    self.concat = P.Concat(axis=-1)
    self.gather_nd = P.GatherNd()

    self.start = Tensor(0, dtype=mstype.int32)
    self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id), mstype.int32)
    self.init_seq = Tensor(np.full([batch_size, beam_width, self.max_decode_length], sos_id), mstype.int32)

    init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]), [batch_size, 1])
    self.init_scores = Tensor(init_scores, mstype.float32)
    # plain `bool`/`float` replace the deprecated np.bool/np.float aliases
    self.init_finished = Tensor(np.zeros([batch_size, beam_width], dtype=bool))
    self.init_length = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))

    self.length_penalty = LengthPenalty(weight=length_penalty_weight)

    self.one = Tensor(1, mstype.int32)
    self.prob_concat = P.Concat(axis=1)
    self.cast = P.Cast()

    self.decoder_hidden_state = Tensor(
        np.zeros([self.decoder_layers_nums, 2, self.batch_size * self.beam_width, hidden_size]),
        mstype.float32)

    self.zeros_scores = Tensor(np.zeros([batch_size, beam_width], dtype=float))
    self.active_index = Tensor(np.ones([batch_size, beam_width], dtype=np.int32))
    self.init_zeros = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))
    self.init_ones = Tensor(np.ones([batch_size, beam_width], dtype=np.float32))

    self.accu_attn_scores = Tensor(np.zeros([batch_size, beam_width, self.encoder_length], dtype=np.float32))

    self.zeros = Tensor([0], mstype.int32)
    self.eos_tensor = Tensor(np.full([batch_size, beam_width, beam_width], eos_id), mstype.int32)

    self.ones_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 1), mstype.float32)
    self.neg_inf_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], -INF), mstype.float32)
    self.zeros_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 0), mstype.float32)
    self.zeros_2d = Tensor(np.full([batch_size * beam_width, self.encoder_length], 0), mstype.int32)
    self.argmin = P.ArgMinWithValue(axis=1)
    self.reducesum = P.ReduceSum()
    self.div = P.Div()
    self.shape_op = P.Shape()
    self.mul = P.Mul()
    self.log = P.Log()
    self.less = P.Less()
    self.tile = P.Tile()
    self.noteq = P.Neg()
    self.zeroslike = P.ZerosLike()
    self.greater_equal = P.GreaterEqual()
    self.sub = P.Sub()
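# Why init_scores is [0, -INF, -INF, ...] per batch row (a NumPy sketch): at
# step 0 every beam holds the same SOS token, so all but one beam must be
# masked out or the first top-k would select beam_width copies of one path.
# The INF value here is an assumption mirroring the module-level constant.
import numpy as np

INF = 1.0 * 1e9
batch_size, beam_width = 2, 4
init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]), [batch_size, 1])
print(init_scores.shape)   # (2, 4); exactly one live beam per batch element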
def __init__(self, strategy):
    super().__init__()
    self.reshape = P.Reshape()
    self.mul = P.Mul().shard(strategy)
    self.relu = P.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             pad_mode='same',
             padding=0,
             dilation=1,
             group=1,
             has_bias=False,
             weight_init='normal',
             bias_init='zeros'):
    kernel_size = twice(kernel_size)
    stride = twice(stride)
    self._dilation = dilation
    dilation = twice(dilation)
    super(Conv2d_Thor, self).__init__(in_channels, out_channels, kernel_size,
                                      stride, pad_mode, padding, dilation,
                                      group, has_bias, weight_init, bias_init)
    self.conv2d = P.Conv2D(out_channel=self.out_channels,
                           kernel_size=self.kernel_size,
                           mode=1,
                           pad_mode=self.pad_mode,
                           pad=self.padding,
                           stride=self.stride,
                           dilation=self.dilation,
                           group=self.group)
    self._init_depthwise_conv2d(weight_init)
    self.bias_add = P.BiasAdd()

    self.thor = True
    self.hw = kernel_size[0] * kernel_size[1]
    self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
    self.matrix_G_dim = self.out_channels
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.mul = P.Mul()
    self.cast = P.Cast()
    self.A_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                  name="A_normalizer", requires_grad=False)
    self.G_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                  name="G_normalizer", requires_grad=False)
    self.is_Ascend = True
    if context.get_context("device_target") == "Ascend":
        ksizes = (1, kernel_size[0], kernel_size[1], 1)
        strides = (1, stride[0], stride[1], 1)
        self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.transpose02314 = P.CusTranspose02314()
        dampingA_dim = self.matrix_A_dim
        self.diag_block_dim = 128
        if (self.matrix_A_dim % self.diag_block_dim) != 0 and self.matrix_A_dim > self.diag_block_dim:
            dampingA_dim = (self.matrix_A_dim // self.diag_block_dim + 1) * self.diag_block_dim
        dampingG_dim = self.matrix_G_dim
        if (self.matrix_G_dim % self.diag_block_dim) != 0 and self.matrix_G_dim > self.diag_block_dim:
            dampingG_dim = (self.matrix_G_dim // self.diag_block_dim + 1) * self.diag_block_dim
        self.matrix_A_cov = Parameter(Tensor(np.zeros([dampingA_dim, dampingA_dim]).astype(np.float32)),
                                      name='matrix_A', requires_grad=False)
        self.matrix_G_cov = Parameter(Tensor(np.zeros([dampingG_dim, dampingG_dim]).astype(np.float32)),
                                      name='matrix_G', requires_grad=False)

        self.channels_slice_flag = False
        self.C0 = 16
        if self.in_channels % self.C0 != 0:
            self.channels_slice_flag = True
        self.padA_flag = False
        if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                and self.matrix_A_dim > self.diag_block_dim:
            self.padA_flag = True
            pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
            self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
        self.slice = P.Slice()
    else:
        self.is_Ascend = False
        self.img2col = P.Im2Col(kernel_size=kernel_size, stride=stride, pad_mode="same")
        self.matmul = P.MatMul(transpose_b=True)
        self.reduce_mean = P.ReduceMean(keep_dims=False)
        self.matrix_A_cov = Parameter(
            Tensor(np.zeros([self.matrix_A_dim, self.matrix_A_dim]).astype(np.float32)),
            name='matrix_A', requires_grad=False)
        self.matrix_G_cov = Parameter(
            Tensor(np.zeros([self.matrix_G_dim, self.matrix_G_dim]).astype(np.float32)),
            name='matrix_G', requires_grad=False)
    self.getG = P.InsertGradientOf(self.save_gradient)
def __init__(self,
             params,
             learning_rate,
             momentum,
             matrix_A,
             matrix_G,
             A_inv_max,
             G_inv_max,
             weight_decay=0.0,
             loss_scale=1.0,
             decay_filter=lambda x: x.name not in []):
    super(THOR, self).__init__(learning_rate, params, weight_decay, loss_scale)
    if isinstance(momentum, float) and momentum < 0.0:
        raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
    self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
    self.params = self.parameters
    self.moments = self.params.clone(prefix="moments", init='zeros')
    self.hyper_map = C.HyperMap()
    self.opt = P.ApplyMomentum()
    self.matrix_A = ParameterTuple(matrix_A)
    self.matrix_G = ParameterTuple(matrix_G)
    self.A_inv_max = ParameterTuple(A_inv_max)
    self.G_inv_max = ParameterTuple(G_inv_max)
    self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
    self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
    self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
    self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
    self.transpose = P.Transpose()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.mul = P.Mul()
    self.weight_idx = []
    for i in range(len(self.params)):
        if "conv" in self.params[i].name or "end_point" in self.params[i].name:
            self.weight_idx.append(i)
    self.weight_idx.append(len(self.params))
    # Inverse spatial sizes (1 / (H * W)) of the feature map seen by each of
    # the 54 weights; written as grouped repeats for readability.
    self.feature_map = ([1.0 / 12544]
                        + [1.0 / 3136] * 11
                        + [1.0 / 784] * 13
                        + [1.0 / 196] * 19
                        + [1.0 / 49] * 9
                        + [1.0])
    mean = _get_mirror_mean()
    degree = _get_device_num()
    self.grad_reducer_Amax = DistributedGradReducerThor(self.parameters, 2, mean, degree)
    self.grad_reducer_Gmax = DistributedGradReducerThor(self.parameters, 5, mean, degree)
    self.grad_reducer_A = DistributedGradReducerThor(self.parameters, 3, mean, degree)
    self.grad_reducer_G = DistributedGradReducerThor(self.parameters, 4, mean, degree)
    self.matrix_A_inv = ()
    self.matrix_G_inv = ()
    self.matrix_max_inv = ()

    for i in range(54):
        self.matrix_max_inv = self.matrix_max_inv + (
            Parameter(initializer(1, [1], mstype.float32),
                      name="matrix_max" + str(i), requires_grad=False),)
    self.log = P.Log()
    self.exp = P.Exp()
    self.sqrt = P.Sqrt()
    self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
    self.assign = P.Assign()
    self.cast = P.Cast()
    self.thor = True
    self.weight_decay = weight_decay * loss_scale
    self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
def __init__(self,
             in_channels,
             out_channels,
             weight_init='normal',
             bias_init='zeros',
             has_bias=True,
             activation=None):
    super(Dense_Thor, self).__init__()
    self.thor = True
    self.in_channels = Validator.check_positive_int(in_channels)
    self.out_channels = Validator.check_positive_int(out_channels)
    self.has_bias = Validator.check_bool(has_bias)
    if isinstance(weight_init, Tensor):
        if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                weight_init.shape[1] != in_channels:
            raise ValueError("Weight init shape error.")
    self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

    self.bias = None
    if self.has_bias:
        if isinstance(bias_init, Tensor):
            if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                raise ValueError("Bias init shape error.")
        self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
        self.bias_add = P.BiasAdd()

    self.matmul = P.MatMul(transpose_b=True)
    self.activation = get_activation(activation)
    self.activation_flag = self.activation is not None

    self.matrix_A = Parameter(Tensor(np.zeros([in_channels, in_channels]).astype(np.float32)),
                              name='matrix_A', requires_grad=False)
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.transpose = P.Transpose()
    self.mul = P.Mul()
    self.is_Ascend = True
    if context.get_context("device_target") == "Ascend":
        if out_channels == 1001:
            self.matrix_G = Parameter(Tensor(np.zeros([1024, 1024]).astype(np.float32)),
                                      name='matrix_G', requires_grad=False)
            self.pad = P.Pad(((0, 23), (0, 23)))
            self.pad1 = P.Pad(((0, 7), (0, 7)))
            self.slice = P.Slice()
            self.add = P.TensorAdd()
        else:
            self.matrix_G = Parameter(Tensor(np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G", requires_grad=False)
            self.abs = P.Abs()
            self.reduce_max = P.ReduceMax(keep_dims=False)
            self.neg = P.Neg()
            self.reduce_sum = P.ReduceSum()
        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.cast = P.Cast()
        self.is_nsp_layer = (out_channels == 2)
    else:
        self.is_Ascend = False
        self.matrix_G = Parameter(Tensor(np.eye(out_channels).astype(np.float32)),
                                  name="matrix_G", requires_grad=False)
        self.cube_matmul = P.MatMul(transpose_a=True)
    self.getG = P.InsertGradientOf(self.save_gradient)
def __init__(self):
    super(NetMomentum, self).__init__()
    self.batch_size = 1
    self.reshape = P.Reshape()
    weight = Tensor(np.ones([10, 16]).astype(np.float32) * 0.01)
    self.fc1 = Dense(16, 10, weight_init=weight)
def __init__(self,
             from_tensor_width,
             to_tensor_width,
             from_seq_length,
             to_seq_length,
             num_attention_heads=1,
             size_per_head=512,
             query_act=None,
             key_act=None,
             value_act=None,
             has_attention_mask=False,
             attention_probs_dropout_prob=0.0,
             use_one_hot_embeddings=False,
             initializer_range=0.02,
             do_return_2d_tensor=False,
             use_relative_positions=False,
             compute_type=mstype.float32):
    super(BertAttention, self).__init__()
    self.from_seq_length = from_seq_length
    self.to_seq_length = to_seq_length
    self.num_attention_heads = num_attention_heads
    self.size_per_head = size_per_head
    self.has_attention_mask = has_attention_mask
    self.use_relative_positions = use_relative_positions

    self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head))
    self.reshape = P.Reshape()
    self.shape_from_2d = (-1, from_tensor_width)
    self.shape_to_2d = (-1, to_tensor_width)
    weight = TruncatedNormal(initializer_range)
    units = num_attention_heads * size_per_head
    self.query_layer = nn.Dense(from_tensor_width, units, activation=query_act,
                                weight_init=weight).to_float(compute_type)
    self.key_layer = nn.Dense(to_tensor_width, units, activation=key_act,
                              weight_init=weight).to_float(compute_type)
    self.value_layer = nn.Dense(to_tensor_width, units, activation=value_act,
                                weight_init=weight).to_float(compute_type)

    self.shape_from = (-1, from_seq_length, num_attention_heads, size_per_head)
    self.shape_to = (-1, to_seq_length, num_attention_heads, size_per_head)

    self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
    self.multiply = P.Mul()
    self.transpose = P.Transpose()
    self.trans_shape = (0, 2, 1, 3)
    self.trans_shape_relative = (2, 0, 1, 3)
    self.trans_shape_position = (1, 2, 0, 3)
    self.multiply_data = -10000.0
    self.matmul = P.BatchMatMul()

    self.softmax = nn.Softmax()
    self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)

    if self.has_attention_mask:
        self.expand_dims = P.ExpandDims()
        self.sub = P.Sub()
        self.add = P.TensorAdd()
        self.cast = P.Cast()
        self.get_dtype = P.DType()

    if do_return_2d_tensor:
        self.shape_return = (-1, num_attention_heads * size_per_head)
    else:
        self.shape_return = (-1, from_seq_length, num_attention_heads * size_per_head)

    self.cast_compute_type = SaturateCast(dst_type=compute_type)
    if self.use_relative_positions:
        self._generate_relative_positions_embeddings = \
            RelaPosEmbeddingsGenerator(length=to_seq_length,
                                       depth=size_per_head,
                                       max_relative_position=16,
                                       initializer_range=initializer_range,
                                       use_one_hot_embeddings=use_one_hot_embeddings)
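# The multiply_data = -10000.0 constant above implements additive masking:
# positions whose mask is 0 receive a large negative score before softmax,
# which drives their attention weight to ~0. A self-contained NumPy sketch of
# that step (shapes simplified; the sub/add pairing mirrors the ops declared
# under `if self.has_attention_mask`, as an assumption about the construct):
import numpy as np

scores = np.array([[2.0, 1.0, 0.5]])     # raw attention scores
mask = np.array([[1.0, 1.0, 0.0]])       # 1 = attend, 0 = masked
adder = (1.0 - mask) * -10000.0          # the self.sub / self.multiply step
masked = scores + adder                  # the self.add step
weights = np.exp(masked) / np.exp(masked).sum(-1, keepdims=True)
print(weights.round(4))                  # masked position gets ~0 weight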
('Transpose_dim4', {
    'block': P.Transpose(),
    'desc_const': [(0, 1, 2, 3)],
    'desc_inputs': [[1, 2, 3, 4]],
    'desc_bprop': [[1, 2, 4, 3]]}),
('AddN', {
    'block': NetForTupleInput(P.AddN()),
    'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
    'desc_bprop': [[2, 3, 3, 5]],
    'skip': ['backward']}),
('Shape', {
    'block': P.Shape(),
    'desc_inputs': [[3, 3, 2, 2]],
    'skip': ['backward']}),
('Reshape', {
    'block': P.Reshape(),
    'desc_const': [(64,)],
    'desc_inputs': [[64, 1]],
    'desc_bprop': [[64]]}),
('Cast', {
    'block': P.Cast(),
    'desc_const': [mstype.int32],
    'desc_inputs': [[2, 3, 4, 5]],
    'desc_bprop': [Tensor(np.ones((2, 3, 3, 5)).astype(np.int32))]}),
('ExpandDims', {
    'block': P.ExpandDims(),
    'desc_const': [0],
    'desc_inputs': [[2, 2]],
    'desc_bprop': [[1, 2, 2]]}),
('ExpandDims_1', {
    'block': P.ExpandDims(),
def __init__(self, config):
    super(WideDeepModel, self).__init__()
    emb_128_size = 650000
    emb64_single_size = 17300
    emb64_multi_size = 20900
    indicator_size = 16
    deep_dim_list = [1024, 1024, 1024, 1024, 1024]
    wide_reg_coef = [0.0, 0.0]
    deep_reg_coef = [0.0, 0.0]
    wide_lr = 0.2
    deep_lr = 1.0

    self.input_emb_dim = config.input_emb_dim
    self.batch_size = config.batch_size
    self.deep_layer_act = config.deep_layers_act
    self.init_args = config.init_args
    self.weight_init, self.bias_init = config.weight_bias_init
    self.weight_bias_init = config.weight_bias_init
    self.emb_init = config.emb_init
    self.keep_prob = config.keep_prob
    self.layer_dims = deep_dim_list + [1]
    self.all_dim_list = [self.input_emb_dim] + self.layer_dims

    self.continue_field_size = 32
    self.emb_128_size = emb_128_size
    self.emb64_single_size = emb64_single_size
    self.emb64_multi_size = emb64_multi_size
    self.indicator_size = indicator_size

    self.wide_l1_coef, self.wide_l2_coef = wide_reg_coef
    self.deep_l1_coef, self.deep_l2_coef = deep_reg_coef
    self.wide_lr = wide_lr
    self.deep_lr = deep_lr

    init_acts_embedding_metrix = [
        ('emb128_embedding', [self.emb_128_size, 128], self.emb_init),
        ('emb64_single', [self.emb64_single_size, 64], self.emb_init),
        ('emb64_multi', [self.emb64_multi_size, 64], self.emb_init),
        ('emb64_indicator', [self.indicator_size, 64], self.emb_init)]
    var_map = init_var_dict(self.init_args, init_acts_embedding_metrix)
    self.emb128_embedding = var_map["emb128_embedding"]
    self.emb64_single = var_map["emb64_single"]
    self.emb64_multi = var_map["emb64_multi"]
    self.emb64_indicator = var_map["emb64_indicator"]

    init_acts_wide_weight = [
        ('wide_continue_w', [self.continue_field_size], self.emb_init),
        ('wide_emb128_w', [self.emb_128_size], self.emb_init),
        ('wide_emb64_single_w', [self.emb64_single_size], self.emb_init),
        ('wide_emb64_multi_w', [self.emb64_multi_size], self.emb_init),
        ('wide_indicator_w', [self.indicator_size], self.emb_init),
        ('wide_bias', [1], self.emb_init)]
    var_map = init_var_dict(self.init_args, init_acts_wide_weight)
    self.wide_continue_w = var_map["wide_continue_w"]
    self.wide_emb128_w = var_map["wide_emb128_w"]
    self.wide_emb64_single_w = var_map["wide_emb64_single_w"]
    self.wide_emb64_multi_w = var_map["wide_emb64_multi_w"]
    self.wide_indicator_w = var_map["wide_indicator_w"]
    self.wide_bias = var_map["wide_bias"]

    self.dense_layer_1 = DenseLayer(self.all_dim_list[0], self.all_dim_list[1],
                                    self.weight_bias_init, self.deep_layer_act,
                                    drop_out=config.dropout_flag, convert_dtype=True)
    self.dense_layer_2 = DenseLayer(self.all_dim_list[1], self.all_dim_list[2],
                                    self.weight_bias_init, self.deep_layer_act,
                                    drop_out=config.dropout_flag, convert_dtype=True)
    self.dense_layer_3 = DenseLayer(self.all_dim_list[2], self.all_dim_list[3],
                                    self.weight_bias_init, self.deep_layer_act,
                                    drop_out=config.dropout_flag, convert_dtype=True)
    self.dense_layer_4 = DenseLayer(self.all_dim_list[3], self.all_dim_list[4],
                                    self.weight_bias_init, self.deep_layer_act,
                                    drop_out=config.dropout_flag, convert_dtype=True)
    self.dense_layer_5 = DenseLayer(self.all_dim_list[4], self.all_dim_list[5],
                                    self.weight_bias_init, self.deep_layer_act,
                                    drop_out=config.dropout_flag, convert_dtype=True)
    self.deep_predict = DenseLayer(self.all_dim_list[5], self.all_dim_list[6],
                                   self.weight_bias_init, self.deep_layer_act,
                                   drop_out=config.dropout_flag, convert_dtype=True,
                                   use_activation=False)

    self.gather_v2 = P.Gather()
    self.mul = P.Mul()
    self.reduce_sum_false = P.ReduceSum(keep_dims=False)
    self.reduce_sum_true = P.ReduceSum(keep_dims=True)
    self.reshape = P.Reshape()
    self.square = P.Square()
    self.shape = P.Shape()
    self.tile = P.Tile()
    self.concat = P.Concat(axis=1)
    self.cast = P.Cast()
    self.reduceMean_false = P.ReduceMean(keep_dims=False)
    self.Concat = P.Concat(axis=1)
    self.BiasAdd = P.BiasAdd()
    self.expand_dims = P.ExpandDims()
    self.flatten = Flatten()
def __init__(self,
             src_dim,
             tgt_dim,
             attn_embed_dim,
             num_attn_heads=1,
             query_act=None,
             key_act=None,
             value_act=None,
             out_act=None,
             has_attention_mask=True,
             attention_dropout_prob=0.0,
             initializer_range=0.02,
             do_return_2d_tensor=True,
             compute_type=mstype.float32):
    super(MultiHeadAttention, self).__init__()
    if attn_embed_dim % num_attn_heads != 0:
        raise ValueError(f"The hidden size {attn_embed_dim} is not a multiple of the "
                         f"number of attention heads {num_attn_heads}")
    self.attn_embed_dim = attn_embed_dim
    self.num_attn_heads = num_attn_heads
    self.size_per_head = attn_embed_dim // num_attn_heads
    self.src_dim = src_dim
    self.tgt_dim = tgt_dim
    self.has_attention_mask = has_attention_mask
    if attn_embed_dim != self.num_attn_heads * self.size_per_head:
        raise ValueError("`attn_embed_dim` must be divisible by num_attn_heads.")

    self.scores_mul = Tensor([1.0 / math.sqrt(float(self.size_per_head))], dtype=compute_type)
    self.reshape = P.Reshape()

    self.query_layer = nn.Dense(src_dim, attn_embed_dim,
                                activation=query_act, has_bias=True,
                                weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)
    self.key_layer = nn.Dense(tgt_dim, attn_embed_dim,
                              activation=key_act, has_bias=True,
                              weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)
    self.value_layer = nn.Dense(tgt_dim, attn_embed_dim,
                                activation=value_act, has_bias=True,
                                weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)
    self.out_layer = nn.Dense(attn_embed_dim, attn_embed_dim,
                              activation=out_act, has_bias=True,
                              weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)

    self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
    self.multiply = P.Mul()
    self.transpose = P.Transpose()
    self.multiply_data = Tensor([-10000.0], dtype=compute_type)
    self.matmul = P.BatchMatMul()
    self.softmax = nn.Softmax()
    self.dropout = nn.Dropout(1 - attention_dropout_prob)

    if self.has_attention_mask:
        self.expand_dims = P.ExpandDims()
        self.sub = P.Sub()
        self.add = P.TensorAdd()
        self.cast = P.Cast()
        self.get_dtype = P.DType()

    self.do_return_2d_tensor = do_return_2d_tensor
    self.cast_compute_type = SaturateCast(dst_type=compute_type)
    self.softmax_cast = P.Cast()
    self.get_shape = P.Shape()
    self.transpose_orders = (0, 2, 1, 3)
def __init__(self):
    super(Decoder, self).__init__()
    self.fc2 = nn.Dense(400, 1024)
    self.sigmoid = nn.Sigmoid()
    self.reshape = P.Reshape()
def __init__(self):
    super(CrossEntropyWithLogits, self).__init__()
    self.transpose_fn = F.Transpose()
    self.reshape_fn = F.Reshape()
    self.softmax_cross_entropy_loss = nn.SoftmaxCrossEntropyWithLogits()
    self.cast = F.Cast()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             pad_mode='same',
             padding=0,
             dilation=1,
             group=1,
             data_format='NCHW',
             has_bias=False,
             weight_init='normal',
             damping=0.03,
             loss_scale=1,
             frequency=278,
             bias_init='zeros'):
    self.thor = True
    ksizes = (1, kernel_size, kernel_size, 1)
    self.hw = kernel_size * kernel_size
    strides = (1, stride, stride, 1)
    kernel_size = twice(kernel_size)
    super(Conv2d_Thor, self).__init__(in_channels, out_channels, kernel_size,
                                      stride, pad_mode, padding, dilation, group,
                                      data_format, has_bias, weight_init, bias_init)
    self.conv2d = P.Conv2D(out_channel=self.out_channels,
                           kernel_size=self.kernel_size,
                           mode=1,
                           pad_mode=self.pad_mode,
                           pad=self.padding,
                           stride=self.stride,
                           dilation=self.dilation,
                           group=self.group)

    self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
    self.cube_matmul = P.CusMatMulCube(transpose_a=True)
    self.matrix_combine = P.CusMatrixCombine()
    self.cholesky = P.CusCholeskyTrsm()
    self.transpose02314 = P.CusTranspose02314()
    self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
    self.matrix_G_dim = self.out_channels
    self.matrix_A_device_shape, self.matrix_A_device_dim = caculate_device_shape(
        self.matrix_A_dim, self.in_channels, True)
    self.matrix_G_device_shape, self.matrix_G_device_dim = caculate_device_shape(
        self.matrix_G_dim, self.in_channels, False)
    self.matrix_A_device_temp_shape = (self.matrix_A_device_shape[0], self.matrix_A_device_shape[2],
                                       self.matrix_A_device_shape[1], self.matrix_A_device_shape[3])
    self.matrix_G_device_temp_shape = (self.matrix_G_device_shape[0], self.matrix_G_device_shape[2],
                                       self.matrix_G_device_shape[1], self.matrix_G_device_shape[3])
    self.matrix_A_inv = Parameter(
        Tensor(np.reshape(np.identity(self.matrix_A_device_dim).astype(np.float16),
                          self.matrix_A_device_shape)),
        name='matrix_A_inv', requires_grad=False)
    self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
    self.matrix_G_inv = Parameter(
        Tensor(np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16),
                          self.matrix_G_device_shape)),
        name="matrix_G_inv", requires_grad=False)
    self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
    self.fake_G = Tensor(
        np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape))

    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.transpose = P.Transpose()
    self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
    self.mul = P.Mul()
    self.cast = P.Cast()
    self.damping = Tensor(damping)
    self.vector_matmul = P.CusBatchMatMul()
    self.diag_block_dim = 128
    self.channels_slice_flag = False
    if self.in_channels % C0 != 0:
        self.channels_slice_flag = True

    self.padA_flag = False
    if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
            and self.matrix_A_dim > self.diag_block_dim:
        self.padA_flag = True
        pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
        self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
    self.device_shape_pad_flag = False
    if self.matrix_A_dim != self.matrix_A_device_dim:
        self.device_shape_pad_flag = True
        self.device_shape_pad = P.Pad(((0, 0), (0, C0 - self.in_channels),
                                       (0, 0), (0, C0 - self.in_channels)))
    self.slice = P.Slice()
    self.gather = P.GatherV2()
    self.freq = Tensor(frequency, mstype.int32)
    self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
    self.axis = 0

    dampingA_dim = self.matrix_A_dim
    if (self.matrix_A_dim % self.diag_block_dim) != 0 and self.matrix_A_dim > self.diag_block_dim:
        dampingA_dim = (self.matrix_A_dim // self.diag_block_dim + 1) * self.diag_block_dim
    dampingG_dim = self.matrix_G_dim
    if (self.matrix_G_dim % self.diag_block_dim) != 0 and self.matrix_G_dim > self.diag_block_dim:
        dampingG_dim = (self.matrix_G_dim // self.diag_block_dim + 1) * self.diag_block_dim

    self.dampingA = Tensor(np.identity(dampingA_dim), mstype.float32)
    self.dampingG = Tensor(np.identity(dampingG_dim), mstype.float32)
    self.fused_abs_max1 = P.CusFusedAbsMax1([self.matrix_A_dim, self.matrix_A_dim])
    self.fused_abs_max2 = P.CusFusedAbsMax1()
    self.log = P.Log()
    self.exp = P.Exp()
    self.sqrt = P.Sqrt()
    self.getG = P.InsertGradientOf(self.save_gradient)
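# A worked example of the diag-block padding logic above: matrix dimensions
# larger than one block are rounded up to a multiple of diag_block_dim (128)
# so the blocked Cus* kernels can tile them. `round_up_to_block` is a
# hypothetical helper that just restates the two `if` branches.
def round_up_to_block(dim, block=128):
    if dim % block != 0 and dim > block:
        return (dim // block + 1) * block
    return dim

print(round_up_to_block(576))   # e.g. a 3*3*64 im2col dim -> 640 (5 blocks)
print(round_up_to_block(64))    # dims at or below one block stay unchanged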
def __init__(self,
             in_channels,
             out_channels,
             weight_init='normal',
             bias_init='zeros',
             damping=0.03,
             loss_scale=1,
             frequency=100,
             has_bias=False,
             activation=None,
             batch_size=12):
    super(Dense_Thor, self).__init__()
    self.in_channels = Validator.check_positive_int(in_channels)
    self.out_channels = Validator.check_positive_int(out_channels)
    self.has_bias = Validator.check_bool(has_bias)
    self.thor = True
    if isinstance(weight_init, Tensor):
        if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                weight_init.shape()[1] != in_channels:
            raise ValueError("weight_init shape error")
    self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

    if self.has_bias:
        if isinstance(bias_init, Tensor):
            if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
                raise ValueError("bias_init shape error")
        self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

    self.matmul = P.MatMul(transpose_b=True)
    self.bias_add = P.BiasAdd()
    self.activation = get_activation(activation)
    self.activation_flag = self.activation is not None

    self.matrix_A_inv = Parameter(Tensor(np.zeros([in_channels, in_channels]).astype(np.float16)),
                                  name='matrix_A_inv', requires_grad=False)
    self.matrix_G_inv = Parameter(Tensor(np.zeros([out_channels, out_channels]).astype(np.float16)),
                                  name="matrix_G_inv", requires_grad=False)
    self.fake_G = Tensor(np.zeros([out_channels, out_channels]).astype(np.float16))

    self.cube_matmul = P.CusMatMulCube(transpose_a=True)
    self.matrix_combine = P.CusMatrixCombine()
    self.cholesky = P.CusCholeskyTrsm()
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    self.transpose = P.Transpose()
    self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
    self.mul = P.Mul()
    self.cast = P.Cast()
    self.damping = damping
    self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
    self.vector_matmul = P.CusBatchMatMul()
    self.gather = P.GatherV2()
    self.assignadd = P.AssignAdd()
    self.freq = Tensor(frequency, mstype.int32)
    self.axis = 0
    self.abs = P.Abs()
    self.reduce_max = P.ReduceMax(keep_dims=False)
    self.log = P.Log()
    self.exp = P.Exp()
    self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
    self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
    self.sqrt = P.Sqrt()
    self.getG = P.InsertGradientOf(self.save_gradient)
    self.batch_size = batch_size
def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v,
                   gradient, decay_flag, optim_filter):
    """
    Update parameters.

    Args:
        beta1 (Tensor): The exponential decay rate for the 1st moment estimates. Should be in range (0.0, 1.0).
        beta2 (Tensor): The exponential decay rate for the 2nd moment estimates. Should be in range (0.0, 1.0).
        eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
        lr (Tensor): Learning rate.
        overflow (Tensor): Whether overflow occurs.
        weight_decay (Number): Weight decay. Should be equal to or greater than 0.
        param (Tensor): Parameters.
        m (Tensor): m value of parameters.
        v (Tensor): v value of parameters.
        gradient (Tensor): Gradient of parameters.
        decay_flag (bool): Applies weight decay or not.
        optim_filter (bool): Applies the parameter update or not.

    Returns:
        Tensor, the new value of the parameter after updating.
    """
    if optim_filter:
        op_mul = P.Mul()
        op_square = P.Square()
        op_sqrt = P.Sqrt()
        op_cast = P.Cast()
        op_reshape = P.Reshape()
        op_shape = P.Shape()
        op_select = P.Select()

        param_fp32 = op_cast(param, mstype.float32)
        m_fp32 = op_cast(m, mstype.float32)
        v_fp32 = op_cast(v, mstype.float32)
        gradient_fp32 = op_cast(gradient, mstype.float32)

        cond = op_cast(F.fill(mstype.int32, op_shape(m_fp32), 1) * op_reshape(overflow, (())),
                       mstype.bool_)
        next_m = op_mul(beta1, m_fp32) + op_select(
            cond, m_fp32,
            op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta1, gradient_fp32))
        next_v = op_mul(beta2, v_fp32) + op_select(
            cond, v_fp32,
            op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta2, op_square(gradient_fp32)))

        update = next_m / (eps + op_sqrt(next_v))
        if decay_flag:
            update = op_mul(weight_decay, param_fp32) + update

        update_with_lr = op_mul(lr, update)
        zeros = F.fill(mstype.float32, op_shape(param_fp32), 0)
        next_param = param_fp32 - op_select(cond, zeros, op_reshape(update_with_lr, op_shape(param_fp32)))

        next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param))))
        next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m))))
        next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v))))

        return op_cast(next_param, F.dtype(param))
    return gradient
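# A self-contained NumPy sketch of the overflow gate above: when `overflow`
# is set, each op_select keeps the old m/v contribution and the parameter is
# left unchanged. This mirrors the selection logic only (weight decay omitted
# for brevity); it is not the MindSpore graph code.
import numpy as np

def gated_adam_step(param, m, v, grad, beta1, beta2, eps, lr, overflow):
    cond = bool(overflow)
    next_m = beta1 * m + (m if cond else (1.0 - beta1) * grad)
    next_v = beta2 * v + (v if cond else (1.0 - beta2) * grad ** 2)
    update = next_m / (eps + np.sqrt(next_v))
    next_param = param - (0.0 if cond else lr * update)   # param frozen on overflow
    return next_param, next_m, next_v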
def __init__(self,
             vocab_size,
             embedding_size,
             embedding_shape,
             use_one_hot_embeddings=False,
             initializer_range=0.02,
             name='embedding_table',
             batch_size=12,
             damping=0.03,
             loss_scale=1,
             frequency=100):
    super(Embedding_Thor, self).__init__()
    self.vocab_size = vocab_size
    self.use_one_hot_embeddings = use_one_hot_embeddings
    self.embedding_table = Parameter(initializer(TruncatedNormal(initializer_range),
                                                 [vocab_size, embedding_size]),
                                     name=name)
    self.thor = True
    self.expand = P.ExpandDims()
    self.shape_flat = (-1,)
    self.gather = P.GatherV2()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
    self.array_mul = P.MatMul()
    self.reshape = P.Reshape()
    self.em_shape = tuple(embedding_shape)
    self.shape = P.Shape()
    self.loss_scale = Tensor(1 / loss_scale, mstype.float16)

    self.matrix_A_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float16)),
                                  name='matrix_A_inv', requires_grad=False)
    self.matrix_G_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float16)),
                                  name="matrix_G_inv", requires_grad=False)
    self.fake_G = Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float16))
    self.dampingA = Tensor(np.ones([vocab_size]).astype(np.float32))
    self.dampingG = Tensor(np.identity(embedding_size), mstype.float32)
    self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
    self.freq = Tensor(frequency, mstype.int32)
    self.axis = 0
    self.damping = damping
    self.sqrt = P.Sqrt()
    self.mul = P.Mul()
    self.cast = P.Cast()
    self.cube_matmul = P.CusMatMulCube(transpose_a=True)
    self.vector_matmul = P.CusBatchMatMul()
    self.cholesky = P.CusCholeskyTrsm()
    self.matrix_combine = P.CusMatrixCombine()
    self.reduce_sum = P.ReduceSum(keep_dims=False)
    self.inv = P.Inv()
    self.getG = P.InsertGradientOf(self.save_gradient)
    self.batch_size = batch_size
def __init__(self):
    super(BatchNormReshapeNet, self).__init__()
    self.vd = P._VirtualDataset()
    self.batch_norm = nn.BatchNorm1d(512, affine=False)
    self.reshape = P.Reshape()
    self.prelu = nn.PReLU(channel=256)
def __init__(self, shape):
    super(ReshapeNet, self).__init__()
    self.shape = shape
    self.op = P.Reshape()