def __infer__(self, x, name):
    src_type = x['dtype']
    validator.check_subclass("input", src_type, [mstype.tensor], name["value"])
    # Shape, dtype and value are left unset: the output is not statically inferable.
    out = {'shape': None,
           'dtype': None,
           'value': None}
    return out
def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal',
             dtype=mstype.float32):
    super(Embedding, self).__init__()
    validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
    validator.check_value_type('use_one_hot', use_one_hot, [bool], self.cls_name)
    self.vocab_size = vocab_size
    self.embedding_size = embedding_size
    self.use_one_hot = use_one_hot
    self.embedding_table = Parameter(initializer(embedding_table, [vocab_size, embedding_size]),
                                     name='embedding_table')
    self.dtype = dtype
    self.expand = P.ExpandDims()
    self.reshape_flat = P.Reshape()
    self.shp_flat = (-1,)
    self.gather = P.GatherV2()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, self.dtype)
    self.off_value = Tensor(0.0, self.dtype)
    self.array_mul = P.MatMul()
    self.reshape = P.Reshape()
    self.get_shp = P.Shape()
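# A minimal sketch of the construct method this initializer pairs with,
# assuming only the operators registered above: flatten the indices, look
# rows up either via one-hot matmul or via gather, then restore the input
# shape with the embedding dimension appended.
def construct(self, ids):
    extended_ids = self.expand(ids, -1)
    out_shape = self.get_shp(ids) + (self.embedding_size,)
    flat_ids = self.reshape_flat(extended_ids, self.shp_flat)
    if self.use_one_hot:
        # one-hot path: (N, vocab_size) x (vocab_size, embedding_size)
        one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value)
        output_for_reshape = self.array_mul(one_hot_ids, self.embedding_table)
    else:
        # gather path: index rows of the table directly along axis 0
        output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)
    return self.reshape(output_for_reshape, out_shape)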
def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal',
             dtype=mstype.float32, padding_idx=None):
    super(Embedding, self).__init__()
    self.vocab_size = validator.check_value_type('vocab_size', vocab_size, [int], self.cls_name)
    self.embedding_size = validator.check_value_type('embedding_size', embedding_size, [int], self.cls_name)
    validator.check_value_type('use_one_hot', use_one_hot, [bool], self.cls_name)
    validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
    self.use_one_hot = use_one_hot
    self.dtype = dtype
    self.init_tensor = initializer(embedding_table, [vocab_size, embedding_size])
    self.padding_idx = padding_idx
    if padding_idx is not None:
        self.padding_idx = validator.check_int_range(padding_idx, 0, vocab_size, Rel.INC_BOTH,
                                                     "padding_idx", self.cls_name)
        if isinstance(self.init_tensor, Tensor) and self.init_tensor.init is not None:
            self.init_tensor = self.init_tensor.init_data()
        self.init_tensor = self.init_tensor.asnumpy()
        self.init_tensor[self.padding_idx] = 0
        self.init_tensor = Tensor(self.init_tensor)
    self.embedding_table = Parameter(self.init_tensor, name='embedding_table')
    self.expand = P.ExpandDims()
    self.reshape_flat = P.Reshape()
    self.shp_flat = (-1,)
    self.gather = P.GatherV2()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, self.dtype)
    self.off_value = Tensor(0.0, self.dtype)
    self.array_mul = P.MatMul()
    self.reshape = P.Reshape()
    self.get_shp = P.Shape()
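# Hypothetical usage sketch: with padding_idx set, the row at that index of
# the initialized table is zeroed before being wrapped in a Parameter, so
# padded positions embed to zero vectors. The vocab_size/embedding_size
# values here are illustrative only.
net = Embedding(vocab_size=8, embedding_size=4, padding_idx=0)
assert not net.embedding_table.data.asnumpy()[0].any()  # row 0 is all zeros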
def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal',
             dtype=mstype.float32, padding_idx=None):
    super(Embedding_Thor, self).__init__()
    self.vocab_size = Validator.check_value_type('vocab_size', vocab_size, [int], self.cls_name)
    self.embedding_size = Validator.check_value_type('embedding_size', embedding_size, [int], self.cls_name)
    Validator.check_value_type('use_one_hot', use_one_hot, [bool], self.cls_name)
    Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
    self.use_one_hot = use_one_hot
    self.dtype = dtype
    self.init_tensor = initializer(embedding_table, [vocab_size, embedding_size])
    self.padding_idx = padding_idx
    if padding_idx is not None:
        self.padding_idx = Validator.check_int_range(padding_idx, 0, vocab_size, Rel.INC_BOTH,
                                                     "padding_idx", self.cls_name)
        self.init_tensor = self.init_tensor.to_tensor().asnumpy()
        self.init_tensor[self.padding_idx] = 0
        # Convert back to Tensor before wrapping in a Parameter; the raw
        # numpy array from asnumpy() is not a valid Parameter input.
        self.init_tensor = Tensor(self.init_tensor)
    self.embedding_table = Parameter(self.init_tensor, name='embedding_table')
    self.expand = P.ExpandDims()
    self.reshape_flat = P.Reshape()
    self.shp_flat = (-1,)
    self.gather = P.GatherV2()
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, self.dtype)
    self.off_value = Tensor(0.0, self.dtype)
    self.array_mul = P.MatMul()
    self.reshape = P.Reshape()
    self.get_shp = P.Shape()
    self.thor = True
    self.matrix_A = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)),
                              name='matrix_A', requires_grad=False)
    self.matrix_G = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)),
                              name="matrix_G", requires_grad=False)
    self.reduce_sum = P.ReduceSum(keep_dims=False)
    self.getG = P.InsertGradientOf(self.save_gradient)
    self.cast = P.Cast()
    if context.get_context("device_target") == "Ascend":
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
    else:
        self.cube_matmul = P.MatMul(transpose_a=True)
    self.mul = P.Mul()
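# A minimal sketch of the save_gradient hook referenced by
# P.InsertGradientOf above, assuming the usual THOR convention of caching
# the Gram matrix of the output gradient in matrix_G while passing the
# gradient through unchanged. The batch-size normalization shown here is
# an assumption, not confirmed by the snippet above.
def save_gradient(self, dout):
    out = dout
    shape = self.get_shp(dout)
    normalizer = self.cast(shape[0], mstype.float32)
    matrix_G = self.cube_matmul(dout, dout)          # dout^T @ dout (transpose_a=True)
    matrix_G = self.mul(matrix_G, 1.0 / normalizer)  # average over the batch
    self.matrix_G = matrix_G
    return out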
def __init__(self, keep_prob=0.5, dtype=mstype.float32):
    super(Dropout, self).__init__()
    if keep_prob <= 0 or keep_prob > 1:
        raise ValueError("dropout probability should be a number in range (0, 1], but got {}".format(keep_prob))
    Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
    Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
    self.keep_prob = keep_prob
    seed0, seed1 = _get_graph_seed(0, "dropout")
    self.seed0 = seed0
    self.seed1 = seed1
    self.dropout = P.Dropout(keep_prob, seed0, seed1)
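# A minimal sketch of the matching construct method, assuming the standard
# Cell.training flag: dropout is an identity in eval mode, and P.Dropout
# returns an (output, mask) pair of which only the output is kept.
def construct(self, x):
    if not self.training:
        return x
    out, _ = self.dropout(x)
    return out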
def __infer__(self, params, indices, axis):
    validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
    validator.check_tensor_type_same({"indices": indices['dtype']}, mstype.int_type, self.name)
    validator.check_subclass("axis", axis['dtype'], mstype.int_, self.name)
    axis_v = axis['value']
    params_shp = params['shape']
    rank = len(params_shp)
    validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
    if axis_v < 0:
        axis_v += rank
    out_shape = params_shp[:axis_v] + indices['shape'] + params_shp[axis_v + 1:]
    out = {'shape': out_shape,
           'dtype': params['dtype'],
           'value': None}
    return out
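# Worked example of the output-shape rule above (plain Python, hypothetical
# shapes): gathering along an axis replaces that dimension of params with
# the full shape of indices.
params_shp = (3, 4, 5)
indices_shp = (2, 6)
axis_v = 1
out_shape = params_shp[:axis_v] + indices_shp + params_shp[axis_v + 1:]
assert out_shape == (3, 2, 6, 5)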
def __init__(self, keep_prob=0.5, dtype=mstype.float32):
    super(Dropout, self).__init__()
    if keep_prob <= 0 or keep_prob > 1:
        raise ValueError("dropout probability should be a number in range (0, 1], but got {}".format(keep_prob))
    Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
    Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
    self.keep_prob = keep_prob
    seed0, seed1 = _get_graph_seed(0, "dropout")
    self.seed0 = seed0
    self.seed1 = seed1
    self.dtype = dtype
    self.get_shape = P.Shape()
    self.dropout_gen_mask = P.DropoutGenMask(Seed0=self.seed0, Seed1=self.seed1)
    self.dropout_do_mask = P.DropoutDoMask()
    self.cast = P.Cast()
    self.is_gpu = context.get_context('device_target') in ["GPU"]
    self.dropout = P.Dropout(keep_prob)
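# A minimal sketch of the matching construct method, assuming the split set
# up above: the fused P.Dropout kernel on GPU, and the DropoutGenMask /
# DropoutDoMask pair elsewhere. The P.DType() call for reading the input
# dtype is an assumption of this sketch.
def construct(self, x):
    if not self.training:
        return x
    if self.is_gpu:
        out, _ = self.dropout(x)
        return out
    if self.keep_prob == 1:
        return x
    shape = self.get_shape(x)
    dtype = P.DType()(x)
    keep_prob = self.cast(self.keep_prob, dtype)
    output = self.dropout_gen_mask(shape, keep_prob)
    return self.dropout_do_mask(x, output, keep_prob)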