def __init__(self):
    """Net exposing Argmax over the first axis, the last axis, and the default axis."""
    super(NetArgmax, self).__init__()
    leading_axis = 0
    trailing_axis = -1
    # Three variants of the same primitive, all emitting int32 indices.
    self.argmax1 = P.Argmax(leading_axis, output_type=mstype.int32)
    self.argmax2 = P.Argmax(trailing_axis, output_type=mstype.int32)
    self.argmax3 = P.Argmax(output_type=mstype.int32)
def __init__(self, batch_size, conv_out_dim, encoder_hidden_size,
             decoder_hidden_size, decoder_output_size, max_length,
             dropout_p=0.1):
    """Attention OCR network: a conv encoder followed by an attention decoder."""
    super(AttentionOCR, self).__init__()
    # Sub-networks.
    self.encoder = Encoder(batch_size=batch_size,
                           conv_out_dim=conv_out_dim,
                           hidden_size=encoder_hidden_size)
    self.decoder = Decoder(hidden_size=decoder_hidden_size,
                           output_size=decoder_output_size,
                           max_length=max_length,
                           dropout_p=dropout_p)
    # Zero-filled fp16 hidden state used to start decoding.
    self.init_decoder_hidden = Tensor(
        np.zeros((1, batch_size, decoder_hidden_size), dtype=np.float16),
        mstype.float16)
    # Primitive ops reused during the decode loop.
    self.shape = P.Shape()
    self.split = P.Split(axis=1, output_num=max_length)
    self.concat = P.Concat()
    self.expand_dims = P.ExpandDims()
    self.argmax = P.Argmax()
    self.select = P.Select()
def __init__(self):
    """Argmax net whose input is a fixed 3x3 float32 parameter."""
    super(NetArgmax, self).__init__()
    self.argmax = P.Argmax(output_type=mstype.int32)
    # Constant test data held as a network parameter.
    data = np.array([[1., 20., 5.],
                     [67., 8., 9.],
                     [130., 24., 15.]], dtype=np.float32)
    init_tensor = Tensor(data)
    self.x = Parameter(initializer(init_tensor, init_tensor.shape), name='x')
def __init__(self, context_dim, epsilon=100, delta=0.1, alpha=0.1, T=1e5):
    """Linear UCB contextual-bandit learner with noise-perturbed statistics.

    Args:
        context_dim (int): dimension of each arm's context vector.
        epsilon (float): privacy budget; larger epsilon gives smaller sigma.
        delta (float): failure probability used inside the sigma formula.
        alpha (float): confidence/exploration coefficient.
        T: horizon (total number of steps); stored as int.
    """
    super(LinUCB, self).__init__()
    # Primitive ops used by the selection/update routines.
    self.matmul = P.MatMul()
    self.expand_dims = P.ExpandDims()
    self.transpose = P.Transpose()
    self.reduce_sum = P.ReduceSum()
    self.squeeze = P.Squeeze(1)
    self.argmax = P.Argmax()
    self.reduce_max = P.ReduceMax()
    # Basic variables
    self._context_dim = context_dim
    self._epsilon = epsilon
    self._delta = delta
    self._alpha = alpha
    self._T = int(T)
    # Parameters: design matrix V, reward accumulator u, estimate theta.
    self._V = Tensor(np.zeros((context_dim, context_dim), dtype=np.float32))
    self._u = Tensor(np.zeros((context_dim, ), dtype=np.float32))
    self._theta = Tensor(np.zeros((context_dim, ), dtype=np.float32))
    # \sigma = 4*\sqrt{2*\ln{\frac{1.25}{\delta}}}/\epsilon
    # NOTE(review): the code below omits the factor 2 inside the sqrt
    # that the formula above states — confirm which is intended.
    self._sigma = 4 * \
        math.sqrt(math.log(1.25 / self._delta)) / self._epsilon
    self._c = 0.1
    self._step = 1
    self._regret = 0
    self._current_regret = 0
    # Initialize the cached inverse of V (method defined elsewhere on the class).
    self.inverse_matrix()
def __init__(self, net, need_slice=False):
    """Inference wrapper for a UNet: softmax then argmax over the last axis."""
    super(UnetEval, self).__init__()
    self.net = net
    self.need_slice = need_slice
    # Post-processing ops applied to the raw network output.
    self.transpose = ops.Transpose()
    self.softmax = ops.Softmax(axis=-1)
    self.argmax = ops.Argmax(axis=-1)
    self.squeeze = ops.Squeeze(axis=0)
def __init__(self, label, mask):
    """Accuracy cell over fixed label and mask tensors."""
    super(Accuracy, self).__init__(auto_prefix=False)
    # Ground truth and node mask stored as constant tensors.
    self.label = Tensor(label)
    self.mask = Tensor(mask)
    # Ops needed to turn predictions into a mean accuracy.
    self.argmax = P.Argmax()
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.mean = P.ReduceMean()
def __init__(self, num_class, label, mask):
    """Masked accuracy cell; labels and mask are fixed float32 tensors."""
    super(MaskedAccuracy, self).__init__()
    self.num_class = num_class
    self.label = Tensor(label, dtype=mstype.float32)
    self.mask = Tensor(mask, dtype=mstype.float32)
    # Row-wise argmax plus the ops to average the masked matches.
    self.argmax = P.Argmax(axis=1)
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.reduce_mean = P.ReduceMean()
def __init__(self, network):
    """Cell that counts correct classifications across all devices."""
    super(ClassifyCorrectCell, self).__init__(auto_prefix=False)
    self._network = network
    # Compare predictions with labels and sum the hits.
    self.argmax = P.Argmax()
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.reduce_sum = P.ReduceSum()
    # Aggregate per-device correct counts over the whole world group.
    self.allreduce = P.AllReduce(P.ReduceOp.SUM, GlobalComm.WORLD_COMM_GROUP)
def __init__(self, config):
    """BERT pretraining evaluation cell with running total/accuracy counters."""
    super(BertPretrainEva, self).__init__()
    self.bert = GetLogProbs(config)
    # Prediction is the int32 argmax over the vocabulary (last) axis.
    self.argmax = P.Argmax(axis=-1, output_type=mstype.int32)
    self.equal = P.Equal()
    self.mean = P.ReduceMean()
    self.sum = P.ReduceSum()
    # Running counters: evaluated positions and correct predictions.
    self.total = Parameter(Tensor([0], mstype.float32), name='total')
    self.acc = Parameter(Tensor([0], mstype.float32), name='acc')
    self.reshape = P.Reshape()
    self.shape = P.Shape()
    self.cast = P.Cast()
def __init__(self,
             probs=None,
             seed=None,
             dtype=mstype.int32,
             name="Categorical"):
    """Categorical distribution over integer category indices.

    Args:
        probs: category probabilities; the last axis enumerates categories.
            When given, it is validated below for rank, per-entry probability
            range, and summing to one.
        seed: sampling seed forwarded to the Distribution base class.
        dtype: integer sample dtype; must be in mstype.int_type.
        name: name of the distribution.
    """
    # Capture constructor arguments for the base class before locals change.
    param = dict(locals())
    param['param_dict'] = {'probs': probs}
    valid_dtype = mstype.int_type
    Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__)
    super(Categorical, self).__init__(seed, dtype, name, param)

    self._probs = self._add_parameter(probs, 'probs')
    if self.probs is not None:
        # Validate rank, probability range, and normalization of probs.
        check_rank(self.probs)
        check_prob(self.probs)
        check_sum_equal_one(self.probs)

        # update is_scalar_batch and broadcast_shape
        # drop one dimension (the category axis does not belong to the batch shape)
        if self.probs.shape[:-1] == ():
            self._is_scalar_batch = True
        self._broadcast_shape = self._broadcast_shape[:-1]

    # Primitive ops and functional helpers used by the distribution methods.
    self.argmax = P.Argmax()
    self.broadcast = broadcast_to
    self.cast = P.Cast()
    self.clip_by_value = C.clip_by_value
    self.concat = P.Concat(-1)
    self.cumsum = P.CumSum()
    self.dtypeop = P.DType()
    self.exp = exp_generic
    self.expand_dim = P.ExpandDims()
    self.fill = P.Fill()
    self.floor = P.Floor()
    self.gather = P.GatherNd()
    self.less = P.Less()
    self.log = log_generic
    self.log_softmax = P.LogSoftmax()
    self.logicor = P.LogicalOr()
    # Sampler seeded with the seed stored by the base class.
    self.multinomial = P.Multinomial(seed=self.seed)
    self.reshape = P.Reshape()
    self.reduce_sum = P.ReduceSum(keep_dims=True)
    self.select = P.Select()
    self.shape = P.Shape()
    self.softmax = P.Softmax()
    self.squeeze = P.Squeeze()
    self.squeeze_first_axis = P.Squeeze(0)
    self.squeeze_last_axis = P.Squeeze(-1)
    self.square = P.Square()
    self.transpose = P.Transpose()
    # dtype used for index tensors in gather/sample helpers.
    self.index_type = mstype.int32
def __init__(self, num_classes, feature_shape, backbone, channel, depth, infer_scale_sizes,
             atrous_rates, decoder_output_stride, output_stride, fine_tune_batch_norm,
             image_pyramid):
    """Multi-scale DeepLabV3: runs SingleDeepLabV3 over an image pyramid.

    Args:
        num_classes (int): number of segmentation classes.
        feature_shape: input feature shape; indices 2 and 3 give H and W.
        backbone: accepted for interface compatibility (see NOTE below).
        channel, depth: backbone output channel/depth configuration.
        infer_scale_sizes: extra scales used at inference; may be None.
        atrous_rates, decoder_output_stride, output_stride: ASPP/decoder config.
        fine_tune_batch_norm (bool): whether BN layers are trainable.
        image_pyramid: training scales; defaults to [1.0] when None.
    """
    super(DeepLabV3, self).__init__()
    # BUG FIX: the original guarded against None and then unconditionally
    # re-assigned `self.infer_scale_sizes = infer_scale_sizes`, which both
    # stored None and crashed below when iterating the scales.
    if infer_scale_sizes is None:
        infer_scale_sizes = []
    self.infer_scale_sizes = infer_scale_sizes
    if image_pyramid is None:
        image_pyramid = [1.0]
    self.image_pyramid = image_pyramid

    # All scales: the training pyramid followed by the inference scales.
    scale_sizes = list(image_pyramid) + list(infer_scale_sizes)
    # One sampling block per scale.
    self.samples = nn.CellList(
        [SampleBlock(feature_shape, scale_size) for scale_size in scale_sizes])

    # NOTE(review): `backbone` is accepted but the original code always
    # builds resnet50_dl here; parameter kept for interface compatibility.
    self.deeplabv3 = SingleDeepLabV3(num_classes=num_classes,
                                     feature_shape=feature_shape,
                                     backbone=resnet50_dl(fine_tune_batch_norm),
                                     channel=channel,
                                     depth=depth,
                                     scale_sizes=scale_sizes,
                                     atrous_rates=atrous_rates,
                                     decoder_output_stride=decoder_output_stride,
                                     output_stride=output_stride,
                                     fine_tune_batch_norm=fine_tune_batch_norm)
    # Ops for fusing per-scale logits and producing the class map.
    self.softmax = P.Softmax(axis=1)
    self.concat = P.Concat(axis=2)
    self.expand_dims = P.ExpandDims()
    self.reduce_mean = P.ReduceMean()
    self.argmax = P.Argmax(axis=1)
    # Resize predictions back to the input feature resolution.
    self.sample_common = P.ResizeBilinear(
        (int(feature_shape[2]), int(feature_shape[3])),
        align_corners=True)
def __init__(self):
    """Minimal net exposing a single row-wise (axis=1) Argmax op."""
    super(Net, self).__init__()
    self.argmax = P.Argmax(axis=1)
# Declarative op test cases: each entry names the primitive under test
# ('block'), its input shapes or tensors ('desc_inputs'), expected bprop
# shapes ('desc_bprop'), and test phases to skip.
# NOTE(review): this list is truncated in the visible chunk — the final
# ArgMinWithValue entry (and the closing bracket) continue past this view.
test_case_array_ops = [
    ('SpaceToDepth', {
        'block': P.SpaceToDepth(2),
        'desc_inputs': [[1, 3, 2, 2]],
        'desc_bprop': [[1, 12, 1, 1]]}),
    ('DepthToSpace', {
        'block': P.DepthToSpace(2),
        'desc_inputs': [[1, 12, 1, 1]],
        'desc_bprop': [[1, 3, 2, 2]]}),
    ('Split', {
        'block': P.Split(1, 2),
        'desc_inputs': [Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))],
        'skip': ['backward']}),
    ('Argmax', {
        'block': P.Argmax(),
        'desc_inputs': [[128, 32, 32, 64]],
        'desc_bprop': [0],
        'skip': ['backward']}),
    ('Argmin', {
        'block': P.Argmin(),
        'desc_inputs': [[128, 32, 32, 64]],
        'desc_bprop': [1],
        'skip': ['backward']}),
    ('ArgMaxWithValue', {
        'block': P.ArgMaxWithValue(),
        'desc_inputs': [[128, 32, 32, 64]],
        'desc_bprop': [[1], [1]],
        'skip': ['backward']}),
    ('ArgMinWithValue', {
        'block': P.ArgMinWithValue(),
def __init__(self, net):
    """Inference wrapper: softmax then argmax over the last output axis."""
    super(UnetEval, self).__init__()
    self.net = net
    # Post-processing ops applied to the raw network output.
    self.transpose = ops.Transpose()
    self.softmax = ops.Softmax(axis=-1)
    self.argmax = ops.Argmax(axis=-1)
def __init__(self, backbone, generate=False):
    """Evaluation wrapper around a backbone network."""
    super(EvalNet, self).__init__(auto_prefix=False)
    self.backbone = backbone
    self.generate = generate
    # Greedy selection from the backbone logits.
    self.argmax = P.Argmax()
# Greedy decoding for one evaluation batch.
# NOTE(review): this is a fragment — `data`, `config`, `network`, `images`,
# `sos_id`, `eos_id`, and `rev_vocab` are defined outside the visible chunk,
# and the collapsed source's indentation has been reconstructed here.
decoder_inputs = Tensor(data["decoder_input"])
decoder_targets = Tensor(data["decoder_target"])
# Zero-filled fp16 hidden state to start the decoder.
decoder_hidden = Tensor(
    np.zeros(
        (1, config.eval_batch_size, config.decoder_hidden_size),
        dtype=np.float16), mstype.float16)
# First decoder input: the start-of-sequence token for every sample.
decoder_input = Tensor((np.ones(
    (config.eval_batch_size, 1)) * sos_id).astype(np.int32))
encoder_outputs = network.encoder(images)
batch_decoded_label = []
for di in range(decoder_inputs.shape[1]):
    decoder_output, decoder_hidden, _ = network.decoder(
        decoder_input, decoder_hidden, encoder_outputs)
    # Greedy step: take the argmax token and feed it back as the next input.
    topi = P.Argmax()(decoder_output)
    ni = P.ExpandDims()(topi, 1)
    decoder_input = ni
    topi_id = topi.asnumpy()
    batch_decoded_label.append(topi_id)
# Convert per-timestep predictions into per-sample token sequences,
# stopping at the end-of-sequence token.
for b in range(config.eval_batch_size):
    text = data["annotation"][b].decode("utf8")
    text = text_standardization(text)
    decoded_label = list(np.array(batch_decoded_label)[:, b])
    decoded_words = []
    for idx in decoded_label:
        if idx == eos_id:
            break
        else:
            decoded_words.append(rev_vocab[idx])
def train():
    """Train a GraphSAGE model on Cora, keep the best checkpoint by
    validation accuracy, then evaluate that checkpoint on the test split.

    NOTE(review): the original was collapsed onto single lines; indentation
    of the epoch/batch loops has been reconstructed and should be confirmed
    against the original repository.
    """
    # Command-line arguments: dataset path and split sizes.
    parser = argparse.ArgumentParser(description='Graphsage')
    parser.add_argument('--data_dir', type=str, default='../data_mr/cora',
                        help='Dataset directory')
    parser.add_argument('--train_nodes_num', type=int, default=1208,
                        help='Nodes numbers for training')
    parser.add_argument('--eval_nodes_num', type=int, default=500,
                        help='Nodes numbers for evaluation')
    parser.add_argument('--test_nodes_num', type=int, default=1000,
                        help='Nodes numbers for test')
    args = parser.parse_args()
    # Directory for the best checkpoint.
    if not os.path.exists("ckpts_graphsage"):
        os.mkdir("ckpts_graphsage")
    # Execution mode and target device.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU",
                        save_graphs=False)
    # Load features, labels and split masks.
    features, labels, train_mask, test_mask, eval_mask = load_and_process(
        args.data_dir, args.train_nodes_num, args.eval_nodes_num, args.test_nodes_num)
    # Random node permutation; train / val / test node index splits.
    rand_incides = np.random.permutation(features.shape[0])
    test_nodes = rand_incides[args.train_nodes_num+args.eval_nodes_num:]
    val_nodes = rand_incides[args.train_nodes_num:args.train_nodes_num+args.eval_nodes_num]
    train_nodes = rand_incides[:args.train_nodes_num]
    feature_size = features.shape[2]
    num_nodes = features.shape[0]
    num_class = labels.max() + 1
    print("feature size: ", feature_size)
    print("nodes number: ", num_nodes)
    print("node classes: ", num_class)
    # Training hyperparameters, loss, optimizer and train step.
    # NOTE(review): early_stopping is defined but never used below.
    early_stopping = 15
    eval_acc_max = 0.8
    net_original = Graphsage(input_dim=1433, hidden_dim=128, output_dim=7,
                             hops=[10, 10])
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt_Adam = nn.Adam(net_original.trainable_params())
    net_with_loss = nn.WithLossCell(net_original, loss_fn=loss)
    net_train_step = nn.TrainOneStepCell(net_with_loss, opt_Adam)
    for epoch in range(10):
        net_train_step.set_train(mode=True)
        for batch in range(20):
            # Sample one training batch of nodes.
            batch_src_index = np.random.choice(train_nodes, size=(16,))
            features_sampled = []
            for node in batch_src_index:
                features_sampled.append((features[node]))
            # NOTE(review): batch_train_mask is computed but never used.
            batch_train_mask = train_mask[batch_src_index]
            label_source = labels[batch_src_index]
            train_step_loss = net_train_step(
                Tensor(features_sampled, mindspore.float32),
                Tensor(label_source[:, 0], mindspore.int32))
            step_loss = P.ReduceSum()(train_step_loss).asnumpy()
            # Validation pass on the full validation split.
            batch_eval_index = val_nodes
            eval_fea_sampled = []
            for node in batch_eval_index:
                eval_fea_sampled.append((features[node]))
            # NOTE(review): batch_eval_mask is computed but never used.
            batch_eval_mask = eval_mask[batch_eval_index]
            eval_label_source = labels[batch_eval_index]
            eval_lable = Tensor(eval_label_source[:, 0], mindspore.int32)
            eval_soln = net_original(Tensor(eval_fea_sampled, mindspore.float32))
            eval_logits = P.Argmax()(eval_soln)
            eval_acc = P.ReduceMean()(P.Cast()(
                (P.Equal()(eval_lable, eval_logits)), mindspore.float32))
            print("Epoch:", epoch + 1, " Batch: ", batch + 1,
                  "'s train loss =", step_loss, " val accuracy =", eval_acc)
            # Keep only the most accurate checkpoint seen so far.
            if eval_acc.asnumpy() > eval_acc_max:
                eval_acc_max = eval_acc
                print("a more accurate model!")
                if os.path.exists("ckpts_graphsage/graphsage.ckpt"):
                    os.remove("ckpts_graphsage/graphsage.ckpt")
                save_checkpoint(net_train_step, "ckpts_graphsage/graphsage.ckpt")
    # Build the test batch.
    batch_test_index = test_nodes
    test_fea_sampled = []
    for node in batch_test_index:
        test_fea_sampled.append((features[node]))
    # NOTE(review): this indexes eval_mask, not test_mask — looks like a
    # copy-paste slip, but the value is unused so behavior is unaffected.
    batch_test_mask = eval_mask[batch_test_index]
    test_label_source = labels[batch_test_index]
    test_lable = Tensor(test_label_source[:, 0], mindspore.int32)
    # Load the best checkpoint into a fresh model and evaluate on the test split.
    test_net = Graphsage(input_dim=1433, hidden_dim=128, output_dim=7,
                         hops=[10, 10])
    test_net.set_train(mode=False)
    load_checkpoint("ckpts_graphsage/graphsage.ckpt", net=test_net)
    loss_test = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    test_soln = test_net(Tensor(test_fea_sampled, mindspore.float32))
    test_logits = P.Argmax()(test_soln)
    print("test accuracy:", P.ReduceMean()(P.Cast()(
        (P.Equal()(test_lable, test_logits)), mindspore.float32)))
    test_with_loss = nn.WithLossCell(test_net, loss_fn=loss_test)
    test_loss = test_with_loss(Tensor(test_fea_sampled, mindspore.float32),
                               Tensor(test_label_source[:, 0], mindspore.int32))
    print("test loss:", P.ReduceSum()(test_loss).asnumpy())