Example #1
 def __init__(self, config, is_training=True):
     super(Decoder, self).__init__()
     self.hidden_size = config.hidden_size
     self.vocab_size = config.trg_vocab_size
     self.embedding_size = config.decoder_embedding_size
     self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
     self.rnn = GRU(input_size=self.embedding_size + self.hidden_size * 2,
                    hidden_size=self.hidden_size).to_float(config.compute_type)
     self.text_len = config.max_length
     self.shape = P.Shape()
     self.transpose = P.Transpose()
     self.p = P.Print()
     self.cast = P.Cast()
     self.concat = P.Concat(axis=2)
     self.squeeze = P.Squeeze(axis=0)
     self.expandims = P.ExpandDims()
     self.log_softmax = P.LogSoftmax(axis=1)
     weight, bias = dense_default_state(
         self.embedding_size + self.hidden_size * 3, self.vocab_size)
     self.fc = nn.Dense(self.embedding_size + self.hidden_size * 3,
                        self.vocab_size,
                        weight_init=weight,
                        bias_init=bias).to_float(config.compute_type)
     self.attention = Attention(config)
     self.bmm = P.BatchMatMul()
     self.dropout = nn.Dropout(0.7)
     self.dtype = config.dtype
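Since these examples all come from constructors, here is a minimal self-contained sketch (not from the source) of how a P.Print() operator created this way is then invoked inside construct in MindSpore graph mode:

import mindspore.nn as nn
from mindspore.ops import operations as P

class DebugCell(nn.Cell):
    """Hypothetical cell illustrating the usual P.Print() call pattern."""
    def __init__(self):
        super(DebugCell, self).__init__()
        self.print = P.Print()  # side-effect operator, prints at execution time

    def construct(self, x):
        # P.Print accepts string labels and tensors; it is called purely
        # for its printing side effect
        self.print("input tensor:", x)
        return x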
Example #2
    def __init__(self, vggpath='', loadvgg=True):
        super(OpenPoseNet, self).__init__()
        self.base = Base_model()
        self.stage_1 = Stage_1()
        self.stage_2 = Stage_x()
        self.stage_3 = Stage_x()
        self.stage_4 = Stage_x()
        self.stage_5 = Stage_x()
        self.stage_6 = Stage_x()
        self.shape = P.Shape()
        self.cat = P.Concat(axis=1)
        self.print = P.Print()
        # for m in self.modules():
        # if isinstance(m, Conv2d):
        # init.constant_(m.bias, 0)
        if loadvgg and vggpath:
            param_dict = load_checkpoint(vggpath)
            param_dict_new = {}
            trans_name = 'base.vgg_base.'
            for key, values in param_dict.items():
                #print('key:', key, self.shape(values))
                if key.startswith('moments.'):
                    continue
                elif key.startswith('network.'):
                    param_dict_new[trans_name + key[17:]] = values
                # else:
                # param_dict_new[key] = values
            #print(param_dict_new)
            load_param_into_net(self.base.vgg_base, param_dict_new)
    def set_train_local(self, config, training=False):
        """Set training flag."""
        self.training_local = training
        cfg = config
        self.topK_stage1 = ()
        self.topK_shape = ()
        total_max_topk_input = 0
        # note: this excerpt configures only the inference (not-training)
        # parameters; the training-mode branch is not shown here
        if not self.training_local:
            self.num_pre = cfg.rpn_nms_pre
            self.min_box_size = cfg.rpn_min_bbox_min_size
            self.nms_thr = cfg.rpn_nms_thr
            self.nms_post = cfg.rpn_nms_post
            self.max_num = cfg.rpn_max_num
        k_num = self.num_pre
        total_max_topk_input = k_num
        self.topK_stage1 = k_num
        self.topK_shape = (k_num, 1)

        self.topKv2 = P.TopK(sorted=True)
        self.topK_shape_stage2 = (self.max_num, 1)
        self.min_float_num = -65536.0
        self.topK_mask = Tensor(self.min_float_num *
                                np.ones(total_max_topk_input, np.float16))
        self.shape = P.Shape()
        self.print = P.Print()
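The key-remapping loop in the constructor above is a common checkpoint-surgery pattern. A standalone sketch of the same idea (helper name and prefix are assumptions, the serialization APIs are real MindSpore ones):

from mindspore.train.serialization import load_checkpoint, load_param_into_net

def load_vgg_backbone(net, ckpt_path, trans_name='base.vgg_base.'):
    """Hypothetical helper: remap checkpoint keys onto a sub-network."""
    param_dict = load_checkpoint(ckpt_path)
    param_dict_new = {}
    for key, values in param_dict.items():
        if key.startswith('moments.'):   # skip optimizer statistics
            continue
        if key.startswith('network.'):   # strip the training-wrapper prefix
            param_dict_new[trans_name + key[len('network.'):]] = values
    load_param_into_net(net, param_dict_new)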
Example #4
 def __init__(self, vocab_size, embedding_dims, num_class):
     super(FastTextNetWithLoss, self).__init__()
     self.fasttext = FastText(vocab_size, embedding_dims, num_class)
     self.loss_func = nn.SoftmaxCrossEntropyWithLogits(sparse=True,
                                                       reduction='mean')
     self.squeeze = P.Squeeze(axis=1)
     self.print = P.Print()
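For context, a plausible construct for this loss cell; this is a sketch, and the FastText forward signature and the trailing singleton label axis are assumptions:

 def construct(self, src_tokens, src_tokens_length, label_idx):
     # label_idx: (batch, 1) -> (batch,) so the sparse loss accepts it
     predict_score = self.fasttext(src_tokens, src_tokens_length)
     label_idx = self.squeeze(label_idx)
     return self.loss_func(predict_score, label_idx)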
Example #5
 def __init__(self, model, criterion, con_loss, use_con=True):
     super(MyTrain, self).__init__(auto_prefix=True)
     self.use_con = use_con
     self.model = model
     self.con_loss = con_loss
     self.criterion = criterion
     self.p = P.Print()
     self.cast = P.Cast()
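A sketch of how such a training wrapper is usually driven; the model and loss signatures below are assumptions, not the source's code:

 def construct(self, data, label):
     # combine the main criterion with the auxiliary con_loss when enabled
     output = self.model(data)
     loss = self.criterion(output, label)
     if self.use_con:
         loss = loss + self.con_loss(output, label)
     return loss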
Example #6
 def __init__(self, in_channels, out_channels, bilinear=True):
     super().__init__()
     self.concat = F.Concat(axis=1)
     self.factor = 56.0 / 64.0
     self.center_crop = CentralCrop(central_fraction=self.factor)
     self.print_fn = F.Print()
     self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
     self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
     self.relu = nn.ReLU()
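These attributes follow the usual U-Net up-block shape: a plausible construct (a sketch; the skip-connection argument order is an assumption) would be:

 def construct(self, x1, x2):
     # upsample the deep feature map, crop the skip connection to match
     # (the 56/64 central fraction), then fuse along channels and convolve
     x1 = self.relu(self.up(x1))
     x2 = self.center_crop(x2)
     x = self.concat((x1, x2))
     return self.conv(x)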
Example #7
 def __init__(self, backbone, config):
     super(WithLossCell, self).__init__(auto_prefix=False)
     self._backbone = backbone
     self.batch_size = config.batch_size
     self.onehot = nn.OneHot(depth=config.ch_vocab_size)
     self._loss_fn = NLLLoss()
     self.max_len = config.max_seq_length
     self.squeeze = P.Squeeze()
     self.cast = P.Cast()
     self.argmax = P.ArgMaxWithValue(axis=1, keep_dims=True)
     self.print = P.Print()
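A sketch of the step-wise loss this cell likely computes; the backbone output layout and the custom NLLLoss signature are assumptions:

 def construct(self, src_ids, target_ids):
     # sum per-step NLL between decoder log-probs and one-hot targets
     decoder_outputs = self._backbone(src_ids, target_ids)  # (batch, seq, vocab) assumed
     loss_total = 0
     for i in range(self.max_len):
         step_probs = self.squeeze(decoder_outputs[::, i:i + 1:1, ::])
         step_label = self.onehot(target_ids[::, i])
         loss_total += self._loss_fn(step_probs, step_label)
     return loss_total / self.batch_size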
Example #8
 def construct(self, input_ids, input_mask, token_type_id, masked_pos, masked_ids, nsp_label, masked_weights):
     bs, _ = self.shape(input_ids)
     probs = self.bert(input_ids, input_mask, token_type_id, masked_pos)
     index = self.argmax(probs)
     index = self.reshape(index, (bs, -1))
     eval_acc = self.equal(index, masked_ids)
     eval_acc1 = self.cast(eval_acc, mstype.float32)
     acc = self.mean(eval_acc1)
     P.Print()(acc)
     self.total += self.shape(probs)[0]
     self.acc += self.sum(eval_acc1)
     return acc, self.total, self.acc
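Note the inline form here: P.Print()(acc) constructs the operator and calls it in one expression. That works, but instantiating it once in __init__, as the other examples do, is the more common pattern:

 # in __init__:
 self.print = P.Print()
 # in construct:
 self.print("masked-LM accuracy:", acc)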
Example #9
 def __init__(self, weight_angle=10):
     super(LossFunc, self).__init__()
     self.split = P.Split(1, 5)
     self.min = P.Minimum()
     self.log = P.Log()
     self.cos = P.Cos()
     self.mean = P.ReduceMean()
     #self.flatten = P.Flatten()
     self.sum = P.ReduceSum()
     self.weight_angle = weight_angle
     self.max = P.Maximum()
     self.print = P.Print()
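A sketch of how these operators typically combine in an EAST-style geometry loss (IoU over the four edge distances plus a cosine angle term); the 5-channel layout of the geometry map is an assumption:

 def construct(self, gt_geo, pred_geo, mask):
     # split the 5 geometry channels: distances to 4 edges plus rotation angle
     d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = self.split(gt_geo)
     d1_p, d2_p, d3_p, d4_p, theta_p = self.split(pred_geo)
     area_gt = (d1_gt + d2_gt) * (d3_gt + d4_gt)
     area_p = (d1_p + d2_p) * (d3_p + d4_p)
     w_union = self.min(d3_gt, d3_p) + self.min(d4_gt, d4_p)
     h_union = self.min(d1_gt, d1_p) + self.min(d2_gt, d2_p)
     area_intersect = w_union * h_union
     area_union = area_gt + area_p - area_intersect
     iou_loss = -self.log((area_intersect + 1.0) / (area_union + 1.0))
     angle_loss = 1.0 - self.cos(theta_p - theta_gt)
     geo_loss = self.mean(iou_loss * mask)
     ang_loss = self.mean(angle_loss * mask)
     return geo_loss + self.weight_angle * ang_loss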
Example #10
 def __init__(self, config, is_training=True):
     super(Encoder, self).__init__()
     self.hidden_size = config.hidden_size
     self.vocab_size = config.src_vocab_size
     self.embedding_size = config.encoder_embedding_size
     self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
     self.rnn = BidirectionGRU(config, is_training=is_training).to_float(
         mstype.float16)
     self.fc = nn.Dense(2 * self.hidden_size,
                        self.hidden_size).to_float(mstype.float16)
     self.shape = P.Shape()
     self.transpose = P.Transpose()
     self.p = P.Print()
     self.cast = P.Cast()
     self.text_len = config.max_length
     self.squeeze = P.Squeeze(axis=0)
     self.tanh = P.Tanh()
Example #11
    def __init__(self):
        super(CriterionsFaceAttri, self).__init__()

        # label
        self.gatherv2 = P.Gather()
        self.squeeze = P.Squeeze(axis=1)
        self.cast = P.Cast()
        self.reshape = P.Reshape()
        self.mean = P.ReduceMean()

        self.label0_param = Tensor([0], dtype=mstype.int32)
        self.label1_param = Tensor([1], dtype=mstype.int32)
        self.label2_param = Tensor([2], dtype=mstype.int32)

        # loss
        self.ce_ignore_loss = CrossEntropyWithIgnoreIndex()
        self.printn = P.Print()
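A sketch of what the gather setup above is likely for: pulling each attribute's column out of a stacked label tensor before the ignore-index loss. The per-head logits and the (batch, 3) label layout are assumptions:

    def construct(self, x0, x1, x2, label):
        # label: (batch, 3); one column per attribute head
        label_0 = self.squeeze(self.gatherv2(label, self.label0_param, 1))
        label_1 = self.squeeze(self.gatherv2(label, self.label1_param, 1))
        label_2 = self.squeeze(self.gatherv2(label, self.label2_param, 1))
        loss_0 = self.ce_ignore_loss(x0, label_0)
        loss_1 = self.ce_ignore_loss(x1, label_1)
        loss_2 = self.ce_ignore_loss(x2, label_2)
        return loss_0 + loss_1 + loss_2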
Example #12
 def __init__(self, config, is_training=True):
     super(Encoder, self).__init__()
     self.hidden_size = config.hidden_size
     self.vocab_size = config.src_vocab_size
     self.embedding_size = config.encoder_embedding_size
     self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
     self.rnn = GRU(input_size=self.embedding_size,
                    hidden_size=self.hidden_size,
                    bidirectional=True).to_float(config.compute_type)
     self.fc = nn.Dense(2 * self.hidden_size,
                        self.hidden_size).to_float(config.compute_type)
     self.shape = P.Shape()
     self.transpose = P.Transpose()
     self.p = P.Print()
     self.cast = P.Cast()
     self.text_len = config.max_length
     self.squeeze = P.Squeeze(axis=0)
     self.tanh = P.Tanh()
     self.concat = P.Concat(2)
     self.dtype = config.dtype
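For both encoder variants above (Examples #10 and #12), a plausible construct looks like this sketch; the GRU's return convention, final hidden states of shape (2, batch, hidden), is an assumption:

 def construct(self, src_ids):
     # (batch, seq) -> (seq, batch, emb): the layout the recurrent cell expects
     embeddings = self.embedding(src_ids)
     embeddings = self.transpose(embeddings, (1, 0, 2))
     embeddings = self.cast(embeddings, self.dtype)
     output, hidden = self.rnn(embeddings)
     # join the forward and backward final states, then project to hidden_size
     hidden = self.concat((hidden[0:1], hidden[1:2]))  # (1, batch, 2*hidden)
     hidden = self.squeeze(hidden)                     # (batch, 2*hidden)
     hidden = self.tanh(self.fc(hidden))
     return output, hidden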
Example #13
 def __init__(self, network, optimizer, sens=1.0):
     super(TrainingWrapper, self).__init__(auto_prefix=False)
     self.network = network
     self.network.add_flags(defer_inline=True)
     self.weights = optimizer.parameters
     self.optimizer = optimizer
     self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
     self.sens = sens
     self.reducer_flag = False
     self.grad_reducer = None
     parallel_mode = _get_parallel_mode()
     if parallel_mode in (ParallelMode.DATA_PARALLEL,
                          ParallelMode.HYBRID_PARALLEL):
         self.reducer_flag = True
     if self.reducer_flag:
         mean = _get_mirror_mean()
         degree = _get_device_num()
         self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                    mean, degree)
     self.print = P.Print()
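For completeness, the construct that conventionally accompanies this kind of wrapper, sketched here rather than taken from the source; it assumes F is mindspore.ops.functional:

 def construct(self, *args):
     weights = self.weights
     loss = self.network(*args)
     # build a gradient sensitivity tensor matching the loss dtype and shape
     sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
     grads = self.grad(self.network, weights)(*args, sens)
     if self.reducer_flag:
         # all-reduce gradients across devices in data-parallel mode
         grads = self.grad_reducer(grads)
     return F.depend(loss, self.optimizer(grads))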