Example #1
    def test_scalar_sub_tensor(self):
        # scalar(int) - tensor(int64)
        with program_guard(Program()):
            a = 1
            b = paddle.ones([2, 2, 2], dtype='int64')
            c = paddle.zeros([2, 2, 2], dtype="int64")
            self.check_operation(a, b, c, '-')

        # scalar(int) - tensor(float32)
        with program_guard(Program()):
            a = 1
            b = paddle.ones([2, 2, 2], dtype='float32')
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float, .0) - tensor(int64)
        with program_guard(Program()):
            a = 1.0
            b = paddle.ones([2, 2, 2], dtype='int64')
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float, .5) - tensor(int64)
        with program_guard(Program()):
            a = 1.5
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], -0.5, dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float) - tensor(float32)
        with program_guard(Program()):
            a = 1.5
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], -0.5, dtype="float32")
            self.check_operation(a, b, c, '-')
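
The `check_operation` helper is not shown in these snippets. A minimal eager-mode sketch of what such a helper might look like (hypothetical; the real test suite presumably executes the static program built under `program_guard` before comparing):

import numpy as np
import paddle

def check_operation(a, b, expected, op):
    # Hypothetical helper: evaluate `a <op> b` and compare the result's
    # dtype and values against the expected tensor.
    ops = {
        '-': lambda x, y: x - y,
        '/': lambda x, y: x / y,
        '//': lambda x, y: x // y,
        '%': lambda x, y: x % y,
        '**': lambda x, y: x ** y,
    }
    result = ops[op](a, b)
    assert result.dtype == expected.dtype
    np.testing.assert_allclose(result.numpy(), expected.numpy())
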
Example #2
def label_box(anchors, gt_boxes, positive_overlap, negative_overlap,
              allow_low_quality):
    iou = bbox_overlaps(gt_boxes, anchors)
    if iou.numel() == 0:
        default_matches = paddle.full((iou.shape[1], ), 0, dtype='int64')
        default_match_labels = paddle.full((iou.shape[1], ), -1, dtype='int32')
        return default_matches, default_match_labels
    matched_vals, matches = paddle.topk(iou, k=1, axis=0)
    match_labels = paddle.full(matches.shape, -1, dtype='int32')
    match_labels = paddle.where(matched_vals < negative_overlap,
                                paddle.zeros_like(match_labels), match_labels)
    match_labels = paddle.where(matched_vals >= positive_overlap,
                                paddle.ones_like(match_labels), match_labels)
    if allow_low_quality:
        highest_quality_foreach_gt = iou.max(axis=1, keepdim=True)
        pred_inds_with_highest_quality = paddle.logical_and(
            iou > 0,
            iou == highest_quality_foreach_gt).cast('int32').sum(0,
                                                                 keepdim=True)
        match_labels = paddle.where(pred_inds_with_highest_quality > 0,
                                    paddle.ones_like(match_labels),
                                    match_labels)

    matches = matches.flatten()
    match_labels = match_labels.flatten()
    return matches, match_labels
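
A toy run of the matching logic above, assuming two gt boxes, three anchors, and thresholds 0.7/0.3: paddle.topk along axis 0 picks the best gt per anchor, and paddle.where buckets each anchor as positive (1), negative (0), or ignored (-1):

import paddle

# Toy IoU matrix: rows = gt boxes, columns = anchors.
iou = paddle.to_tensor([[0.1, 0.6, 0.3],
                        [0.2, 0.2, 0.8]])
matched_vals, matches = paddle.topk(iou, k=1, axis=0)
labels = paddle.full(matches.shape, -1, dtype='int32')
labels = paddle.where(matched_vals < 0.3, paddle.zeros_like(labels), labels)
labels = paddle.where(matched_vals >= 0.7, paddle.ones_like(labels), labels)
print(matches.numpy())  # [[1 0 1]]
print(labels.numpy())   # [[ 0 -1  1]]
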
Example #3
    def test_tensor_div_scalar(self):
        # tensor(int64) / scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(float32) / scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 2
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(int64) / scalar(float, .0)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 2.0
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(int64) / scalar(float, .5)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 0.5
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(float32) / scalar(float)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 0.5
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')
Example #4
    def net(self, inputs, is_infer=False):
        if is_infer:
            self.infer_net(inputs)
            return

        word2vec_model = Word2VecLayer(self.sparse_feature_number,
                                       self.sparse_feature_dim,
                                       self.neg_num,
                                       emb_name="emb",
                                       emb_w_name="emb_w",
                                       emb_b_name="emb_b")
        true_logits, neg_logits = word2vec_model.forward(inputs)

        label_ones = paddle.full(shape=[paddle.shape(true_logits)[0], 1],
                                 fill_value=1.0)
        label_zeros = paddle.full(
            shape=[paddle.shape(true_logits)[0], self.neg_num], fill_value=0.0)

        true_logits = paddle.nn.functional.sigmoid(true_logits)
        true_xent = paddle.nn.functional.binary_cross_entropy(
            true_logits, label_ones)
        neg_logits = paddle.nn.functional.sigmoid(neg_logits)
        neg_xent = paddle.nn.functional.binary_cross_entropy(
            neg_logits, label_zeros)
        cost = paddle.add(true_xent, neg_xent)
        avg_cost = paddle.mean(x=cost)

        self._cost = avg_cost
        self._metrics["LOSS"] = avg_cost
Example #5
    def test_tensor_sub_scalar(self):
        # tensor(int64) - scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 1
            c = paddle.zeros([2, 2, 2], dtype="int64")
            self.check_operation(a, b, c, '-')

        # tensor(float32) - scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 1
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(int64) - scalar(float, .0)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 1.0
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(int64) - scalar(float, .5)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 2, dtype='int64')
            b = 1.5
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(float32) - scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 2, dtype='float32')
            b = 1.5
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '-')
Example #6
    def test_tensor_floordiv_scalar(self):
        # tensor(int64) // scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="int64")
            self.check_operation(a, b, c, '//')
Example #7
    def forward_test(self, src):
        bs = paddle.shape(src)[0]
        if self.encoder is not None:
            src = self.positional_encoding(paddle.transpose(src, [1, 0, 2]))
            memory = self.encoder(src)
        else:
            memory = paddle.transpose(paddle.squeeze(src, 2), [2, 0, 1])
        dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64)
        dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32)
        for len_dec_seq in range(1, 25):
            dec_seq_embed = paddle.transpose(self.embedding(dec_seq),
                                             [1, 0, 2])
            dec_seq_embed = self.positional_encoding(dec_seq_embed)
            tgt_mask = self.generate_square_subsequent_mask(
                paddle.shape(dec_seq_embed)[0])
            output = self.decoder(dec_seq_embed,
                                  memory,
                                  tgt_mask=tgt_mask,
                                  memory_mask=None,
                                  tgt_key_padding_mask=None,
                                  memory_key_padding_mask=None)
            dec_output = paddle.transpose(output, [1, 0, 2])
            dec_output = dec_output[:, -1, :]
            word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=1)
            preds_idx = paddle.argmax(word_prob, axis=1)
            if paddle.equal_all(
                    preds_idx,
                    paddle.full(paddle.shape(preds_idx), 3, dtype='int64')):
                break
            preds_prob = paddle.max(word_prob, axis=1)
            dec_seq = paddle.concat(
                [dec_seq, paddle.reshape(preds_idx, [-1, 1])], axis=1)
            dec_prob = paddle.concat(
                [dec_prob, paddle.reshape(preds_prob, [-1, 1])], axis=1)
        return [dec_seq, dec_prob]
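
The early exit relies on paddle.equal_all against a tensor of end-token ids built with paddle.full; a standalone sketch, assuming id 3 marks end-of-sequence as in the loop above:

import paddle

# Decoding stops once every sequence in the batch predicts the end token.
preds_idx = paddle.to_tensor([3, 3, 3], dtype='int64')
eos = paddle.full(paddle.shape(preds_idx), 3, dtype='int64')
print(paddle.equal_all(preds_idx, eos))  # True
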
Example #8
    def func_scalar_sub_tensor(self):
        # scalar(int) - tensor(int64)
        a = 1
        b = paddle.ones([2, 2, 2], dtype='int64')
        c = paddle.zeros([2, 2, 2], dtype="int64")
        self.check_operation(a, b, c, '-')

        # scalar(int) - tensor(float32)
        a = 1
        b = paddle.ones([2, 2, 2], dtype='float32')
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')

        # scalar(float, .0) - tensor(int64)
        a = 1.0
        b = paddle.ones([2, 2, 2], dtype='int64')
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')

        # scalar(float, .5) - tensor(int64)
        a = 1.5
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], -0.5, dtype="float32")
        self.check_operation(a, b, c, '-')

        # scalar(float) - tensor(float32)
        a = 1.5
        b = paddle.full([2, 2, 2], 2, dtype='float32')
        c = paddle.full([2, 2, 2], -0.5, dtype="float32")
        self.check_operation(a, b, c, '-')
Example #9
    def test_cpu_only_op(self):
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            x = paddle.full(shape=[2, 255, 13, 13],
                            fill_value=0.3,
                            dtype='float32')
            gt_box = paddle.full(shape=[2, 6, 4],
                                 fill_value=0.5,
                                 dtype='float32')
            gt_label = paddle.full(shape=[2, 6], fill_value=1.0, dtype='int32')
            gt_score = paddle.full(shape=[2, 6],
                                   fill_value=0.5,
                                   dtype='float32')
            anchors = [
                10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156,
                198, 373, 326
            ]
            anchor_mask = [0, 1, 2]
            with paddle.static.device_guard("gpu"):
                # yolov3_loss only has a CPU kernel, so its CPU kernel runs even under device_guard("gpu")
                loss = fluid.layers.yolov3_loss(x=x,
                                                gt_box=gt_box,
                                                gt_label=gt_label,
                                                gt_score=gt_score,
                                                anchors=anchors,
                                                anchor_mask=anchor_mask,
                                                class_num=80,
                                                ignore_thresh=0.7,
                                                downsample_ratio=32)

        execute(main_program, startup_program)
Example #10
    def test_device_guard_with_id(self):
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            data1 = paddle.full(shape=[1, 3, 8, 8],
                                fill_value=0.5,
                                dtype='float32')
            data2 = paddle.full(shape=[1, 3, 5, 5],
                                fill_value=0.5,
                                dtype='float32')
            shape = paddle.shape(data2)
            with paddle.static.device_guard("cpu"):
                shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
                with paddle.static.device_guard("gpu:1"):
                    out = fluid.layers.crop_tensor(data1, shape=shape)
        # check if the device attr is set correctly
        all_ops = main_program.global_block().ops
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        for op in all_ops:
            if op.type == 'slice':
                self.assertEqual(op.desc.attr(device_attr_name), "cpu")
            if op.type == 'crop_tensor':
                self.assertEqual(op.desc.attr(device_attr_name), "gpu:1")

        execute(main_program, startup_program)
Example #11
    def test_without_kernel_op(self):
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            i = paddle.full(shape=[1], dtype='int64', fill_value=0)
            loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
            cond = paddle.less_than(x=i, y=loop_len)

            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                with paddle.static.device_guard("cpu"):
                    while_op = fluid.layers.While(cond=cond)
                    with while_op.block():
                        i = paddle.increment(x=i, value=1)
                        fluid.layers.less_than(x=i, y=loop_len, cond=cond)

        warning = "The Op(while) is not support to set device."
        warning_num = get_vaild_warning_num(warning, w)
        assert warning_num == 1

        all_ops = main_program.global_block().ops
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        for op in all_ops:
            if op.type == 'while':
                self.assertEqual(op.desc.attr(device_attr_name), "")

        execute(main_program, startup_program)
Example #12
    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.

        Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
            target_is_real (bool) - - whether the ground truth label is for real images or fake images

        Returns:
            A label tensor filled with the ground truth label, with the size of the input
        """
        if target_is_real:
            if not hasattr(self, 'target_real_tensor'):
                self.target_real_tensor = paddle.full(
                    shape=paddle.shape(prediction),
                    fill_value=self.target_real_label,
                    dtype='float32')
            target_tensor = self.target_real_tensor
        else:
            if not hasattr(self, 'target_fake_tensor'):
                self.target_fake_tensor = paddle.full(
                    shape=paddle.shape(prediction),
                    fill_value=self.target_fake_label,
                    dtype='float32')
            target_tensor = self.target_fake_tensor

        # target_tensor.stop_gradient = True
        return target_tensor
Example #13
    def _get_start_stop_tensor(self, batch_size):
        if (self._start_tensor is None or self._stop_tensor is None
                or batch_size != self._start_tensor.shape[0]):
            self._start_tensor = paddle.full(
                (batch_size, 1), dtype='int64', fill_value=self.start_idx)
            self._stop_tensor = paddle.full(
                (batch_size, 1), dtype='int64', fill_value=self.stop_idx)
        return self._start_tensor, self._stop_tensor
Example #14
def label_box(anchors,
              gt_boxes,
              positive_overlap,
              negative_overlap,
              allow_low_quality,
              ignore_thresh,
              is_crowd=None):
    iou = bbox_overlaps(gt_boxes, anchors)
    n_gt = gt_boxes.shape[0]
    if n_gt == 0 or is_crowd is None:
        n_gt_crowd = 0
    else:
        n_gt_crowd = paddle.nonzero(is_crowd).shape[0]
    if iou.shape[0] == 0 or n_gt_crowd == n_gt:
        # No truth, assign everything to background
        default_matches = paddle.full((iou.shape[1], ), 0, dtype='int64')
        default_match_labels = paddle.full((iou.shape[1], ), 0, dtype='int32')
        return default_matches, default_match_labels
    # if ignore_thresh > 0, ignore an anchor if it is close to
    # one of the crowded ground-truth boxes
    if n_gt_crowd > 0:
        N_a = anchors.shape[0]
        ones = paddle.ones([N_a])
        mask = is_crowd * ones

        if ignore_thresh > 0:
            crowd_iou = iou * mask
            valid = (paddle.sum((crowd_iou > ignore_thresh).cast('int32'),
                                axis=0) > 0).cast('float32')
            iou = iou * (1 - valid) - valid

        # ignore the iou between anchor and crowded ground-truth
        iou = iou * (1 - mask) - mask

    matched_vals, matches = paddle.topk(iou, k=1, axis=0)
    match_labels = paddle.full(matches.shape, -1, dtype='int32')
    # set ignored anchor with iou = -1
    neg_cond = paddle.logical_and(matched_vals > -1,
                                  matched_vals < negative_overlap)
    match_labels = paddle.where(neg_cond, paddle.zeros_like(match_labels),
                                match_labels)
    match_labels = paddle.where(matched_vals >= positive_overlap,
                                paddle.ones_like(match_labels), match_labels)
    if allow_low_quality:
        highest_quality_foreach_gt = iou.max(axis=1, keepdim=True)
        pred_inds_with_highest_quality = paddle.logical_and(
            iou > 0,
            iou == highest_quality_foreach_gt).cast('int32').sum(0,
                                                                 keepdim=True)
        match_labels = paddle.where(pred_inds_with_highest_quality > 0,
                                    paddle.ones_like(match_labels),
                                    match_labels)

    matches = matches.flatten()
    match_labels = match_labels.flatten()

    return matches, match_labels
Example #15
    def test_variable_input1(self):
        start = paddle.full(shape=[1], fill_value=0, dtype='float32')
        stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
        num = paddle.full(shape=[1], fill_value=5, dtype='int32')
        out = paddle.linspace(start, stop, num, dtype='float32')
        exe = fluid.Executor(place=fluid.CPUPlace())
        res = exe.run(fluid.default_main_program(), fetch_list=[out])
        np_res = np.linspace(0, 10, 5, dtype='float32')
        self.assertEqual((res == np_res).all(), True)
Example #16
    def test_variable_input2(self):
        paddle.disable_static()
        start = paddle.full(shape=[1], fill_value=0, dtype='float32')
        stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
        num = paddle.full(shape=[1], fill_value=5, dtype='int32')
        out = paddle.linspace(start, stop, num, dtype='float32')
        np_res = np.linspace(0, 10, 5, dtype='float32')
        self.assertEqual((out.numpy() == np_res).all(), True)
        paddle.enable_static()
Example #17
    def __init__(self, num_features, eps=1e-5):
        super(ILN, self).__init__()
        self.eps = eps
        shape = (1, num_features, 1, 1)
        self.rho = self.create_parameter(shape)
        self.gamma = self.create_parameter(shape)
        self.beta = self.create_parameter(shape)
        self.rho.set_value(paddle.full(shape, 0.0))
        self.gamma.set_value(paddle.full(shape, 1.0))
        self.beta.set_value(paddle.full(shape, 0.0))
Example #18
def create_loss(batch_size, margin, cos_pos, cos_neg):

    loss_part1 = paddle.subtract(
        paddle.full(shape=[batch_size, 1], fill_value=margin, dtype='float32'),
        cos_pos)
    loss_part2 = paddle.add(loss_part1, cos_neg)
    loss_part3 = paddle.maximum(
        paddle.full(shape=[batch_size, 1], fill_value=0.0, dtype='float32'),
        loss_part2)
    avg_cost = paddle.mean(loss_part3)
    return avg_cost
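
This is the pairwise hinge loss max(0, margin - cos_pos + cos_neg), with paddle.full supplying the margin and zero tensors. A standalone check with batch_size=2 and margin 0.1 (the cosine values are illustrative):

import paddle

margin = paddle.full(shape=[2, 1], fill_value=0.1, dtype='float32')
zeros = paddle.full(shape=[2, 1], fill_value=0.0, dtype='float32')
cos_pos = paddle.to_tensor([[0.9], [0.05]], dtype='float32')
cos_neg = paddle.to_tensor([[0.2], [0.3]], dtype='float32')
loss = paddle.maximum(zeros,
                      paddle.add(paddle.subtract(margin, cos_pos), cos_neg))
# Row 1: 0.1 - 0.9 + 0.2 = -0.6 -> clipped to 0.
# Row 2: 0.1 - 0.05 + 0.3 = 0.35 -> positive loss.
print(paddle.mean(loss))  # 0.175
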
Example #19
    def build_program(self):
        main_program = paddle.static.default_main_program()
        startup_program = paddle.static.default_startup_program()
        with paddle.static.program_guard(main_program, startup_program):
            out = paddle.full((1, ), 1)
            inp1 = paddle.full((1, ), 2)
            inp2 = paddle.full((1, ), 3)

            paddle.fluid.layers.assign(inp1, out)
            paddle.fluid.layers.assign(inp2, out)
        return main_program, startup_program, out
Example #20
def create_loss(prediction):
    pos = paddle.slice(prediction, axes=[0, 1], starts=[0, 0], ends=[64, 1])
    neg = paddle.slice(prediction, axes=[0, 1], starts=[64, 0], ends=[128, 1])
    loss_part1 = paddle.subtract(
        paddle.full(shape=[64, 1], fill_value=1.0, dtype='float32'), pos)
    loss_part2 = paddle.add(loss_part1, neg)
    loss_part3 = paddle.maximum(
        paddle.full(shape=[64, 1], fill_value=0.0, dtype='float32'),
        loss_part2)

    avg_cost = paddle.mean(loss_part3)
    return avg_cost
Example #21
    def __init__(self, alpha, beta):
        if isinstance(alpha, numbers.Real):
            alpha = paddle.full(shape=[1], fill_value=alpha)

        if isinstance(beta, numbers.Real):
            beta = paddle.full(shape=[1], fill_value=beta)

        self.alpha, self.beta = paddle.broadcast_tensors([alpha, beta])

        self._dirichlet = Dirichlet(paddle.stack([self.alpha, self.beta], -1))

        super(Beta, self).__init__(self._dirichlet._batch_shape)
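
paddle.full is used here to lift Python scalars to 1-element tensors so that paddle.broadcast_tensors can align alpha and beta to a common shape; a minimal sketch of that promotion:

import paddle

alpha = paddle.full(shape=[1], fill_value=0.5)  # scalar promoted to a tensor
beta = paddle.to_tensor([1.0, 2.0, 3.0])
alpha, beta = paddle.broadcast_tensors([alpha, beta])
print(alpha.shape, beta.shape)  # [3] [3]
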
Example #22
    def forward(self, ipt, label):
        input_lengths = pp.full(shape=[BATCH_SIZE, 1],
                                fill_value=YZM_LENGTH + 4,
                                dtype='int64')
        label_lengths = pp.full(shape=[BATCH_SIZE, 1],
                                fill_value=YZM_LENGTH,
                                dtype='int64')
        loss = pp.nn.functional.ctc_loss(ipt,
                                         label,
                                         input_lengths,
                                         label_lengths,
                                         blank=len(CHAR_LIST))
        return loss
Example #23
    def greedy_search(self, input_ids, logits_processors, max_length,
                      pad_token_id, eos_token_id, **model_kwargs):
        batch_size, cur_len = input_ids.shape
        origin_len = cur_len
        unfinished_flag = paddle.full([batch_size, 1], True, dtype='bool')
        scores = paddle.full([batch_size, 1],
                             0.0,
                             dtype=paddle.get_default_dtype())

        while cur_len < max_length:
            # prepare model inputs & get model output
            model_inputs = self.prepare_inputs_for_generation(
                input_ids, **model_kwargs)
            outputs = self(**model_inputs)
            logits = outputs[0] if isinstance(outputs, tuple) else outputs
            # [batch_size, vocab_size]
            logits = logits[:, -1, :]

            # pre-process distribution
            logits = self.adjust_logits_during_generation(logits)
            logits = logits_processors(input_ids, logits)

            # greedy
            probs = F.softmax(logits)
            probs = paddle.log(probs)
            next_tokens = paddle.argmax(probs, axis=-1).unsqueeze(-1)
            next_scores = paddle.index_sample(probs, next_tokens)

            if eos_token_id is not None:
                next_tokens = paddle.where(
                    unfinished_flag, next_tokens,
                    paddle.full_like(next_tokens, pad_token_id))

            scores = self.update_scores_for_generation(scores, next_scores,
                                                       cur_len - origin_len,
                                                       unfinished_flag)

            cur_len += 1
            input_ids = paddle.concat([input_ids, next_tokens], axis=1)

            if eos_token_id is not None:
                unfinished_flag = paddle.logical_and(
                    unfinished_flag, next_tokens != eos_token_id)

            # Stop when there is a </s> in all sentences
            if not paddle.any(unfinished_flag):
                break

            model_kwargs = self.update_model_kwargs_for_generation(
                outputs, model_kwargs)
        return input_ids[:, origin_len:], scores
Example #24
    def _initialize_alpha(self, batch_size):
        # alpha accumulates the path scores leading into each next tag.
        if self._initial_alpha is None:
            # Initialize with a very small (large negative) value.
            initial_alpha = paddle.full((batch_size, self.num_tags - 1),
                                        dtype='float32',
                                        fill_value=-10000.)
            # alpha_start is filled with 0. (> -10000.), so the START tag
            # gets the highest score at the first step.
            alpha_start = paddle.full((batch_size, 1),
                                      dtype='float32',
                                      fill_value=0.)
            self._initial_alpha = paddle.concat([initial_alpha, alpha_start],
                                                axis=1)
        return self._initial_alpha
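
A standalone view of that initialization for batch_size=2 and num_tags=4: every entry is -10000 except the final START column, so the first transition is forced to start from START:

import paddle

body = paddle.full((2, 3), dtype='float32', fill_value=-10000.)
start = paddle.full((2, 1), dtype='float32', fill_value=0.)
alpha = paddle.concat([body, start], axis=1)
print(alpha.numpy())
# [[-10000. -10000. -10000.      0.]
#  [-10000. -10000. -10000.      0.]]
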
Example #25
    def test_scalar_div_tensor(self):
        # scalar(int) / tensor(int64)
        with program_guard(Program()):
            a = 1
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(int) / tensor(float32)
        with program_guard(Program()):
            a = 1
            b = paddle.full([2, 2, 2], 0.5, dtype='float32')
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(float) / tensor(int64)
        with program_guard(Program()):
            a = 1.0
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(float) / tensor(float32)
        with program_guard(Program()):
            a = 1.0
            b = paddle.full([2, 2, 2], 0.5, dtype='float32')
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')
Example #26
    def test_scalar_pow_tensor(self):
        # scalar(int) ** tensor(int64)
        with program_guard(Program()):
            a = 3
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 9, dtype="int64")
            self.check_operation(a, b, c, '**')

        # scalar(float) ** tensor(int64)
        with program_guard(Program()):
            a = 3.0
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')

        # scalar(int) ** tensor(float32)
        with program_guard(Program()):
            a = 3
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')

        # scalar(float) ** tensor(float32)
        with program_guard(Program()):
            a = 3.0
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')
Example #27
    def test_tensor_mod_scalar(self):
        # tensor(int64) % scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="int64")
            self.check_operation(a, b, c, '%')

        # tensor(int64) % scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2.0
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')

        # tensor(float32) % scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='float32')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')

        # tensor(float32) % scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='float32')
            b = 2.0
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')
Example #28
    def mask_tokens(self, batch_data):

        token_ids = [x[0] for x in batch_data]
        is_suffix = [x[1] for x in batch_data]

        # Create probability matrix where the probability of real tokens is
        # self.mlm_prob, while that of others is zero.
        data = self.add_special_tokens_and_set_maskprob(token_ids, is_suffix)
        token_ids, is_suffix, prob_matrix = data
        token_ids = paddle.to_tensor(token_ids,
                                     dtype='int64',
                                     stop_gradient=True)
        masked_token_ids = token_ids.clone()
        labels = token_ids.clone()

        # Create masks for words, where '百' must be masked if '度' is masked
        # for the word '百度'.
        prob_matrix = prob_matrix * (1 - is_suffix)
        word_mask_index = np.random.binomial(1, prob_matrix).astype('float')
        is_suffix_mask = (is_suffix == 1)
        word_mask_index_tmp = word_mask_index
        while word_mask_index_tmp.sum() > 0:
            word_mask_index_tmp = np.concatenate(
                [np.zeros((word_mask_index.shape[0], 1)),
                 word_mask_index_tmp[:, :-1]],
                axis=1)
            word_mask_index_tmp = word_mask_index_tmp * is_suffix_mask
            word_mask_index += word_mask_index_tmp
        word_mask_index = word_mask_index.astype('bool')
        labels[~word_mask_index] = -100

        # 80% replaced with [MASK].
        token_mask_index = paddle.bernoulli(paddle.full(
            labels.shape, 0.8)).astype('bool').numpy() & word_mask_index
        masked_token_ids[token_mask_index] = self._ids['mask']

        # 10% replaced with random token ids.
        token_random_index = paddle.to_tensor(
            paddle.bernoulli(paddle.full(labels.shape, 0.5)).astype(
                'bool').numpy() & word_mask_index & ~token_mask_index)
        random_tokens = paddle.randint(low=0,
                                       high=self.tokenizer.vocab_size,
                                       shape=labels.shape,
                                       dtype='int64')
        masked_token_ids = paddle.where(token_random_index, random_tokens,
                                        masked_token_ids)

        return masked_token_ids, token_ids, labels
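
The 80/10/10 split above comes from two independent Bernoulli draws built with paddle.full: Bernoulli(0.8) over the masked positions picks [MASK] replacements, and Bernoulli(0.5) over the remaining 20% picks random-token replacements (0.2 × 0.5 = 10% overall). A minimal sketch of the selection masks:

import numpy as np
import paddle

shape = [2, 8]
word_mask = np.random.binomial(1, 0.15, size=shape).astype('bool')
mask_idx = paddle.bernoulli(paddle.full(shape, 0.8)).astype('bool').numpy() \
    & word_mask
rand_idx = paddle.bernoulli(paddle.full(shape, 0.5)).astype('bool').numpy() \
    & word_mask & ~mask_idx
# mask_idx positions get [MASK]; rand_idx positions get random token ids;
# the rest of word_mask keeps the original token.
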
Example #29
def add_dyna_features(train_config, model_config, batch, step):
    """add `num_iter_recycling` and `use_clamped_fape`"""
    random_key = 32
    shape = batch['feat']['aatype'].shape[:2]

    num_iter_recycling = np.random.default_rng(random_key + step).integers(
        model_config.model.num_recycle + 1)
    batch['feat']['num_iter_recycling'] = paddle.full(shape,
                                                      num_iter_recycling)
    print(f'\tAdd dyna feature num_iter_recycling: {num_iter_recycling}')

    if train_config.unclamped_fape:
        if np.random.default_rng(random_key + step).uniform() < 0.1:
            batch['label']['use_clamped_fape'] = paddle.full(shape, 0.0)
            print('\tAdd dyna label use_clamped_fape: 0.0')
Example #30
    def test_variable_input1(self):
        paddle.enable_static()
        prog = paddle.static.Program()
        with paddle.static.program_guard(prog):
            start = paddle.full(shape=[1], fill_value=0, dtype='float32')
            stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
            num = paddle.full(shape=[1], fill_value=5, dtype='int32')
            base = paddle.full(shape=[1], fill_value=2, dtype='float32')
            out = paddle.logspace(start, stop, num, base, dtype='float32')

        exe = paddle.static.Executor()
        res = exe.run(prog, fetch_list=[out])
        np_res = np.logspace(0, 10, 5, base=2, dtype='float32')
        self.assertEqual((res == np_res).all(), True)
        paddle.disable_static()
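
For reference, np.logspace(0, 10, 5, base=2) samples the exponents 0, 2.5, 5, 7.5, 10, i.e. roughly 1, 5.657, 32, 181.02, 1024. An eager-mode equivalent (assuming a Paddle version with paddle.logspace, >= 2.3):

import numpy as np
import paddle

paddle.disable_static()
out = paddle.logspace(paddle.full([1], 0., dtype='float32'),
                      paddle.full([1], 10., dtype='float32'),
                      paddle.full([1], 5, dtype='int32'),
                      paddle.full([1], 2., dtype='float32'),
                      dtype='float32')
np.testing.assert_allclose(out.numpy(),
                           np.logspace(0, 10, 5, base=2, dtype='float32'),
                           rtol=1e-6)
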