Example #1
    def test_1(self):
        # type: float
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = fluid.data("data", shape=[10, 10], dtype="float32")
            result_min = paddle.min(input=data, dim=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
        self.assertEqual((res == np.min(input_data, axis=1)).all(), True)

        # type: int
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = fluid.data("data", shape=[10, 10], dtype="int64")
            result_min = paddle.min(input=data, dim=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
        self.assertEqual((res == np.min(input_data, axis=1)).all(), True)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.array([10, 10]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.min(x, dim=0)
            np_z = z.numpy()
            z_expected = np.array(np.min(np_x, axis=0))
        self.assertEqual((np_z == z_expected).all(), True)
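Note: Example #1 is written against the older fluid-era API, where paddle.min took input= and dim=. Under paddle 2.x the signature is paddle.min(x, axis=None, keepdim=False), so the dygraph branch would look roughly like this sketch (assuming paddle >= 2.0):

import numpy as np
import paddle

np_x = np.array([10, 10]).astype('float64')
x = paddle.to_tensor(np_x)
z = paddle.min(x, axis=0)
assert (z.numpy() == np.min(np_x, axis=0)).all()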
Example #2
    def test_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="float32")
            result_min = paddle.min(x=data, axis=1)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
        self.assertEqual((res == np.min(input_data, axis=1)).all(), True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_min = paddle.min(x=data, axis=0)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
        self.assertEqual((res == np.min(input_data, axis=0)).all(), True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_min = paddle.min(x=data, axis=(0, 1))
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
        self.assertEqual((res == np.min(input_data, axis=(0, 1))).all(), True)
Example #3
    def relprop(self, R, alpha):
        if self.X.shape[1] == 3:
            pw = paddle.clip(self.weight, min=0)
            nw = paddle.clip(self.weight, max=0)
            X = self.X
            # print(X.shape)  # [1, 3, 224, 224]
            L = self.X * 0 + \
                paddle.min(paddle.min(paddle.min(self.X, axis=1, keepdim=True),
                                      axis=2, keepdim=True),
                           axis=3, keepdim=True)
            H = self.X * 0 + \
                paddle.max(paddle.max(paddle.max(self.X, axis=1, keepdim=True),
                                      axis=2, keepdim=True),
                           axis=3, keepdim=True)
            Za = F.conv2d(X, self.weight, bias=None, stride=self._stride, padding=self._padding) - \
                F.conv2d(L, pw, bias=None, stride=self._stride, padding=self._padding) - \
                F.conv2d(H, nw, bias=None, stride=self._stride,
                         padding=self._padding) + 1e-9

            S = R / Za
            C = X * self.gradprop2(S, self.weight) - L * \
                self.gradprop2(S, pw) - H * self.gradprop2(S, nw)
            R = C
        else:
            beta = alpha - 1
            pw = paddle.clip(self.weight, min=0)
            nw = paddle.clip(self.weight, max=0)
            px = paddle.clip(self.X, min=0)
            nx = paddle.clip(self.X, max=0)

            def f(w1, w2, x1, x2):
                Z1 = F.conv2d(x1,
                              w1,
                              bias=None,
                              stride=self._stride,
                              padding=self._padding)
                Z2 = F.conv2d(x2,
                              w2,
                              bias=None,
                              stride=self._stride,
                              padding=self._padding)
                S1 = safe_divide(R, Z1)
                S2 = safe_divide(R, Z2)
                C1 = x1 * self.gradprop(Z1, x1, S1)[0]
                C2 = x2 * self.gradprop(Z2, x2, S2)[0]
                return C1 + C2

            activator_relevances = f(pw, nw, px, nx)
            inhibitor_relevances = f(nw, pw, px, nx)

            R = alpha * activator_relevances - beta * inhibitor_relevances
        return R
Example #4
    def softmax_with_cross_entropy(self, shard_logit, shard_one_hot):
        shard_max = paddle.max(shard_logit, axis=1, keepdim=True)
        global_max = shard_max
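        # all_reduce updates its input in place, so global_max (an alias of
        # shard_max) holds the cross-device maximum after the call below.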
        paddle.distributed.all_reduce(global_max,
                                      op=paddle.distributed.ReduceOp.MAX)
        shard_logit_new = paddle.subtract(shard_logit, global_max)

        shard_exp = paddle.exp(shard_logit_new)
        shard_demon = paddle.sum(shard_exp, axis=1, keepdim=True)
        global_demon = shard_demon
        paddle.distributed.all_reduce(global_demon,
                                      op=paddle.distributed.ReduceOp.SUM)

        global_log_demon = paddle.log(global_demon)
        shard_log_prob = shard_logit_new - global_log_demon
        shard_prob = paddle.exp(shard_log_prob)

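        # log-probabilities are <= 0, so after one-hot masking the row minimum
        # selects the target class's log-probability.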
        target_log_prob = paddle.min(shard_log_prob * shard_one_hot,
                                     axis=1,
                                     keepdim=True)
        shard_loss = paddle.scale(target_log_prob, scale=-1.0)
        #TODO paddle.distributed.reducescatter not found
        global_loss = paddle.fluid.layers.collective._c_reducescatter(
            shard_loss, nranks=self.nranks, use_calc_stream=True)
        return global_loss, shard_prob
Example #5
    def forward(self, input, target=None):
        # normalization
        features = input["features"]
        features = self._nomalize(features)
        samples_each_class = self.samples_each_class
        rerange_index = paddle.to_tensor(self.rerange_index)

        # compute the similarity matrix
        diffs = paddle.unsqueeze(features, axis=1) - paddle.unsqueeze(features,
                                                                      axis=0)
        similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)

        # rerange
        tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
        tmp = paddle.gather(tmp, index=rerange_index)
        similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])

        # split
        ignore, pos, neg = paddle.split(
            similary_matrix,
            num_or_sections=[1, samples_each_class - 1, -1],
            axis=1)
        ignore.stop_gradient = True

        hard_pos = paddle.max(pos)
        hard_neg = paddle.min(neg)

        loss = hard_pos + self.margin - hard_neg
        loss = paddle.nn.ReLU()(loss)
        return {"msmloss": loss}
Example #6
    def forward(self, pred, label, sample_weight=None):
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label

        if not self._from_logits:
            pred = F.sigmoid(pred)
        alpha = paddle.where(one_hot, self._alpha * sample_weight,
                             (1 - self._alpha) * sample_weight)
        pt = paddle.where(one_hot, 1.0 - paddle.abs(label - pred),
                          paddle.ones_like(pred))

        beta = (1 - pt)**self._gamma

        # the elementwise two-tensor minimum is paddle.minimum; paddle.min only
        # reduces a single tensor, so the original torch-style call would fail
        loss = -alpha * beta * paddle.log(
            paddle.minimum(pt + self._eps, paddle.ones([1], dtype='float32')))
        loss = self._weight * (loss * sample_weight)

        if self._size_average:
            tsum = paddle.sum(label == 1,
                              axis=misc.get_dims_with_exclusion(
                                  len(label.shape), self._batch_axis))
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape),
                                  self._batch_axis)) / (tsum + self._eps)
        else:
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape), self._batch_axis))
        return self._scale * loss
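An aside on the fix above: paddle.min reduces a single tensor along axes, whereas the elementwise two-tensor minimum (what torch.min(a, b) computes) is paddle.minimum. A minimal sketch:

import paddle

a = paddle.to_tensor([1.0, 4.0, 3.0])
b = paddle.to_tensor([2.0, 2.0, 2.0])
print(paddle.min(a))         # reduction: 1.0
print(paddle.minimum(a, b))  # elementwise: [1.0, 2.0, 2.0]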
Example #7
def visual_in_traning(log_writer, vis_dict, step):
    """
    Visual in vdl

    Args:
        log_writer (LogWriter): The log writer of vdl.
        vis_dict (dict): Dict of tensors. Each tensor has shape (C, H, W).
        step (int): The current global step.
    """
    for key, value in vis_dict.items():
        value_shape = value.shape
        if value_shape[0] not in [1, 3]:
            value = value[0]
            value = value.unsqueeze(0)
        value = paddle.transpose(value, (1, 2, 0))
        min_v = paddle.min(value)
        max_v = paddle.max(value)
        if (min_v > 0) and (max_v < 1):
            value = value * 255
        elif (min_v < 0 and min_v >= -1) and (max_v <= 1):
            value = (1 + value) / 2 * 255
        else:
            value = (value - min_v) / (max_v - min_v) * 255

        value = value.astype('uint8')
        value = value.numpy()
        log_writer.add_image(tag=key, img=value, step=step)
Example #8
    def forward(self, input, target):
        """
        Args:
            input: dict with key "features" holding a feature matrix of shape
                (batch_size, feat_dim)
            target: ground truth labels with shape (batch_size)
        """
        inputs = input["features"]

        if self.normalize_feature:
            inputs = 1. * inputs / (paddle.expand_as(
                paddle.norm(inputs, p=2, axis=-1, keepdim=True), inputs) +
                                    1e-12)

        bs = inputs.shape[0]

        # compute distance
        dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs])
        dist = dist + dist.t()
        dist = paddle.addmm(input=dist,
                            x=inputs,
                            y=inputs.t(),
                            alpha=-2.0,
                            beta=1.0)
        dist = paddle.clip(dist, min=1e-12).sqrt()

        # hard negative mining
        is_pos = paddle.expand(target, (bs, bs)).equal(
            paddle.expand(target, (bs, bs)).t())
        is_neg = paddle.expand(target, (bs, bs)).not_equal(
            paddle.expand(target, (bs, bs)).t())

        # `dist_ap` means distance(anchor, positive)
        ## both `dist_ap` and `relative_p_inds` with shape [N, 1]
        '''
        dist_ap, relative_p_inds = paddle.max(
            paddle.reshape(dist[is_pos], (bs, -1)), axis=1, keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an, relative_n_inds = paddle.min(
            paddle.reshape(dist[is_neg], (bs, -1)), axis=1, keepdim=True)
        '''
        dist_ap = paddle.max(paddle.reshape(paddle.masked_select(dist, is_pos),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an = paddle.min(paddle.reshape(paddle.masked_select(dist, is_neg),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # shape [N]
        dist_ap = paddle.squeeze(dist_ap, axis=1)
        dist_an = paddle.squeeze(dist_an, axis=1)

        # Compute ranking hinge loss
        y = paddle.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return {"TripletLossV2": loss}
Example #9
def corner_to_standup_nd(boxes_corner):
    ndim = boxes_corner.shape[2]
    standup_boxes = []
    # paddle.min / paddle.max return the values tensor directly (unlike torch,
    # which also returns indices), so no trailing [0] is needed
    for i in range(ndim):
        standup_boxes.append(paddle.min(boxes_corner[:, :, i], axis=1))
    for i in range(ndim):
        standup_boxes.append(paddle.max(boxes_corner[:, :, i], axis=1))
    return paddle.stack(standup_boxes, axis=1)
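A quick sanity check for corner_to_standup_nd might look like the following sketch; the [5, 4, 2] corner layout is a hypothetical input, not from the source.

import paddle

boxes_corner = paddle.rand([5, 4, 2])  # [N, n_corners, ndim]
standup = corner_to_standup_nd(boxes_corner)
print(standup.shape)  # [5, 4]: per-box (min_x, min_y, max_x, max_y)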
Example #10
 def test_imperative_api(self):
     paddle.disable_static()
     np_x = np.array([10, 10]).astype('float64')
     x = paddle.to_tensor(np_x)
     z = paddle.min(x, axis=0)
     np_z = z.numpy()
     z_expected = np.array(np.min(np_x, axis=0))
     self.assertEqual((np_z == z_expected).all(), True)
Example #11
def normalize(x, eps=1e-6):
    """Apply min-max normalization."""
    # x = x.contiguous()
    N, C, H, W = x.shape
    x_ = paddle.reshape(x, (N * C, -1))
    # paddle.max/paddle.min return the values tensor directly; the torch-style
    # [0] indexing would grab the first row instead
    max_val = paddle.max(x_, axis=1, keepdim=True)
    min_val = paddle.min(x_, axis=1, keepdim=True)
    x_ = (x_ - min_val) / (max_val - min_val + eps)
    out = paddle.reshape(x_, (N, C, H, W))
    return out
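A possible usage sketch, with an assumed random input; after min-max normalization every (n, c) plane lies in [0, 1) up to eps.

import paddle

x = paddle.rand([2, 3, 8, 8])
out = normalize(x)
print(float(paddle.min(out)), float(paddle.max(out)))  # roughly 0.0 and just under 1.0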
Example #12
 def test_axis_type():
     with paddle.static.program_guard(paddle.static.Program(),
                                      paddle.static.Program()):
         data = paddle.static.data("data",
                                   shape=[10, 10],
                                   dtype="int64")
         axis = paddle.static.data("axis",
                                   shape=[10, 10],
                                   dtype="int64")
         result_min = paddle.min(data, axis)
Example #13
 def _choose_paddle_func(self, func, x):
     if func == 'amax':
         out = paddle.amax(x, self.axis, self.keepdim)
     elif func == 'amin':
         out = paddle.amin(x, self.axis, self.keepdim)
     elif func == 'max':
         out = paddle.max(x, self.axis, self.keepdim)
     elif func == 'min':
         out = paddle.min(x, self.axis, self.keepdim)
     else:
         print('This unittest only test amax/amin/max/min, but now is', func)
     return out
Example #14
    def get_coord_features(self, points, batchsize, rows, cols):
        if self.cpu_mode:
            coords = []
            for i in range(batchsize):
                norm_delimeter = (1.0 if self.use_disks else
                                  self.spatial_scale * self.norm_radius)
                coords.append(
                    self._get_dist_maps(points[i].numpy().astype("float32"),
                                        rows, cols, norm_delimeter))
            coords = paddle.to_tensor(np.stack(coords,
                                               axis=0)).astype("float32")
        else:
            num_points = points.shape[1] // 2
            points = points.reshape([-1, points.shape[2]])
            points, points_order = paddle.split(points, [2, 1], axis=1)
            invalid_points = paddle.max(points, axis=1, keepdim=False) < 0
            row_array = paddle.arange(start=0,
                                      end=rows,
                                      step=1,
                                      dtype="float32")
            col_array = paddle.arange(start=0,
                                      end=cols,
                                      step=1,
                                      dtype="float32")

            coord_rows, coord_cols = paddle.meshgrid(row_array, col_array)
            coords = paddle.unsqueeze(paddle.stack([coord_rows, coord_cols],
                                                   axis=0),
                                      axis=0).tile([points.shape[0], 1, 1, 1])

            add_xy = (points * self.spatial_scale).reshape(
                [points.shape[0], points.shape[1], 1, 1])
            coords = coords - add_xy
            if not self.use_disks:
                coords = coords / (self.norm_radius * self.spatial_scale)

            coords = coords * coords
            coords[:, 0] += coords[:, 1]
            coords = coords[:, :1]
            invalid_points = invalid_points.numpy()

            coords[invalid_points, :, :, :] = 1e6
            coords = coords.reshape([-1, num_points, 1, rows, cols])
            coords = paddle.min(coords, axis=1)
            coords = coords.reshape([-1, 2, rows, cols])

        if self.use_disks:
            coords = (coords <= (self.norm_radius * self.spatial_scale)**
                      2).astype("float32")
        else:
            coords = paddle.tanh(paddle.sqrt(coords) * 2)
        return coords
Example #15
        def final_backward(R_p, pw, nw, X1):
            X = X1
            # paddle.min/paddle.max take axis= (not torch's dim=)
            L = X * 0 + \
                paddle.min(paddle.min(paddle.min(X,
                                                 axis=1, keepdim=True),
                                      axis=2, keepdim=True),
                           axis=3, keepdim=True)
            H = X * 0 + \
                paddle.max(paddle.max(paddle.max(X,
                                                 axis=1, keepdim=True),
                                      axis=2, keepdim=True),
                           axis=3, keepdim=True)
            Za = F.conv2d(X, self.weight, bias=None, stride=self._stride, padding=self._padding) - \
                F.conv2d(L, pw, bias=None, stride=self._stride, padding=self._padding) - \
                F.conv2d(H, nw, bias=None, stride=self._stride,
                         padding=self._padding)

            Sp = safe_divide(R_p, Za)

            Rp = X * self.gradprop2(Sp, self.weight) - L * \
                self.gradprop2(Sp, pw) - H * self.gradprop2(Sp, nw)
            return Rp
Example #16
 def forward(self):
     multi_level_rois = self.input('MultiLevelRois')
     multi_level_scores = self.input('MultiLevelScores')
     multi_level_rois = paddle.concat(multi_level_rois, axis=0)
     multi_level_scores = paddle.concat(multi_level_scores, axis=0)
     proposal_num = paddle.shape(multi_level_scores)[0]
     post_nms_top_n_tensor = paddle.assign(
         np.array([self.post_nms_top_n]).astype('int32'))
     k_candidate = paddle.concat([proposal_num, post_nms_top_n_tensor])
     k = paddle.min(k_candidate)
     scores, index = paddle.topk(multi_level_scores, k=k, axis=0)
     rois = paddle.gather(multi_level_rois, index, axis=0)
     return {"FpnRois": [rois]}
Example #17
    def proposal_for_single_sample(self, anchors, bbox_deltas, im_info, scores,
                                   variances):
        proposal_num = paddle.shape(scores)[0]
        pre_nms_top_n_tensor = paddle.assign(
            np.asarray([self.pre_nms_topN], dtype='int32'))
        k_candidate = paddle.concat([proposal_num, pre_nms_top_n_tensor])
        k = paddle.min(k_candidate)
        scores, index = paddle.topk(scores, k=k, axis=0)
        bbox_deltas = paddle.gather(bbox_deltas, index, axis=0)
        anchors = paddle.gather(anchors, index, axis=0)
        variances = paddle.gather(variances, index, axis=0)

        proposal = self.box_encode(anchors, bbox_deltas, variances)

        im_h, im_w, im_s = paddle.tensor.split(im_info,
                                               axis=1,
                                               num_or_sections=3)
        proposal = self.clip_tiled_boxes(im_w, im_h, proposal)

        keep = self.filter_boxes(proposal, im_w, im_h, im_s, self.min_size)

        tail_proposal = paddle.zeros(shape=[1, 4], dtype=proposal.dtype)
        proposal_num = paddle.shape(proposal)[0]
        tail_keep = paddle.reshape(proposal_num, shape=[1, 1])
        tail_keep = paddle.cast(tail_keep, dtype=keep.dtype)
        tail_scores = paddle.zeros(shape=[1, 1], dtype=scores.dtype)
        proposal = paddle.concat([proposal, tail_proposal])
        keep = paddle.concat([keep, tail_keep])
        scores = paddle.concat([scores, tail_scores])

        bbox_sel = paddle.gather(proposal, keep, axis=0)
        scores_sel = paddle.gather(scores, keep, axis=0)
        proposal = paddle.unsqueeze(bbox_sel, axis=0)
        scores = paddle.transpose(scores_sel, perm=[1, 0])
        scores = paddle.unsqueeze(scores, axis=0)
        out = layers.multiclass_nms(proposal,
                                    scores,
                                    background_label=-1,
                                    nms_top_k=self.pre_nms_topN,
                                    score_threshold=-1.,
                                    keep_top_k=self.post_nms_topN,
                                    nms_threshold=self.nms_thresh,
                                    normalized=False,
                                    nms_eta=self.eta)
        label, scores, proposal = paddle.tensor.split(
            out, axis=1, num_or_sections=[1, 1, 4])
        return scores, proposal
Example #18
def box_overlap_ignore_opr(box, gt, ignore_label=-1):
    assert box.ndim == 2
    assert gt.ndim == 2
    assert gt.shape[-1] > 4
    area_box = (box[:, 2] - box[:, 0] + 1) * (box[:, 3] - box[:, 1] + 1)
    area_gt = (gt[:, 2] - gt[:, 0] + 1) * (gt[:, 3] - gt[:, 1] + 1)
    width_height = torch.min(box[:, None, 2:], gt[:, 2:4]) - torch.max(
        box[:, None, :2], gt[:, :2])  # [N,M,2]
    width_height.clamp_(min=0)  # [N,M,2]
    inter = width_height.prod(dim=2)  # [N,M]
    del width_height
    # handle empty boxes
    iou = torch.where(inter > 0, inter / (area_box[:, None] + area_gt - inter),
                      torch.zeros((1), dtype=inter.dtype))
    ioa = torch.where(inter > 0, inter / (area_box[:, None]),
                      torch.zeros((1), dtype=inter.dtype))
    gt_ignore_mask = gt[:, 4].eq(ignore_label).repeat(box.shape[0], 1)
    iou *= ~gt_ignore_mask
    ioa *= gt_ignore_mask
    return iou, ioa
Example #19
def rough_ROI(ref_scribble_labels):
    #### b*1*h*w
    dist = 20
    b, _, h, w = ref_scribble_labels.shape
    filter_ = paddle.zeros_like(ref_scribble_labels)
    to_fill = paddle.zeros_like(ref_scribble_labels)
    for i in range(b):
        no_background = (ref_scribble_labels[i] != -1)
        no_background = no_background.squeeze(0)

        no_b = no_background.nonzero()
        (h_min, w_min) = paddle.min(no_b, 0)
        (h_max, w_max) = paddle.max(no_b, 0)
        filter_[i, 0,
                max(h_min - dist, 0):min(h_max + dist, h - 1),
                max(w_min - dist, 0):min(w_max + dist, w - 1)] = 1

    final_scribble_labels = paddle.where(byte_(filter_), ref_scribble_labels,
                                         to_fill)
    return final_scribble_labels
Example #20
    def get_points_test(self, seg_logits, uncertainty_func):  # finish
        """
        Sample points for testing.
        Find ``num_points`` most uncertain points from ``uncertainty_map``.

        Args:
            seg_logits (Tensor): A tensor of shape (batch_size, num_classes,
                height, width) for class-specific or class-agnostic prediction.
            uncertainty_func (func): uncertainty calculation function.
            cfg (dict): Testing config of point head.
        Returns:
            point_indices (Tensor): A tensor of shape (batch_size, num_points)
                that contains indices from [0, height x width) of the most
                uncertain points.
            point_coords (Tensor): A tensor of shape (batch_size, num_points,
                2) that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the ``height x width`` grid .
        """

        num_points = self.subdivision_num_points
        uncertainty_map = uncertainty_func(seg_logits)
        batch_size = paddle.shape(uncertainty_map)[0]
        height = paddle.shape(uncertainty_map)[2]
        width = paddle.shape(uncertainty_map)[3]
        h_step = 1.0 / height
        w_step = 1.0 / width

        uncertainty_map = uncertainty_map.reshape([batch_size, height * width])
        num_points = paddle.min(paddle.concat([height * width, num_points]))
        point_indices = paddle.topk(uncertainty_map, num_points, axis=1)[1]
        point_coords = paddle.zeros([batch_size, num_points, 2],
                                    dtype='float32')
        point_coords[:, :,
                     0] = w_step / 2.0 + (point_indices %
                                          width).astype('float32') * w_step
        point_coords[:, :,
                     1] = h_step / 2.0 + (point_indices //
                                          width).astype('float32') * h_step
        return point_indices, point_coords
Example #21
def _nn_features_per_object_for_chunk(reference_embeddings, query_embeddings,
                                      wrong_label_mask, k_nearest_neighbors,
                                      ys):
    """Extracts features for each object using nearest neighbor attention.
  Args:
    reference_embeddings: Tensor of shape [n_chunk, embedding_dim],
      the embedding vectors for the reference frame.
    query_embeddings: Tensor of shape [m_chunk, embedding_dim], the embedding
      vectors for the query frames.
    wrong_label_mask: Boolean mask; entries marked wrong have their
      distances padded with WRONG_LABEL_PADDING_DISTANCE.
    k_nearest_neighbors: Integer, the number of nearest neighbors to use.
  Returns:
    nn_features: A float32 tensor of nearest neighbor features of shape
      [m_chunk, n_objects, feature_dim].
    """
    #    reference_embeddings_key = reference_embeddings
    #    query_embeddings_key = query_embeddings
    dists, ys = _flattened_pairwise_distances(reference_embeddings,
                                              query_embeddings, ys)

    dists = (paddle.unsqueeze(dists, 1) +
             paddle.unsqueeze(float_(wrong_label_mask), 0) *
             WRONG_LABEL_PADDING_DISTANCE)
    if k_nearest_neighbors == 1:
        features = paddle.min(dists, 2, keepdim=True)
    else:
        dists, _ = paddle.topk(-dists, k=k_nearest_neighbors, axis=2)
        dists = -dists
        valid_mask = (dists < WRONG_LABEL_PADDING_DISTANCE)
        # valid_mask is cast with the file's float_ helper (paddle tensors have
        # no .float()); paddle.max returns the values tensor, so no [0] is needed
        masked_dists = dists * float_(valid_mask)
        pad_dist = paddle.max(masked_dists, axis=2, keepdim=True).tile(
            (1, 1, masked_dists.shape[-1]))
        dists = paddle.where(valid_mask, dists, pad_dist)
        # take mean of distances
        features = paddle.mean(dists, axis=2, keepdim=True)

    return features, ys
Example #22
def reduce_min(name: str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.min(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])

        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name,
                  exe,
                  feedkeys=['x'],
                  fetchlist=[out],
                  inputs=[x],
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
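A hypothetical invocation; saveModel and the sys.argv[1] target directory come from the surrounding export harness, so this is a sketch rather than a standalone script.

import numpy as np

x = np.random.rand(2, 3, 4).astype(np.float32)
out = reduce_min("reduce_min_axis1", x, axis=1, keepdim=False)
assert (out == np.min(x, axis=1)).all()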
Example #23
def local_previous_frame_nearest_neighbor_features_per_object(
        prev_frame_embedding,
        query_embedding,
        prev_frame_labels,
        gt_ids,
        max_distance=12):
    """Computes nearest neighbor features while only allowing local matches.
  Args:
    prev_frame_embedding: Tensor of shape [height, width, embedding_dim],
      the embedding vectors for the last frame.
    query_embedding: Tensor of shape [height, width, embedding_dim],
      the embedding vectors for the query frames.
    prev_frame_labels: Tensor of shape [height, width, 1], the class labels of
      the previous frame.
    gt_ids: Int Tensor of shape [n_objs] of the sorted unique ground truth
      ids in the first frame.
    max_distance: Integer, the maximum distance allowed for local matching.
  Returns:
    nn_features: A float32 np.array of nearest neighbor features of shape
      [1, height, width, n_objects, 1].
    """
    #     print(query_embedding.shape, prev_frame_embedding.shape)
    #     print(query_embedding.place, prev_frame_embedding.place)
    #     query_embedding = query_embedding.cpu()
    #     prev_frame_embedding = prev_frame_embedding.cpu()
    #     prev_frame_labels = prev_frame_labels.cpu()
    #     print(prev_frame_labels.place, prev_frame_embedding.place, query_embedding.place)

    d = local_pairwise_distances2(query_embedding,
                                  prev_frame_embedding,
                                  max_distance=max_distance)
    height, width = prev_frame_embedding.shape[:2]

    if MODEL_UNFOLD:

        labels = float_(prev_frame_labels).transpose([2, 0, 1]).unsqueeze(0)
        padded_labels = F.pad(labels, (
            2 * max_distance,
            2 * max_distance,
            2 * max_distance,
            2 * max_distance,
        ))
        offset_labels = F.unfold(padded_labels,
                                 kernel_sizes=[height, width],
                                 strides=[2,
                                          2]).reshape([height, width, -1, 1])
        offset_masks = paddle.equal(
            offset_labels,
            float_(gt_ids).unsqueeze(0).unsqueeze(0).unsqueeze(0))
    else:

        masks = paddle.equal(prev_frame_labels,
                             gt_ids.unsqueeze(0).unsqueeze(0))
        padded_masks = nn.functional.pad(masks, (
            0,
            0,
            max_distance,
            max_distance,
            max_distance,
            max_distance,
        ))
        offset_masks = []
        for y_start in range(2 * max_distance + 1):
            y_end = y_start + height
            masks_slice = padded_masks[y_start:y_end]
            for x_start in range(2 * max_distance + 1):
                x_end = x_start + width
                offset_mask = masks_slice[:, x_start:x_end]
                offset_masks.append(offset_mask)
        offset_masks = paddle.stack(offset_masks, axis=2)

    d_tiled = d.unsqueeze(-1).tile((1, 1, 1, gt_ids.shape[0]))
    pad = paddle.ones_like(d_tiled)
    d_masked = paddle.where(offset_masks, d_tiled, pad)
    dists = paddle.min(d_masked, axis=2)
    dists = dists.reshape([1, height, width, gt_ids.shape[0], 1])

    return dists
Example #24
def train(args):
    # Train on GPU if available
    if paddle.is_compiled_with_cuda():
        paddle.set_device("gpu:0")
    # Create multi-process game environments
    envs = MultipleEnvironments(args.game, args.num_processes)
    # Fix the random seed for reproducible initialization
    paddle.seed(123)
    # Create the model
    model = Model(envs.num_states, envs.num_actions)
    # Load a pretrained model if provided
    if args.trained_model is not None:
        model.load_dict(paddle.load(args.trained_model))
    # Create the directory for saving models
    if not os.path.isdir(args.saved_path):
        os.makedirs(args.saved_path)
    paddle.save(model.state_dict(),
                "{}/model_{}.pdparams".format(args.saved_path, args.game))
    # Start a separate process for evaluation
    mp = _mp.get_context("spawn")
    process = mp.Process(target=eval,
                         args=(args, envs.num_states, envs.num_actions))
    process.start()
    # Create the optimizer
    clip_grad = paddle.nn.ClipGradByNorm(clip_norm=0.5)
    optimizer = paddle.optimizer.Adam(parameters=model.parameters(),
                                      learning_rate=args.lr,
                                      grad_clip=clip_grad)
    # Reset the game in every worker process at startup
    [agent_conn.send(("reset", None)) for agent_conn in envs.agent_conns]
    # Fetch the initial game frames
    curr_states = [agent_conn.recv() for agent_conn in envs.agent_conns]
    curr_states = paddle.to_tensor(np.concatenate(curr_states, 0),
                                   dtype='float32')
    curr_episode = 0
    while True:
        curr_episode += 1
        old_log_policies, actions, values, states, rewards, dones = [], [], [], [], [], []
        for _ in range(args.num_local_steps):
            states.append(curr_states)
            # Run the model
            logits, value = model(curr_states)
            # Compute the probability of each action
            policy = F.softmax(logits)
            # Sample actions according to their probabilities
            old_m = Categorical(policy)
            action = old_m.sample([1]).squeeze()
            # Record the predictions
            actions.append(action)
            values.append(value.squeeze())
            # Compute the log probability of the sampled actions
            old_log_policy = old_m.log_prob(paddle.unsqueeze(action, axis=1))
            old_log_policy = paddle.squeeze(old_log_policy)
            old_log_policies.append(old_log_policy)
            # Send the actions to each game process
            [
                agent_conn.send(("step", int(act[0])))
                for agent_conn, act in zip(envs.agent_conns, action)
            ]
            # Gather the step results from all processes
            state, reward, done, info = zip(
                *[agent_conn.recv() for agent_conn in envs.agent_conns])
            # Convert the states
            state = paddle.to_tensor(np.concatenate(state, 0), dtype='float32')
            # Convert rewards and dones to tensors
            reward = paddle.to_tensor(reward, dtype='float32')
            done = paddle.to_tensor(done, dtype='float32')
            # Record the step data
            rewards.append(reward)
            dones.append(done)
            curr_states = state
        # Predict the value of the final states
        _, next_value, = model(curr_states)
        next_value = next_value.squeeze()
        old_log_policies = paddle.concat(old_log_policies).detach().squeeze()
        actions = paddle.concat(actions).squeeze()
        values = paddle.concat(values).squeeze().detach()
        states = paddle.concat(states).squeeze()

        gae = 0.0
        R = []
        for value, reward, done in list(zip(values, rewards, dones))[::-1]:
            gae = gae * args.gamma * args.tau
            gae = gae + reward + args.gamma * next_value.detach() * (
                1.0 - done) - value.detach()
            next_value = value
            R.append(gae + value)
        R = R[::-1]
        R = paddle.concat(R).detach()
        advantages = R - values
        for i in range(args.num_epochs):
            indice = paddle.randperm(args.num_local_steps * args.num_processes)
            for j in range(args.batch_size):
                batch_indices = indice[int(j * (
                    args.num_local_steps * args.num_processes / args.batch_size
                )):int((j + 1) * (args.num_local_steps * args.num_processes /
                                  args.batch_size))]
                # Run the model on the sampled states
                logits, value = model(paddle.gather(states, batch_indices))
                # Compute the probability of each action
                new_policy = F.softmax(logits)
                # Compute the log probability of the actions
                new_m = Categorical(new_policy)
                new_log_policy = new_m.log_prob(
                    paddle.unsqueeze(paddle.gather(actions, batch_indices),
                                     axis=1))
                new_log_policy = paddle.squeeze(new_log_policy)
                # Compute the actor loss
                ratio = paddle.exp(
                    new_log_policy -
                    paddle.gather(old_log_policies, batch_indices))
                advantage = paddle.gather(advantages, batch_indices)
                actor_loss = paddle.clip(ratio, 1.0 - args.epsilon,
                                         1.0 + args.epsilon) * advantage
                actor_loss = paddle.concat([
                    paddle.unsqueeze(ratio * advantage, axis=0),
                    paddle.unsqueeze(actor_loss, axis=0)
                ])
                actor_loss = -paddle.mean(paddle.min(actor_loss, axis=0))
                # Compute the critic loss
                critic_loss = F.smooth_l1_loss(paddle.gather(R, batch_indices),
                                               value.squeeze())
                entropy_loss = paddle.mean(new_m.entropy())
                # Compute the total loss
                total_loss = actor_loss + critic_loss - args.beta * entropy_loss
                # Backpropagate and update
                total_loss.backward()
                optimizer.step()
                optimizer.clear_grad()
            paddle.save(
                model.state_dict(),
                "{}/model_{}.pdparams".format(args.saved_path, args.game))
        print("Episode: {}. Total loss: {:.4f}".format(curr_episode,
                                                       total_loss.numpy()[0]))
Example #25
def train():
    global epoch
    total_reward = 0
    # Reset the environment
    state = env.reset()
    while True:
        action = actor.select_action(state)
        noisy = paddle.normal(0,
                              exploration_noise,
                              shape=[env.action_space.shape[0]
                                     ]).clip(env.action_space.low,
                                             env.action_space.high)
        action = (action + noisy).clip(env.action_space.low,
                                       env.action_space.high).numpy()

        next_state, reward, done, info = env.step(action)
        env.render()
        rpm.append((state, action, reward, next_state, float(done)))  # np.float is removed in modern NumPy

        state = next_state
        if done:
            break
        total_reward += reward

        if len(rpm) > batch_size:
            # Sample a training batch
            batch_state, batch_action, batch_reward, batch_next_state, batch_done = rpm.sample(
                batch_size)
            # Compute the target value
            best_v_1 = target_critic_1(batch_next_state,
                                       target_actor(batch_next_state))
            best_v_2 = target_critic_2(batch_next_state,
                                       target_actor(batch_next_state))
            best_v = paddle.min(paddle.concat([best_v_1, best_v_2], axis=1),
                                axis=1,
                                keepdim=True)
            best_v = batch_reward + (gamma * best_v *
                                     (1 - batch_done)).detach()

            current_v_1 = critic_1(batch_state, batch_action)
            critic_loss = F.mse_loss(current_v_1, best_v)
            critic_1_optimizer.clear_grad()
            critic_loss.backward()
            critic_1_optimizer.step()

            current_v_2 = critic_2(batch_state, batch_action)
            critic_loss = F.mse_loss(current_v_2, best_v)
            critic_2_optimizer.clear_grad()
            critic_loss.backward()
            critic_2_optimizer.step()

            if epoch % policy_delay == 0:
                actor_loss = -critic_1(batch_state, actor(batch_state)).mean()
                actor_optimizer.clear_grad()
                actor_loss.backward()
                actor_optimizer.step()

            # Update the target networks every fixed number of steps
            if epoch % 200 == 0:
                for target_param, param in zip(target_actor.parameters(),
                                               actor.parameters()):
                    target_param.set_value(target_param * (1.0 - ratio) +
                                           param * ratio)
                for target_param, param in zip(target_critic_1.parameters(),
                                               critic_1.parameters()):
                    target_param.set_value(target_param * (1.0 - ratio) +
                                           param * ratio)
                for target_param, param in zip(target_critic_2.parameters(),
                                               critic_2.parameters()):
                    target_param.set_value(target_param * (1.0 - ratio) +
                                           param * ratio)
            epoch += 1

    return total_reward
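The concat-then-reduce pattern in the critic update computes the TD3 clipped double-Q target min(Q1, Q2). With critic outputs of shape [batch, 1], the elementwise form below should be equivalent (made-up values for illustration):

import paddle

q1 = paddle.to_tensor([[1.0], [3.0]])
q2 = paddle.to_tensor([[2.0], [2.5]])
target = paddle.minimum(q1, q2)  # same result as concat + paddle.min(axis=1, keepdim=True)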
Example #26
    def __call__(self,
                 seg_preds,
                 seg_masks,
                 cate_labels,
                 cate_scores,
                 sum_masks=None):
        # sort and keep top nms_pre
        sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)
        seg_masks = paddle.gather(seg_masks, index=sort_inds)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        sum_masks = paddle.gather(sum_masks, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)

        seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)
        # inter.
        inter_matrix = paddle.mm(seg_masks,
                                 paddle.transpose(seg_masks, [1, 0]))
        n_samples = paddle.shape(cate_labels)
        # union.
        sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])
        # iou.
        iou_matrix = (inter_matrix /
                      (sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) -
                       inter_matrix))
        iou_matrix = paddle.triu(iou_matrix, diagonal=1)
        # label_specific matrix.
        cate_labels_x = paddle.expand(cate_labels,
                                      shape=[n_samples, n_samples])
        label_matrix = paddle.cast(
            (cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),
            'float32')
        label_matrix = paddle.triu(label_matrix, diagonal=1)

        # IoU compensation
        compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)
        compensate_iou = paddle.expand(compensate_iou,
                                       shape=[n_samples, n_samples])
        compensate_iou = paddle.transpose(compensate_iou, [1, 0])

        # IoU decay
        decay_iou = iou_matrix * label_matrix

        # matrix nms
        if self.kernel == 'gaussian':
            decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))
            compensate_matrix = paddle.exp(-1 * self.sigma *
                                           (compensate_iou**2))
            decay_coefficient = paddle.min(decay_matrix / compensate_matrix,
                                           axis=0)
        elif self.kernel == 'linear':
            decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
            decay_coefficient = paddle.min(decay_matrix, axis=0)
        else:
            raise NotImplementedError

        # update the score.
        cate_scores = cate_scores * decay_coefficient
        y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')
        keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,
                            y)
        keep = paddle.nonzero(keep)
        keep = paddle.squeeze(keep, axis=[1])
        # Prevent empty and increase fake data
        keep = paddle.concat(
            [keep,
             paddle.cast(paddle.shape(cate_scores)[0] - 1, 'int64')])

        seg_preds = paddle.gather(seg_preds, index=keep)
        cate_scores = paddle.gather(cate_scores, index=keep)
        cate_labels = paddle.gather(cate_labels, index=keep)

        # sort and keep top_k
        sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)
        return seg_preds, cate_scores, cate_labels
Example #27
 def is_done(self):
     return paddle.min(self._done) == 1
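paddle.min(self._done) == 1 holds only when every element of self._done is 1, i.e. all sub-environments have finished. A tiny sketch:

import paddle

done = paddle.to_tensor([1, 1, 0])
print(bool(paddle.min(done) == 1))  # False until every entry is 1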
Example #28
def min(x, dim=None, keepdim=False):
    return varbase_to_tensor(paddle.min(x, dim, keepdim=keepdim))
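This wrapper emulates the torch-style min(x, dim) signature on top of paddle; varbase_to_tensor is a helper from the surrounding compatibility layer. The underlying paddle call it forwards to behaves as in this sketch:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [0.5, 3.0]])
print(paddle.min(x, 1, keepdim=False))  # [1.0, 0.5]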
Example #29
def min(x1, x2):
    """compare 2 tensors and take min values"""
    return paddle.min(paddle.concat([x1, x2]))
Example #30
 def test_input_type():
     with paddle.static.program_guard(paddle.static.Program(),
                                      paddle.static.Program()):
         data = np.random.rand(10, 10)
         result_min = paddle.min(x=data, axis=0)
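This last test expects a failure: paddle.min operates on paddle Tensors/Variables, not raw numpy arrays, so passing data directly should raise a TypeError. A sketch of a working dygraph version:

import numpy as np
import paddle

paddle.disable_static()
data = paddle.to_tensor(np.random.rand(10, 10))  # convert before calling paddle.min
result_min = paddle.min(data, axis=0)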