Example #1
    def bbox_transform(self, deltas, weights=[0.1, 0.1, 0.2, 0.2]):
        wx, wy, ww, wh = weights

        # In paddle.reshape, a 0 in shape keeps the corresponding input dim.
        deltas = paddle.reshape(deltas, shape=(0, -1, 4))

        dx = paddle.slice(deltas, axes=[2], starts=[0], ends=[1]) * wx
        dy = paddle.slice(deltas, axes=[2], starts=[1], ends=[2]) * wy
        dw = paddle.slice(deltas, axes=[2], starts=[2], ends=[3]) * ww
        dh = paddle.slice(deltas, axes=[2], starts=[3], ends=[4]) * wh

        dw = paddle.clip(dw, -1.e10, np.log(1000. / 16))
        dh = paddle.clip(dh, -1.e10, np.log(1000. / 16))

        pred_ctr_x = dx
        pred_ctr_y = dy
        pred_w = paddle.exp(dw)
        pred_h = paddle.exp(dh)

        x1 = pred_ctr_x - 0.5 * pred_w
        y1 = pred_ctr_y - 0.5 * pred_h
        x2 = pred_ctr_x + 0.5 * pred_w
        y2 = pred_ctr_y + 0.5 * pred_h

        x1 = paddle.reshape(x1, shape=(-1, ))
        y1 = paddle.reshape(y1, shape=(-1, ))
        x2 = paddle.reshape(x2, shape=(-1, ))
        y2 = paddle.reshape(y2, shape=(-1, ))

        return paddle.concat([x1, y1, x2, y2])
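A minimal functional sketch of the same decode (the name decode_unit_anchor is hypothetical) makes the arithmetic easy to verify: with zero deltas every box decodes to a unit square around the origin.

import numpy as np
import paddle

def decode_unit_anchor(deltas, weights=(0.1, 0.1, 0.2, 0.2)):
    # Same arithmetic as bbox_transform above, written as a free function.
    wx, wy, ww, wh = weights
    d = paddle.reshape(deltas, (-1, 4))
    dx, dy = d[:, 0] * wx, d[:, 1] * wy
    dw = paddle.clip(d[:, 2] * ww, max=float(np.log(1000.0 / 16)))
    dh = paddle.clip(d[:, 3] * wh, max=float(np.log(1000.0 / 16)))
    w, h = paddle.exp(dw), paddle.exp(dh)
    return paddle.stack(
        [dx - 0.5 * w, dy - 0.5 * h, dx + 0.5 * w, dy + 0.5 * h], axis=1)

print(decode_unit_anchor(paddle.zeros([2, 4])))  # rows of [-0.5, -0.5, 0.5, 0.5]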
Example #2
def get_positive_expectation(p_samples, measure, average=True):
    """Get the expectation from positive samples for given measurement."""
    if measure == 'GAN':
        Ep = -F.softplus(-p_samples)
    elif measure == 'JSD':
        Ep = np.log(2.0) - F.softplus(-p_samples)
    elif measure == 'X2':
        Ep = p_samples * p_samples
    elif measure == 'KL':
        Ep = p_samples + 1.
    elif measure == 'RKL':
        Ep = -paddle.exp(-p_samples)
    elif measure == 'DV':
        Ep = p_samples
    elif measure == 'H2':
        Ep = 1. - paddle.exp(-p_samples)
    elif measure == 'W1':
        Ep = p_samples
    else:
        raise ValueError('Unknown measurement {}'.format(measure))

    if average:
        return paddle.mean(Ep)
    else:
        return Ep
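A quick usage sketch, with the imports the snippet assumes (paddle, numpy as np, paddle.nn.functional as F):

import numpy as np
import paddle
import paddle.nn.functional as F

p_samples = paddle.to_tensor([0.3, 1.2, -0.5])
ep = get_positive_expectation(p_samples, 'JSD', average=False)
# elementwise log(2) - softplus(-p_samples)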
Example #3
    def softmax_with_cross_entropy(self, shard_logit, shard_one_hot):
        shard_max = paddle.max(shard_logit, axis=1, keepdim=True)
        global_max = shard_max
        paddle.distributed.all_reduce(global_max,
                                      op=paddle.distributed.ReduceOp.MAX)
        shard_logit_new = paddle.subtract(shard_logit, global_max)

        shard_exp = paddle.exp(shard_logit_new)
        shard_denom = paddle.sum(shard_exp, axis=1, keepdim=True)
        global_denom = shard_denom
        paddle.distributed.all_reduce(global_denom,
                                      op=paddle.distributed.ReduceOp.SUM)

        global_log_denom = paddle.log(global_denom)
        shard_log_prob = shard_logit_new - global_log_denom
        shard_prob = paddle.exp(shard_log_prob)

        # shard_log_prob is <= 0 everywhere and shard_one_hot zeroes the
        # non-target entries, so paddle.min extracts the target log-prob on
        # the shard holding the label and yields 0 on every other shard.
        target_log_prob = paddle.min(shard_log_prob * shard_one_hot,
                                     axis=1,
                                     keepdim=True)
        shard_loss = paddle.scale(target_log_prob, scale=-1.0)
        #TODO paddle.distributed.reducescatter not found
        global_loss = paddle.fluid.layers.collective._c_reducescatter(
            shard_loss, nranks=self.nranks, use_calc_stream=True)
        return global_loss, shard_prob
Example #4
def delta2bbox(deltas, boxes, weights):
    clip_scale = math.log(1000.0 / 16)

    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    wx, wy, ww, wh = weights
    dx = deltas[:, 0::4] / wx
    dy = deltas[:, 1::4] / wy
    dw = deltas[:, 2::4] / ww
    dh = deltas[:, 3::4] / wh
    # Prevent sending too large values into paddle.exp()
    dw = paddle.clip(dw, max=clip_scale)
    dh = paddle.clip(dh, max=clip_scale)

    pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
    pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
    pred_w = paddle.exp(dw) * widths.unsqueeze(1)
    pred_h = paddle.exp(dh) * heights.unsqueeze(1)

    pred_boxes = []
    pred_boxes.append(pred_ctr_x - 0.5 * pred_w)
    pred_boxes.append(pred_ctr_y - 0.5 * pred_h)
    pred_boxes.append(pred_ctr_x + 0.5 * pred_w)
    pred_boxes.append(pred_ctr_y + 0.5 * pred_h)
    pred_boxes = paddle.stack(pred_boxes, axis=-1)

    return pred_boxes
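A zero-delta round trip is a convenient sanity check: with unit weights the decoded boxes must reproduce the inputs.

import math
import paddle

boxes = paddle.to_tensor([[0., 0., 10., 10.]])
deltas = paddle.zeros([1, 4])
print(delta2bbox(deltas, boxes, weights=[1., 1., 1., 1.]))
# [[[0., 0., 10., 10.]]] -- shape [N, num_deltas_per_box, 4]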
Example #5
def bbox_transform_inv_opr(bbox, deltas):
    """Transform the learned deltas into the final bbox coordinates along axis 1."""
    max_delta = math.log(1000.0 / 16)
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
    pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height

    dw = deltas[:, 2]
    dh = deltas[:, 3]
    dw = paddle.clip(dw, max=max_delta)
    dh = paddle.clip(dh, max=max_delta)
    pred_width = bbox_width * paddle.exp(dw)
    pred_height = bbox_height * paddle.exp(dh)

    pred_x1 = pred_ctr_x - 0.5 * pred_width
    pred_y1 = pred_ctr_y - 0.5 * pred_height
    pred_x2 = pred_ctr_x + 0.5 * pred_width
    pred_y2 = pred_ctr_y + 0.5 * pred_height
    pred_boxes = paddle.concat(
        (pred_x1.reshape((-1, 1)), pred_y1.reshape((-1, 1)),
         pred_x2.reshape((-1, 1)), pred_y2.reshape((-1, 1))),
        axis=1)
    return pred_boxes
Example #6
def decode_yolo(box, anchor, downsample_ratio):
    """decode yolo box

    Args:
        box (list): [x, y, w, h], all have the shape [b, na, h, w, 1]
        anchor (list): anchor with the shape [na, 2]
        downsample_ratio (int): downsample ratio, default 32

    Return:
        box (list): decoded box, [x, y, w, h], all have the shape [b, na, h, w, 1]
    """
    x, y, w, h = box
    na, grid_h, grid_w = x.shape[1:4]
    grid = make_grid(grid_h, grid_w, x.dtype).reshape(
        (1, 1, grid_h, grid_w, 2))
    x1 = (x + grid[:, :, :, :, 0:1]) / grid_w
    y1 = (y + grid[:, :, :, :, 1:2]) / grid_h

    anchor = paddle.to_tensor(anchor)
    anchor = paddle.cast(anchor, x.dtype)
    anchor = anchor.reshape((1, na, 1, 1, 2))
    w1 = paddle.exp(w) * anchor[:, :, :, :, 0:1] / (downsample_ratio * grid_w)
    h1 = paddle.exp(h) * anchor[:, :, :, :, 1:2] / (downsample_ratio * grid_h)

    return [x1, y1, w1, h1]
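decode_yolo relies on a make_grid helper that is not shown above; a minimal sketch consistent with how the grid is indexed (one (x, y) cell coordinate per location) might look like this:

import paddle

def make_grid(h, w, dtype):
    # [h, w, 2] tensor holding the (x, y) integer coordinate of every cell
    yv, xv = paddle.meshgrid([paddle.arange(h), paddle.arange(w)])
    return paddle.cast(paddle.stack((xv, yv), axis=2), dtype)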
Example #7
    def forward(self, true_binary, rule_masks, raw_logits):
        """
        tbd
        """
        if cmd_args.loss_type == 'binary':
            exp_pred = paddle.exp(raw_logits) * rule_masks

            norm = paddle.sum(exp_pred, axis=2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            return F.binary_cross_entropy(
                prob, true_binary) * cmd_args.max_decode_steps

        if cmd_args.loss_type == 'perplexity':
            my_perp_loss = MyPerpLoss()
            return my_perp_loss(true_binary, rule_masks, raw_logits)

        if cmd_args.loss_type == 'vanilla':
            exp_pred = paddle.exp(raw_logits) * rule_masks + 1e-30
            norm = paddle.sum(exp_pred, 2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            ll = paddle.abs(paddle.sum(true_binary * prob, 2))
            mask = 1 - rule_masks[:, :, -1]
            logll = mask * paddle.log(ll)

            loss = -paddle.sum(logll) / true_binary.shape[1]

            return loss
        raise NotImplementedError('unknown loss type %s' % cmd_args.loss_type)
Example #8
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box decode for VoxelNet in lidar
    Args:
        boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors
    """
    xa, ya, wa, la, ra = paddle.split(anchors, 5, axis=-1)
    if encode_angle_to_vector:
        xt, yt, wt, lt, rtx, rty = paddle.split(
            box_encodings, 6, axis=-1)

    else:
        xt, yt, wt, lt, rt = paddle.split(box_encodings, 5, axis=-1)

    # xt, yt, zt, wt, lt, ht, rt = paddle.split(box_encodings, 1, axis=-1)
    diagonal = paddle.sqrt(la**2 + wa**2)
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
    else:
        lg = paddle.exp(lt) * la
        wg = paddle.exp(wt) * wa
    if encode_angle_to_vector:
        rax = paddle.cos(ra)
        ray = paddle.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        rg = paddle.atan2(rgy, rgx)
    else:
        rg = rt + ra
    return paddle.concat([xg, yg, wg, lg, rg], axis=-1)
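Zero encodings should decode back to the anchors themselves, which gives a cheap check of the default (non-vector-angle) path:

import paddle

anchors = paddle.rand([8, 5])      # x, y, w, l, r per anchor
encodings = paddle.zeros([8, 5])
decoded = bev_box_decode(encodings, anchors)
# decoded equals anchors: xg = xa, lg = exp(0) * la, rg = 0 + ra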
Example #9
def delta2bbox(deltas, boxes, weights):
    clip_scale = math.log(1000.0 / 16)
    if boxes.shape[0] == 0:
        return paddle.zeros((0, deltas.shape[1]), dtype='float32')

    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    wx, wy, ww, wh = weights
    dx = deltas[:, 0::4] / wx
    dy = deltas[:, 1::4] / wy
    dw = deltas[:, 2::4] / ww
    dh = deltas[:, 3::4] / wh
    # Prevent sending too large values into paddle.exp()
    dw = paddle.clip(dw, max=clip_scale)
    dh = paddle.clip(dh, max=clip_scale)

    pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
    pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
    pred_w = paddle.exp(dw) * widths.unsqueeze(1)
    pred_h = paddle.exp(dh) * heights.unsqueeze(1)

    pred_boxes = paddle.zeros_like(deltas)

    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
    return pred_boxes
Example #10
    def box_encode(self, anchors, bbox_deltas, variances):
        anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax = paddle.tensor.split(
            anchors, axis=1, num_or_sections=4)
        anchor_width = anchor_xmax - anchor_xmin + 1.0
        anchor_height = anchor_ymax - anchor_ymin + 1.0
        anchor_center_x = anchor_xmin + 0.5 * anchor_width
        anchor_center_y = anchor_ymin + 0.5 * anchor_height
        var_center_x, var_center_y, var_width, var_height = paddle.tensor.split(
            variances, axis=1, num_or_sections=4)
        delta_center_x, delta_center_y, delta_width, delta_height = paddle.tensor.split(
            bbox_deltas, axis=1, num_or_sections=4)

        bbox_center_x = var_center_x * delta_center_x * anchor_width + anchor_center_x
        bbox_center_y = var_center_y * delta_center_y * anchor_height + anchor_center_y
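        # BBOX_CLIP_DEFAULT is defined elsewhere in the module; it is
        # typically log(1000 / 16), the same exp-overflow guard used in the
        # other decode examples.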
        bbox_width = paddle.exp(
            paddle.clip(var_width * delta_width,
                        max=BBOX_CLIP_DEFAULT)) * anchor_width
        bbox_height = paddle.exp(
            paddle.clip(var_height * delta_height,
                        max=BBOX_CLIP_DEFAULT)) * anchor_height

        proposal_xmin = bbox_center_x - bbox_width / 2
        proposal_ymin = bbox_center_y - bbox_height / 2
        proposal_xmax = bbox_center_x + bbox_width / 2 - 1
        proposal_ymax = bbox_center_y + bbox_height / 2 - 1
        proposal = paddle.concat(
            [proposal_xmin, proposal_ymin, proposal_xmax, proposal_ymax],
            axis=1)
        return proposal
Example #11
File: wavenet.py  Project: gkbxs/Parakeet
    def compute_mog_loss(self, y, t):
        """Compute the loss where output distributions is a mixture of 
        Gaussians distributions.

        Parameters
        -----------
        y : Tensor [shape=(B, T, C_output)]
            The parameters of the output distribution. It is the concatenation 
            of 3 parts: the logits of every distribution, the mean of each 
            distribution, and the log standard deviation of each distribution. 
            
            Each part's shape is (B, T, n_mixture), where ``n_mixture`` means 
            the number of Gaussians in the mixture.
            
        t : Tensor [shape=(B, T)]
            The target audio. 
            
        Notes
        -------
        Output distributions whose input contains padding are ignored in 
        loss computation, so the first ``context_size`` steps do not 
        contribute to the loss.

        Returns
        --------
        Tensor: [shape=(1,)]
            The loss.
        """
        n_mixture = self.output_dim // 3

        # context size is not taken into account
        y = y[:, self.context_size:, :]
        t = t[:, self.context_size:]

        w, mu, log_std = paddle.split(y, 3, axis=2)
        # 100.0 is just a large float
        log_std = paddle.clip(log_std, min=self.log_scale_min, max=100.)
        inv_std = paddle.exp(-log_std)
        p_mixture = F.softmax(w, -1)

        t = paddle.unsqueeze(t, -1)
        if n_mixture > 1:
            # t = F.expand_as(t, log_std)
            t = paddle.expand(t, [-1, -1, n_mixture])

        x_std = inv_std * (t - mu)
        exponent = paddle.exp(-0.5 * x_std * x_std)
        pdf_x = 1.0 / math.sqrt(2.0 * math.pi) * inv_std * exponent

        pdf_x = p_mixture * pdf_x
        # pdf_x: [bs, len]
        pdf_x = paddle.sum(pdf_x, -1)
        per_sample_loss = -paddle.log(pdf_x + 1e-9)

        loss = paddle.mean(per_sample_loss)
        return loss
Example #12
 def forward(self, r):
     batch_size = r.size  # total element count; assumes r is shaped [batch_size, 1]
     K = self.K
     ratio_r = r / self.cut_r
     phi = 1 - 6 * ratio_r.pow(5) + 15 * ratio_r.pow(4) - 10 * ratio_r.pow(3)
     phi = paddle.expand(phi, shape=[batch_size, K])
     local_r = paddle.expand(r, shape=[batch_size, K])
     g = phi * paddle.exp(
         -self.beta.expand([batch_size, K]) *
         (paddle.exp(-local_r) - self.mu.expand([batch_size, K]))**2)
     return g
Example #13
 def decode_delta(self, delta, fg_anchor_list):
     px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \
                     fg_anchor_list[:, 2], fg_anchor_list[:,3]
     dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]
     gx = pw * dx + px
     gy = ph * dy + py
     gw = pw * paddle.exp(dw)
     gh = ph * paddle.exp(dh)
     gx1 = gx - gw * 0.5
     gy1 = gy - gh * 0.5
     gx2 = gx + gw * 0.5
     gy2 = gy + gh * 0.5
     return paddle.stack([gx1, gy1, gx2, gy2], axis=1)
Example #14
    def forward(self, x_float):
        x_embedding = 0
        for i in range(x_float.shape[1]):
            x = x_float[:, i]
            x = paddle.reshape(x, [-1, 1])
            if i == 0:
                gaussian_expansion = paddle.exp(-(x - self.centers1)**2 /
                                                self.width**2)
            elif i == 1:
                gaussian_expansion = paddle.exp(-(x - self.centers2)**2 /
                                                self.width**2)
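            # Only two feature columns are handled; for i >= 2 the expansion
            # from the previous iteration would be reused silently.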

            x_embedding += self.atom_embedding_list[i](gaussian_expansion)
        return x_embedding
Example #15
    def __init__(self, channels, scale):
        super(AntiAliasInterpolation2d, self).__init__()
        sigma = (1 / scale - 1) / 2
        kernel_size = 2 * round(sigma * 4) + 1
        self.ka = kernel_size // 2
        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka

        kernel_size = [kernel_size, kernel_size]
        sigma = [sigma, sigma]
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = paddle.meshgrid(
            [paddle.arange(size, dtype='float32') for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= paddle.exp(-(mgrid - mean)**2 / (2 * std**2 + 1e-9))

        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / paddle.sum(kernel)
        # Reshape to depthwise convolutional weight
        kernel = kernel.reshape([1, 1, *kernel.shape])
        kernel = paddle.tile(kernel, [channels, *[1] * (kernel.dim() - 1)])

        self.register_buffer('weight', kernel)
        self.groups = channels
        self.scale = scale
Example #16
def exponential(M: int,
                center=None,
                tau=1.,
                sym: bool = True,
                dtype: str = 'float64') -> Tensor:
    """Compute an exponential (or Poisson) window.
    Parameters:
        M(int): window size.
        center(float, optional): the window center relative to the first
            sample; must be None when sym is True. Defaults to (M - 1) / 2.
        tau(float): the window-specific decay parameter.
        sym(bool): whether to return a symmetric window.
            The default value is True
        dtype(str): the datatype of returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.exponential().
    """
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)

    if center is None:
        center = (M - 1) / 2

    n = paddle.arange(0, M, dtype=dtype)
    w = paddle.exp(-paddle.abs(n - center) / tau)

    return _truncate(w, needs_trunc)
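Usage mirrors SciPy (this assumes the private helpers _len_guards, _extend, and _truncate from the same module):

w = exponential(8, tau=2.0)
# matches scipy.signal.windows.exponential(8, tau=2.0)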
Example #17
    def forward(self, fpn_feats, is_training):
        assert len(fpn_feats) == len(
            self.fpn_stride
        ), "The size of fpn_feats is not equal to size of fpn_stride"
        cls_logits_list = []
        bboxes_reg_list = []
        centerness_list = []
        for scale_reg, fpn_stride, fpn_feat in zip(self.scales_regs,
                                                   self.fpn_stride, fpn_feats):
            fcos_cls_feat, fcos_reg_feat = self.fcos_feat(fpn_feat)
            cls_logits = self.fcos_head_cls(fcos_cls_feat)
            bbox_reg = scale_reg(self.fcos_head_reg(fcos_reg_feat))
            if self.centerness_on_reg:
                centerness = self.fcos_head_centerness(fcos_reg_feat)
            else:
                centerness = self.fcos_head_centerness(fcos_cls_feat)
            if self.norm_reg_targets:
                bbox_reg = F.relu(bbox_reg)
                if not is_training:
                    bbox_reg = bbox_reg * fpn_stride
            else:
                bbox_reg = paddle.exp(bbox_reg)
            cls_logits_list.append(cls_logits)
            bboxes_reg_list.append(bbox_reg)
            centerness_list.append(centerness)

        if not is_training:
            locations_list = []
            for fpn_stride, feature in zip(self.fpn_stride, fpn_feats):
                location = self._compute_locations_by_level(fpn_stride, feature)
                locations_list.append(location)

            return locations_list, cls_logits_list, bboxes_reg_list, centerness_list
        else:
            return cls_logits_list, bboxes_reg_list, centerness_list
Example #18
 def forward(self, r):
     batch_size = r.size
     K = self.K
     local_r = paddle.expand(r, shape=[batch_size, K])
     g = paddle.exp(-self.beta.expand([batch_size, K]) *
                    (local_r - self.mu.expand([batch_size, K]))**2)
     return g
Example #19
    def train_step(self, state_batch, mcts_probs, winner_batch, lr=0.002):
        """perform a training step"""
        # wrap in Variable
        state_batch = paddle.to_tensor(state_batch)
        mcts_probs = paddle.to_tensor(mcts_probs)
        winner_batch = paddle.to_tensor(winner_batch)

        # zero the parameter gradients
        self.optimizer.clear_gradients()
        # set learning rate
        self.optimizer.set_lr(lr)

        # forward
        log_act_probs, value = self.policy_value_net(state_batch)
        # define the loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        # Note: the L2 penalty is incorporated in optimizer
        value = paddle.reshape(x=value, shape=[-1])
        value_loss = F.mse_loss(input=value, label=winner_batch)
        policy_loss = -paddle.mean(paddle.sum(mcts_probs*log_act_probs, axis=1))
        loss = value_loss + policy_loss
        # backward and optimize
        loss.backward()
        self.optimizer.minimize(loss)
        # calc policy entropy, for monitoring only
        entropy = -paddle.mean(
                paddle.sum(paddle.exp(log_act_probs) * log_act_probs, axis=1)
                )
        return loss.numpy(), entropy.numpy()[0]    
Example #20
    def get_output_and_grid(self, output, k, stride):
        grid = self.grids[k]

        batch_size = output.shape[0]
        n_ch = 5 + self.num_classes
        hsize, wsize = output.shape[-2:]
        if grid.shape[2:4] != output.shape[2:4]:
            yv, xv = paddle.meshgrid(
                [paddle.arange(hsize),
                 paddle.arange(wsize)])
            grid = paddle.stack((xv, yv), 2)
            grid = paddle.reshape(grid, (1, 1, hsize, wsize, 2))
            grid = paddle.cast(grid, dtype=output.dtype)
            self.grids[k] = grid

        output = paddle.reshape(
            output, (batch_size, self.n_anchors, n_ch, hsize, wsize))
        output = paddle.transpose(output, [0, 1, 3, 4, 2])
        output = paddle.reshape(output,
                                (batch_size, self.n_anchors * hsize * wsize,
                                 -1))  # [N, 1 * 80 * 80, 85]
        grid = paddle.reshape(grid, (1, -1, 2))  # [1, 1 * 80 * 80, 2]

        xy = (output[:, :, :2] + grid) * stride  # [N, 1 * 80 * 80, 2]  decode xy
        wh = paddle.exp(output[:, :,
                               2:4]) * stride  # [N, 1 * 80 * 80, 2]  decode wh
        output = paddle.concat([xy, wh, output[:, :, 4:]],
                               2)  # [N, 1 * 80 * 80, 85]  decoded xywh written back
        return output, grid
Example #21
    def forward(self, fpn_feats, spatial_scale, mode):
        cls_logits_list = []
        bboxes_reg_list = []
        centerness_list = []
        assert len(fpn_feats) == len(
            self.fpn_stride
        ), "The size of fpn_feats is not equal to size of fpn_stride"
        fcos_cls_feats, fcos_reg_feats = self.fcos_feat(fpn_feats)

        for scale_reg, fpn_stride, fcos_cls_feat, fcos_reg_feat in zip(
                self.scales_regs, self.fpn_stride, fcos_cls_feats,
                fcos_reg_feats):
            cls_logits = self.fcos_head_cls[0](fcos_cls_feat)
            bbox_reg = self.fcos_head_reg[0](fcos_reg_feat)
            bbox_reg = scale_reg(bbox_reg)
            if self.centerness_on_reg:
                centerness = self.fcos_head_centerness[0](fcos_reg_feat)
            else:
                centerness = self.fcos_head_centerness[0](fcos_cls_feat)

            if self.norm_reg_targets:
                bbox_reg = F.relu(bbox_reg)
                if mode == 'infer':
                    bbox_reg = bbox_reg * fpn_stride
            else:
                bbox_reg = paddle.exp(bbox_reg)

            cls_logits_list.append(cls_logits)
            bboxes_reg_list.append(bbox_reg)
            centerness_list.append(centerness)
        return cls_logits_list, bboxes_reg_list, centerness_list
Example #22
def decode_yolo(box, anchor, downsample_ratio):
    """decode yolo box

    Args:
        box (Tensor): pred with the shape [b, h, w, na, 4]
        anchor (list): anchor with the shape [na, 2]
        downsample_ratio (int): downsample ratio, default 32

    Return:
        box (Tensor): decoded box, with the shape [b, h, w, na, 4]
    """
    h, w, na = box.shape[1:4]
    grid = make_grid(h, w, box.dtype).reshape((1, h, w, 1, 2))
    box[:, :, :, :, 0:2] = box[:, :, :, :, :2] + grid
    box[:, :, :, :, 0] = box[:, :, :, :, 0] / w
    box[:, :, :, :, 1] = box[:, :, :, :, 1] / h

    anchor = paddle.to_tensor(anchor)
    anchor = paddle.cast(anchor, box.dtype)
    anchor = anchor.reshape((1, 1, 1, na, 2))
    box[:, :, :, :, 2:4] = paddle.exp(box[:, :, :, :, 2:4]) * anchor
    box[:, :, :, :, 2] = box[:, :, :, :, 2] / (downsample_ratio * w)
    box[:, :, :, :, 3] = box[:, :, :, :, 3] / (downsample_ratio * h)
    return box
Example #23
def gaussian(M: int,
             std: float,
             sym: bool = True,
             dtype: str = 'float64') -> Tensor:
    """Compute a Gaussian window.
    The Gaussian window has a Gaussian shape defined by the standard deviation (std).

    Parameters:
        M(int): window size.
        std(float): the window-specific parameter.
        sym(bool): whether to return a symmetric window.
            The default value is True
        dtype(str): the datatype of returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.gaussian().
    """
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)

    n = paddle.arange(0, M, dtype=dtype) - (M - 1.0) / 2.0
    sig2 = 2 * std * std
    w = paddle.exp(-n**2 / sig2)

    return _truncate(w, needs_trunc)
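As with exponential above, usage follows SciPy:

w = gaussian(9, std=2.0)
# peak of 1.0 at the center sample; matches scipy.signal.windows.gaussian(9, 2.0)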
Example #24
 def forward(self, inputs):
     x = self.bn1(inputs)
     x = paddle.reshape(x, [1, 3 * 16 * 16])
     x = self.fc1(x)
     x = paddle.unsqueeze(x, axis=2)
     x = self.relu1(x)
     y = paddle.full(x.shape, 1.0, dtype='float32')
     # x = paddle.stack([x, y], axis=3)
     x = paddle.slice(x, axes=[0], starts=[0], ends=[1])
     x = paddle.exp(x)
     # y += paddle.fluid.layers.uniform_random(y.shape)
     y = paddle.expand(y, shape=[1, 768, 768, 2])
     x = paddle.expand(x, shape=[1, 768, 768, 2])
     out = paddle.concat([x, y])
     out = self.dp(out)
     out = channel_shuffle(out, 2)
     out1, out2 = paddle.split(out, num_or_sections=2, axis=1)
     outshape = out1.shape
     max_idx = paddle.argmax(out1.reshape(
         (outshape[0], outshape[1], outshape[2] * outshape[3])),
                             axis=-1)
     out2 = out2.reshape(
         (outshape[0], outshape[1], outshape[2] * outshape[3]))
     res, _ = self.lstm(out2)
     return res, max_idx
Example #25
File: math.py  Project: Yelrose/PGL
def segment_softmax(data, segment_ids):
    """
    Segment softmax operator.
    
    This operator calculates the softmax of the elements of `data` that
    share the same index in `segment_ids`.
    
    Args:
        data (tensor): a tensor, available data type float32, float64.
        segment_ids (tensor): a 1-d tensor, whose size equals the first
                            dimension of the input data.
                            available data type is int32, int64.
    
    Returns:
       output (Tensor): the softmax result.
    
    Examples:
    
        .. code-block:: python
    
            import paddle
            import pgl
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = pgl.math.segment_softmax(data, segment_ids)
            #Outputs: [[0.11920292, 0.50000000, 0.88079703], [0.88079709, 0.50000000, 0.11920292], [1., 1., 1.]]
    
    """
    data_max = segment_max(data, segment_ids)
    data_max = paddle.gather(data_max, segment_ids, axis=0)
    # subtract the per-segment max before exp for numerical stability
    data = data - data_max
    data = paddle.exp(data)
    sum_data = segment_sum(data, segment_ids)
    sum_data = paddle.gather(sum_data, segment_ids, axis=0)
    return data / sum_data
Example #26
def convert_locations_to_boxes(locations, priors, center_variance,
                               size_variance):
    """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).

    The conversion:
        $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
        $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
    We do it in the inverse direction here.
    Args:
        locations (batch_size, num_priors, 4): the regression output of SSD.
        priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
        center_variance: a float used to change the scale of center.
        size_variance: a float used to change the scale of size.
    Returns:
        boxes:  priors: [[center_x, center_y, h, w]]. All the values
            are relative to the image size.
    """
    # priors can have one dimension less.
    if priors.dim() + 1 == locations.dim():
        priors = priors.unsqueeze(0)
    return paddle.concat([
        locations[..., :2] * center_variance * priors[..., 2:] +
        priors[..., :2],
        paddle.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
    ],
                         locations.dim() - 1)
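Zero locations must recover the priors, since both offset terms vanish:

import paddle

priors = paddle.to_tensor([[0.5, 0.5, 0.2, 0.2]])  # cx, cy, w, h (relative)
locations = paddle.zeros([1, 1, 4])                 # one image, one prior
print(convert_locations_to_boxes(locations, priors, 0.1, 0.2))
# [[[0.5, 0.5, 0.2, 0.2]]]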
Example #27
def soft_nms(box_scores, score_threshold, sigma=0.5, top_k=-1):
    """Soft NMS implementation.

    References:
        https://arxiv.org/abs/1704.04503
        https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx

    Args:
        box_scores (N, 5): boxes in corner-form and probabilities.
        score_threshold: boxes with scores less than value are not considered.
        sigma: the parameter in score re-computation.
            scores[i] = scores[i] * exp(-(iou_i)^2 / sigma)
        top_k: keep top_k results. If k <= 0, keep all the results.
    Returns:
         picked_box_scores (K, 5): results of NMS.
    """
    picked_box_scores = []
    while box_scores.shape[0] > 0:
        max_score_index = paddle.argmax(box_scores[:, 4])
        cur_box_prob = paddle.to_tensor(box_scores[max_score_index, :])
        picked_box_scores.append(cur_box_prob)
        if len(picked_box_scores) == top_k > 0 or box_scores.shape[0] == 1:
            break
        cur_box = cur_box_prob[:-1]
        box_scores[max_score_index, :] = box_scores[-1, :]
        box_scores = box_scores[:-1, :]
        ious = iou_of(cur_box.unsqueeze(0), box_scores[:, :-1])
        box_scores[:,
                   -1] = box_scores[:, -1] * paddle.exp(-(ious * ious) / sigma)
        box_scores = box_scores[box_scores[:, -1] > score_threshold, :]
    if len(picked_box_scores) > 0:
        return paddle.stack(picked_box_scores)
    else:
        return paddle.to_tensor([])
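A small usage sketch (assuming the iou_of helper from the same module):

import paddle

box_scores = paddle.to_tensor([
    [0., 0., 10., 10., 0.9],
    [1., 1., 11., 11., 0.8],    # heavy overlap: its score decays
    [50., 50., 60., 60., 0.7],  # disjoint: its score survives
])
kept = soft_nms(box_scores, score_threshold=0.3)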
Example #28
def ddpm_steps(x, seq, model, b, **kwargs):
    with paddle.no_grad():
        n = x.shape[0]
        seq_next = [-1] + list(seq[:-1])
        xs = [x]
        x0_preds = []
        betas = b
        for i, j in zip(reversed(seq), reversed(seq_next)):
            t = (paddle.ones([n]) * i)
            next_t = (paddle.ones([n]) * j)
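            # compute_alpha (defined elsewhere) is assumed to return the
            # cumulative product of (1 - beta) up to the given step, shaped
            # for broadcasting against x.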
            at = compute_alpha(betas, t.astype('int64'))
            atm1 = compute_alpha(betas, next_t.astype('int64'))
            beta_t = 1 - at / atm1
            x = xs[-1]

            output = model(x, t.astype('float32'))
            e = output

            x0_from_e = (1.0 / at).sqrt() * x - (1.0 / at - 1).sqrt() * e
            x0_from_e = paddle.clip(x0_from_e, -1, 1)
            x0_preds.append(x0_from_e)
            mean_eps = ((atm1.sqrt() * beta_t) * x0_from_e +
                        ((1 - beta_t).sqrt() * (1 - atm1)) * x) / (1.0 - at)

            mean = mean_eps
            noise = paddle.randn(x.shape)
            mask = 1 - (t == 0).astype('float32')
            mask = mask.reshape([-1, 1, 1, 1])
            logvar = beta_t.log()
            sample = mean + mask * paddle.exp(0.5 * logvar) * noise
            xs.append(sample)
    return xs, x0_preds
Example #29
    def forward(self, inputs):
        #deal with features of different lengths:
        #1. pad to the same length and build one tensor
        #2. build a mask tensor of the same shape, filled with 1s
        #3. compute the output using the mask, so the output is unaffected by padding
        assert (len(inputs) == self.feature_num
                ), "Input tensor does not contain {} features".format(
                    self.feature_num)
        att_outs = []
        for i in range(len(inputs)):
            ###1. fc
            m = getattr(self, "fc_feature{}".format(i))
            output_fc = m(inputs[i][0])
            output_fc = paddle.tanh(output_fc)

            ###2. bi_lstm
            m = getattr(self, "bi_lstm{}".format(i))
            lstm_out, _ = m(inputs=output_fc, sequence_length=inputs[i][1])

            lstm_dropout = self.dropout(lstm_out)

            ###3. att_fc
            m = getattr(self, "att_fc{}".format(i))
            lstm_weight = m(lstm_dropout)

            ###4. softmax replacement start (it involves a sum over time steps)
            lstm_exp = paddle.exp(lstm_weight)
            lstm_mask = paddle.mean(inputs[i][2], axis=2)
            lstm_exp_with_mask = paddle.multiply(x=lstm_exp,
                                                 y=lstm_mask,
                                                 axis=0)
            lstm_sum_with_mask = paddle.sum(lstm_exp_with_mask, axis=1)
            exponent = -1
            lstm_denominator = paddle.pow(lstm_sum_with_mask, exponent)
            lstm_softmax = paddle.multiply(x=lstm_exp,
                                           y=lstm_denominator,
                                           axis=0)
            lstm_weight = lstm_softmax
            ###softmax replacement end

            lstm_scale = paddle.multiply(x=lstm_dropout, y=lstm_weight, axis=0)

            ###5. sequence_pool replacement start (it involves a sum over time steps)
            lstm_scale_with_mask = paddle.multiply(x=lstm_scale,
                                                   y=lstm_mask,
                                                   axis=0)
            fea_lens = inputs[i][1]
            fea_len = int(fea_lens[0])
            lstm_pool = paddle.sum(lstm_scale_with_mask, axis=1)
            ###sequence_pool replacement end
            att_outs.append(lstm_pool)
        att_out = paddle.concat(att_outs, axis=1)
        fc_out1 = self.fc_out1(att_out)
        fc_out1_act = self.relu(fc_out1)
        fc_out2 = self.fc_out2(fc_out1_act)
        fc_out2_act = paddle.tanh(fc_out2)
        fc_logit = self.fc_logit(fc_out2_act)
        output = self.sigmoid(fc_logit)
        return fc_logit, output
Example #30
 def sampling(self, z_mean, z_log_var):
     """
     Reparameterization trick 
     """
     # By default, random_normal has mean=0 and std=1.0
     epsilon = paddle.normal(shape=(z_mean.shape[0], self.latent_size))
     epsilon.stop_gradient = True
     return z_mean + paddle.exp(0.5 * z_log_var) * epsilon
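Since z_log_var stores log(sigma^2), paddle.exp(0.5 * z_log_var) recovers sigma, so the sample is z = mean + sigma * epsilon with epsilon ~ N(0, I); stopping the gradient on epsilon keeps the noise out of backpropagation.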