def get_sample_region(self, gt, strides, num_points_per, gt_xs, gt_ys, radius=1):
     """Mask the sample points that fall inside a stride-scaled center region of each GT box.

     Args:
         gt: ground-truth boxes, shape (num_gts, 4) -- presumably (x1, y1, x2, y2);
             TODO confirm against caller.
         strides: per-FPN-level strides, indexed by level.
         num_points_per: number of sample points per level; entries sum to len(gt_xs).
         gt_xs, gt_ys: x / y coordinates of the K sample points.
         radius: multiplier applied to each level's stride to size the center region.

     Returns:
         (K, num_gts) mask, nonzero where point k lies strictly inside the
         (box-clipped) center region of box g.
     """
     num_gts = gt.shape[0]
     K = len(gt_xs)
     # Broadcast the boxes across all K points: (K, num_gts, 4).
     gt = gt[None].expand(K, num_gts, 4)
     center_x = (gt[..., 0] + gt[..., 2]) / 2
     center_y = (gt[..., 1] + gt[..., 3]) / 2
     center_gt = gt.new_zeros(gt.shape)
     # No GT boxes present (heuristic: x-centers of the first box column sum
     # to 0) -> return an all-zero uint8 mask of point shape.
     if center_x[..., 0].sum() == 0:
         return gt_xs.new_zeros(gt_xs.shape, dtype='uint8')
     beg = 0
     # Per level: build a square of half-side stride*radius around each box
     # center, then clip it to the box itself.  beg:end selects the rows
     # (points) belonging to this level.
     for level, n_p in enumerate(num_points_per):
         end = beg + n_p
         stride = strides[level] * radius
         xmin = center_x[beg:end] - stride
         ymin = center_y[beg:end] - stride
         xmax = center_x[beg:end] + stride
         ymax = center_y[beg:end] + stride
         # limit sample region in gt: intersect the center square with the box
         center_gt[beg:end, :, 0] = jt.ternary(xmin > gt[beg:end, :, 0], xmin, gt[beg:end, :, 0])
         center_gt[beg:end, :, 1] = jt.ternary(ymin > gt[beg:end, :, 1], ymin, gt[beg:end, :, 1])
         center_gt[beg:end, :, 2] = jt.ternary(xmax > gt[beg:end, :, 2], gt[beg:end, :, 2], xmax)
         center_gt[beg:end, :, 3] = jt.ternary(ymax > gt[beg:end, :, 3], gt[beg:end, :, 3], ymax)
         beg = end
     # Signed distances from each point to the four region edges; all four are
     # positive exactly when the point is strictly inside the region.
     left = gt_xs[:, None] - center_gt[..., 0]
     right = center_gt[..., 2] - gt_xs[:, None]
     top = gt_ys[:, None] - center_gt[..., 1]
     bottom = center_gt[..., 3] - gt_ys[:, None]
     center_bbox = jt.stack((left, top, right, bottom), -1)
     inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
     return inside_gt_bbox_mask
# Example 2
 def test_with_np(self):
     """Check jt.ternary against np.maximum and verify its gradients."""
     np.random.seed(0)
     a = np.random.rand(5,10).astype("float32")
     b = np.random.rand(5,10).astype("float32")
     lhs = jt.array(a)
     rhs = jt.array(b)
     # Elementwise maximum via ternary select.
     jc = jt.ternary(lhs > rhs, lhs, rhs)
     assert (jc.data==np.maximum(a,b)).all(), f"\n{jc.data}\n{np.maximum(a,b)}\n{a}\n{b}"
     grad_lhs, grad_rhs = jt.grad(jc, [lhs, rhs])
     # The gradient routes entirely to whichever input was selected per element.
     assert (grad_lhs.data == (a > b) * 1).all()
     assert (grad_rhs.data == 1 - (a > b)).all()
# Example 3
def cross_entropy_loss(output, target, ignore_index=None):
    """Softmax cross-entropy loss.

    Args:
        output: logits, shape (N, C) or (N, C, H, W).
        target: integer class indices matching output's spatial layout.
        ignore_index: label value excluded from the loss, or None.

    Returns:
        Scalar loss averaged over the non-ignored elements.
    """
    if len(output.shape) == 4:
        # Flatten (N, C, H, W) -> (N*H*W, C) so every row is one prediction.
        c_dim = output.shape[1]
        output = output.transpose((0, 2, 3, 1))
        output = output.reshape((-1, c_dim))
    if ignore_index is not None:
        # Remap ignored labels to -1 so they match no class column below.
        target = jt.ternary(target == ignore_index,
                            jt.array(-1).broadcast(target), target)
        mask = jt.logical_and(target >= 0, target < output.shape[1])
    target = target.reshape((-1, ))
    target = target.broadcast(output, [1])
    # One-hot encode: row-wise comparison of the column index with the label.
    target = target.index(1) == target

    # Numerically stable log-sum-exp.
    output = output - output.max([1], keepdims=True)
    loss = output.exp().sum(1).log()
    loss = loss - (output * target).sum(1)
    if ignore_index is None:
        return loss.mean()
    else:
        # BUG FIX: ignored positions have an all-zero one-hot row, so their
        # per-element loss equals log-sum-exp(output), which is nonzero.
        # Previously these terms were still included in the numerator sum
        # (only the denominator excluded them); zero them out via the mask.
        mask = mask.reshape((-1, ))
        return (loss * mask.int()).sum() / jt.maximum(mask.int().sum(), 1)
# Example 4
def leaky_relu(x, scale):
    """LeakyReLU: identity for positive inputs, x * scale otherwise."""
    scaled_negative = x * scale
    return jt.ternary(x > 0, x, scaled_negative)
# Example 5
def sign(x):
    """Elementwise sign: 1 for positives, -1 for negatives, 0 stays 0."""
    ones = jt.ones(x.shape)
    # First clamp positives to +1, then clamp negatives to -1.
    clamped_pos = jt.ternary(x > 0, ones, x)
    return jt.ternary(clamped_pos < 0, -ones, clamped_pos)
# Example 6
def leaky_relu(x, scale=0.01):
    """LeakyReLU with a default negative slope of 0.01."""
    return jt.ternary(x > 0, x, x * scale)
def relu6(x):
    """ReLU capped at 6, i.e. min(max(x, 0), 6)."""
    rectified = jt.maximum(x, 0)
    return jt.minimum(rectified, 6)
# Example 7
def leaky_relu(x, scale):
    """LeakyReLU activation with a configurable negative slope."""
    negative_branch = x * scale
    return jt.ternary(x > 0, x, negative_branch)

# TODO: 4-D input causes slow execution
def cross_entropy_loss(output, target, ignore_index=None):
# Example 8
def sign(x):
    """Elementwise sign via two ternary selects; zeros are left unchanged."""
    # Positives collapse to 1 first, then negatives collapse to -1.
    pos_done = jt.ternary(x > 0, 1, x)
    return jt.ternary(pos_done < 0, -1, pos_done)