Example #1
File: utils.py Project: jpata/particleflow
def batch_event_into_regions(data, regions):
    """
    Given an event and a set of regions in (eta,phi) space, returns a binned version of the event.

    Args
        data: a Batch() object containing the event and its information
        regions: a tuple of tuples containing the defined regions to bin an event (see define_regions)

    Returns
        data: a modified Batch() object based on data, where data.batch separates the events into the different bins
    """

    x = None
    for region in range(len(regions)):
        in_region_msk = ((data.x[:, 2] > regions[region][0][0])
                         & (data.x[:, 2] < regions[region][0][1])
                         & (torch.arcsin(data.x[:, 3]) > regions[region][1][0])
                         & (torch.arcsin(data.x[:, 3]) < regions[region][1][1]))

        if in_region_msk.sum() != 0:  # if region is not empty
            if x is None:  # first iteration
                x = data.x[in_region_msk]
                ygen = data.ygen[in_region_msk]
                ygen_id = data.ygen_id[in_region_msk]
                ycand = data.ycand[in_region_msk]
                ycand_id = data.ycand_id[in_region_msk]
                batch = region + torch.zeros(
                    [len(data.x[in_region_msk])]
                )  # assumes events were already fed one at a time (i.e. batch_size=1)
            else:
                x = torch.cat([x, data.x[in_region_msk]])
                ygen = torch.cat([ygen, data.ygen[in_region_msk]])
                ygen_id = torch.cat([ygen_id, data.ygen_id[in_region_msk]])
                ycand = torch.cat([ycand, data.ycand[in_region_msk]])
                ycand_id = torch.cat([ycand_id, data.ycand_id[in_region_msk]])
                batch = torch.cat(
                    [
                        batch,
                        region + torch.zeros([len(data.x[in_region_msk])])
                    ]
                )  # assumes events were already fed one at a time (i.e. batch_size=1)

    data = Batch(
        x=x,
        ygen=ygen,
        ygen_id=ygen_id,
        ycand=ycand,
        ycand_id=ycand_id,
        batch=batch.long(),
    )
    return data
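
A minimal usage sketch (my own illustration, not from the project), assuming Batch is torch_geometric.data.Batch (the function itself constructs one directly) and that define_regions yields ((eta_min, eta_max), (phi_min, phi_max)) tuples; feature column 2 is treated as eta and column 3 as sin(phi), matching the mask above.

# Hypothetical usage; random stand-ins for real detector data (batch_size=1).
import torch
from torch_geometric.data import Batch

n = 100
event = Batch(
    x=torch.rand(n, 6),                       # column 2 ~ eta, column 3 ~ sin(phi)
    ygen=torch.rand(n, 6), ygen_id=torch.randint(0, 5, (n,)),
    ycand=torch.rand(n, 6), ycand_id=torch.randint(0, 5, (n,)),
    batch=torch.zeros(n, dtype=torch.long),
)
regions = (((0.0, 0.5), (0.0, 1.6)), ((0.5, 1.0), (0.0, 1.6)))
binned = batch_event_into_regions(event, regions)
print(binned.batch.unique())                  # one bin index per non-empty region
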
Example #2
def approx_rot_dist(q1,q2,beta=0.1):
    """
    Computes an approximate rotational distance between q1 and q2 with bounded
    derivatives. In the equation converting Euclidean distance to rotational
    distance, the derivative approaches infinity as dist -> 2, so for
    dist > 2 - beta the function is extended linearly.
    """
    t = 2 - beta

    # parameters for the linear extension
    m = 1/np.sqrt(beta - (beta**2)/4)
    b = 2*np.arcsin(1 - beta/2) - m*t

    # Compute distance for q and -q
    q1_w_neg = torch.stack((q1,-q1),dim=-2)
    q2 = q2[...,None,:]
    d = torch.norm(q1_w_neg-q2,dim=-1)

    # For d < 2 - beta, use the arcsin equation;
    # for d >= 2 - beta, use the linear extension
    d_clip = torch.clamp(d,max=t)
    y_lin = m*d + b
    y_rot = 2*torch.arcsin(d_clip/2)
    y_out = y_rot * (d < t).float() + y_lin * (d >= t).float()

    # Of the distances for q and -q, take the smaller
    y_out_min = y_out.min(-1)[0]
    
    return y_out_min
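
A small self-contained check (illustration only, assuming approx_rot_dist above is in scope): the distance stays differentiable with finite gradients even for nearly antipodal quaternions, which is what the linear extension buys.

import numpy as np  # needed by approx_rot_dist above
import torch
import torch.nn.functional as F

q1 = F.normalize(torch.randn(8, 4), dim=-1)
q2 = F.normalize(torch.randn(8, 4), dim=-1).requires_grad_(True)

dist = approx_rot_dist(q1, q2, beta=0.1)      # shape (8,)
dist.sum().backward()
print(torch.isfinite(q2.grad).all())          # tensor(True)
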
Example #3
    def __getitem__(self, item):
        """
        img: (3, H, W)
        coord: (n_points, 2)
        img_coord: (3, n_points)
        """
        img_path = os.path.join(self.data_root, 'images',
                                'train_%d' % self.group, self.data_list[item])
        img_0 = Image.open(img_path).convert('RGB')
        img = self.transform(img_0)
        # coord = torch.randn(self.n_point, 2) / 3
        # coord = coord.clip(-1, 1)
        coord = torch.arcsin((torch.rand(self.n_point, 2) - 0.5) * 2) / 1.5
        coord = coord.clip(-1, 1)
        # sample image points from original resolution
        img_coord = F.grid_sample(
            self.tot(img_0)[None],
            coord[None, None, :, :])  # img_coord: (1, 3, 1, n_points)

        # (n_P, 2) -> (n_P, 16 * 2) -> (n_P, 16, 2)
        coord_mapped = coord.repeat(1, 16).view(self.n_point, 16, 2)
        # (n_P, 16, 2) * (1, 16, 2) -> (n_P, 16, 2)
        coord_mapped = coord_mapped * torch.exp2(
            (torch.arange(0, 16) / 2)[:, None].repeat(1, 2))[None]
        # (n_P, 32)
        coord_mapped = torch.sin(coord_mapped.view(self.n_point, 32))

        return img, coord_mapped, img_coord.view(3, self.n_point)
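
The coord_mapped lines implement a Fourier-feature style positional encoding; a stand-alone paraphrase (my own, equivalent to the repeat/view gymnastics above):

# Each 2-D coordinate is scaled by 16 exponentially spaced frequencies
# (2 ** (i / 2)) and passed through sin, giving a 32-dim feature per point.
import torch

def encode_coords(coord, n_freq=16):
    freqs = torch.exp2(torch.arange(n_freq) / 2)          # (16,)
    mapped = coord[:, None, :] * freqs[None, :, None]     # (n_points, 16, 2)
    return torch.sin(mapped.reshape(coord.shape[0], -1))  # (n_points, 32)

print(encode_coords(torch.rand(128, 2) * 2 - 1).shape)    # torch.Size([128, 32])
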
Example #4
 def forward(self, x, Psi):
     a = torch.arcsin(torch.sqrt(closure(x)))  # B x D, arcsine-sqrt transform
     a = torch.log(closure(x + 1))             # overwrites the line above with a log transform
     a = a - a.mean(axis=1).reshape(-1, 1)     # center around mean
     x_ = a[:, :, None] * self.embed           # B x D x H
     fx = self.ffn(x_).squeeze()
     fx = (Psi @ fx.T).T                       # B x D-1
     return fx
Example #5
def pto_ang_map(pc_velo, H=64, W=512, slice=1):
    """
        :param H: the row num of depth map, could be 64(default), 32, 16
        :param W: the col num of depth map
        :param slice: output every slice lines
        """
    valid_inds = torch.where((pc_velo[:, 0] < 120) &
                             (pc_velo[:, 0] >= 0) &
                             (pc_velo[:, 1] < 50) &
                             (pc_velo[:, 1] >= -50) &
                             (pc_velo[:, 2] < 1.5) &
                             (pc_velo[:, 2] >= -2.5))

    pc_velo = pc_velo[valid_inds]

    def radians(x):
        return x * 0.0174532925

    dtheta = radians(0.4 * 64.0 / H)
    dphi = radians(90.0 / W)

    x, y, z = pc_velo[:, 0], pc_velo[:, 1], pc_velo[:, 2]

    x_y = x**2 + y**2

    d = torch.sqrt(x_y + z**2)
    r = torch.sqrt(x_y)
    d = d.clamp(0.000001)
    r = r.clamp(0.000001)
    phi = radians(45.) - torch.arcsin(y / r)
    phi_ = (phi / dphi).long()
    phi_ = phi_.clamp(min=0, max=W - 1)

    theta = radians(2.) - torch.arcsin(z / d)
    theta_ = (theta / dtheta).long()
    theta_ = theta_.clamp(min=0, max=H - 1)

    depth_map = -torch.ones((H, W, 3), device=pc_velo.device)
    depth_map[theta_, phi_, 0] = x
    depth_map[theta_, phi_, 1] = y
    depth_map[theta_, phi_, 2] = z
    depth_map = depth_map[0::slice, :, :]
    depth_map = depth_map.reshape((-1, 3))
    depth_map = depth_map[depth_map[:, 0] != -1.0]
    return depth_map
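
An illustrative call (not from the source project): project a random LiDAR-like point cloud, restricted to the valid x/y/z ranges filtered above, onto the 64 x 512 angular grid.

import torch

pc = torch.rand(10000, 4)               # columns: x, y, z, intensity
pc[:, 0] *= 120.0                       # x in [0, 120)
pc[:, 1] = pc[:, 1] * 100.0 - 50.0      # y in [-50, 50)
pc[:, 2] = pc[:, 2] * 4.0 - 2.5         # z in [-2.5, 1.5)

sparse_map = pto_ang_map(pc, H=64, W=512, slice=1)
print(sparse_map.shape)                 # (n_filled_cells, 3): one (x, y, z) per cell
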
Example #6
    def _step_symplectic(self, func, y, t, h):
        dy = torch.zeros(y.size(), dtype=self.dtype, device=self.device)
        n = y.size(-1) // 2

        dy[..., n:] = y[..., :n] - y[..., n:]

        k_ = func(t + self.eps, y[..., :n])

        sin_q_delta = torch.sin(y[..., :n] - y[..., n:]) + (h**2) * k_
        dy[..., :n] = torch.arcsin(torch.clip(sin_q_delta, -(1.-1e-4), 1-1e-4))

        return dy
Example #7
File: math_ops.py Project: malfet/pytorch
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0,
                                                              -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4,
                                                             -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
Example #8
    def _calculate_absolute_boxes_from_anchors(self, pred_anchors: torch.Tensor) -> torch.Tensor:
        """Returns a view of all predicted, absolute bounding boxes.

        :param pred_anchors: tensor(batch_size, img_length, img_width, nbr_anchors, nbr_attributes=7)

        :returns: tensor(batch_size, img_length, img_width, nbr_anchors, nbr_attributes=7)
        """
        logger.info("Calculating absolute_boxes_from_anchors...")

        bs, n_x, n_y, n_a = pred_anchors.shape[0:4]
        x_min =  self.pillars_cfg.getfloat('x_min')
        y_min =  self.pillars_cfg.getfloat('y_min')
        x_step = (self.pillars_cfg.getfloat('x_max') - x_min) / n_x
        y_step = (self.pillars_cfg.getfloat('y_max') - y_min) / n_y

        # create tensor containing the pillar indices in the correct dimension
        x = torch.cuda.FloatTensor(range(n_x)).unsqueeze(1).expand(n_x, n_y)
        y = torch.cuda.FloatTensor(range(n_y),).unsqueeze(0).expand(n_x, n_y)
        ind = torch.stack((x,y), dim=2).unsqueeze(0).expand(bs, -1, -1, -1)
        del x, y

        # add anchors to receive other absolute values
        anchors_tens = torch.cuda.FloatTensor(self.anchors)
        anchors_tens = anchors_tens.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand(bs, n_x, n_y, -1, -1)

        # step with current pseudo image size
        # calculate pillar center from index
        pil_xy = ind * torch.cuda.FloatTensor([x_step, y_step])
        pil_xy += torch.cuda.FloatTensor([x_min, y_min])

        # expand for anchor nbr
        pil_xy = pil_xy.unsqueeze(3).expand(-1, -1, -1, n_a, -1)

        diag = torch.sqrt(torch.pow(pred_anchors[:,:,:,:,5], 2) + torch.pow(pred_anchors[:,:,:,:,4], 2))
        diag = diag.unsqueeze(4).expand(-1, -1, -1, -1, 2)
        # add center to offset
        pred_anchors_xy = pred_anchors[:,:,:,:,:2] * diag + pil_xy

        # add anchors z offset
        pred_anchors_z = pred_anchors[:,:,:,:,2] * pred_anchors[:,:,:,:,3] + anchors_tens[:,:,:,:,0]
        pred_anchors_z = pred_anchors_z.unsqueeze(4)
        # bb_hwl = exp(pred_hwl) * gt_hwl
        pred_anchors_hwl = torch.exp(pred_anchors[:,:,:,:,3:6]) * anchors_tens[:,:,:,:,1:4]
        #pred_anchors_hwl = F.softplus(pred_anchors[:,:,:,:,3:6]) * anchors_tens[:,:,:,:,1:4]

        # add heading offset in radians
        #pred_anchors_theta = -2 * pi * torch.sigmoid(pred_anchors[:,:,:,:,6]) + anchors_tens[:,:,:,:,4]
        epsilon = 1e-7
        pred_anchors_theta = torch.clamp(pred_anchors[:,:,:,:,6], -1 + epsilon, +1 - epsilon)
        pred_anchors_theta = -1 * torch.arcsin(pred_anchors_theta) + anchors_tens[:,:,:,:,4]
        pred_anchors_theta = pred_anchors_theta.unsqueeze(4)

        pred_anchors = torch.cat((pred_anchors_xy, pred_anchors_z, pred_anchors_hwl, pred_anchors_theta), dim=4)
        del anchors_tens, pred_anchors_z, pred_anchors_hwl, pred_anchors_theta

        """
         # h, l, w must be positive even with random weights, use softplus to ensure this
        pred_anchors_hwl = pred_anchors[:,:,:,:,3:6]
        pred_anchors_hwl = F.softplus(pred_anchors_hwl)
        pred_anchors = torch.cat((pred_anchors[:,:,:,:,:3], pred_anchors_hwl, pred_anchors[:,:,:,:,6:]), dim=4)
        del pred_anchors_hwl
        """

        logger.debug(f"Absolute boxes from anchors calculation complete.\n"
                     f"pred_anchors: {pred_anchors}{pred_anchors.shape}")

        return pred_anchors
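
Isolating the heading decode (my illustration, not the project's API): the raw prediction is clamped just inside (-1, 1) so arcsin keeps a finite derivative, then negated and added to the anchor heading.

import torch

epsilon = 1e-7
raw_sin = torch.tanh(torch.randn(2, 4, 4, 6))    # stand-in for pred_anchors[..., 6]
anchor_theta = torch.zeros(2, 4, 4, 6)           # stand-in anchor headings
theta = -torch.arcsin(torch.clamp(raw_sin, -1 + epsilon, 1 - epsilon)) + anchor_theta
print(theta.shape)                               # torch.Size([2, 4, 4, 6])
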
Example #9
def f(x):
    return ln(arctan(x)) / (arcsin(x))**2
Example #10
 def forward(self, x, Psi):
     a = torch.arcsin(torch.sqrt(closure(x)))  # B x D
     x_ = a[:, :, None] * self.embed           # B x D x H
     fx = self.ffn(x_).squeeze()
     fx = (Psi @ fx.T).T                         # B x D-1
     return fx
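
closure is not defined in the snippet; in compositional-data code it usually normalizes each row to proportions, which makes arcsin(sqrt(.)) the classic variance-stabilizing transform. A minimal sketch under that assumption:

import torch

def closure(x):                                  # hypothetical helper, not from the source
    return x / x.sum(dim=-1, keepdim=True)

counts = torch.rand(2, 5) * 100
a = torch.arcsin(torch.sqrt(closure(counts)))    # values in [0, pi/2]
print(a.shape)                                   # torch.Size([2, 5])
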
Example #11
 def r(theta, alpha=alpha, D=distance, b=b):
     return D * torch.sin(alpha) / torch.sin(
         np.pi - torch.abs(theta[:, None]) - torch.arcsin(D / b * torch.sin(alpha)))
Example #12
def arcsin(a: Numeric):
    return torch.arcsin(a)
Example #13
import torch

a = torch.randn(4)  # stand-in; `a` is defined earlier in the original snippet

# addcdiv
torch.addcdiv(torch.randn(1, 3),
              torch.randn(3, 1),
              torch.randn(1, 3),
              value=0.1)

# addcmul
torch.addcmul(torch.randn(1, 3),
              torch.randn(3, 1),
              torch.randn(1, 3),
              value=0.1)

# angle
torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) * 180 / 3.14159

# asin/arcsin
torch.asin(a)
torch.arcsin(a)

# asinh/arcsinh
torch.asinh(a)
torch.arcsinh(a)

# atan/arctan
torch.atan(a)
torch.arctan(a)

# atanh/arctanh
torch.atanh(a.uniform_(-1, 1))
torch.arctanh(a.uniform_(-1, 1))

# atan2
torch.atan2(a, a)
Example #14
def forward(self, coords, act_fn=None, dim=2):
    h, w = coords.shape[:2]
    coords = coords.reshape(-1, dim)
    s = int(coords.shape[0]**0.5)
    if act_fn is not None:
        self.net._periodic_activation = act_fn
    output = self.net(coords)
    return output.view(1, h, w, 3).permute(0, 3, 1, 2)


# Periodic activation functions
x = torch.linspace(-2 * np.pi, 2 * np.pi, 100)
triangle = lambda x: (2 / np.pi) * torch.arcsin(torch.sin(x))
funky = lambda x: (torch.relu(torch.sin(x)) - torch.relu(
    torch.sin(x * 0.5 + np.pi * 0.5)))
funky = lambda x: torch.sin(x)
funky = lambda x: (-2 / np.pi) * torch.arctan(1 / torch.tan(x / 2 + np.pi * 0.5
                                                            ))
funky = lambda x: torch.arccos(torch.cos(x + np.pi * 0.5)) / np.pi * 2 - 1
funky = lambda x: torch.relu(torch.cos(x)) - torch.relu(
    torch.cos(x + np.pi * 0.5))
step = 0
funky_step = lambda x: torch.relu(torch.cos(x + step)) - torch.relu(
    torch.cos(x + step + np.pi * 0.5))
# funky = triangle

plt.plot(x, torch.sin(x), label="sin")
# plt.plot(x, torch.cos(x), label='cos')
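
Of the activations above, the triangle lambda is the arcsin-based one; a minimal stand-alone plot of it (my own sketch):

# (2 / pi) * arcsin(sin(x)) is a triangle wave with period 2 * pi and range [-1, 1].
import numpy as np
import torch
import matplotlib.pyplot as plt

x = torch.linspace(-2 * np.pi, 2 * np.pi, 100)
plt.plot(x, (2 / np.pi) * torch.arcsin(torch.sin(x)), label="triangle")
plt.legend()
plt.show()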