Example No. 1
def divide(a, b, limit=1, tol=1e-6):
    a = tp.tensor(a)
    b = tp.tensor(b)
    a_s, b_s = a.shape ^ b.shape
    a = a.view(a_s)
    b = b.view(b_s)
    shape = tp.Size(max(x, y) for x, y in zip(a_s, b_s))
    return tp.where(b.abs() < tol, limit * tp.ones(shape), a / tp.where(b.abs() < tol, tol * tp.ones(shape), b))
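
A minimal usage sketch of the helper above (assuming tp.tensor accepts Python lists and broadcasts like torch): denominators whose magnitude falls below tol return the limit value instead of being divided.

# Hypothetical usage; the output assumes torch-like elementwise semantics.
q = divide([1., 2., 3.], [0., 2., 1e-9], limit=0.)
# expected: [0., 1., 0.]  -- near-zero denominators yield `limit` rather than inf/nan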
Example No. 2
def dBspline(i, U):
    i = tp.tensor(i)
    U = tp.tensor(U)
    return (tp.where(
        i == -1, -3 * (1 - U)**2 / 6,
        tp.where(
            i == 0, 3 * U**2 / 2 - 2 * U,
            tp.where(i == 1, (-3 * U**2 + 2 * U + 1) / 2,
                     tp.where(i == 2, 3 * U**2 / 6, tp.zeros_like(U))))))
Example No. 3
def Bspline(i, U):
    i = tp.tensor(i)
    U = tp.tensor(U)
    return (tp.where(
        i == -1, (1 - U)**3 / 6,
        tp.where(
            i == 0, U**3 / 2 - U * U + 2 / 3,
            tp.where(i == 1, (-3 * U**3 + 3 * U * U + 3 * U + 1) / 6,
                     tp.where(i == 2, U**3 / 6, tp.zeros_like(U))))))
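
A quick numerical check of the pieces used in Examples 2 and 3 (plain Python, no tp dependency): the four cubic B-spline basis functions form a partition of unity for u in [0, 1), and the dBspline pieces are their derivatives, so they sum to zero.

u = 0.3
B = [(1 - u)**3 / 6, u**3 / 2 - u*u + 2/3,
     (-3*u**3 + 3*u*u + 3*u + 1) / 6, u**3 / 6]
dB = [-3 * (1 - u)**2 / 6, 3 * u**2 / 2 - 2 * u,
      (-3*u**2 + 2*u + 1) / 2, 3 * u**2 / 6]
assert abs(sum(B) - 1) < 1e-12   # partition of unity
assert abs(sum(dB)) < 1e-12      # derivatives sum to zero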
Example No. 4
    def __init__(self,
                 num_features: int,
                 eps: float = 1e-5,
                 momentum: float = 0.1,
                 affine: bool = True,
                 track_running_stats: bool = True) -> None:
        super(_NormBase, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()
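
This is the shared _NormBase constructor behind PyTorch's BatchNorm and InstanceNorm layers. A short check of what it registers, using the public torch API:

import torch
bn = torch.nn.BatchNorm1d(8, affine=True, track_running_stats=True)
assert bn.weight.shape == (8,) and bn.bias.shape == (8,)  # learnable Parameters
assert bn.running_mean.shape == (8,)                       # buffer, initialised to zeros
assert bn.num_batches_tracked.item() == 0                  # buffer counting update steps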
Example No. 5
def to_image(data: Array,
             nslice: [int, null] = None,
             dim: int = -1,
             has_cmap=False):
    data = tp.tensor(data).squeeze()
    if data.ndim <= 1:
        raise TypeError(
            "Please don't use 'plot.imshow' to demonstrate an array or a scalar. "
        )
    if data.nspace > 3:
        raise TypeError(
            f"'plot.imshow' takes 2 or 3D-data as input (currently {data.shape}), please reduce the dimension manually or specify special dimensions to reduce. "
        )
    if data.nspace == 3:
        if data.has_batch: data = data.sample(random=False, dim=[])
        if data.has_channel: data = data.sample(random=False, dim={})
        if nslice is None:
            if data.space[-1] <= 3: pass
            elif data.space[0] <= 3: data = data.mvdim(0, 2)
            else:
                nslice = data.space[-1] // 2
                data = data.pick(nslice, dim)
        else:
            data = data.pick(nslice, dim)
    elif data.nspace == 2:
        if data.has_batch: data = data.sample(random=False, dim=[])
        if data.has_channel:
            if has_cmap: data = data.sample(random=False, dim={})
            else:
                data = data.sample(number=min(data.channel_size, 3),
                                   random=False,
                                   dim={}).mvdim(data.channel_dimension, -1)
    elif data.ndim == 3:
        data = data.sample(random=False, dim=[])
    return data.float().normalize()
Example No. 6
def down_scale(image, *scaling:int):
    image = tp.tensor(image)
    if len(scaling) == 0:
        scaling = (1,)
    elif len(scaling) == 1 and iterable(scaling[0]):
        scaling = scaling[0]
    if len(scaling) == 1:
        if isinstance(scaling[0], int):
            scaling *= image.nspace
            scaling = add_special(scaling, image.special, 1)
        else: raise TypeError("Unknown scaling type for 'down_scale'. ")
    elif len(scaling) < image.ndim and len(scaling) == image.nspace:
        scaling = add_special(scaling, image.special, 1)
    return image[tuple(slice(None, None, s) for s in scaling)]
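
down_scale reduces resolution by strided slicing; for a plain torch tensor the core operation is the same as striding each spatial dimension, e.g. for a scaling factor of 2:

import torch
x = torch.arange(16.).reshape(4, 4)
y = x[::2, ::2]                 # keep every second row and column
assert y.shape == (2, 2)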
Example No. 7
def grad_image(array):
    '''
        Gradient image of array
        array: (n_batch, n_feature, n_1, ..., n_{n_dim})
        output: (n_batch, n_dim, n_feature, n_1, ..., n_{n_dim})
    '''
    array = tp.tensor(array)
    grad_dim = int(array.has_batch)
    output = []
    for d in range(array.ndim):
        if d in array.special: continue
        b = (slice(None, None),) * d + (slice(2, None),) + (slice(None, None),) * (array.ndim - d - 1)
        a = (slice(None, None),) * d + (slice(None, -2),) + (slice(None, None),) * (array.ndim - d - 1)
        output.append(tp.crop_as((array[b] - array[a]) / 2, array))
    return tp.stack(output, {grad_dim})
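
grad_image stacks central finite differences over the spatial dimensions, i.e. each component is (array[..., k+1, ...] - array[..., k-1, ...]) / 2, cropped back to the input shape. A one-dimensional check with plain torch:

import torch
x = torch.tensor([0., 1., 4., 9., 16.])   # x[k] = k**2
central = (x[2:] - x[:-2]) / 2             # approximates d/dk k**2 = 2k at k = 1, 2, 3
assert torch.equal(central, torch.tensor([2., 4., 6.]))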
Example No. 8
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)

        if (version is None or version < 2) and self.track_running_stats:
            # at version 2: added num_batches_tracked buffer
            #               this should have a default value of 0
            num_batches_tracked_key = prefix + 'num_batches_tracked'
            if num_batches_tracked_key not in state_dict:
                state_dict[num_batches_tracked_key] = torch.tensor(
                    0, dtype=torch.long)

        super(_NormBase,
              self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                          strict, missing_keys,
                                          unexpected_keys, error_msgs)
Example No. 9
def border(mask, min_length=10):
    grid = tp.image_grid(*mask.shape)
    mask = mask > 0.5
    idx = mask[1:, :] ^ mask[:-1, :]
    idx = idx.expand_to(2, -1, mask.size(1))
    locs1 = (grid[:, 1:, :] + grid[:, :-1, :])[idx] / 2
    idx = mask[:, 1:] ^ mask[:, :-1]
    idx = idx.expand_to(2, mask.size(0), -1)
    locs2 = (grid[:, :, 1:] + grid[:, :, :-1])[idx] / 2
    locs = tp.cat(locs1.reshape(2, -1), locs2.reshape(2, -1), dim=1)
    if locs.size == 0: return []
    curves = []
    unvisited = tp.ones(locs.shape[-1])
    while True:
        if not any(unvisited): break
        first = tp.argmax(unvisited).item()
        cloc = locs[:, first:first + 1]
        unvisited[first] = 0
        curve = cloc
        while True:
            dissq = tp.sum((locs - cloc)**2, 0)
            inloc = tp.argmax(
                tp.where((unvisited > 0) & (dissq > 0),
                         1 / dissq.clamp(min=0.1),
                         tp.tensor(0).float()))
            if dissq[inloc] > 2: break
            cloc = locs[:, inloc:inloc + 1]
            curve = tp.cat(curve, cloc, dim=1)
            unvisited[inloc] = 0
            if not any(unvisited): break
        sloc = locs[:, first:first + 1]
        if tp.sum((cloc - sloc)**2) <= 2:
            curve = tp.cat(curve, sloc, dim=1)
        if curve.shape[1] <= min_length: continue
        scurve = curve
        for _ in range(100):
            scurve = constraint(smooth(scurve), scurve, curve)
        ccurve = scurve
        for _ in range(100):
            scurve = constraint(sharpen(scurve, curve), scurve, ccurve)
            scurve = constraint(smooth(scurve), scurve, curve)
        curves.append(scurve)
    return curves
Example No. 10
def clip_grad_norm_(parameters: _tensor_or_tensors,
                    max_norm: float,
                    norm_type: float = 2.0) -> torch.Tensor:
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.

    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    if norm_type == inf:
        total_norm = max(p.grad.detach().abs().max().to(device)
                         for p in parameters)
    else:
        total_norm = torch.norm(
            torch.stack([
                torch.norm(p.grad.detach(), norm_type).to(device)
                for p in parameters
            ]), norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            p.grad.detach().mul_(clip_coef.to(p.grad.device))
    return total_norm
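
Typical usage of the utility above inside a training step (standard torch API):

import torch
model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).pow(2).sum()
loss.backward()
total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
# after the call the global gradient norm is at most ~1.0 (up to the 1e-6 fudge term)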
Example No. 11
    def forward(ctx, I1, I2, nbin=100):
        with tp.no_grad():
            if hasattr(ctx, 'JH'): del ctx.JH
            nbin = tp.tensor(nbin)
            data_pair = tp.stack(I1.flatten(1), I2.flatten(1), dim={1})
            nbatch, nhist, ndata = data_pair.ishape
            indices = []
            values = []
            ctx.window = (tp.image_grid(4, 4) - 1).flatten(1).transpose(0, 1)
            for shift in ctx.window:
                # [nbatch] x {nhist} x ndata
                hist_pos = data_pair * nbin
                index = tp.clamp(
                    tp.floor(hist_pos).long() + shift, 0, nbin - 1)
                batch_idx = tp.arange(nbatch).expand_to([nbatch], {1}, ndata)
                index = tp.cat(batch_idx, index, 1)
                value = Bspline(shift.expand_to(data_pair),
                                tp.decimal(hist_pos)).prod(1)
                indices.append(index)
                values.append(value)
            # n_batch x (1 + n_hist) x (n_data x 4 ** n_hist)
            Mindices = tp.cat(indices, -1)
            # n_batch x (n_data x 4 ** n_hist)
            Mvalues = tp.cat(values, -1)
            # (1 + n_hist) x (n_batch x n_data x 4 ** n_hist)
            indices = Mindices.transpose(0, 1).flatten(1)
            # (n_batch x n_data x 4 ** n_hist)
            values = Mvalues.flatten(0)
            if tp.Device == tp.DeviceCPU: creator = torch.sparse.FloatTensor
            else: creator = torch.cuda.sparse.FloatTensor
            collected = creator(indices, values,
                                (nbatch, nbin, nbin)).to_dense()
            collected = tp.Tensor(collected, batch_dim=0)

            ctx.nbin = nbin
            ctx.Ishape = I1.shape
            ctx.data_pair = data_pair
            ctx.JH = collected / ndata
        return ctx.JH
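
The loop above scatters cubic B-spline (Parzen-window) weights into an nbin x nbin joint histogram of the two input images, the usual building block for mutual-information losses. Below is a dense, pure-NumPy reference of the same accumulation for a single image pair, assuming intensities already lie in [0, 1); the names are illustrative only, and the original additionally clamps indices at the histogram edges.

import numpy as np

def bspline3(t):
    # centred cubic B-spline kernel with support (-2, 2)
    t = np.abs(t)
    return np.where(t < 1, (4 - 6 * t**2 + 3 * t**3) / 6,
                    np.where(t < 2, (2 - t)**3 / 6, 0.0))

def joint_hist_dense(x, y, nbin=100):
    # jh[i, j] = mean_n  B3(i - x_n * nbin) * B3(j - y_n * nbin)
    i = np.arange(nbin)[:, None]
    return bspline3(i - x[None, :] * nbin) @ bspline3(i - y[None, :] * nbin).T / x.size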
Example No. 12
def up_scale(image, *scaling:int):
    image = tp.tensor(image)
    if len(scaling) == 0:
        scaling = (1,)
    elif len(scaling) == 1 and iterable(scaling[0]):
        scaling = scaling[0]
    if len(scaling) == 1:
        if isinstance(scaling[0], int):
            scaling *= image.nspace
            scaling = add_special(scaling, image.special, 1)
        else: raise TypeError("Unknown scaling type for 'up_scale'. ")
    elif len(scaling) < image.ndim and len(scaling) == image.nspace:
        scaling = add_special(scaling, image.special, 1)
    for i, s in enumerate(scaling):
        image = (
            image
            .transpose(i, -1)
            .unsqueeze(-1)
            .repeat((1,) * image.ndim + (int(s),))
            .flatten(-2)
            .transpose(i, -1)
        )
    return image
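
up_scale performs nearest-neighbour upsampling by repeating every entry s times along each dimension; for a plain torch tensor the same effect comes from repeat_interleave:

import torch
x = torch.tensor([[1., 2.], [3., 4.]])
y = torch.repeat_interleave(torch.repeat_interleave(x, 2, dim=0), 2, dim=1)
assert y.shape == (4, 4)        # each value now occupies a 2 x 2 block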
Example No. 13
torch.manual_seed(0)
with scope("test torch, gpu"):
    t_ = torch.randn(3000, 400).to(tp.Device).requires_grad_(True)
    a_ = t_
    LP_ = torch.nn.Linear(400, 400).to(tp.Device)
    for _ in range(10): a_ = LP_(a_).relu()
    a_.sum().backward()

assert a.is_cuda is True
assert t.allclose(t_)
assert isinstance(t, tp.Tensor)
assert isinstance(a, tp.Tensor)
assert isinstance(LP.weight, tp.nn.Parameter)
assert isinstance(LP.bias, tp.nn.Parameter)
assert isinstance(tp.tensor(np.array([1., 2.])), tp.Tensor)
if torch.cuda.is_available():
    assert a.is_cuda
    assert t.is_cuda
    assert tp.tensor(np.array([1., 2.])).is_cuda

tp.set_autodevice(False)
tp.manual_seed(0)
with scope("test tp, cpu"):
    t = tp.randn(3000, 400, requires_grad=True)
    a = t
    LP = tp.nn.Linear(400, 400)
    for _ in range(10): a = LP(a).relu()
    a.sum().backward()

torch.manual_seed(0)
Example No. 14
def maskshow(*masks,
             on=None,
             alpha=0.5,
             nslice=None,
             dim=-1,
             stretch=False,
             **kwargs):
    global canvas
    if on is not None:
        if isinstance(on, (int, tuple)): background(*on)
        elif isarray(on): canvas = to_image(on, nslice, dim)
        elif isinstance(on, list): canvas = to_image(Tensor(on), nslice, dim)
        else: raise TypeError("Unrecognized argument 'on' for 'maskshow'. ")
    elif canvas is None:
        canvas = (1., ) * 3
    if len(masks) == 0: return imshow
    alpha = totuple(alpha, len(masks))
    new_masks = []
    new_alpha = []
    for m, a in zip(masks, alpha):
        img = to_image(m, nslice, dim)
        if img.ndim == 3:
            new_masks.extend(x.squeeze(-1) for x in img.split(1, dim=dim))
            new_alpha.extend([a] * img.size(dim))
        else:
            new_masks.append(img)
            new_alpha.append(a)
    color_mask_map = [
        (to_RGB(c), m, a)
        for c, m, a in zip(colors * (len(new_masks) // len(colors) +
                                     1), new_masks, new_alpha)
    ]
    color_mask_map.extend((to_RGB(c), m, alpha[0]) for c, m in kwargs.items())

    if not stretch:
        shapes = [m.ishape for _, m, _ in color_mask_map]
        target_shape = shapes[0]
        if len(set(shapes)) > 1 or not isinstance(
                canvas, tuple) and target_shape != canvas.shape:
            raise TypeError(
                "Please use masks of the same size as the background image, "
                "or use 'stretch=True' in 'maskshow' to automatically adjust the image sizes. "
            )
    else:

        def adjust(m, to):
            ms = tuple(m.shape)
            scaling = tuple((a // b, b // a) for a, b in zip(to, ms))
            return m.down_scale([max(v, 1) for u, v in scaling
                                 ]).up_scale([max(u, 1)
                                              for u, v in scaling]).crop_as(to)

        shapes = [m.ishape for _, m, _ in color_mask_map]
        if not isinstance(canvas, tuple): shapes.append(canvas.shape[:2])
        areas = [u * v for u, v in shapes]
        target_shape = shapes[areas.index(max(areas))]
        color_mask_map = [(c, adjust(m, to=target_shape), a)
                          for c, m, a in color_mask_map]
        canvas = adjust(canvas, to=target_shape)

    target_shape = tp.Size(*target_shape, {3})
    if isinstance(canvas, tuple):
        canvas = tp.tensor(list(canvas)).expand_to(target_shape)
    elif canvas.ndim == 2:
        canvas = canvas.expand_to(target_shape)
    coeff = vector(1 - a * m for _, m, a in color_mask_map).prod()
    canvas *= coeff
    for i, (c, m, a) in enumerate(color_mask_map):
        coeff = vector(a * m if j == i else 1 - a * m
                       for j, (_, m, a) in enumerate(color_mask_map)).prod()
        canvas += coeff.unsqueeze(-1) * m.unsqueeze(-1) * tp.tensor(
            list(c)).unsqueeze(0, 1)

    return plt.imshow(canvas.numpy(), **kwargs)