Example 1
def decimate(tensor: torch.Tensor, m: list) -> torch.Tensor:
    """
    Decimate a tensor by keeping every m-th element along each axis

    Parameters
    ----------
    tensor : torch.Tensor
        Tensor to be decimated.
    m : list
        List of decimation factors, one per axis (None leaves that axis untouched).

    Returns
    -------
    torch.Tensor
        Decimated tensor.
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(
                dim=d,
                index=torch.arange(start=0, end=tensor.size(d),
                                   step=m[d]).long(),
            )

    return tensor
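A quick usage sketch (nothing assumed beyond torch): keep every 2nd row and every 3rd column of a small 2-D tensor.

# Hedged usage example for decimate; shapes chosen for illustration only.
import torch

t = torch.arange(24).reshape(4, 6)
out = decimate(t, m=[2, 3])   # every 2nd row, every 3rd column
print(out.shape)              # torch.Size([2, 2])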
Example 2
def heatmap_blend(img: torch.Tensor, heatmap: torch.Tensor, heatmap_blend_alpha=0.5,
                  heatmap_clip_range=None, cmap='jet'):
    """
    Blend the colormap onto original image
    :param img: original image in RGB, dim (N, 3, H, W)
    :param heatmap: input heatmap, dim (N, H, W) or (N, 1, H, W)
    :param heatmap_blend_alpha: blend factor; 'heatmap_blend_alpha = 0' means the output is identical to the original image
    :param heatmap_clip_range: optional (min, max) tuple used to clip heatmap values before colormapping
    :param cmap: colormap to blend
    :return: blended heatmap image, dim (N, 3, H, W)
    """
    if heatmap.dim() == 4:
        if heatmap.size(1) == 1:
            heatmap = heatmap.view(heatmap.size(0), heatmap.size(2), heatmap.size(3))
        else:
            raise ValueError("The heatmap should be (N, 1, H, W) or (N, H, W)")
    N, C3, H, W = img.shape

    assert heatmap_blend_alpha < 1.0
    assert H == heatmap.size(1)
    assert W == heatmap.size(2)
    assert N == heatmap.size(0)
    assert C3 == 3                      # input image has three channel RGB

    color_map = colormap(heatmap, cmap=cmap, clip_range=heatmap_clip_range, chw_order=True).to(img.device)
    output_heat_map = img.clone() * (1.0 - heatmap_blend_alpha) + color_map * heatmap_blend_alpha
    return output_heat_map
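Usage sketch, assuming the colormap helper from Example 14 (plus cv2 and numpy) is available in the same module:

# Hedged example: blend a random heatmap onto a random RGB batch.
import torch

img = torch.rand(2, 3, 64, 64)   # (N, 3, H, W) RGB batch
heat = torch.rand(2, 64, 64)     # (N, H, W) heatmap
blended = heatmap_blend(img, heat, heatmap_blend_alpha=0.4)
print(blended.shape)             # torch.Size([2, 3, 64, 64])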
Example 3
    def forward(self,
                graph_obj: dgl.DGLGraph,
                feature: torch.Tensor,
                weight=True) -> torch.Tensor:
        # local_scope ensures the data stored on the graph (e.g. messages) doesn't accumulate across calls
        # When implementing a layer you can either initialize your own weights and use matmul, or use nn.Linear
        # For performance reasons, DGL's implementation orders operations according to input/output size,
        # but matmul(features, weights) or nn.Linear(features) should be usable at any point
        with graph_obj.local_scope():
            if self.norm == 'both':
                degs = graph_obj.out_degrees().to(
                    feature.device).float().clamp(min=1)
                norm = torch.pow(degs, -0.5)
                shp = norm.shape + (1, ) * (feature.dim() - 1)
                norm = torch.reshape(norm, shp)
                feat = feature * norm
            else:
                feat = feature

            # feat = torch.matmul(feat, weight)
            graph_obj.srcdata['h'] = feat
            graph_obj.update_all(fn.copy_src(src='h', out='m'),
                                 fn.sum(msg='m', out='h'))
            feat = graph_obj.dstdata['h']

            if self.bias is not None:
                feat = self.bias + feat

        return self.linear(feat)
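To see what the message-passing core computes in isolation, here is a hedged toy-graph sketch; it reuses fn.copy_src as above (newer DGL releases rename it fn.copy_u):

# Hedged sketch: sum-aggregation over in-neighbours on a 3-node cycle.
import dgl
import dgl.function as fn
import torch

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
feat = torch.randn(3, 4)
with g.local_scope():
    g.srcdata['h'] = feat
    g.update_all(fn.copy_src(src='h', out='m'), fn.sum(msg='m', out='h'))
    out = g.dstdata['h']   # each node holds the sum of its in-neighbours' features
print(out.shape)           # torch.Size([3, 4])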
Example 4
def reverseSequence(inputs: Tensor,
                    lengths: list[int],
                    batch_first=True) -> Tensor:
    if batch_first:
        inputs = inputs.transpose(0, 1)
    maxLen, batchSize = inputs.size(0), inputs.size(1)

    if len(lengths) != batchSize:
        raise RuntimeError(
            "lengths has %d entries but inputs has batch size %d" %
            (len(lengths), batchSize))

    ind = [
        list(reversed(range(0, l))) + list(range(l, maxLen)) for l in lengths
    ]
    ind = torch.LongTensor(ind).transpose(0, 1)
    for dim in range(2, inputs.dim()):
        ind = ind.unsqueeze(dim)
    ind = ind.expand_as(inputs).to(inputs.device)

    reversedInput = torch.gather(inputs, 0, ind)
    if batch_first:
        reversedInput = reversedInput.transpose(0, 1)
    return reversedInput
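Usage sketch: each sequence is reversed up to its own length while padding positions stay put.

# Hedged example for reverseSequence with batch_first inputs.
import torch

inputs = torch.tensor([[1, 2, 3, 0],
                       [4, 5, 0, 0]])   # (batch=2, maxLen=4), 0 = padding
rev = reverseSequence(inputs, lengths=[3, 2], batch_first=True)
print(rev)   # tensor([[3, 2, 1, 0],
             #         [5, 4, 0, 0]])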
Example 5
    def __call__(self, x: torch.Tensor):
        assert x.dim() == 4, 'input tensor should be 4D'

        in_channels = x.size(1)
        outputs = [
            self.vision_conv2d(x[:, i:i + 1, ...]) for i in range(in_channels)
        ]
        return torch.cat(outputs, dim=1)
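A hedged sketch of the class this method could belong to (the class name and the assumption that vision_conv2d is a shared 1-in/1-out Conv2d are both hypothetical):

# Minimal sketch: apply one shared single-channel convolution to every input channel.
import torch
import torch.nn as nn

class PerChannelConv:   # hypothetical name for illustration
    def __init__(self):
        self.vision_conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)

    def __call__(self, x: torch.Tensor):
        assert x.dim() == 4, 'input tensor should be 4D'
        outputs = [self.vision_conv2d(x[:, i:i + 1, ...]) for i in range(x.size(1))]
        return torch.cat(outputs, dim=1)

y = PerChannelConv()(torch.rand(2, 5, 32, 32))
print(y.shape)          # torch.Size([2, 5, 32, 32])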
Example 6
def lag_tensor(arr: torch.Tensor, lag: int, dim: int):
    if lag == 0:
        return arr
    elif lag > 0:
        selector = slice(lag, None)
    elif lag < 0:
        selector = slice(0, lag)
    index = [slice(None)] * arr.dim()
    index[dim] = selector

    return arr[tuple(index)]
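Usage sketch: a positive lag drops elements from the front, a negative lag from the back.

# Hedged example for lag_tensor on a 1-D series.
import torch

x = torch.tensor([10, 20, 30, 40])
print(lag_tensor(x, lag=1, dim=0))    # tensor([20, 30, 40])
print(lag_tensor(x, lag=-1, dim=0))   # tensor([10, 20, 30])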
Example 7
    def _flatten(self, tensor: torch.Tensor) -> torch.Tensor:
        """
        Flattens a given tensor such that the channel axis is first.
        The shapes are transformed as follows:
           (N, C, D, H, W) -> (C, N * D * H * W)
        """
        C = tensor.size(1)  # number of channels
        axis_order = (1, 0) + tuple(range(2, tensor.dim()))  # new axis order
        transposed = tensor.permute(
            axis_order)  # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
        return transposed.contiguous().view(
            C, -1)  # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
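Shape check for _flatten (hedged, applying the same permute/flatten trick directly rather than through the owning class):

# Standalone version of the channel-first flatten.
import torch

t = torch.rand(2, 4, 3, 8, 8)   # (N, C, D, H, W)
flat = t.permute(1, 0, 2, 3, 4).contiguous().view(4, -1)
print(flat.shape)               # torch.Size([4, 384])  ->  (C, N*D*H*W)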
Example 8
    def forward(self, input: torch.Tensor,
                target: torch.Tensor) -> Tuple[float, float, list]:

        target = utils.expand_as_one_hot(target.long(), self.classes)

        assert input.dim() == target.dim() == 5, \
            f"expected 5D tensors, got 'input' with {input.dim()} dims and 'target' with {target.dim()} dims"

        input = self.normalization(input.float())

        if self.eval_regions:
            input_wt, input_tc, input_et = self._reformat_labels(input)
            target_wt, target_tc, target_et = self._reformat_labels(target)

            wt_dice = torch.mean(
                self.dice(input_wt, target_wt, weight=self.weight))
            tc_dice = torch.mean(
                self.dice(input_tc, target_tc, weight=self.weight))
            et_dice = torch.mean(
                self.dice(input_et, target_et, weight=self.weight))

            wt_loss = 1 - wt_dice
            tc_loss = 1 - tc_dice
            et_loss = 1 - et_dice

            loss = 1 / 3 * (wt_loss + tc_loss + et_loss)
            score = 1 / 3 * (wt_dice + tc_dice + et_dice)

            return loss, score, [wt_loss, tc_loss, et_loss]

        else:
            # compute per channel Dice coefficient
            per_channel_dice = self.dice(input, target, weight=self.weight)

            # average Dice score across all channels/classes
            mean = torch.mean(per_channel_dice)
            loss = 1. - mean
            return loss, mean, per_channel_dice[1:]
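self.dice itself is not shown; a minimal sketch of what a per-channel Dice coefficient typically computes (a hypothetical helper, reusing the channel-first flatten from Example 7):

# Hedged sketch: per-channel Dice over 5D one-hot tensors, 2|A∩B| / (|A| + |B|).
import torch

def per_channel_dice(input: torch.Tensor, target: torch.Tensor, eps: float = 1e-6):
    inp = input.permute(1, 0, 2, 3, 4).contiguous().view(input.size(1), -1)
    tgt = target.permute(1, 0, 2, 3, 4).contiguous().view(target.size(1), -1).float()
    intersection = (inp * tgt).sum(-1)
    return 2 * intersection / (inp.sum(-1) + tgt.sum(-1) + eps)

d = per_channel_dice(torch.rand(2, 3, 4, 8, 8), (torch.rand(2, 3, 4, 8, 8) > 0.5).float())
print(d.shape)   # torch.Size([3])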
Example 9
    def get_dense_matrix(self,
                         data: torch.Tensor,
                         c: torch.Tensor,
                         to_numpy=True):
        batch_size = c[-1, -1] + 1
        if data.dim() == 1:
            data = spconv.SparseConvTensor(data.unsqueeze(1),
                                           c[:, self.permute_tensor],
                                           self.spatial_size, batch_size)
        else:
            data = spconv.SparseConvTensor(data, c[:, self.permute_tensor],
                                           self.spatial_size, batch_size)
        data = data.dense()
        if to_numpy:
            data = data.detach().cpu().numpy()
        return data
Example 10
def _mask_token_mask_like(
    input: torch.Tensor, mask_sampling: MaskSampling
) -> torch.Tensor:
    assert input.dim() == 2

    # Sample one MASK token per example, uniformly among all tokens of the example
    if isinstance(mask_sampling, UniformMask):
        assert input.shape == mask_sampling.target_key_padding_mask.shape
        return torch.zeros_like(input, dtype=torch.bool).scatter(
            dim=1,
            index=torch.multinomial(mask_sampling.target_key_padding_mask.float(), 1),
            value=True,
        )

    # Each token is independently converted to MASK with probability p_mask
    elif isinstance(mask_sampling, BernoulliMask):
        assert input.shape == mask_sampling.target_key_padding_mask.shape
        return (
            torch.bernoulli(
                torch.full_like(
                    input, fill_value=mask_sampling.p_mask, dtype=torch.float
                )
            )
            .bool()
            .masked_fill(mask=~mask_sampling.target_key_padding_mask, value=False)
        )

    # Custom mask
    elif isinstance(mask_sampling, CustomMask):
        return torch.zeros_like(input, dtype=torch.bool).scatter(
            dim=1, index=mask_sampling.custom_target_mask, value=True
        )

    # No mask
    elif isinstance(mask_sampling, NoMask):
        return torch.zeros_like(input, dtype=torch.bool)

    # Unsupported mask
    else:
        raise ValueError(
            f"Unrecognized value for mask_sampling_method: {mask_sampling}"
        )
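The UniformMask branch is the least obvious one; here is a hedged, class-free sketch of the same multinomial-plus-scatter trick:

# Pick exactly one position per row, uniformly among the allowed positions.
import torch

allowed = torch.tensor([[1, 1, 1, 0],
                        [1, 1, 0, 0]], dtype=torch.float)   # 1 = eligible token
mask = torch.zeros_like(allowed, dtype=torch.bool).scatter(
    dim=1, index=torch.multinomial(allowed, 1), value=True)
print(mask.sum(dim=1))   # tensor([1, 1]) -- one MASK position per example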
Example 11
def plot_images_labels(x: torch.Tensor,
                       label,
                       export_img,
                       title: str = '',
                       nrow=8,
                       padding=2,
                       normalize=False,
                       pad_value=0):
    """Plot separate images of shape (H x W) colored by their binary label."""

    if x.dim() == 4:  # if a channel axis is present, keep only the first entry along the leading axis
        x = x[0]
    grid = make_grid(x,
                     nrow=nrow,
                     padding=padding,
                     normalize=normalize,
                     pad_value=pad_value)
    npgrid = grid.cpu().numpy()

    plt.imshow(np.transpose(npgrid, (1, 2, 0)), interpolation='nearest')
    # plt.imshow(x[i].squeeze(), cmap='gray', interpolation='nearest')

    ax = plt.gca()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)

    # Border color:
    if label == 0:
        color = 'green'
    elif label == 1:
        color = 'red'
    else:
        print('Invalid label assigned!')
        color = 'black'  # fallback so plt.setp below doesn't raise NameError
    plt.setp(ax.spines.values(), color=color, linewidth=5)
    # plt.setp([ax.get_xticklines(), ax.get_yticklines()], color='green')

    if title:
        plt.title(title)

    plt.savefig(export_img, bbox_inches='tight', pad_inches=0.1)
    plt.clf()
Example 12
def intersectionAndUnionGPU(
        preds: torch.Tensor,
        target: torch.Tensor,
        num_classes: int,
        ignore_index=255) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    inputs:
        preds : shape [H, W]
        target : shape [H, W]
        num_classes : Number of classes

    returns :
        area_intersection : shape [num_class]
        area_union : shape [num_class]
        area_target : shape [num_class]
    """
    assert preds.dim() in [1, 2, 3]
    assert preds.shape == target.shape
    preds = preds.view(-1)
    target = target.view(-1)
    preds[target == ignore_index] = ignore_index
    intersection = preds[preds == target]

    # Adding .float() because histc does not work with long() on CPU
    area_intersection = torch.histc(intersection.float(),
                                    bins=num_classes,
                                    min=0,
                                    max=num_classes - 1)
    area_output = torch.histc(preds.float(),
                              bins=num_classes,
                              min=0,
                              max=num_classes - 1)
    area_target = torch.histc(target.float(),
                              bins=num_classes,
                              min=0,
                              max=num_classes - 1)
    area_union = area_output + area_target - area_intersection
    # print(torch.unique(intersection))
    return area_intersection, area_union, area_target
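Usage sketch: per-class IoU and mean IoU from the returned areas (preds is cloned because the function writes ignore_index into it in place).

# Hedged example for intersectionAndUnionGPU with 3 classes.
import torch

preds = torch.tensor([0, 0, 1, 2, 2, 2])
target = torch.tensor([0, 1, 1, 2, 2, 255])   # 255 = ignore_index
inter, union, _ = intersectionAndUnionGPU(preds.clone(), target, num_classes=3)
iou = inter / (union + 1e-10)
print(iou, iou.mean())   # tensor([0.5000, 0.5000, 1.0000]), mIoU ~= 0.67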
Example 13
def heatmaps_to_coords(heatmaps: torch.Tensor, thresh: float = 0):
    """
    Get predictions from heatmaps in torch Tensor.

    Args:
        heatmaps: Tensor of shape [N, 1, H, W]
        thresh: Threshold for a peak in a heatmap to be considered a keypoint;
                thresh should be in range [0, 1]

    Returns: Tuple of (preds, maxval); preds holds 1-indexed (x, y) coordinates per keypoint channel

    """
    heatmaps = sure_to_torch(heatmaps)

    assert heatmaps.dim() == 4, 'Heatmaps should be 4-dim'
    maxval, idx = torch.max(
        heatmaps.view(heatmaps.size(0), heatmaps.size(1), -1), 2)

    maxval = maxval.view(heatmaps.size(0), heatmaps.size(1), 1)
    idx = idx.view(heatmaps.size(0), heatmaps.size(1), 1) + 1

    preds = idx.repeat(1, 1, 2).float()

    preds[:, :, 0] = (preds[:, :, 0] - 1) % heatmaps.size(3) + 1
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / heatmaps.size(3)) + 1

    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask

    if thresh != 0:
        assert 0 < thresh <= 1, f"Thresh must be in range (0, 1], got {thresh}"
        # Get the indices where the values are smaller then the threshold
        indices = maxval <= thresh
        # Set these values to 0
        preds[indices.squeeze(-1)] = 0
    return preds, maxval
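Usage sketch (assuming sure_to_torch passes torch tensors through unchanged): a single peak at column x=5, row y=2 comes back as the 1-indexed pair (6, 3).

# Hedged example for heatmaps_to_coords.
import torch

hm = torch.zeros(1, 1, 8, 8)
hm[0, 0, 2, 5] = 1.0            # peak at row y=2, column x=5
coords, maxval = heatmaps_to_coords(hm)
print(coords)                   # tensor([[[6., 3.]]]) -- 1-indexed (x, y)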
Example 14
def colormap(tensor: torch.Tensor, cmap='jet', clip_range=None, scale_each=True,
             chw_order=True):
    """
    Create colormap for each single channel input map
    :param tensor: input single-channel image, dim (N, H, W) or (N, 1, H, W)
    :param cmap: the type of color map
    :param chw_order: the output type of tensor, either CHW or HWC
    :param clip_range: the minimal or maximal clip on input tensor
    :param scale_each: normalize the input based on each image instead of the whole batch
    :return: colormap tensor, dim (N, 3, H, W) if 'chw_order' is True, else (N, H, W, 3)
    """
    if cmap == 'gray':
        cmap_tag = cv2.COLORMAP_BONE
    elif cmap == 'hsv':
        cmap_tag = cv2.COLORMAP_HSV
    elif cmap == 'hot':
        cmap_tag = cv2.COLORMAP_HOT
    elif cmap == 'cool':
        cmap_tag = cv2.COLORMAP_COOL
    else:
        cmap_tag = cv2.COLORMAP_JET

    if tensor.dim() == 2: # single image
        tensor = tensor.view(1, tensor.size(0), tensor.size(1))
    elif tensor.dim() == 4:
        if tensor.size(1) == 1:
            tensor = tensor.view(tensor.size(0), tensor.size(2), tensor.size(3))
        else:
            raise ValueError("The input image should have exactly one channel.")
    elif tensor.dim() > 4:
        raise ValueError("The input image should have dim (N, H, W) or (N, 1, H, W).")

    # normalize
    tensor = tensor.clone()  # avoid modifying tensor in-place
    if clip_range is not None:
        assert isinstance(clip_range, tuple), \
            "range has to be a tuple (min, max) if specified. min and max are numbers"

    def norm_ip(img, min, max):
        img.clamp_(min=min, max=max)
        img.add_(-min).div_(max - min + 1e-5)

    def norm_range(t, range):
        if range is not None:
            norm_ip(t, range[0], range[1])
        else:
            norm_ip(t, float(t.min()), float(t.max()))

    if scale_each:
        for t in tensor:  # loop over mini-batch dimension
            norm_range(t, clip_range)
    else:
        norm_range(tensor, clip_range)

    # apply color map
    N, H, W = tensor.shape
    color_tensors = []
    for n in range(N):
        sample = tensor[n, ...].detach().cpu().numpy()
        colormap_sample = cv2.applyColorMap((sample * 255).astype(np.uint8), cmap_tag)
        colormap_sample = cv2.cvtColor(colormap_sample, cv2.COLOR_BGR2RGB)
        color_tensors.append(torch.from_numpy(colormap_sample).cpu())
    color_tensors = torch.stack(color_tensors, dim=0).float() / 255.0

    return color_tensors.permute(0, 3, 1, 2) if chw_order else color_tensors
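Usage sketch (cv2 and numpy must be importable, as in the function body):

# Hedged example for colormap: map a random single-channel batch to RGB jet colors.
import torch

heat = torch.rand(2, 64, 64)
cm = colormap(heat, cmap='jet', chw_order=True)
print(cm.shape)   # torch.Size([2, 3, 64, 64]), values in [0, 1]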
Example 15
    def __call__(self, x: torch.Tensor):
        assert x.dim() == 4, 'input tensor should be 4D'
        return F.conv2d(x,
                        self.kernel.to(x),
                        padding=self.padding,
                        dilation=self.dilation)
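A hedged sketch of a class this method could belong to; the box-blur kernel, padding and dilation values are illustrative assumptions:

# Minimal sketch: a fixed, non-learned convolution kernel applied via F.conv2d.
import torch
import torch.nn.functional as F

class FixedKernelFilter:   # hypothetical name for illustration
    def __init__(self):
        self.kernel = torch.full((1, 1, 3, 3), 1.0 / 9.0)   # 3x3 box blur, (out=1, in=1, kH, kW)
        self.padding = 1
        self.dilation = 1

    def __call__(self, x: torch.Tensor):
        assert x.dim() == 4, 'input tensor should be 4D'
        return F.conv2d(x, self.kernel.to(x), padding=self.padding,
                        dilation=self.dilation)

y = FixedKernelFilter()(torch.rand(1, 1, 16, 16))
print(y.shape)             # torch.Size([1, 1, 16, 16])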