Example #1
import math

import torch


def bbox_transform_inv_opr(bbox, deltas):
    """Transform the learned deltas into final bbox coordinates (boxes along axis 1)."""
    max_delta = math.log(1000.0 / 16)
    # Recover the widths, heights and centers of the input boxes.
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
    pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height

    # Clamp the log-space size deltas so torch.exp cannot overflow.
    dw = torch.clamp(deltas[:, 2], max=max_delta)
    dh = torch.clamp(deltas[:, 3], max=max_delta)
    pred_width = bbox_width * torch.exp(dw)
    pred_height = bbox_height * torch.exp(dh)

    pred_x1 = pred_ctr_x - 0.5 * pred_width
    pred_y1 = pred_ctr_y - 0.5 * pred_height
    pred_x2 = pred_ctr_x + 0.5 * pred_width
    pred_y2 = pred_ctr_y + 0.5 * pred_height
    pred_boxes = torch.cat((pred_x1.reshape((-1, 1)), pred_y1.reshape((-1, 1)),
                            pred_x2.reshape((-1, 1)), pred_y2.reshape((-1, 1))),
                           dim=1)
    return pred_boxes
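
A minimal usage sketch for the decoder above, assuming PyTorch and the function as written (the box and delta values are made up for illustration):

import torch

# One anchor box (x1, y1, x2, y2) and one predicted delta (dx, dy, dw, dh).
anchor = torch.tensor([[10.0, 10.0, 49.0, 29.0]])
delta = torch.tensor([[0.1, -0.2, 0.3, 0.0]])
decoded = bbox_transform_inv_opr(anchor, delta)
print(decoded.shape)  # torch.Size([1, 4]): decoded x1, y1, x2, y2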
Example #2
def save_image(tensor,
               fp,
               nrow=8,
               padding=4,
               normalize=False,
               range=None,
               scale_each=False,
               pad_value=0,
               format=None):
    """Save a given Tensor into an image file.

    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        fp (string or file object): A filename or a file object.
        format (Optional): If omitted, the format to use is determined from the
            filename extension. If a file object was used instead of a filename,
            this parameter should always be used.
        The other arguments (nrow, padding, normalize, range, scale_each,
            pad_value) are documented in ``make_grid``.
    """
    import cv2
    grid = make_grid(tensor,
                     nrow=nrow,
                     padding=padding,
                     pad_value=pad_value,
                     normalize=normalize,
                     range=range,
                     scale_each=scale_each)
    # Add 0.5 after scaling to [0, 255] so the uint8 cast rounds to nearest.
    ndarr = F.clamp(grid * 255 + 0.5, 0, 255)
    # CHW -> HWC, then cast to uint8 for OpenCV.
    ndarr = F.transpose(ndarr, [1, 2, 0]).numpy().astype('uint8')
    # Note: cv2.imwrite expects a filename, and `format` is unused here.
    cv2.imwrite(fp, ndarr)
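
The clamp-then-round trick above is framework-agnostic; here is the same conversion sketched in plain NumPy (the random grid is just a stand-in for ``make_grid`` output):

import numpy as np

grid = np.random.rand(3, 32, 32)  # stand-in CHW image in [0, 1]
# Scale to [0, 255], add 0.5 so truncation rounds to nearest, clamp, cast.
ndarr = np.clip(grid * 255 + 0.5, 0, 255).transpose(1, 2, 0).astype('uint8')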
Example #3
    def forward(self, x, mask_in=None):
        assert len(x.shape) == 4

        # Recompute the mask products only when a new mask arrives or the
        # input shape changes; otherwise reuse the cached results.
        if mask_in is not None or self.last_size != tuple(x.shape):
            self.last_size = tuple(x.shape)

            with dg.no_grad():
                if self.weight_maskUpdater.dtype != x.dtype:
                    self.weight_maskUpdater = self.weight_maskUpdater.astype(
                        x.dtype)

                if mask_in is None:
                    # If mask is not provided, create a mask.
                    if self.multi_channel:
                        mask = L.ones(x.shape, dtype=x.dtype)
                    else:
                        mask = L.ones((1, 1, x.shape[2], x.shape[3]),
                                      dtype=x.dtype)
                else:
                    mask = mask_in

                self.update_mask = nn.functional.conv2d(
                    mask,
                    self.weight_maskUpdater,
                    bias=None,
                    stride=self.stride,
                    padding=self.padding,
                    dilation=self.dilation,
                    groups=1)
                # For mixed-precision training, an eps between 1e-8 and 1e-6
                # keeps the division below numerically stable.
                eps = 1e-6
                self.mask_ratio = self.slide_winsize / (self.update_mask + eps)
                self.update_mask = L.clamp(self.update_mask, 0, 1)
                self.mask_ratio = self.mask_ratio * self.update_mask

        # Run the regular convolution, zeroing masked input pixels first
        # when an explicit mask was provided.
        raw_out = super(PartialConv2D,
                        self).forward(x * mask if mask_in is not None else x)

        if self.bias is not None:
            bias_view = L.reshape(self.bias, (1, self.out_channels, 1, 1))
            output = (raw_out - bias_view) * self.mask_ratio + bias_view
            output = output * self.update_mask
        else:
            output = raw_out * self.mask_ratio

        if self.return_mask:
            return output, self.update_mask
        else:
            return output
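
The heart of the partial convolution is the mask bookkeeping. A toy NumPy sketch of the ratio/clamp step, assuming a 3x3 all-ones kernel (slide_winsize = 9) and three window positions covering 0, 4, and 9 valid pixels:

import numpy as np

slide_winsize = 9.0
update_mask = np.array([0.0, 4.0, 9.0])           # valid pixels per window
eps = 1e-6
mask_ratio = slide_winsize / (update_mask + eps)  # rescales partial sums
update_mask = np.clip(update_mask, 0, 1)          # 1 where any pixel is valid
mask_ratio = mask_ratio * update_mask             # 0 where fully masked
print(mask_ratio)  # approximately [0.   2.25 1.  ]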
Example #4
 def __call__(self, module):
     # Clip the learnable (Ada)ILN rho parameters back into [clip_min, clip_max].
     for param in module.parameters():
         if param.name.startswith('ada_iln') or (
                 param.name.startswith('iln')
                 and param.name.endswith('w_0')):
             clipped_param = layers.clamp(param,
                                          min=self.clip_min,
                                          max=self.clip_max)
             param.set_value(clipped_param)
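
A clipper like this is typically invoked after each optimizer step so the rho gates stay in range. A hypothetical sketch (RhoClipper, net, loader, and optimizer are made-up names; Layer.apply here is the Paddle 2.x API that calls a function on every sublayer):

clipper = RhoClipper(clip_min=0.0, clip_max=1.0)  # hypothetical class holding the bounds
for x in loader:                                  # hypothetical training loop
    loss = net(x)
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    net.apply(clipper)  # clamp every (ada_)iln rho back into range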
Example #5
 def neighbor_aggregator(self, sent_repr):
     # Count each node's neighbors by summing all-ones messages, then
     # clamp at 1 so isolated nodes do not cause a division by zero.
     norm = L.ones_like(sent_repr)

     def send_func(src, dst, edge):
         return src["h"]

     msg = self.graph_wrapper.send(send_func, nfeat_list=[("h", norm)])
     norm = self.graph_wrapper.recv(msg, "sum")
     norm = L.reduce_mean(norm, -1, keep_dim=True)
     norm = L.clamp(norm, min=1.0)

     return gcn(self.graph_wrapper,
                sent_repr,
                self.hidden_size,
                activation="relu",
                name="gcn") / norm
Example #6
import torch


def box_overlap_opr(box, gt):
    assert box.ndim == 2
    assert gt.ndim == 2
    area_box = (box[:, 2] - box[:, 0] + 1) * (box[:, 3] - box[:, 1] + 1)
    area_gt = (gt[:, 2] - gt[:, 0] + 1) * (gt[:, 3] - gt[:, 1] + 1)
    # Pairwise intersection extents via broadcasting.
    width_height = torch.minimum(
        box[:, 2:].unsqueeze(-2), gt[:, 2:]) - torch.maximum(
            box[:, :2].unsqueeze(-2), gt[:, :2]) + 1  # [N,M,2]
    width_height = torch.clamp(width_height, min=0)  # [N,M,2]
    inter = width_height.prod(dim=2)  # [N,M]
    del width_height
    # Handle empty boxes.
    iou = torch.where(
        inter > 0,
        inter / (area_box.unsqueeze(-1) + area_gt - inter),
        torch.zeros(1, dtype=inter.dtype),
    )
    return iou
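
A toy call, assuming PyTorch and the function above: two boxes scored against one ground-truth box.

import torch

boxes = torch.tensor([[0.0, 0.0, 9.0, 9.0],
                      [0.0, 0.0, 4.0, 4.0]])
gt = torch.tensor([[0.0, 0.0, 9.0, 9.0]])
print(box_overlap_opr(boxes, gt))  # IoUs: 1.0 and 0.25 (with the +1 convention)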
Example #7
 def forward(self, output1, output2, label):
     """
     :param output1: [n, 128]
     :param output2: [n, 128]
     :param label: [n, 1]
     :return: [1]
     """
     distance = layers.elementwise_sub(output1, output2)
     distance = layers.square(distance)
     euclidean_distance = layers.reduce_sum(distance, dim=1, keep_dim=True)
     euclidean_distance = layers.sqrt(euclidean_distance)
     loss_contrastive = layers.elementwise_mul(
         1 - label, layers.square(euclidean_distance),
         axis=0) + layers.elementwise_mul(
             label,
             layers.square(
                 layers.clamp(self.margin - euclidean_distance, min=0.0)),
             axis=0)
     return loss_contrastive, euclidean_distance.numpy(), label.numpy()
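
The clamp implements the hinge max(margin - d, 0), so dissimilar pairs closer than the margin are penalized. A worked NumPy check with margin = 2.0 (the numbers are made up):

import numpy as np

margin = 2.0
label = np.array([[0.0], [1.0]])     # 0: similar pair, 1: dissimilar pair
distance = np.array([[0.5], [0.5]])  # Euclidean distances
loss = (1 - label) * distance ** 2 \
     + label * np.clip(margin - distance, 0.0, None) ** 2
print(loss)  # [[0.25], [2.25]]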
Example #8
def get_norm(indegree):
    """Get Laplacian Normalization"""
    float_degree = L.cast(indegree, dtype="float32")
    float_degree = L.clamp(float_degree, min=1.0)
    norm = L.pow(float_degree, factor=-0.5)
    return norm
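
A quick check on a toy degree vector, with NumPy standing in for the ``L`` alias (presumably paddle.fluid.layers):

import numpy as np

indegree = np.array([0, 1, 4], dtype='float32')
float_degree = np.clip(indegree, 1.0, None)  # avoid 0 ** -0.5
norm = float_degree ** -0.5
print(norm)  # [1.  1.  0.5]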
Example #9
        def norm_ip(img, min, max):
            # Clamp into [min, max], then rescale to [0, 1].
            img = F.clamp(img, min=min, max=max)
            img = (img - min) / (max - min + 1e-5)
            return img
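
The same clamp-then-rescale step in plain NumPy, for a quick sanity check:

import numpy as np

img = np.array([-1.0, 0.0, 3.0])
lo, hi = 0.0, 2.0
img = np.clip(img, lo, hi)           # [0. 0. 2.]
img = (img - lo) / (hi - lo + 1e-5)  # approximately [0. 0. 1.]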