Example #1
    def rectangle(self, rec, norm_box, pres, global_step):
        '''
        rec: [N, C, H_img, W_img]
        norm_box: [N, H*W, 4], y_center, x_center, height, width
        pres: [N, H*W, 1]
        '''
        with torch.no_grad():
            norm_box[:, :, :2] *= self.spair.encoder.image_size
            norm_box[:, :, 2:] *= self.spair.encoder.image_size / 2
            norm_box = torch.round_(
                norm_box)  # y_center, x_center, height / 2, width / 2
            norm_box = torch.stack(
                (
                    norm_box[:, :, 1] - norm_box[:, :, 3],  # xmin
                    norm_box[:, :, 0] - norm_box[:, :, 2],  # ymin
                    norm_box[:, :, 1] + norm_box[:, :, 3],  # xmax
                    norm_box[:, :, 0] + norm_box[:, :, 2],  # ymax
                ),
                dim=-1)
            pres = torch.round_(pres).bool().squeeze_(-1)

            for i, (img, box, zpres) in enumerate(zip(rec, norm_box, pres)):
                box = torch.stack([b for b, z in zip(box, zpres) if z])
                self.summary.add_image_with_boxes('detect/rec_%d' % i, img,
                                                  box, global_step)
Example #2
def safe_real_exp(values: torch.Tensor) -> torch.Tensor:
    assert values.dim() == 2 and values.size(1) == 2
    amplitude = values[:, 0]
    amplitude -= torch.max(amplitude)
    torch.exp_(amplitude)
    phase = values[:, 1]
    phase /= 3.141592653589793
    torch.round_(phase)
    torch.abs_(phase)
    phase = torch.fmod(phase, 2.0)
    return amplitude * (1.0 - 2.0 * phase)
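A minimal usage sketch for the function above (input values are hypothetical); note that it modifies its argument in place, hence the clone():

import torch

values = torch.tensor([[0.0, 0.0],       # phase ~ 0  -> sign +1
                       [0.0, 3.1416]])   # phase ~ pi -> sign -1
print(safe_real_exp(values.clone()))
# tensor([ 1., -1.]): exp of the max-shifted amplitudes, with the sign
# flipped for rows whose phase rounds to an odd multiple of pi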
Example #3
def run_fwd(x, layer, fn):
    with torch.no_grad():
        y = fn(x, layer.weight, layer.bias)
        if layer.rounding or layer.quant_step:
            # also round input layer (with only quant_step) because quant_step
            # has been absorbed into BN scale and the conv should be purely
            # integral
            y = torch.round_(y)
        if layer.bias is not None:
            return (y.mul_(layer.sign) >= 0).to(dtype=x.dtype)
        return y
Example #4
 def getDistribution(self, title):
     stats = (i.getDistribution(title) for i in self.datasets)
     stats = [i for i in stats if i is not None]
     if not stats:
         return
     if len(stats) == len(self.datasets):
         return torch.stack(stats).sum(dim=0)
     else:
         stats = torch.stack(stats).sum(dim=0)
         stats = stats / stats.sum()
         return torch.round_(len(self) * stats)
Example #5
 def forward(self, x):
     if isinstance(x, (BoolTensor, QuantizedInputRangeTensor)):
         if isinstance(x, QuantizedInputRangeTensor):
             assert x.quant_step == self.quant_step, (
                 f'quant step mismatch: {x.quant_step} vs {self.quant_step}'
             )
         assert not self.output_numeric and self.bias is not None
         bias, sign = (torch_as_npy(self.bias),
                       torch_as_npy(self.sign).flatten())
         return x.matmul(self.full_layer_name, torch_as_npy(self.weight.T),
                         bias, sign)
     if self.quant_step is not None:
         x = torch.round_(x / self.quant_step)
     if self.output_numeric:
         return (F.linear(x, self.weight, self.bias) *
                 self.output_numeric_scale)
     return run_fwd(x, self, F.linear)
Example #6
 def forward(self, x):
     if isinstance(x, (BoolTensor, QuantizedInputRangeTensor)):
         if isinstance(x, QuantizedInputRangeTensor):
             assert x.quant_step == self.quant_step, (
                 f'quant step mismatch: {x.quant_step} vs {self.quant_step}'
             )
         return x.conv2d(self.full_layer_name, torch_as_npy(self.weight),
                         torch_as_npy(self.bias),
                         torch_as_npy(self.sign).flatten(), self.stride,
                         self.padding)
     if self.quant_step is not None:
         x = torch.round_(x / self.quant_step)
     return run_fwd(
         x, self,
         functools.partial(F.conv2d,
                           stride=self.stride,
                           padding=self.padding))
Example #7
    def forward(self, x_a):  #, x_b):
        r = self.res
        x_a = x_a.float()
        assert x_a.shape[1:] == (3, r, r), x_a.shape
        B = x_a.shape[0]

        y_cam_a = self.layers(x_a)

        heatmaps_a, cs_raw_a = y_cam_a[:, :self.n_objs *
                                       2], y_cam_a[:, self.n_objs * 2:]

        cs_raw_a = cs_raw_a.view(B, self.n_objs, 6, r, r)
        print(heatmaps_a.shape)
        heatmaps_a = F.softmax(heatmaps_a.view(B, self.n_objs * 2, -1),
                               dim=-1).view(B, self.n_objs, 2, r, r)
        print(heatmaps_a.shape)
        coords_a = heatmaps_a * self.coords.view(1, 1, 2, r,
                                                 r)  # B, n_objs, 2, r, r
        print(coords_a.shape)
        coords_a = coords_a.sum(dim=(-1, -2))  # B, n_objs, 2

        if self.smooth_cs:
            cs_a = torch.sum(heatmaps_a.detach() * cs_raw_a,
                             dim=(-1, -2))  # B, n_objs, 2
        else:
            # there must be a better way than this crazy indexing?

            # idxs = coords as index in heatmaps (0 to 126)
            idxs = torch.clamp_(
                torch.round_((coords_a + 0.5) * r).long(), 0,
                r - 1)  # B, n_objs, 2  (xy)
            print(coords_a.shape)
            print(idxs.shape)
            print(cs_raw_a.shape)
            print()
            cs_a = cs_raw_a[
                torch.arange(B).view((B, 1, 1)),  # Take from all batches
                torch.arange(self.n_objs).view(1, self.n_objs, 1
                                               ),  # for all objects
                torch.arange(6).view(1, 1, 6),  #  sin and cos
                idxs[..., 1].view(B, self.n_objs, 1),  # at ys
                idxs[..., 0].view(B, self.n_objs, 1),  # and xs from idxs
            ]

        return coords_a, cs_a, heatmaps_a, cs_raw_a
Example #8
    def visualise_layer_with_hooks(self, img_id):
        self.hook_layer()
        for i in range(0, 35):
            # optimizer.zero_grad()
            # Assign create image to a variable to move forward in the model
            x = self.image
            for index, layer in enumerate(self.model):
                if not os.path.exists('D:/generated/' + 'layer' + str(index)):
                    os.makedirs('D:/generated/' + 'layer' + str(index))
                print('network-architecture-index', index)
                print('network-architecture-layer', layer)
                x = layer(x.cuda())
                if index == self.selected_layer:
                    break

                # for i in range(x.shape[1]):
                # img = x.detach().numpy()[0, i, :, :]
                # if i == self.selected_filter:
                img = x.data.cpu().numpy()[0, i, :, :]
                #######################
                ymax = 255
                ymin = 0
                xmax = img.max()
                xmin = img.min()
                img = torch.round_(
                    torch.Tensor((ymax - ymin) * (img - xmin) / (xmax - xmin) +
                                 ymin))
                #######################

                # a = cv2.cvtColor(img.cpu().numpy(), cv2.COLOR_BGR2GRAY)
                c = img.cpu().numpy().astype(np.uint8)
                b = cv2.equalizeHist(c)
                cv2.imwrite(
                    'D:/generated/' + 'layer' + str(index) + '/' + img_id[-1] +
                    '----' + 'layer' + str(index) + '-filter' + str(i) +
                    '.png', b)
Example #9
 def forward(ctx, inp):
     return torch.round_(inp)
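The snippet above is only the forward half of an autograd Function. A self-contained sketch of the usual straight-through pattern (the class name RoundSTE is hypothetical, not the original):

import torch

class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp):
        # round out of place so the caller's tensor is not mutated
        return torch.round(inp)

    @staticmethod
    def backward(ctx, grad_output):
        # straight-through estimator: treat rounding as the identity
        return grad_output

x = torch.randn(4, requires_grad=True)
RoundSTE.apply(x).sum().backward()  # x.grad is all ones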
Example #10
def plot_boxes_cv2(img,
                   boxes,
                   class_names=None,
                   color=None):  #savename=save 3rd one.
    "plots boxes"
    import cv2
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0],
                                [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        ratio = float(x) / max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)

    width = img.shape[1]
    height = img.shape[0]

    nameAndDist = []
    for i in range(len(boxes)):
        box = boxes[i]
        x1 = int(torch.round_((box[0] - box[2] / 2.0) * width))
        y1 = int(torch.round_((box[1] - box[3] / 2.0) * height))
        x2 = int(torch.round_((box[0] + box[2] / 2.0) * width))
        y2 = int(torch.round_((box[1] + box[3] / 2.0) * height))

        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            "set True, if want better boxes but low running time. Set False if want worse boxes but good running time. May not change the running time at all, so was set true."
            "change: no boxes needed maybe. no depth. Absolutely not needed"
            # if True:
            #     # CV2 TO PIL #
            #     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #     # cv2 are array, PIL is not array as seen.
            #     img = Image.fromarray(img)
            #     try:
            #         drawtext(img, (x1, y1), class_names[cls_id], bgcolor=rgb, font=ImageFont.truetype("arialbd", 14))
            #     except Exception as e:
            #         pass
            #     # PIL TO CV2 back
            #     img = np.array(img)
            #     # Convert RGB to BGR
            #     img = img[:, :, ::-1].copy()
            # else:
            #     img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_DUPLEX, 0.8, rgb, 2)
            nameAndDist.append(
                [class_names[cls_id], x1, y1, x2,
                 y2])  #investigate, maybe later purposes. no needed rightnow
        # img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 2)
    "returns img and all detected images name and distance"
    return img, nameAndDist  #coordinates can be given, so distances can also be calculated.
Example #11
 def discretizer(self,tensor):
     q_D = pow(2, Quantizer.discretization_level)
     torch.round_(tensor.mul_(q_D))
     tensor.div_(q_D)
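A quick check of what the method above does, assuming Quantizer.discretization_level == 3 and quantizer is an instance of that class: values are snapped in place to the nearest multiple of 1/8.

import torch

t = torch.tensor([0.10, 0.26, 0.49])
quantizer.discretizer(t)
# t -> tensor([0.1250, 0.2500, 0.5000])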
Example #12
def estimate_lk(
    G,
    k,
    num_estimation=1,
    num_rv=None,
    epsilon=1e-2,
    lmin=None,
    lmax=None,
    return_coherence=True,
    order=30,
    lap_type="comb",
    verbose=False,
):
    r"""
    Estimate the optimal distribution according to which the bandlimited graph signals
    are sampled [3]_ .

    Parameters
    ----------
    G: GraphBase
        The graph
    k: int
        The :obj:`k`-th smallest eigenvalue of graph Laplacian
    num_estimation: int
        The number of times the estimation of :math:`\lambda_{k}` is going to run
    num_rv: int or None
        The number of random vectors used
    epsilon: float
        The tolerance of binary search to find approximated :math:`\lambda_{k}`
    lmin: float
        The smallest frequency of graph Laplacian
    lmax: float
        The largest frequency of graph Laplacian
    return_coherence: bool
        If :obj:`True`, return the estimated square of graph local cumulative coherence
        [3]_ of all nodes
    order: int
        The order of the Chebyshev approximation
    lap_type: str
        :obj:`comb`, :obj:`sym`, and :obj:`rw` represent combinatorial, symmetric
        normalized, and random-walk normalized Laplacian, respectively
    verbose: bool

    Returns
    -------
    lambda_k: float
        The eventual estimated :obj:`k`-th smallest graph frequency
    cum_coh: Tensor or None
        If :obj:`return_coherence` is :obj:`True` , return the estimated graph local
        cumulative coherence [3]_ of every node, otherwise :obj:`None`

    References
    ----------
    .. [3] G. Puy, et al., “Random sampling of bandlimited signals on graphs,”
            Applied and Computational Harmonic Analysis, 2018.
    """
    N = G.size(1)
    appropriate_num_rv = np.int32(2 * np.round(np.log(N)))
    if num_rv is None:
        num_rv = appropriate_num_rv
    elif num_rv < appropriate_num_rv:
        warnings.warn(
            f"Using at least {appropriate_num_rv} random vectors are recommended."
        )
        num_rv = appropriate_num_rv
    else:
        if verbose:
            print(f"Use {num_rv} random vectors to estimate the distribution")

    L = G.L(lap_type)
    if lmin is None:
        lmin = 0.0
    if lmax is None:
        lmax = G.max_frequency(lap_type)

    device = G.device()
    dtype = G.dtype()

    x = None
    coeff = torch.zeros(1, num_rv, order + 1, dtype=dtype, device=device)
    norm_UK = (torch.zeros(num_estimation, N, dtype=dtype, device=device)
               if return_coherence else None)
    estimated_lam_k = np.zeros(num_estimation)
    for i in range(num_estimation):
        sig = torch.randn(N, num_rv, dtype=dtype,
                          device=device) / np.sqrt(num_rv)
        counts = 0
        lambda_min, lambda_max = lmin, lmax
        while counts != k or (lambda_max - lambda_min) / lambda_max > epsilon:
            lambda_mid = (lambda_min + lambda_max) / 2
            coeff[...] = torch.from_numpy(
                cheby_coeff4ideal_band_pass(0.0, lambda_mid, 0.0, lmax, order))
            x = cheby_op(sig, L, coeff, lmax).squeeze_()
            counts = torch.round_(torch.sum(x**2))
            if counts >= k:
                lambda_max = lambda_mid
            else:
                lambda_min = lambda_mid
            if verbose:
                print(f"[estimating lambda_k]counts: {int(counts):8d}, "
                      f"bottom: {lambda_min:.4f}, top: {lambda_max:.4f}")
        estimated_lam_k[i] = (lambda_min + lambda_max) / 2
        if verbose:
            print(f"{i:4d} estimation lambda_k: {estimated_lam_k[i]:8f}, "
                  f"bottom: {lambda_min:.4f}, top: {lambda_max:.4f}")

        if return_coherence:
            norm_UK[i] = (x**2).sum(1)

    lambda_k = np.mean(estimated_lam_k)
    if verbose:
        print(f"Final lambda_k: {lambda_k:.4f}")
    cum_coh = torch.mean(norm_UK, 0) if return_coherence else None
    return lambda_k, cum_coh
Example #13
def k_bit_quantize_forward(x: torch.Tensor, k: int):
    c = float(1 << k) - 1.
    x = x * c
    torch.round_(x)
    return x / c
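A quick check of the quantizer above on hypothetical input: with k = 2 there are 2**2 - 1 = 3 steps, so values in [0, 1] are snapped to the grid {0, 1/3, 2/3, 1}. In training this forward is usually paired with a straight-through backward, as in Example #9.

import torch

x = torch.tensor([0.0, 0.20, 0.49, 0.80, 1.0])
print(k_bit_quantize_forward(x, 2))
# tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000])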
Example #14
 def clamp_and_shift(self, tensor: torch.Tensor) -> torch.Tensor:
     tensor = tensor.clamp(min=self.min_repr, max=self.max_repr)
     return torch.round_(tensor << self.nb_digits).to(
         torch.int16
     )  # to not lose the sign yet, and still apply bitwise func
Example #15
    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        # print('target unique is:',torch.unique(target))

        # print('data type is:',(data.shape))#torch.tensor
        # print('target type is:',(target.shape))#torch.tensor
        """
        data shape is: torch.Size([8, 1, 192, 192, 48])
        target shape is: torch.Size([8, 1, 192, 192, 48])
        data type is: torch.float32
        target type is: torch.float32
        """

        if not isinstance(data, torch.Tensor):
            data = torch.from_numpy(data).float()
        if not isinstance(target, torch.Tensor):
            target = torch.from_numpy(target).float()

        # target_list=target[np.newaxis,:]
        # target_list.append(target)
        # new_shape=[[8, 1, 96, 96, 48],[8, 1, 48, 48, 48],[8, 1, 24, 24, 24],[8, 1, 12, 12, 12]]
        # if self.do_supervision:

        # new_shape = [[4, 1, 48, 48, 48],[4, 1, 12, 12, 12]]
        new_shape=[[96,96,48],[48,48,48],[24,24,24],[12,12,12]]
        data = data.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        target_list=list()
        target_list.append(target)

        for i in range(4):
            target_temp = torch.nn.functional.interpolate(target, size=new_shape[i], mode='trilinear',
                                                          align_corners=True)
            # print('target_temp shape is:',target_temp.shape)
            target_list.append(torch.round_(target_temp))

        # target_cpu_np=target.data.cpu().numpy()
        # del target
        # # target.data.cpu()
        # # print('After cpu conversion, target is cuda?',target.is_cuda)
        # # target.numpy()
        # for i in range(2):
        #     data_cpu=resize_segmentation(target_cpu_np,new_shape[i])
        #     data_tensor=torch.from_numpy(data_cpu).float()
        #     # data_gpu=data_tensor.cuda(non_blocking=True)
        #     # print('data_gpu is cuda?',data_gpu.is_cuda)
        #     #data_gpu is cuda? True
        #     target_list.append(data_tensor.cuda(non_blocking=True))
            # print('target_list %d is cuda?'%i,target_list[i+1].is_cuda)
            # if not isinstance(target_list[i+1], torch.Tensor):
            #     target_list[i+1] = torch.from_numpy(target_list[i]).float()
            # print('target_list %d shape is:'%(i+1),target_list[i+1].shape)


        self.optimizer.zero_grad()

        output = self.network(data)
        # print('output is cuda?',output[0].is_cuda)
        # target_layers=output.clone()
        # for i in range(len(output)):
        #     print('output of %d shape is:'%i, output[i].shape)
        #     new_shape=output[i].shape


        """
        output of 0 shape is: torch.Size([8, 3, 192, 192, 48])
        output of 1 shape is: torch.Size([8, 3, 96, 96, 48])
        output of 2 shape is: torch.Size([8, 3, 48, 48, 48])
        output of 3 shape is: torch.Size([8, 3, 24, 24, 24])
        output of 4 shape is: torch.Size([8, 3, 12, 12, 12])
        """
        # print('output shape is:',output.shape)
        # output shape is: [8,3,192,192,48] when batch size is 8 and labels are [0,1,2]
        l = self.loss(output, target_list)
        # print('loss shape is:',l.shape)
        # print('run_online_evaluation is:',run_online_evaluation)
        # print('output[0] shape is:',output[0].shape)
        # print('target shape is:',target.shape)
        # print('target unique is:',torch.unique(target))
        # print('target[1] unique is:', torch.unique(target_list[1]))
        # print('target[2] unique is:', torch.unique(target_list[2]))
        # print('target[3] unique is:', torch.unique(target_list[3]))
        """
        run_online_evaluation is: False
        output[0] shape is: torch.Size([4, 3, 192, 192, 48])
        target shape is: torch.Size([4, 1, 192, 192, 48])
        target unique is: tensor([0., 1., 2.], device='cuda:2')
        target[1] unique is: tensor([0., 1., 2.], device='cuda:2')
        target[2] unique is: tensor([0., 1., 2.], device='cuda:2')
        target[3] unique is: tensor([0., 1., 2.], device='cuda:2')
        """
        if run_online_evaluation:
            self.run_online_evaluation(output[0], target)# if do_supervision, we should set output[0], otherwise we should use output directly.

        if do_backprop:
            if not self.fp16 or amp is None:
                l.backward()
            else:
                with amp.scale_loss(l, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
            self.optimizer.step()

        return l