Example #1
    def test_local_tensor_multi_var_methods(self):
        x = torch.FloatTensor([[1, 2], [2, 3], [5, 6]])
        t, s = torch.max(x, 1)
        assert (t == torch.FloatTensor([2, 3, 6])).float().sum() == 3
        assert (s == torch.LongTensor([1, 1, 1])).float().sum() == 3

        x = torch.FloatTensor([[0, 0], [1, 1]])
        y, z = torch.eig(x, True)
        assert (y == torch.FloatTensor([[1, 0], [0, 0]])).all()
        assert (torch.equal(z == torch.FloatTensor([[0, 0], [1, 0]]), torch.ByteTensor([[1, 0], [1, 0]])))

        x = torch.FloatTensor([[0, 0], [1, 0]])
        y, z = torch.qr(x)
        assert (y == torch.FloatTensor([[0, -1], [-1, 0]])).all()
        assert (z == torch.FloatTensor([[-1, 0], [0, 0]])).all()

        x = torch.arange(1, 6)
        y, z = torch.kthvalue(x, 4)
        assert (y == torch.FloatTensor([4])).all()
        assert (z == torch.LongTensor([3])).all()

        x = torch.zeros(3, 3)
        w, y, z = torch.svd(x)
        assert (w == torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])).all()
        assert (y == torch.FloatTensor([0, 0, 0])).all()
        assert (z == torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])).all()
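For context on the examples that follow: torch.kthvalue returns a (values, indices) pair for the k-th smallest element along a dimension, which is why these snippets unpack two results. A minimal standalone sketch (not taken from any of the repositories above):

import torch

x = torch.arange(1., 6.)               # tensor([1., 2., 3., 4., 5.])
values, indices = torch.kthvalue(x, 4)
print(values)                          # tensor(4.) -- the 4th smallest element
print(indices)                         # tensor(3)  -- its index in x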
Example #2
    def test_remote_tensor_multi_var_methods(self):
        hook = TorchHook(verbose=False)
        local = hook.local_worker
        remote = VirtualWorker(hook, 1)
        local.add_worker(remote)

        x = torch.FloatTensor([[1, 2], [4, 3], [5, 6]])
        x.send(remote)
        y, z = torch.max(x, 1)
        assert torch.equal(y.get(), torch.FloatTensor([2, 4, 6]))
        assert torch.equal(z.get(), torch.LongTensor([1, 0, 1]))

        x = torch.FloatTensor([[0, 0], [1, 0]]).send(remote)
        y, z = torch.qr(x)
        assert (y.get() == torch.FloatTensor([[0, -1], [-1, 0]])).all()
        assert (z.get() == torch.FloatTensor([[-1, 0], [0, 0]])).all()

        x = torch.arange(1, 6).send(remote)
        y, z = torch.kthvalue(x, 4)
        assert (y.get() == torch.FloatTensor([4])).all()
        assert (z.get() == torch.LongTensor([3])).all()

        x = torch.FloatTensor([[0, 0], [1, 1]]).send(remote)
        y, z = torch.eig(x, True)
        assert (y.get() == torch.FloatTensor([[1, 0], [0, 0]])).all()
        assert ((z.get() == torch.FloatTensor([[0, 0], [1, 0]])) == torch.ByteTensor([[1, 0], [1, 0]])).all()

        x = torch.zeros(3, 3).send(remote)
        w, y, z = torch.svd(x)
        assert (w.get() == torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])).all()
        assert (y.get() == torch.FloatTensor([0, 0, 0])).all()
        assert (z.get() == torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])).all()
Example #3
    def test_torch_function_with_multiple_output_on_local_var(self):
        x = Var(torch.FloatTensor([[1, 2], [2, 3], [5, 6]]))
        t, s = torch.max(x, 1)
        assert (t == Var(torch.FloatTensor([2, 3, 6]))).all()
        assert (s == Var(torch.LongTensor([1, 1, 1]))).all()

        x = Var(torch.FloatTensor([[0, 0], [0, 0]]))
        y, z = torch.eig(x, True)
        assert (y == Var(torch.FloatTensor([[0, 0], [0, 0]]))).all()
        assert (z == Var(torch.FloatTensor([[1, 0.], [0, 1]]))).all()


        x = Var(torch.FloatTensor([[0, 0], [1, 0]]))
        y, z = torch.qr(x)
        assert (y == Var(torch.FloatTensor([[0, -1], [-1, 0]]))).all()
        assert (z == Var(torch.FloatTensor([[-1, 0], [0, 0]]))).all()

        x = Var(torch.arange(1, 6))
        y, z = torch.kthvalue(x, 4)
        assert (y == Var(torch.FloatTensor([4]))).all()
        assert (z == Var(torch.LongTensor([3]))).all()

        x = Var(torch.zeros(3, 3))
        w, y, z = torch.svd(x)
        assert (w == Var(torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))).all()
        assert (y == Var(torch.FloatTensor([0, 0, 0]))).all()
        assert (z == Var(torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))).all()
Example #4
    def test_torch_function_with_multiple_output_on_remote_var(self):
        hook = TorchHook(verbose=False)
        me = hook.local_worker
        remote = VirtualWorker(id=2, hook=hook)
        me.add_worker(remote)

        x = Var(torch.FloatTensor([[1, 2], [4, 3], [5, 6]]))
        x.send(remote)
        y, z = torch.max(x, 1)
        y.get()
        assert torch.equal(y, Var(torch.FloatTensor([2, 4, 6])))

        x = Var(torch.FloatTensor([[0, 0], [1, 0]])).send(remote)
        y, z = torch.qr(x)
        assert (y.get() == Var(torch.FloatTensor([[0, -1], [-1, 0]]))).all()
        assert (z.get() == Var(torch.FloatTensor([[-1, 0], [0, 0]]))).all()

        x = Var(torch.arange(1, 6)).send(remote)
        y, z = torch.kthvalue(x, 4)
        assert (y.get() == Var(torch.FloatTensor([4]))).all()
        assert (z.get() == Var(torch.LongTensor([3]))).all()

        x = Var(torch.FloatTensor([[0, 0], [0, 0]]))
        x.send(remote)
        y, z = torch.eig(x, True)
        assert (y.get() == Var(torch.FloatTensor([[0, 0], [0, 0]]))).all()
        assert (z.get() == Var(torch.FloatTensor([[1, 0.], [0, 1]]))).all()


        x = Var(torch.zeros(3, 3)).send(remote)
        w, y, z = torch.svd(x)
        assert (w.get() == Var(torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))).all()
        assert (y.get() == Var(torch.FloatTensor([0, 0, 0]))).all()
        assert (z.get() == Var(torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))).all()
Example #5
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class = boxlist_nms(boxlist_for_class,
                                                self.nms_thresh,
                                                score_field="scores")
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field(
                    "labels",
                    torch.full((num_labels, ),
                               j,
                               dtype=torch.int64,
                               device=scores.device))
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
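Several detection snippets in this section cap the number of detections per image with the same kthvalue trick: thresholding at the (N - n + 1)-th smallest score keeps the top n of N scores. A stripped-down sketch with made-up values (scores and n_keep are illustrative only):

import torch

scores = torch.tensor([0.9, 0.1, 0.75, 0.3, 0.6])
n_keep = 3
# the (N - n + 1)-th smallest score is the n-th largest score
thresh, _ = torch.kthvalue(scores.cpu(), len(scores) - n_keep + 1)
keep = torch.nonzero(scores >= thresh.item()).squeeze(1)
print(keep)   # tensor([0, 2, 4]) -- the three highest-scoring entries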
Example #6
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class,
                                            self.nms,
                                            score_field="scores")
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.detections_per_img + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
Example #7
    def _initialize_prune_threshold(self):
        """Initialize prune threshold h"""
        weighted_mean = 0
        total_params_count = 0
        # initialize h and total_params
        with torch.no_grad():
            for m in list(self.network.modules()):
                if isinstance(m, DynamicSparseBase):
                    # count how many weights are not equal to 0
                    count_p = torch.sum(m.weight != 0).item()
                    # get topk for that level, and weight by num of values
                    non_zero = torch.abs(m.weight[m.weight != 0]).view(-1)
                    val, _ = torch.kthvalue(non_zero, int(len(non_zero) * self.on_perc))
                    weighted_mean += count_p * val.item()
                    total_params_count += count_p

        # get initial value for h based on enforced sparsity
        self.h = weighted_mean / total_params_count
        print(self.h)
Example #8
    def _global_mask(self, sparsity):
        r"""Updates masks of model with scores by sparsity level globally.
        """
        # # Set score for masked parameters to -inf
        # for mask, param in self.masked_parameters:
        #     score = self.scores[id(param)]
        #     score[mask == 0.0] = -np.inf

        # Threshold scores
        global_scores = torch.cat(
            [torch.flatten(v) for v in self.scores.values()])
        k = int((1.0 - sparsity) * global_scores.numel())
        if not k < 1:
            threshold, _ = torch.kthvalue(global_scores, k)
            for mask, param in self.masked_parameters:
                score = self.scores[id(param)]
                zero = torch.tensor([0.]).to(mask.device)
                one = torch.tensor([1.]).to(mask.device)
                mask.copy_(torch.where(score <= threshold, zero, one))
Example #9
def _load_class_freq(cfg):
    freq_weight = None
    if cfg.MODEL.ROI_BOX_HEAD.USE_EQL_LOSS or cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS:
        cat_info = json.load(open(cfg.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH, 'r'))
        cat_info = torch.tensor([
            c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])
        ],
                                device=torch.device(cfg.MODEL.DEVICE))
        if cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS and \
            cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT > 0.:
            freq_weight = \
                cat_info.float() ** cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT
        else:
            thresh, _ = torch.kthvalue(
                cat_info,
                len(cat_info) - cfg.MODEL.ROI_BOX_HEAD.EQL_FREQ_CAT + 1)
            freq_weight = (cat_info < thresh.item()).float()

    return freq_weight
Example #10
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            # multiclass nms
            result = boxlist_ml_nms(boxlists[i], self.nms_thresh)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
Example #11
    def forward(self):
        a = torch.tensor(0)
        b = torch.tensor(1)
        return len(
            torch.allclose(a, b),
            torch.argsort(a),
            torch.eq(a, b),
            torch.eq(a, 1),
            torch.equal(a, b),
            torch.ge(a, b),
            torch.ge(a, 1),
            torch.greater_equal(a, b),
            torch.greater_equal(a, 1),
            torch.gt(a, b),
            torch.gt(a, 1),
            torch.greater(a, b),
            torch.isclose(a, b),
            torch.isfinite(a),
            torch.isin(a, b),
            torch.isinf(a),
            torch.isposinf(a),
            torch.isneginf(a),
            torch.isnan(a),
            torch.isreal(a),
            torch.kthvalue(a, 1),
            torch.le(a, b),
            torch.le(a, 1),
            torch.less_equal(a, b),
            torch.lt(a, b),
            torch.lt(a, 1),
            torch.less(a, b),
            torch.maximum(a, b),
            torch.minimum(a, b),
            torch.fmax(a, b),
            torch.fmin(a, b),
            torch.ne(a, b),
            torch.ne(a, 1),
            torch.not_equal(a, b),
            torch.sort(a),
            torch.topk(a, 1),
            torch.msort(a),
        )
Example #12
def _pvalue(data: torch.Tensor, ratio: float = 0.25, **kwargs) -> float:
    """
    Finds the pth largest value in the tensor, where p = ratio x len(data).

    Parameters
    ----------
    data: torch.Tensor
        Pytorch tensor against which the function is evaluated.

    ratio: float, optional
        Fraction of the number of elements used to pick the order
        statistic, i.e. p = ratio * len(data).

    Returns
    -------
    float
        The pth largest value in the tensor.
    """
    cut = max(1, int(data.numel() * (1 - ratio)))
    return torch.kthvalue(data, cut)[0].item()
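A quick sanity check of the mapping above, using hypothetical data (100 values, default ratio of 0.25): the cut index lands on the 75th-smallest element, i.e. the 25th-largest.

import torch

data = torch.arange(1., 101.)    # 100 values: 1.0 .. 100.0
# cut = max(1, int(100 * 0.75)) = 75, so the 75th-smallest (25th-largest) value is returned
print(_pvalue(data, ratio=0.25))  # 75.0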
Example #13
    def step(self, x, g):
        """
        """
        grad_view = g.view(g.shape[0], -1)
        abs_grad = ch.abs(grad_view)
        sign = ch.sign(grad_view)

        q_range = self.kwargs['percentile_range']
        q = q_range[0] + ch.rand(1)[0] * (q_range[1] - q_range[0])
        k = int(q * abs_grad.shape[1])

        percentile_value, _ = ch.kthvalue(abs_grad, k, keepdim=True)
        percentile_value = percentile_value.repeat(1, grad_view.shape[1])
        tied_for_max = ch.ge(abs_grad, percentile_value).int().float()
        num_ties = ch.sum(tied_for_max, dim=1, keepdim=True)

        e = (sign * tied_for_max) / num_ties
        e = e.view(g.shape)

        return x + e * self.step_size
Example #14
    def get_prune_idx(self, i_node, pruning_ratio=0.0):
        weights = i_node['layer'].weight.clone()

        if pruning_ratio <= 0: return []
        n = len(weights)

        #lN_norm = torch.norm( weights.view(n, -1), p=self.p, dim=1 )
        if self.p == 1:
            lN_norm = torch.sum(torch.abs(weights), dim=(1, 2, 3))
        else:
            lN_norm = torch.sum(torch.sqrt(weights**2), dim=(1, 2, 3))

        n_to_prune = int(pruning_ratio * n)
        if n_to_prune == 0:
            return []
        threshold = torch.kthvalue(lN_norm, k=n_to_prune).values

        indices = torch.nonzero(lN_norm <= threshold).view(-1).tolist()

        return indices
Example #15
def _calc_thresh(data: torch.Tensor,
                 method: str = 'none',
                 current_max: float = -1,
                 factor: float = -1,
                 percentile: float = .875) -> float:
    """
    Calculates the clipping threshold by looking at the layer norms
    of each example. Three methods are supported: static threshold,
    threshold calculated based on mean and variance of the norms, and
    threshold calculated based on percentile values of the norms.
    """
    method = method.lower()
    if method == 'none':
        return current_max
    elif method == 'mean_var':
        return max(data.min().item(),
                   data.mean().item() + factor * data.std().item() + 1e-8)
    elif method == 'pvalue':
        cut = max(1, int(data.numel() * (1 - percentile)))
        return torch.kthvalue(data, cut)[0].item()
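A small usage sketch of the three modes, assuming data is a 1-D tensor of per-example norms (the norm values below are made up):

import torch

norms = torch.tensor([0.5, 1.0, 1.5, 2.0, 4.0])
print(_calc_thresh(norms, method='none', current_max=1.2))     # static threshold: 1.2
print(_calc_thresh(norms, method='mean_var', factor=1.0))      # mean + 1 * std (at least the minimum norm)
print(_calc_thresh(norms, method='pvalue', percentile=0.875))  # cut = max(1, int(5 * 0.125)) = 1 -> smallest norm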
Example #16
    def on_batch_end(self, state):
        if not state.get('visdom_will_log', False):
            return

        x = state['batch_gpu'][0]
        grad_img = x.grad.abs().sum(1, keepdim=True)
        b, c, h, w = grad_img.shape
        gi_flat = grad_img.view(b, c, -1)
        cl = torch.kthvalue(gi_flat, int(grad_img[0].numel() * 0.99),
                            dim=-1)[0]
        grad_img = torch.min(grad_img, cl.unsqueeze(-1).unsqueeze(-1))
        m = gi_flat.min(dim=-1).values.unsqueeze(-1).unsqueeze(-1)
        M = gi_flat.max(dim=-1).values.unsqueeze(-1).unsqueeze(-1)
        grad_img = (grad_img - m) / (M - m)
        x = x.detach()
        xm = x.min()
        xM = x.max()
        x = (x - xm) / (xM - xm)
        img = x * grad_img + 0.5 * (1 - grad_img)
        state['metrics']['feature_vis'] = img
Example #17
    def _get_seg_array(self, predicted, windows, img):
        _, h, w = img.shape
        n_predictions, _, _ = predicted.shape
        seg = torch.full((h, w), self.N_CLASSES).float()
        pred_stack = torch.full((n_predictions, h, w),
                                self.N_CLASSES).float().cuda()
        for i, window in enumerate(windows):
            indice = (slice(i,
                            i + 1), window.indices()[1], window.indices()[2])
            pred_stack[indice] = predicted[i, :, :]

        if n_predictions > 1:
            # If only a single prediction is made for a pixel, take that
            # prediction
            pred_stack = pred_stack.cpu()
            twothvalue, _ = torch.kthvalue(pred_stack, 2, dim=0)
            # If the 2nd smallest value is self.N_CLASSES, then only a single
            # prediction was made for that pixel
            # So we take the single prediction for that pixel
            single_predicted, _ = torch.min(pred_stack, dim=0)
            seg[twothvalue == self.N_CLASSES] = single_predicted[
                twothvalue == self.N_CLASSES]
            pred_stack = pred_stack.numpy()
            seg_array = seg.numpy()
            twothvalue = twothvalue.numpy()
            # For the remaining pixels, i.e. those with more than one
            # prediction, take the majority vote among those predictions
            pred_stack[pred_stack == self.N_CLASSES] = np.nan
            # torch.mode() not working
            majority_vote, _ = mode(pred_stack, axis=0, nan_policy="omit")
            majority_vote = np.squeeze(majority_vote, axis=0)
            seg_array[twothvalue != self.N_CLASSES] = majority_vote[
                twothvalue != self.N_CLASSES]
        else:
            # All predictions are single predictions
            seg = torch.squeeze(pred_stack, dim=0)
            seg_array = seg.cpu().numpy()

        seg_array = seg_array.astype(np.uint8)

        return seg_array
Example #18
    def to_quantiles(self, y_pred: torch.Tensor, quantiles: List[float] = None) -> torch.Tensor:
        """
        Convert network prediction into a quantile prediction.

        Args:
            y_pred: prediction output of network (with ``output_transformation = None``)
            quantiles (List[float], optional): quantiles for probability range. Defaults to quantiles
                as defined in the class initialization.

        Returns:
            torch.Tensor: prediction quantiles (last dimension)
        """
        if quantiles is None:
            quantiles = self.quantiles

        samples = y_pred.size(-1)
        quantiles = torch.stack(
            [torch.kthvalue(y_pred, int(samples * q), dim=-1)[0] if samples > 1 else y_pred[..., 0] for q in quantiles],
            dim=-1,
        )
        return quantiles
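Each kthvalue call above reads one order statistic off the sample dimension; a toy sketch with hypothetical Monte-Carlo samples (shapes chosen arbitrarily):

import torch

y_pred = torch.randn(2, 1000)          # batch of 2, 1000 samples each
qs = [0.1, 0.5, 0.9]
out = torch.stack(
    [torch.kthvalue(y_pred, int(1000 * q), dim=-1)[0] for q in qs],
    dim=-1,
)
print(out.shape)                       # torch.Size([2, 3]) -- one column per quantile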
Example #19
    def select_over_all_levels(self, instances, image_sizes):
        results = []
        for instance in instances:
            # multiclass nms
            keep = batched_nms(instance.proposal_boxes.tensor, instance.objectness_logits, instance.labels.float(), self.nms_thresh)
            instance = instance[keep]
            cls_scores = instance.objectness_logits
            number_of_detections = len(cls_scores)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                instance = instance[keep]
            instance.remove("labels")
            results.append(instance)
        return results
Example #20
    def forward(self,
                emb_support,
                labels_support,
                train_way,
                train_shot,
                prune_ratio=0.0):
        n_episode, n_support, d = emb_support.size()
        # Train the SVM head
        logit_support, wnorm = self.cls_head(emb_support, emb_support,
                                             labels_support, train_way,
                                             train_shot)

        # Compute the gradient of `wnorm` w.r.t. `emb_support`
        wgrad = computeGradientPenalty(wnorm, emb_support)
        # wgrad -> (tasks_per_batch, n_support, d)
        wgrad_abs = wgrad.abs()
        # Normalize gradient
        with torch.no_grad():
            # Prune the gradient according to the magnitude
            if prune_ratio > 0:
                assert prune_ratio < 1.0
                num_pruned = int(d * prune_ratio)
                threshold = torch.kthvalue(wgrad_abs,
                                           k=num_pruned,
                                           dim=-1,
                                           keepdim=True)[0].detach()
                wgrad_abs[wgrad_abs <= threshold] = 0.0
            wgrad_abs_sum = torch.sum(wgrad_abs, dim=(1, 2), keepdim=True)
        G = wgrad_abs / wgrad_abs_sum * d
        # print(labels_support)
        # print(G)
        # np.save('./emb_support.npy', emb_support.detach().cpu().numpy())
        # np.save('./G.npy', G.detach().cpu().numpy())
        # np.save('./G_labels.npy', labels_support.detach().cpu().numpy())

        # Compute task features
        emb_task = (emb_support * G).sum(dim=1, keepdim=True)
        # emb_task -> (tasks_per_batch, 1, d)

        return emb_task, wgrad
Example #21
def prune_weight(args, param, device, percent):
    # to work with admm, we calculate percentile based on all elements instead of nonzero elements.
    weight = param.detach()
    if args.struct:
        mask = torch.zeros_like(weight, dtype=torch.bool).to(device)
        rram = weight.view(weight.shape[0], -1)
        rram_mask = mask.view(mask.shape[0], -1)
        tmp = torch.zeros(((rram.shape[0] - 1) // args.ou_w + 1,
                           (rram.shape[1] - 1) // args.ou_h + 1)).to(device)
        norm_cuda.norm(rram, tmp, args.ou_w, args.ou_h)
        #for i in range(tmp.shape[0]):
        #    for j in range(tmp.shape[1]):
        #        tmp[i, j] = rram[i * args.ou_h : (i + 1) * args.ou_h, j * args.ou_w : (j + 1) * args.ou_w].norm()
        pcen, _ = tmp.view(-1).kthvalue(
            round(percent * tmp.shape[0] * tmp.shape[1]))
        upon_threshold = tmp >= pcen
        res1 = rram.shape[0] % args.ou_w
        res2 = rram.shape[1] % args.ou_h
        for i in range(args.ou_w):
            for j in range(args.ou_h):
                if i < res1 or res1 == 0:
                    rram_mask.data[i::args.ou_w, j::args.ou_h] = (
                        upon_threshold if j < res2 or res2 == 0
                        else upon_threshold[:, :-1])
                else:
                    rram_mask.data[i::args.ou_w, j::args.ou_h] = (
                        upon_threshold[:-1, :] if j < res2 or res2 == 0
                        else upon_threshold[:-1, :-1])
        #under_threshold = scale(tmp < pcen, rram_proj.shape, args.ou_h, args.ou_w)
        #rram_proj.data[under_threshold] = 0
    else:
        pcen, _ = torch.kthvalue(abs(weight.view(-1)),
                                 round(percent * weight.view(-1).shape[0]))
        mask = (abs(weight) >= pcen).to(device)

    return mask
Example #22
def select_over_all_levels(boxlists):
    num_images = len(boxlists)
    results = []

    for i in range(num_images):
        scores = boxlists[i].get_field("scores")
        labels = boxlists[i].get_field("labels")
        boxes = boxlists[i].bbox
        boxlist = boxlists[i]
        result = []

        for j in range(1, CLASS):
            inds = (labels == j).nonzero().view(-1)
            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class,
                                            NMS_THRESH,
                                            score_field="scores")
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ),
                           j,
                           dtype=torch.int64,
                           device=scores.device))
            result.append(boxlist_for_class)
        result = cat_boxlist(result)
        number_of_detections = len(result)

        if number_of_detections > FPN_POS_NMS_TOP_K > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - FPN_POS_NMS_TOP_K + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        results.append(result)
    return results
Example #23
def get_global_score_threshold(model, prune_rate):
    all_scores = None
    if prune_rate == 0:
        # YHT modification: since abs was removed, 0 no longer makes sense
        return -10000
    # YHT modification
    for n, m in model.named_modules():
        if hasattr(m, "scores") and m.prune_rate != 0:
            shape = m.scores.shape
            if all_scores is None:
                all_scores = tensor([]).to(m.scores.device)
            if parser_args.rank_method == "absolute":
                if parser_args.pmode == "normal":
                    all_scores = torch.cat(
                        [all_scores, m.scores.abs().flatten()])
                elif parser_args.pmode == "channel":
                    channel_size = shape[1] * shape[2] * shape[3]
                    all_scores = torch.cat([
                        all_scores,
                        m.scores.abs().sum((1, 2, 3)).flatten() / channel_size
                    ])
            elif parser_args.rank_method == "relevant":
                assert parser_args.pmode == "channel", "only channel pmode could use relevant method!"
                channel_size = shape[1] * shape[2] * shape[3]
                # noabs / abs of init
                if parser_args.whether_abs == "abs":
                    attach = torch.div(m.scores.abs().sum((1, 2, 3)).flatten(),
                                       m.sumofabsofinit.cuda())
                else:
                    attach = torch.div(
                        m.scores.sum((1, 2, 3)).flatten(),
                        m.sumofabsofinit.cuda())
                all_scores = torch.cat([all_scores, attach])
            else:
                raise ValueError(
                    "wrong rank_method! Only absolute and relevant are supported."
                )
    return torch.kthvalue(all_scores,
                          int(prune_rate * all_scores.numel())).values.item()
Example #24
def batchwise_recall(lang_output, visn_output, lang_mask, recalls=(1, )):
    """
    Calculate the accuracy of contextual word retrieval, average by batch.
    :param lang_output: [batch_size, max_len, hid_dim]
    :param visn_output: [batch_size, hid_dim]
    :param lang_mask: Int Tensor [batch_size, max_len], 1 for tokens, 0 for paddings.
    :param recalls: a list of recall cut-offs (k values) to be evaluated.
    :return:
    """
    batch_size, lang_len, dim = lang_output.shape
    assert batch_size % 2 == 0 and batch_size == visn_output.shape[0]

    # Expand the visn_output to match each word
    visn_output = visn_output.unsqueeze(1)  # [b, 1, dim]

    # The score of positive pairs
    positive_score = (lang_output * visn_output).sum(-1)  # [b, max_len]

    # The score of negative pairs. Note that the diagonal is actually the positive score,
    # but it would be zero-graded in calculating the loss below.
    negative_scores = (lang_output.reshape(batch_size, 1, lang_len, dim) *
                       visn_output.reshape(1, batch_size, 1, dim)).sum(
                           -1)  # [b(lang), b(visn), max_len]
    # negative_scores = torch.einsum('ikd,jd->ijk', lang_output, visn_output)

    result = {}
    for recall in recalls:
        kthscore, kthidx = torch.kthvalue(negative_scores,
                                          batch_size - recall,
                                          dim=1)  # [b, max_len]
        # print(kthscore.shape) print(positive_score.shape)
        correct = (positive_score >= kthscore)  # [b, max_len]
        bool_lang_mask = lang_mask.type(correct.dtype)
        correct = correct * bool_lang_mask
        correct_num = correct.sum()
        # print(correct_num)
        # print(bool_lang_mask.sum())
        result[recall] = (correct_num * 1. / bool_lang_mask.sum()).item()

    return result
Example #25
    def _get_inverse_hebbian_mask(self, weight, corr, active_synapses, prune_perc):

        num_synapses = np.prod(weight.shape)
        total_active = torch.sum(active_synapses).item()

        corr_active = corr[active_synapses]
        # decide which weights to remove based on correlation
        kth = int((1 - prune_perc) * total_active)
        # if kth = 0, keep all the synapses
        if kth == 0:
            hebbian_mask = torch.zeros(weight.shape).bool()
        # else if kth greater than shape, remove all synapses
        elif kth >= num_synapses:
            hebbian_mask = active_synapses
        # if no edge cases
        else:
            keep_threshold, _ = torch.kthvalue(corr_active, kth)
            # keep mask are ones above threshold and currently active
            hebbian_mask = (corr <= keep_threshold) & active_synapses
        hebbian_mask = hebbian_mask.to(self.device)

        return hebbian_mask
Example #26
    def compute_pattern(self, weights):
        """Computes the updated residual pattern given weights.
        args:
            weights: np.array with shape (N, m, m)
        """
        N = weights.shape[0]

        # element wise product for pattern scaling
        pattern = self.default_pattern * weights

        # just return thresholded values
        if self.threshold:
            return pattern.round()

        # find top k scoring elements
        flattened_weights = pattern.reshape(N, self.m * self.m)
        kth_elem, _ = torch.kthvalue(flattened_weights,
                                     self.m * self.m - self.max_points)
        for i in range(N):
            weights[i] = torch.gt(weights[i], kth_elem[i])

        return weights.float()
Example #27
def merge_result_from_multi_scales(boxlists, nms_type='nms', vote_thresh=0.65):
    num_images = len(boxlists)
    results = []
    for i in range(num_images):
        scores = boxlists[i].get_field("scores")
        labels = boxlists[i].get_field("labels")
        boxes = boxlists[i].bbox
        boxlist = boxlists[i]
        result = []
        # skip the background
        for j in range(1, cfg.MODEL.RETINANET.NUM_CLASSES):
            inds = (labels == j).nonzero().view(-1)

            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class, cfg.MODEL.FCOS.NMS_TH, score_field="scores",
                                            nms_type=nms_type, vote_thresh=vote_thresh)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field("labels", torch.full((num_labels,), j, dtype=torch.int64, device=scores.device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > cfg.MODEL.FCOS.PRE_NMS_TOP_N > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - cfg.MODEL.FCOS.PRE_NMS_TOP_N + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        results.append(result)
    return results
Example #28
    def navigation(self, current_state):
        # generate sensor data
        array_laser = utils.remapping_laser_data(current_state.laserScan)
        sensor = Variable(
            torch.FloatTensor(np.reshape(array_laser, (1, self.sensor_dim))))

        # generate target data
        target_polar = utils.target_transform(current_state)
        target = Variable(
            torch.FloatTensor(np.reshape(target_polar, (1, self.target_dim))))

        # generate action
        target_driven_action = self.differential_driver.run(
            x=current_state.desired_x, y=current_state.desired_y)
        collision_avoidance_action = self.actor_ca(
            sensor=sensor, target=target).cpu().data.numpy()
        predict_state = self.evaluation_net.predict_state(
            array_laser.reshape(1, -1))

        # generate action based on hmm_state
        # the lower the state is, the more dangerous the situation is
        # final_action = target_driven_action[0] + (self.hmm_state-predict_state)/float(self.hmm_state)*collision_avoidance_action[0]

        # Collision avoidance ratio
        ratio = min(float(torch.kthvalue(sensor, 1)[0]) / (-3.5), 1)
        #        print ratio
        final_action = []
        for i in range(2):
            final_action.append((1.0 - ratio) * target_driven_action[0][i] +
                                ratio * collision_avoidance_action[0][i])
        # print final_action

        # constrain the action
        final_action[0] = utils.constrain_actions(final_action[0], 1)
        final_action[1] = utils.constrain_actions(final_action[1], 1)
        # print final_action

        return final_action[0], final_action[1]
Example #29
        def cal_metrics(the_test, the_orig, prefix='gn'):
            the_as = torch.cat([o['anomaly_score'] for o in the_test])
            orig_as = torch.cat([o['anomaly_score'] for o in the_orig])

            # 95% TPR: k is the kth-smallest element
            # I want 95% of examples to be below this number
            thresh = torch.kthvalue(orig_as,
                                    k=int(np.floor(0.95 *
                                                   len(orig_as)))).values
            fpr = (the_as <= thresh).float().mean().item()
            tqdm_dict[f'{prefix}_ood_fpr'] = fpr

            cat_as = torch.cat([the_as, orig_as], dim=0)
            ys = torch.cat(
                [torch.ones(len(the_as)),
                 torch.zeros(len(orig_as))], dim=0)

            tqdm_dict[f'{prefix}_ood_auc'] = roc_auc_score(
                ys.cpu().numpy(),
                cat_as.cpu().numpy())
            tqdm_dict[f'{prefix}_ood_aupr'] = average_precision_score(
                ys.cpu().numpy(),
                cat_as.cpu().numpy())
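A toy version of the threshold computation above, with made-up anomaly scores: the threshold is the value below which 95% of the in-distribution ('orig') scores fall, and the reported FPR is the fraction of OOD ('test') scores that still land at or below it.

import numpy as np
import torch

orig_as = torch.linspace(0., 1., steps=100)    # hypothetical in-distribution scores
the_as = torch.linspace(0.8, 2., steps=100)    # hypothetical OOD scores

thresh = torch.kthvalue(orig_as, k=int(np.floor(0.95 * len(orig_as)))).values
fpr = (the_as <= thresh).float().mean().item()
print(thresh.item(), fpr)                      # threshold near 0.95, FPR is the overlap fraction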
Example #30
    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                            img_metas):
        num_imgs = len(img_metas)
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)

        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)
        # record the `beta_topk`-th smallest target
        # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
        # and bbox_weights, respectively
        pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
        num_pos = len(pos_inds)
        cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
        beta_topk = min(self.beta_topk * num_imgs, num_pos)
        cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
        self.beta_history.append(cur_target)
        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                        bbox_results['bbox_pred'], rois,
                                        *bbox_targets)

        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results
Example #31
    def select_over_scales(self, boxlists):
        results = []

        for boxlist in boxlists:
            scores = boxlist.fields['scores']
            labels = boxlist.fields['labels']
            box = boxlist.box

            result = []

            for j in range(1, self.n_class):
                id = (labels == j).nonzero().view(-1)
                score_j = scores[id]
                box_j = box[id, :].view(-1, 4)
                box_by_class = BoxList(box_j, boxlist.size, mode='xyxy')
                box_by_class.fields['scores'] = score_j
                box_by_class = boxlist_nms(box_by_class, score_j,
                                           self.nms_threshold)
                n_label = len(box_by_class)
                box_by_class.fields['labels'] = torch.full(
                    (n_label, ), j, dtype=torch.int64, device=scores.device)
                result.append(box_by_class)

            result = cat_boxlist(result)
            n_detection = len(result)

            if n_detection > self.post_top_n > 0:
                scores = result.fields['scores']
                img_threshold, _ = torch.kthvalue(
                    scores.cpu(), n_detection - self.post_top_n + 1)
                keep = scores >= img_threshold.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]

            results.append(result)

        return results
Example #32
    def filter_result(self, bbox_res, proposal, heat_scores):
        cls_scores = proposal.get_field("scores")
        cls_weight, heat_weight = self.score_weights
        scores = cls_weight * cls_scores + heat_weight * heat_scores
        device = scores.device
        labels = proposal.get_field("labels")
        result = []
        number_of_detections = 0
        for j in range(1, self.num_classes):
            inds = (labels == j).nonzero().view(-1)
            boxes_j = bbox_res[inds, :]
            if boxes_j.size()[0] == 0:
                continue
            scores_j = scores[inds]
            quadboxes_for_class = QuadBoxes(boxes_j, proposal.size)
            quadboxes_for_class.add_field("scores", scores_j)
            quadboxes_for_class = quadboxes_nms(quadboxes_for_class,
                                                self.nms_th)
            num_labels = len(quadboxes_for_class)
            number_of_detections += num_labels
            quadboxes_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(quadboxes_for_class)

        result = cat_quadboxes(result)

        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.detections_per_img + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
Example #33
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            # multiclass nms
            if self.nms_type == 'nms' or self.nms_type == 'default':
                result = ml_nms(boxlists[i], self.nms_thresh)
            elif self.nms_type == 'diou_nms':
                result = diou_nms(boxlists[i], self.nms_thresh, beta1=0.9)
            else:
                raise NotImplementedError(
                    "nms type '{}' is not implemented".format(self.nms_type))
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.scores
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
Example #34
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            # multiclass nms
            bbox, scores = boxlists[i][:, :4], boxlists[i][:, 4]
            result = nms(bbox, scores, self.nms_thresh)
            result_score = scores[result]
            result_bbox = bbox[result]
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result_score
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result_bbox = result_bbox[keep]
                result_score = result_score[keep]
            results.append(torch.cat([result_bbox, result_score.unsqueeze(1)], 1))
        return results