Example #1
    def loss_fn(self, feature, targets):
        # feature: [nb, feat_depth, ng, ng]
        # targets: [nb, no, 4(x/y/w/h)]

        proposals = self(feature)
        gt = targets.to(self.device)
        p = proposals.to(self.device)
        nb, na, ng, no = p.size(0), p.size(1), p.size(2), gt.size(1)
        anchors = anchor_bbox(self.anchor,
                              ng)[None, :, :, :, :].repeat(nb, 1, 1, 1,
                                                           1).to(self.device)
        anchors_obj = anchors.view(nb, na, ng, ng, 1,
                                   4).repeat(1, 1, 1, 1, no, 1)
        gt_roi = gt.view(nb, 1, 1, 1, no, 4).repeat(1, na, ng, ng, 1, 1)
        iou_obj = bbox_iou(anchors_obj, gt_roi)
        iou_idx = torch.argmax(iou_obj, dim=-1, keepdim=True)
        iou_idx_t = iou_idx.view(nb, na, ng, ng, 1, 1).repeat(1, 1, 1, 1, 1, 4)
        gt_active = torch.gather(gt_roi, -2, iou_idx_t).view(nb, na, ng, ng, 4)
        iou_active = torch.gather(iou_obj, -1, iou_idx).view(nb, na, ng, ng)
        valid = valid_bbox(anchors)
        mask_obj = torch.bitwise_and(iou_active > 0.5, valid)
        mask_no_obj = torch.bitwise_and(
            torch.bitwise_and(iou_active > 0, iou_active < 0.1), valid)
        loss_reg = torch.sum((p[..., 0:4] - gt_active)**2, dim=-1)
        loss_obj = (p[..., 4] - 1)**2
        loss_no_obj = (p[..., 4] - 0)**2
        loss_1 = torch.sum(loss_reg * mask_obj)
        loss_2 = torch.sum(loss_obj * mask_obj)
        loss_3 = torch.sum(loss_no_obj * mask_no_obj)
        loss = loss_1 + loss_2 + loss_3
        return loss
Example #2
def valid_bbox(bbox):
    x, y, w, h = bbox[..., 0], bbox[..., 1], bbox[..., 2], bbox[..., 3]
    x1, x2, y1, y2 = x - w / 2, x + w / 2, y - h / 2, y + h / 2
    valid_x = torch.bitwise_and(torch.bitwise_and(0 <= x1, x1 <= x2), x2 <= 1)
    valid_y = torch.bitwise_and(torch.bitwise_and(0 <= y1, y1 <= y2), y2 <= 1)
    valid = torch.bitwise_and(valid_x, valid_y)
    return valid
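A minimal usage sketch for valid_bbox (hypothetical tensors; boxes are assumed to be normalized x/y/w/h coordinates in [0, 1]):

import torch

# two boxes: the first fits inside the unit square, the second spills past the right edge
boxes = torch.tensor([[0.5, 0.5, 0.4, 0.4],
                      [0.9, 0.5, 0.4, 0.4]])
print(valid_bbox(boxes))  # tensor([ True, False])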
Example #3
def rgb2hsl_torch(rgb: torch.Tensor) -> torch.Tensor:
    cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)
    cmin = torch.min(rgb, dim=1, keepdim=True)[0]
    delta = cmax - cmin
    hsl_h = torch.empty_like(rgb[:, 0:1, :, :])
    cmax_idx[delta == 0] = 3
    hsl_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) %
                            6)[cmax_idx == 0]
    hsl_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) +
                            2)[cmax_idx == 1]
    hsl_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) +
                            4)[cmax_idx == 2]
    hsl_h[cmax_idx == 3] = 0.
    hsl_h /= 6.

    hsl_l = (cmax + cmin) / 2.
    hsl_s = torch.empty_like(hsl_h)
    hsl_s[hsl_l == 0] = 0
    hsl_s[hsl_l == 1] = 0
    hsl_l_ma = torch.bitwise_and(hsl_l > 0, hsl_l < 1)
    hsl_l_s0_5 = torch.bitwise_and(hsl_l_ma, hsl_l <= 0.5)
    hsl_l_l0_5 = torch.bitwise_and(hsl_l_ma, hsl_l > 0.5)
    hsl_s[hsl_l_s0_5] = ((cmax - cmin) / (hsl_l * 2.))[hsl_l_s0_5]
    hsl_s[hsl_l_l0_5] = ((cmax - cmin) / (-hsl_l * 2. + 2.))[hsl_l_l0_5]
    return torch.cat([hsl_h, hsl_s, hsl_l], dim=1)
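A minimal call sketch for rgb2hsl_torch, assuming an NCHW float tensor with RGB values in [0, 1]:

import torch

rgb = torch.rand(2, 3, 64, 64)   # batch of 2 RGB images with values in [0, 1]
hsl = rgb2hsl_torch(rgb)         # same shape; channels are now H, S, L, each in [0, 1]
print(hsl.shape)                 # torch.Size([2, 3, 64, 64])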
Example #4
 def criterion(self, image):
     e = self.formulate_ellipse()
     fn_loss = torch.mean(image * self.sigmoid(e, c=20))
     fp_loss = torch.mean((1 - image) * F.relu(1 - e))
     e = e.detach()
     fp = len(e[torch.bitwise_and(image == 0, e < 1)])
     fn = len(e[torch.bitwise_and(image == 1, e > 1)])
     return fp_loss + fn_loss * self.fn_weight, {"fn": fn, "fp": fp}
Example #5
def propose_boxes_rcnn(cls_deltas, bbox_deltas, propose, targets, training=True, device="cpu",
                       thred_cls=0.3, thred_nms=0.4, top_N_propose=64):
    if not training:
        cls_deltas = torch.softmax(cls_deltas, -1)
        # cls_deltas = torch.sigmoid(cls_deltas)  # if using sigmoid_focal_loss_jit
        scores, labels = cls_deltas.max(1)
        keep = torch.bitwise_and(scores >= thred_cls, labels > 0)
        scores = scores[keep]
        labels = labels[keep]
        bbox_reg_pro = bbox_deltas[keep, labels]
        propose = propose[keep]
    else:
        propose, indexs, gt_indexs = propose
        keep = indexs > 0
        labels = (targets[0]["labels"][gt_indexs] * indexs).long()[keep]
        gt_indexs = gt_indexs[keep]
        propose = propose[keep]
        scores = torch.softmax(cls_deltas, -1)[keep, labels]
        bbox_reg_pro = bbox_deltas[keep, labels]

    input_h, input_w, scale = targets[0]["resize"]
    tmp = torch.tensor([input_w, input_h, input_w, input_h], device=device)
    anchors_xywh = x1y1x2y22xywh(propose / tmp)
    x = bbox_reg_pro[:, 0] * anchors_xywh[:, 2] + anchors_xywh[:, 0]
    y = bbox_reg_pro[:, 1] * anchors_xywh[:, 3] + anchors_xywh[:, 1]
    w = torch.exp(bbox_reg_pro[:, 2]) * anchors_xywh[:, 2]
    h = torch.exp(bbox_reg_pro[:, 3]) * anchors_xywh[:, 3]
    propose = torch.stack((x, y, w, h), -1) * torch.tensor([input_w, input_h, input_w, input_h],
                                                           device=device)
    # to x1y1x2y2
    propose = xywh2x1y1x2y2(propose)
    # clip to img
    propose[:, [0, 2]] = propose[:, [0, 2]].clamp(0, input_w)
    propose[:, [1, 3]] = propose[:, [1, 3]].clamp(0, input_h)

    keep = nms(propose, scores, labels, thred_nms)
    # propose = (propose[keep]/scale).int()  # rescale to the original input image
    propose = propose[keep]  # /scale  # rescale to the original input image
    scores = scores[keep]
    labels = labels[keep]
    if training:
        gt_indexs = gt_indexs[keep]
        propose = propose[:top_N_propose]
        scores = scores[:top_N_propose]
        labels = labels[:top_N_propose]
        gt_indexs = gt_indexs[:top_N_propose]

    keep = torch.bitwise_and((propose[:, 2] - propose[:, 0]) > 1, (propose[:, 3] - propose[:, 1]) > 1)
    propose = propose[keep]
    scores = scores[keep]
    labels = labels[keep]
    if training: gt_indexs = gt_indexs[keep]

    if training:
        return {"boxes": propose, "gt_indexs": gt_indexs}

    return [{"boxes": propose, "scores": scores, "labels": labels}]
Example #6
 def test_bitwise_and_fallback(self):
     device = self.get_device()
     # use randint because bitwise_and is not supported on floats
     cpu_a = torch.randint(200, (3, 4))
     cpu_b = torch.randint(200, (3, 4))
     ort_a = cpu_a.to(device)
     ort_b = cpu_b.to(device)
     cpu_result = torch.bitwise_and(cpu_a, cpu_b)
     ort_result = torch.bitwise_and(ort_a, ort_b)
     assert torch.equal(cpu_result, ort_result.cpu())
Example #7
def mux_p_cuda(x, y, rands):
    #global mask 
    #mask = torch.cuda.ByteTensor([2 ** x for x in range(8)])

    xs = x.shape[0]
    ys = y.shape[0]
    assert xs == ys
    #rands = torch.cuda.FloatTensor(xs << 3).uniform_() > p
    #rands = torch.sum(rands.view(xs, 8) * mask, 1)
    top = torch.bitwise_and(x, rands)
    bot = torch.bitwise_and(y, torch.bitwise_not(rands))
    return torch.bitwise_or(top, bot)
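A minimal sketch of feeding mux_p_cuda with packed 8-bit bitstreams (hypothetical data; per bit, the output takes x where rands is 1 and y where it is 0; assumes a CUDA device is available):

import torch

x = torch.randint(0, 256, (1024,), dtype=torch.uint8, device="cuda")
y = torch.randint(0, 256, (1024,), dtype=torch.uint8, device="cuda")
rands = torch.randint(0, 256, (1024,), dtype=torch.uint8, device="cuda")  # per-bit select mask
out = mux_p_cuda(x, y, rands)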
Example #8
def maj_p_cuda(x, y, rands):
    #FIX THIS ASAP
    #global mask 
    #mask = torch.cuda.ByteTensor([2 ** x for x in range(8)])

    xs = x.shape[0]
    ys = y.shape[0]
    assert xs == ys
    and_ = torch.bitwise_and(x, y)
    or_ = torch.bitwise_or(x, y)
    top = torch.bitwise_and(and_, torch.bitwise_not(rands))
    bot = torch.bitwise_and(or_, rands)
    return torch.bitwise_or(top, bot)
Example #9
def obtain_uncertainty_mask(factors,
                            factor_bins,
                            num_factor_bins,
                            epsilon=1e-7):
    factor_bins_ = torch.cat([
        torch.min(factors).unsqueeze(0) - epsilon, factor_bins,
        torch.max(factors).unsqueeze(0) + epsilon
    ])

    ind_list = []
    for i in range(factor_bins_.shape[0] - 1):
        low = factor_bins_[i]
        high = factor_bins_[i + 1]
        ind = torch.bitwise_and(torch.ge(factors, low),
                                torch.lt(factors, high))
        ind_list.append(ind.unsqueeze(1))
    '''
    for i in range(factor_bins.shape[0]):
        if i == 0:
            low = torch.min(factors)-epsilon
            high = factor_bins[0]
        elif i == factor_bins.shape[0]-1:
            low = factor_bins[i-1]
            high = torch.max(factors)+epsilon
        else:
            low = factor_bins[i-1]
            high = factor_bins[i]
        ind = torch.bitwise_and(torch.ge(factors, low), torch.lt(factors, high))
        ind_list.append(ind.unsqueeze(1))
    '''
    return torch.cat(ind_list, dim=1)
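A minimal usage sketch for obtain_uncertainty_mask (hypothetical values; factor_bins holds the interior bin edges, so the returned one-hot mask has factor_bins.shape[0] + 1 columns):

import torch

factors = torch.tensor([0.1, 0.4, 0.9])
factor_bins = torch.tensor([0.33, 0.66])   # interior edges -> 3 bins
mask = obtain_uncertainty_mask(factors, factor_bins, num_factor_bins=3)
print(mask)  # one row per factor, True in the column of its bin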
Example #10
from itertools import product

def heatmap2indexV2(heatmap: torch.Tensor, heatmap2: torch.Tensor = None, thres=0.5, has_background=False, topK=5):
    """
    A location heatmap[0,0,:] may belong to several classes at once.
    :param heatmap: [bs,h,w,num_classes] or [h,w,num_classes]
    :param heatmap2: [bs,h,w,num_classes,4] or [h,w,num_classes,4]
    """
    scores, labels = heatmap.topk(topK,-1)

    if heatmap2 is not None:
        h,w,c = labels.shape
        new_heatmap2 = torch.zeros((h,w,c,heatmap2.shape[-1]),device=heatmap2.device)
        # for i in range(h):
        #     for j in range(w):
        #         for k in range(c):
        #             l = labels[i,j,k]
        #             new_heatmap2[i,j,k,:] = heatmap2[i,j,l,:]
        # for i in range(h):
        #     for j in range(w):
        for i, j in product(range(h), range(w)):
            new_heatmap2[i, j] = heatmap2[i, j, labels[i, j]]

    if has_background:
        keep = torch.bitwise_and(scores > thres, labels > 0)  # label 0 is background, so keep only foreground
    else:
        keep = scores > thres
    scores, labels = scores[keep], labels[keep]
    cycx = torch.nonzero(keep)[...,:2]
    if heatmap2 is not None:
        new_heatmap2 = new_heatmap2[keep]
        heatmap2= new_heatmap2

    return scores, labels,cycx,keep,heatmap2
Example #11
 def test_bitwise_and(self):
     device = self.get_device()
     cpu_a = torch.tensor([[0], [1], [1]], dtype=bool)
     cpu_b = torch.tensor([[1], [0], [1]], dtype=bool)
     ort_a = cpu_a.to(device)
     ort_b = cpu_b.to(device)
     cpu_out = torch.tensor([], dtype=bool)
     ort_out = cpu_out.to(device)
     cpu_result = torch.bitwise_and(cpu_a, cpu_b)
     ort_result = torch.bitwise_and(ort_a, ort_b)
     assert torch.equal(cpu_result, ort_result.cpu())
     cpu_result = torch.bitwise_and(cpu_a, cpu_b, out=cpu_out)
     ort_result = torch.bitwise_and(ort_a, ort_b, out=ort_out)
     assert torch.equal(cpu_result, ort_result.cpu())
     assert torch.equal(cpu_out, ort_out.cpu())
     assert torch.equal(ort_result.cpu(), ort_out.cpu())
Example #12
    def func_dawson_2nd(self, x: Tensor) -> Tensor:
        y = torch.zeros_like(x)
        idx1 = torch.lt(x, -10.)
        idx2 = torch.gt(x, 10.)

        y[idx1] = self.func_asym_neg_inf(x[idx1])
        y[idx2] = self.func_asym_pos_inf(x[idx2])

        idx1 = torch.bitwise_not(torch.bitwise_or(idx1, idx2))
        y[idx1] = chebyshev_val_neg(-x[idx1].abs_(),
                                    self.cheb_neg,
                                    num_sub=self.div)

        idx1 = torch.bitwise_and(idx1, x > 0)
        if x.is_cuda:
            if x[idx1].numel() < mnn_config.get_value('cpu_or_gpu'):
                device = x.device
                temp = torch.from_numpy(scipy.erfi(
                    x[idx1].cpu().numpy())).to(device=device)
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * temp) - y[idx1]
            else:
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * self.dawson1.erfi(x[idx1])) - \
                          y[idx1]
        else:
            y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                      (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * torch.from_numpy(
                          scipy.erfi(x[idx1].numpy()))) - y[idx1]
        return y
Example #13
 def update_active_objects(self, new_gt_masks, new_valid_targets):
     objs_changes = torch.ne(self.active_valid_targets, new_valid_targets)
     if objs_changes.any():
         # We only care about objects that are new (first appearance) in the clip ->
         # new_valid_targets == True and self.active_valid_targets == False at the positions where there are changes
         new_appearance_ids = torch.bitwise_and(
             torch.bitwise_and(objs_changes, new_valid_targets),
             torch.bitwise_and(torch.logical_not(self.active_valid_targets),
                               objs_changes))
         # Check if there is any appearance
         if new_appearance_ids.any():
             self.active_valid_targets = torch.bitwise_or(
                 self.active_valid_targets, new_appearance_ids)
             mask_to_op = torch.zeros_like(self.active_objs_masks)
             mask_to_op[new_appearance_ids, :, :] = 1
             self.active_objs_masks = mask_to_op * new_gt_masks + self.active_objs_masks
Example #14
def GMM_train(model, train_batches, feature_dim=512, dataset='CIFAR-10'):
    print('Crafting GMM parameters on training set')
    if dataset == 'CIFAR-10':
        num_class = 10
    elif dataset == 'CIFAR-100':
        num_class = 100
    dic = {}
    for i in range(num_class):
        dic[str(i)] = torch.tensor([]).cuda()
    for i, (X, y) in enumerate(train_batches):
        print(i)
        X, y = X.cuda(), y.cuda()
        output, features = model(normalize(X))  # features: 128 x 512
        _, pre_labels = output.max(1)  # pre_labels : 128
        c_or_w = (pre_labels == y)
        for j in range(num_class):
            is_j = torch.bitwise_and(c_or_w, (y == j))
            indexs = torch.where(is_j)[0]
            dic[str(j)] = torch.cat((dic[str(j)], features[indexs].detach()),
                                    dim=0)
    mu = torch.zeros(num_class, feature_dim).cuda()
    sigma = torch.zeros(num_class, feature_dim, feature_dim).cuda()
    for i in range(num_class):
        dic_i = dic[str(i)]
        mu[i] = dic_i.mean(dim=0)
        gap = dic_i - mu[i].unsqueeze(dim=0)  # 1 x 512
        sigma[i] = (torch.mm(gap.t(), gap) + 1e-10 *
                    torch.eye(feature_dim).cuda()) / dic_i.size(0)  # 512 x 512
    print('Finished!')
    return mu, sigma
Example #15
    def forward(self, x: Tensor) -> Tensor:
        idx1 = torch.lt(x, -10)
        idx2 = torch.gt(x, self.cheb_xmas_for_H)
        idx3 = torch.bitwise_and(torch.bitwise_not(idx1), x <= 0)
        idx4 = torch.bitwise_and(torch.bitwise_not(idx2), x > 0)
        y = torch.zeros_like(x)

        y[idx1] = self.func_int_asym_neg_inf(x[idx1])
        y[idx2] = self.func_int_asym_pos_inf(x[idx2])
        y[idx3] = chebyshev_val_neg(x[idx3], self.cheb_H_neg, num_sub=self.div)
        y[idx4] = torch.exp(
            2 * torch.pow(x[idx4], 2)) * chebyshev_val_no_transform(
                x[idx4],
                self.cheb_H_pos,
                x_max=self.cheb_xmas_for_H,
                num_sub=self.div_pos)
        return y
Example #16
 def validation_step(self, batch, batch_nb):
     # OPTIONAL
     batch = self.mel_spectrogramer_val(batch)
     y_hat = self(batch)
     y = batch['target']
     loss = F.cross_entropy(y_hat, y)
     y_pred = F.softmax(y_hat, dim=1).argmax(dim=1)
     acc = (y_pred == y).float().mean()
     fa = torch.bitwise_and(y_pred != y, y == 0).float().sum()
     fr = torch.bitwise_and(y_pred != y, y != 0).float().sum()
     length_zeros = batch['lengths'][y == 0].sum()
     length_nonzeros = batch['lengths'][y != 0].sum()
     return {
         'val_loss': loss, 'val_acc': acc, 'fa': fa, 'fr': fr,
         'length_zeros': length_zeros, 'length_nonzeros': length_nonzeros,
         'zeros': (y == 0).float().sum(), 'non_zeros': (y != 0).float().sum()
     }
Example #17
 def decompress(self, tensor_compressed):
     sign = tensor_compressed > 127
     exps = torch.bitwise_and(tensor_compressed, 0b01111111)
     floats = ((exps + 18).to(torch.int32) << 23).view(torch.float32)
     tensor_decompressed = torch.where(sign, -floats, floats)
     tensor_decompressed = torch.multiply((exps >= 1).to(torch.float32),
                                          tensor_decompressed)
     return tensor_decompressed
Example #18
def make_SA_bool(weights, mask, mask1):
    ## Inject errors
    # output = ((weights + mask) > 0.)  # inject stuck at 0
    # output = ((output - mask1)> 0.)   # inject stuck at 1
    not_mask0 = torch.bitwise_not(mask)
    output = torch.bitwise_and(weights, not_mask0)  # inject stuck at 0
    output = torch.bitwise_or(output, mask1)  # inject stuck at 1
    return output
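A minimal sketch of injecting stuck-at faults with make_SA_bool on boolean weights (hypothetical masks; mask marks stuck-at-0 positions, mask1 marks stuck-at-1 positions):

import torch

weights = torch.tensor([True, True, False, False])
sa0 = torch.tensor([True, False, False, False])   # force position 0 to 0
sa1 = torch.tensor([False, False, True, False])   # force position 2 to 1
print(make_SA_bool(weights, sa0, sa1))            # tensor([False,  True,  True, False])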
Example #19
 def _region0_idx(self, u: Tensor, s: Tensor) -> Tuple[Tensor, Tensor]:
     idx0 = torch.gt(s, 0.)
     idx1 = torch.bitwise_and(
         idx0,
         torch.lt(self.vol_th * self.L - u,
                  self.cut_off * math.sqrt(self.L) * s))
     idx0.bitwise_not_()
     return idx0, idx1
Example #20
def accuracy(predict, target):
    assert predict.shape == target.shape

    and_sum = torch.bitwise_and(predict.type(torch.IntTensor),
                                target.type(torch.IntTensor)).sum()
    or_sum = torch.bitwise_or(predict.type(torch.IntTensor),
                              target.type(torch.IntTensor)).sum()

    return and_sum / or_sum / predict.shape[0]
Example #21
    def training_step(self, batch, batch_nb):
        # REQUIRED
        batch = self.mel_spectrogramer_train(batch)
        y_hat = self(batch)
        y = batch['target']
        loss = F.cross_entropy(y_hat, y)
        y_pred = F.softmax(y_hat, dim=1).argmax(dim=1)
        acc = (y_pred == y).float().mean()
        fa = torch.bitwise_and(y_pred != y, y == 0).float().sum()
        fr = torch.bitwise_and(y_pred != y, y != 0).float().sum()
        length_zeros = batch['lengths'][y == 0].sum()
        length_nonzeros = batch['lengths'][y != 0].sum()

        self.logger.experiment.log({
            'train_loss': loss, 'train_acc': acc,
            'train_fa': fa / (y == 0).float().sum(), 'train_fr': fr / (y != 0).float().sum(),
            'train_fa_per_len': fa / length_zeros, 'train_fr_per_len': fr / length_nonzeros
        })
        return {'train_loss': loss, 'train_acc': acc}
Example #22
def get_accuracy(x, x_o):
    res = 0
    for t in range(1, x.shape[0]):
        v, v_o = x[t].int(), x_o[t].int()
        try:
            res += int(torch.bitwise_and(v, v_o).sum()) / int(
                torch.bitwise_or(v, v_o).sum())
        except ZeroDivisionError:
            res += 1
    return res / x.shape[0]
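A minimal sketch of calling get_accuracy on binary state tensors (hypothetical shapes; rows are time steps and the score is a Jaccard-style overlap averaged over steps t >= 1):

import torch

x   = torch.randint(0, 2, (10, 32))   # reference binary states, [T, units]
x_o = torch.randint(0, 2, (10, 32))   # reconstructed binary states
print(get_accuracy(x, x_o))           # overlap score in [0, 1]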
Example #23
    def __get_synthetic_dict_entries__(self, data: MultiNodeData, sequence_id,
                                       file_id):
        ##### determine characteristics of this sequence #####
        if f"sequence-{sequence_id}" not in self.__synthetic_dict__:

            target_node: int = torch.argmax(
                torch.max(data["y_full"], dim=1)[0]).item()
            anomaly_idx: int = torch.argmax(data["y"]).item()

            self.__synthetic_dict__[f"sequence-{sequence_id}"] = {
                "anomaly":
                self.__anomalies__[
                    anomaly_idx],  # what is the type of this sequence?
                "adversary_anomaly":
                self.__adversary_dict__[self.__anomalies__[anomaly_idx]]
                (),  # which adversary?
                "target_node":
                target_node  # in which node shall we inject an anomaly?                    
            }

        if f"file-{file_id}" not in self.__synthetic_dict__:

            sequence_dict = self.__synthetic_dict__[f"sequence-{sequence_id}"]
            target_node = sequence_dict["target_node"]

            adj_cluster_coarse = data["adj_cluster_coarse"]
            row, col, _ = adj_cluster_coarse.coo()

            neighbor_nodes = row[torch.bitwise_and(
                col == target_node, row != target_node)].tolist()

            # randomly select only a subset
            neighbor_nodes = random.sample(
                neighbor_nodes, self.__neighbor_func__(neighbor_nodes))
            neighbor_nodes_dict = {
                k: (random.randint(0, self.metric_count - 1))
                for k in neighbor_nodes
            }

            self.__synthetic_dict__[f"file-{file_id}"] = {
                "target_row":
                random.randint(
                    0, self.metric_count -
                    1),  # in which metric shall we inject an anomaly?
                "neighbor_nodes_dict":
                neighbor_nodes_dict,  # what are the neighbor nodes?
                "type":
                random.choices(self.__types__,
                               weights=self.__type_weights__,
                               k=1)[0],  # which type to choose?
            }

        return self.__synthetic_dict__[
            f"sequence-{sequence_id}"], self.__synthetic_dict__[
                f"file-{file_id}"]
Example #24
    def forward_fast_mean(self, u: Tensor, s: Tensor) -> Tensor:
        idx0, idx1 = self._region0_idx(u, s)
        output = torch.zeros_like(u)

        ub, lb = self._compute_bound(u[idx1], s[idx1])
        output[idx1] = 1 / (self._auxiliary_func_mean(ub, lb) + self.t_ref)

        idx1 = torch.bitwise_and(idx0, torch.gt(u, self.vol_th * self.L))
        output[idx1] = 1 / (self.t_ref -
                            1 / self.L * torch.log(1 - 1 / u[idx1]))
        return output
Example #25
def bitwise_and(input_, other):
    """Wrapper of `torch.bitwise_and`

    Parameters
    ----------
    input_ : DTensor
        The first operand.
    other : DTensor
        The second operand.
    """
    return torch.bitwise_and(input_._data, other._data)
Example #26
def chebyshev_val_no_transform(x: Tensor,
                               c: Tensor,
                               x_min: float = 0.,
                               x_max: float = 1.,
                               num_sub: int = 50) -> Tensor:
    delta_x = (x_max - x_min) / num_sub
    y = torch.zeros_like(x)
    for i in range(num_sub):
        idx = torch.bitwise_and(torch.gt(x, x_min + delta_x * i),
                                torch.le(x, x_min + delta_x * (i + 1)))
        y[idx] = chebyshev_val(x[idx], c[i, :])
    return y
Example #27
def bs_zscc_cuda(bsx, bsy, N):
    """Single bitstream bsx is being compared to multiple bitstreams in bsy"""
    px = bs_count_cuda(bsx) / N
    py = bs_count_cuda(bsy) / N
    if px in (0, 1) or 1 in py or 0 in py:
        return 1
    p_uncorr = px * py
    p_actual = bs_count_cuda(torch.bitwise_and(bsx, bsy)) / N
    p_max = torch.min(px, py)
    p_min = torch.clamp(px + py - 1, min=0)  # elementwise max(px + py - 1, 0)
    delta0 = torch.floor(p_uncorr * N + 0.5) / N - p_uncorr
    delta = p_actual - p_uncorr
    numer = (delta - delta0)
    result = torch.cuda.FloatTensor(len(py)).fill_(0)
    gt_denom = p_max - p_uncorr - delta0
    gt_mask = torch.bitwise_and(numer > 0, gt_denom != 0)
    lt_denom = p_uncorr - p_min + delta0
    lt_mask = torch.bitwise_and(numer < 0, lt_denom != 0)
    result += (numer / (gt_denom + 1e-15)) * gt_mask
    result += (numer / (lt_denom + 1e-15)) * lt_mask
    return result
Example #28
def compute_iou(pred_mask, gt_mask):
    """
    Computes IoU between predicted instance mask and 
    ground-truth instance mask
    """

    pred_mask = pred_mask.byte().squeeze()
    gt_mask = gt_mask.byte().squeeze()
    # print('pred_masks', pred_mask.shape, 'gt_masks', gt_mask.shape)
    intersection = torch.bitwise_and(pred_mask, gt_mask).sum().float()
    union = torch.bitwise_or(pred_mask, gt_mask).sum().float()
    return intersection / union
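A minimal sketch of compute_iou on two binary instance masks (hypothetical tensors; inputs are 0/1 masks of the same spatial size):

import torch

pred_mask = torch.zeros(1, 64, 64)
gt_mask = torch.zeros(1, 64, 64)
pred_mask[0, 10:40, 10:40] = 1
gt_mask[0, 20:50, 20:50] = 1
print(compute_iou(pred_mask, gt_mask))  # ~0.286 for these overlapping squares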
Example #29
    def forward_fast_chi(self, u: Tensor, s: Tensor, u_a: Tensor,
                         s_a: Tensor) -> Tensor:
        idx0, idx1 = self._region0_idx(u, s)
        output = torch.zeros_like(u)
        ub, lb = self._compute_bound(u[idx1], s[idx1])
        output[idx1] = self._auxiliary_func_chi(ub, lb, u_a[idx1], s_a[idx1])

        idx1 = torch.bitwise_and(idx0, torch.gt(u, self.vol_th * self.L))
        output[idx1] = math.sqrt(2 / self.L) / torch.sqrt(self.t_ref - 1 / self.L * torch.log(1 - 1 / u[idx1])) / \
                       torch.sqrt(2 * u[idx1] - 1)

        return output
Example #30
def chebyshev_val_neg(x: Tensor,
                      c: Tensor,
                      num_sub: int = 50,
                      wrap: float = 4.,
                      alpha: int = 1) -> Tensor:
    delta_x = 1 / num_sub
    x = wrap / (wrap + torch.abs(x).pow(alpha))
    y = torch.zeros_like(x)
    for i in range(num_sub):
        idx = torch.bitwise_and(x > delta_x * i, x <= delta_x * (i + 1))
        y[idx] = chebyshev_val(x[idx], c[i, :])
    return y