Example #1
def logical_xor(a, b, out=None, where=True):
    # where=True (or None) means "apply everywhere": defer to torch.logical_xor
    if where is None or where is True:
        if out is not None:
            torch.logical_xor(a, b, out=out)
        else:
            out = torch.logical_xor(a, b)
    else:
        # otherwise xor only the positions selected by the boolean mask `where`
        if out is None:
            out = torch.zeros_like(a, dtype=torch.bool)
        out[where] = a[where] ^ b[where]
    return out
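
For reference, a minimal sketch of how the masked branch behaves, with made-up boolean inputs:

a = torch.tensor([True, True, False, False])
b = torch.tensor([True, False, True, False])
mask = torch.tensor([True, False, True, False])
logical_xor(a, b, where=mask)
# tensor([False, False,  True, False]): xor applied only where mask is True, False elsewhere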
Example #2
def synthesise_lower_triangular_circuit(tensor, size, sec_size):

    reverse_circuit = []

    num_sec = (size + sec_size - 1) // sec_size

    for sec in range(num_sec):

        first_sec_idx = sec * sec_size
        last_sec_idx = (sec + 1) * sec_size - 1

        last_sec_idx = min(last_sec_idx, size - 1)

        patterns = dict()

        for row_idx in range(first_sec_idx, size):

            sub_row = tensor[row_idx][first_sec_idx:last_sec_idx + 1]

            if sub_row.any():

                sub_row_pattern = ''.join(str(int(bit)) for bit in sub_row)

                if sub_row_pattern not in patterns:
                    patterns[sub_row_pattern] = row_idx

                else:
                    tensor[row_idx] = torch.logical_xor(tensor[row_idx], tensor[patterns[sub_row_pattern]])

                    reverse_circuit.append((patterns[sub_row_pattern], row_idx))

        for sec_col_idx in range(first_sec_idx, last_sec_idx + 1):

            # `is True/False` never matches a tensor element; convert explicitly
            diagonal_entry = bool(tensor[sec_col_idx][sec_col_idx])

            for row_idx in range(sec_col_idx + 1, size):

                if tensor[row_idx][sec_col_idx]:

                    if not diagonal_entry:
                        tensor[sec_col_idx] = torch.logical_xor(tensor[sec_col_idx], tensor[row_idx])

                        reverse_circuit.append((row_idx, sec_col_idx))
                        diagonal_entry = True

                    tensor[row_idx] = torch.logical_xor(tensor[row_idx], tensor[sec_col_idx])

                    reverse_circuit.append((sec_col_idx, row_idx))

    return tensor, reverse_circuit
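
A small worked call of the routine above (toy input of my own; each returned pair records one row xor, i.e. one CNOT in the synthesised circuit):

m = torch.tensor([[1, 0],
                  [1, 1]], dtype=torch.bool)
reduced, circuit = synthesise_lower_triangular_circuit(m, size=2, sec_size=1)
# reduced is the 2x2 identity; circuit == [(0, 1)], a single row-0-into-row-1 xor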
Example #3
    def forward(self, b, s, y):

        r = torch.arange(b.shape[0], device=b.device)

        m_b = b.view(-1, 1) == b.view(1, -1)  # same batch id
        m_y = torch.logical_xor(y.view(-1, 1), y.view(1, -1))  # different labels
        m_r = r.view(-1, 1) < r.view(1, -1)  # prevent duplicates
        m = torch.logical_and(torch.logical_and(m_b, m_y), m_r)

        if m.sum().item() == 0:
            raise EmptyBatchException

        mat_d = s.view(-1, 1) - s.view(1, -1)
        mat_y = y.view(-1, 1).repeat(1, y.shape[0])
        d = mat_d[m]
        z = mat_y[m].float()

        loss = nn.BCEWithLogitsLoss()(d, z)

        output = {}
        output['loss'] = loss
        output['logits'] = d.detach()
        output['labels'] = z.detach()

        return output
Example #4
def confusion_matrix(pred, smnt):

    # IoU = (true positive) / (true_positive + false_positive + false_negative)

    batch, _, H, W = smnt.shape
    smnt = torch.reshape(smnt, (batch, H, W))
    _, indices = torch.max(pred, dim=1)


    confusion_mtrx = np.zeros((19, 2))  # [classes] * [true_pos, false_cases]
    ones = torch.ones((batch, H, W), device=smnt.device)
    zeros = torch.zeros((batch, H, W), device=smnt.device)

    unclassified = torch.where(smnt == 0, ones, zeros)
    for i in range(1, 20):
        ground_truth = torch.where(smnt == i, ones, zeros)
        prediction = torch.where(indices == i, ones, zeros)
        prediction = prediction - prediction * unclassified  # unclassified label not considered

        true_pos = torch.sum(ground_truth * prediction).item()
        false_cases = torch.sum(torch.logical_xor(ground_truth, prediction)).item()

        confusion_mtrx[i-1, 0] = true_pos
        confusion_mtrx[i-1, 1] = false_cases

    return confusion_mtrx
Example #5
    def predict(self, data, num_samples=500):
        cond_mask = data.train_mask
        # XOR with an all-True mask inverts it: evaluate on the non-training nodes
        eval_mask = torch.logical_xor(
            torch.ones_like(data.train_mask, dtype=torch.bool),
            data.train_mask)
        cov = self.get_cov(data)
        logits = self.forward(data)
        copula = GaussianCopula(cov)

        cond_cov = (cov[cond_mask, :])[:, cond_mask]
        cond_marginal = self.marginal(logits[cond_mask], cond_cov)
        eval_cov = (cov[eval_mask, :])[:, eval_mask]
        eval_marginal = self.marginal(logits[eval_mask], eval_cov)

        cond_u = torch.clamp(self.cdf(cond_marginal, data.y[cond_mask]),
                             self.eps, 1 - self.eps)
        cond_idx = torch.where(cond_mask)[0]
        sample_idx = torch.where(eval_mask)[0]
        eval_u = copula.conditional_sample(cond_val=cond_u,
                                           sample_shape=[
                                               num_samples,
                                           ],
                                           cond_idx=cond_idx,
                                           sample_idx=sample_idx)
        eval_u = torch.clamp(eval_u, self.eps, 1 - self.eps)
        eval_y = self.icdf(eval_marginal, eval_u)

        pred_y = data.y.clone()
        pred_y[eval_mask] = eval_y
        return pred_y
Example #6
    def apply_action(self, action):

        if not isinstance(action, torch.Tensor):
            action = torch.Tensor(action)

        while len(action.shape) < 4:
            action = action.unsqueeze(0)

        if action.device != self.my_device:
            action = action.to(self.my_device)

        # this may be better as an assertion line to avoid silent failures
        #action = action[:, :, :self.action_width, :self.action_height]

        # centre-crop when the incoming action plane is larger than the action space
        if action.shape[2] > self.action_width and action.shape[3] > self.action_height:
            off_y = (self.width - self.action_width) // 2
            off_x = (self.height - self.action_height) // 2
            action_crop = action[:, :, off_y:-off_y, off_x:-off_x]
        else:
            action_crop = action


        assert action_crop.shape[2] == self.action_width, \
                f"action width is wrong {action_crop.shape[2]} not "\
                f"{self.action_width}, {action_crop.shape}"
        assert action_crop.shape[3] == self.action_height,\
                f"action height is wrong {action_crop.shape[3]} not "\
                f"{self.action_height}, {action_crop.shape}"

        action_crop = self.action_padding(action_crop)

        # toggle cells according to actions
        self.universe = 1.0 * torch.logical_xor(self.universe, action_crop.detach())
Example #7
    def layer_stats_collector(
        param_name,
        clipping_factor,
        clipping_threshold,
        per_sample_norm,
        per_sample_grad,
        grad_before_clip,
        grad_after_clip,
    ):
        global _clipping_stats
        if param_name is None:
            # module is done processing all params, report all stats at once
            stats.update(stats.StatType.CLIPPING, "Clipping",
                         **_clipping_stats)
            # clear stats for next round
            _clipping_stats = {}
            return

        _clipping_stats[f"{param_name}:max_norm"] = per_sample_norm.max()
        _clipping_stats[f"{param_name}:mean_norm"] = per_sample_norm.mean()
        _clipping_stats[f"{param_name}:median_norm"] = per_sample_norm.median()
        _clipping_stats[f"{param_name}:clip"] = clipping_threshold
        _clipping_stats[f"{param_name}:percent"] = ((
            per_sample_norm > clipping_threshold).to(
                dtype=torch.float64).mean())
        pre_clip_pos = grad_before_clip > 0
        post_clip_pos = grad_after_clip > 0
        _clipping_stats[f"{param_name}:switch"] = (torch.logical_xor(
            pre_clip_pos, post_clip_pos).to(dtype=torch.float64).mean())
Example #8
    def update(self, targets: Tensor, outputs: Tensor, loss: Tensor):
        targets = targets.cuda()
        outputs = outputs.cuda()
        loss = loss.cuda()
        predicts = torch.sigmoid(outputs).clamp(1e-6, 1 - 1e-6)
        mean = predicts.mean()
        acc = ((predicts > 0.5) == targets).sum().float() / len(predicts)
        # agreement between the two halves of the batch (1.0 = identical decisions)
        bias = 1 - torch.logical_xor(
            (predicts > 0.5)[:len(predicts) // 2],
            (predicts > 0.5)[len(predicts) // 2:]).float().mean()
        pos_p, pos_l = predicts[targets == 0], targets[targets == 0]
        neg_p, neg_l = predicts[targets == 1], targets[targets == 1]
        pos_acc = ((pos_p > 0.5) == pos_l).sum().float() / len(pos_p)
        neg_acc = ((neg_p > 0.5) == neg_l).sum().float() / len(neg_p)
        neg_loss = self.criterion(neg_p, neg_l)
        pos_loss = self.criterion(pos_p, pos_l)
        confidence = (torch.min(predicts, 1 - predicts)).mean()

        self.loss.append(all_sum(loss).item() / self.workers)
        self.acc.append(all_sum(acc).item() / self.workers)
        self.neg_loss.append(all_sum(neg_loss).item() / self.workers)
        self.pos_loss.append(all_sum(pos_loss).item() / self.workers)
        self.confidence.append(all_sum(confidence).item() / self.workers)
        self.pos_acc.append(all_sum(pos_acc).item() / self.workers)
        self.neg_acc.append(all_sum(neg_acc).item() / self.workers)
        self.bias.append(all_sum(bias).item() / self.workers)
        self.mean.append(all_sum(mean).item() / self.workers)
Example #9
def cal_Hamming(feat1, feat2, mask1=None, mask2=None):
    if mask1 is None:
        mask1 = torch.ones_like(feat1).to(torch.bool)
    if mask2 is None:
        mask2 = torch.ones_like(feat2).to(torch.bool)
    mask = torch.logical_and(mask1, mask2)
    # fraction of jointly valid positions where the two codes disagree
    dist = torch.logical_and(torch.logical_xor(feat1, feat2), mask).to(
        torch.float).sum() / mask.to(torch.float).sum()
    return dist
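
For intuition, a hand-made example with hypothetical bit codes and a shared validity mask:

feat1 = torch.tensor([True, False, True, True])
feat2 = torch.tensor([True, True, False, True])
mask = torch.tensor([True, True, True, False])
cal_Hamming(feat1, feat2, mask, mask)
# 2 disagreements among 3 valid bits -> tensor(0.6667)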
Example #10
 def cal_persistence_feature(self,
                             saliency_maps: torch.Tensor) -> torch.Tensor:
     # binarise at the global median; a plain comparison keeps dtype/device consistent
     self.thre = torch.median(saliency_maps).item()
     saliency_maps = (saliency_maps > self.thre).float()
     # xor-fold the stack: _base becomes the per-position parity of the binarised maps
     _base = saliency_maps[0]
     for i in range(1, len(saliency_maps)):
         _base = torch.logical_xor(_base, saliency_maps[i]).float()
     return _base.flatten(start_dim=1).norm(p=1)
Example #11
    def test_cmap_locations(self):
        inputs = torch.rand(1, 1, 10, 10)
        inputs2 = torch.rand(1, 1, 10, 10)
        out1 = apply_colormap(inputs, "gray")
        out2 = apply_colormap(inputs2, "gray")

        # ordering must be preserved: wherever inputs <= inputs2, out1 <= out2 must hold
        greater_input = inputs <= inputs2
        greater_output = out1[:, 0, ...] <= out2[:, 0, ...]
        assert not torch.logical_xor(greater_input, greater_output).any()
Example #12
def pytorch_error_msg(a, b, rtol, atol):
    msg = f"\ntensor 1\n{a}\ntensor 2\n{b}"
    if torch.is_tensor(a) and torch.is_tensor(b):
        if a.dtype == torch.bool and b.dtype == torch.bool:
            diff = torch.logical_xor(a, b)
            msg = msg + f"\ndifference \n{diff}"
        else:
            diff = torch.abs(a - b)
            msg = msg + f"\ndifference \n{diff}\nrtol {rtol}\natol {atol}"
    return msg
Example #13
File: metrics.py Project: t3hseus/ariadne
def wrong_dist(preds, target):
    preds_true = (preds < MIN_EPS_HOLDER.MIN_EPS)

    tgt_true = (target < MIN_EPS_HOLDER.MIN_EPS)

    only_true = torch.logical_xor(preds_true, tgt_true)

    # count_per_batch = torch.floor_divide(only_true.sum(dim=1), only_true.shape[1])

    return only_true.double().mean()
Example #14
    def get_explanation_prefix_difference(self, tensor1, tensor2):
        tensor1_positive = tensor1 > 0
        tensor1_negative = tensor1 < 0
        tensor2_positive = tensor2 > 0
        tensor2_negative = tensor2 < 0
        positive_xor = torch.logical_xor(tensor1_positive, tensor2_positive)
        negative_xor = torch.logical_xor(tensor1_negative, tensor2_negative)

        converge_distances = (
            tensor2 * positive_xor.float() + tensor2 * negative_xor.float()
        ) / 2
        explanation_prefix_convergence_distance = F.mse_loss(
            converge_distances, torch.zeros_like(converge_distances)
        )
        """
        explanation_prefix_convergence_distance = sum(
            sum(torch.abs(converge_distances))
        )
        """
        return explanation_prefix_convergence_distance
Example #15
def logical_xor(input_, other):
    """Wrapper of `torch.logical_xor`.

    Parameters
    ----------
    input_ : DTensor
        The first operand.
    other : DTensor
        The second operand.
    """
    return torch.logical_xor(input_._data, other._data)
Example #16
    def forward(self, y_pred, y_true):
        tp = torch.sum(torch.logical_and(y_pred, y_true))
        tn = torch.sum(
            torch.logical_and(torch.logical_not(y_pred),
                              torch.logical_not(y_true)))
        fp = torch.sum(
            torch.logical_and(torch.logical_xor(y_pred, y_true), y_pred))
        fn = torch.sum(
            torch.logical_and(torch.logical_xor(y_pred, y_true), y_true))
        accuracy = (tp + tn) / (tp + tn + fp + fn)
        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)

        f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)
        f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)
        return {
            "accuracy": float(accuracy),
            "precision": float(precision),
            "recall": float(recall),
            "f1_score": float(f1),
        }
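
The two XOR terms above isolate false positives and false negatives: XOR marks every disagreement, and the extra AND keeps only the side that was predicted (or labelled) positive. A quick check with made-up values:

y_pred = torch.tensor([True, True, False, False])
y_true = torch.tensor([True, False, True, False])
disagree = torch.logical_xor(y_pred, y_true)    # tensor([False,  True,  True, False])
torch.sum(torch.logical_and(disagree, y_pred))  # tensor(1): one false positive
torch.sum(torch.logical_and(disagree, y_true))  # tensor(1): one false negative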
Example #17
File: DiffRender.py Project: zebrajack/DRT
    def silhouette_edge(self, origin:torch.Tensor):
        assert origin.dim() == 1
        vertices = self.vertices.detach() #[Vx3]
        faces = self.E2F

        EF1N, EF2N = edge_face_norm(vertices, faces)
        F1v = vertices[faces[:,0,0]]
        F2v = vertices[faces[:,1,0]]
        dot1 = dot(EF1N, origin - F1v)
        dot2 = dot(EF2N, origin - F2v)

        # an edge lies on the silhouette when its two adjacent faces face opposite ways
        silhouette_edge = torch.logical_xor(dot1 > 0, dot2 > 0)
        return self.Edges[silhouette_edge]
Example #18
def lower_train_test_difference_by_retraining(diff_atk, temp_mask, x):
    diff_atk = torch.round(diff_atk)
    # for the first channel, flag positions where temp_mask is True but diff_atk == 0
    temp_mask_problem = torch.logical_xor(temp_mask, diff_atk[:, :1, :, :])
    problem_elems = torch.nonzero(temp_mask_problem, as_tuple=False)
    # set diff_atk to 1 or -1
    for elem in problem_elems:
        idx = tuple(elem.tolist())
        if x[idx] == 255:
            diff_atk[idx] = -1
        else:
            diff_atk[idx] = 1
    return diff_atk
Example #19
 def create_mask(self, x_len, y_len):
     # a mask of shape x_len * y_len
     device = x_len.device
     max_x_len = x_len.max()
     max_y_len = y_len.max()
     x_mask = torch.arange(max_x_len,
                           device=device)[None, :] < x_len[:, None]
     y_mask = torch.arange(max_y_len,
                           device=device)[None, :] < y_len[:, None]
     valid = x_mask[:, :, None] * y_mask[:, None, :]
     ones = torch.ones_like(valid, device=device).bool()
     # XOR against all-True inverts the validity mask: True marks padding
     mask = torch.logical_xor(ones, valid)
     return mask
Example #20
def rand_index(pred_clusters: Tensor, gt_classes: Tensor) -> Tensor:
    r"""
    Rand index measurement for clusters.

    Rand index is computed by the number of instances predicted in the same class with the same label :math:`n_{11}` and
    the number of instances predicted in separate classes and with different labels :math:`n_{00}`, normalized by the total
    number of instances pairs :math:`n(n-1)`:

    .. math::
        \text{rand index} = \frac{n_{11} + n_{00}}{n(n-1)}

    :param pred_clusters: :math:`(b\times n)` predicted clusters. :math:`n`: number of instances.
        ::

            e.g. [[0,0,1,2,1,2]
                  [0,1,2,2,1,0]]
    :param gt_classes: :math:`(b\times n)` ground truth classes
        ::

            e.g. [['car','car','bike','bike','person','person'],
                  ['bus','bus','cat', 'sofa',  'cat',  'sofa' ]]
    :return: :math:`(b)` rand index
    """
    num_clusters = torch.max(pred_clusters, dim=-1).values + 1
    num_instances = pred_clusters.shape[1]
    batch_num = pred_clusters.shape[0]
    gt_classes_t = []
    for b in range(batch_num):
        gt_classes_b_set = list(set(gt_classes[b]))
        gt_classes_t.append([])
        assert len(gt_classes_b_set) == num_clusters[b]
        for i in range(len(gt_classes[b])):
            gt_classes_t[b].append(gt_classes_b_set.index(gt_classes[b][i]))
    gt_clusters = torch.tensor(gt_classes_t).to(dtype=pred_clusters.dtype,
                                                device=pred_clusters.device)
    pred_pairs = pred_clusters.unsqueeze(-1) == pred_clusters.unsqueeze(-2)
    gt_pairs = gt_clusters.unsqueeze(-1) == gt_clusters.unsqueeze(-2)
    unmatched_pairs = torch.logical_xor(pred_pairs,
                                        gt_pairs).to(dtype=torch.float)
    rand_index = 1 - torch.sum(
        unmatched_pairs, dim=(-1, -2)) / (num_instances * (num_instances - 1))
    return rand_index
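
Running the function on the docstring's own example (with the class names passed as nested lists; worked out by hand, 8 of 30 ordered pairs disagree in the first batch and 12 of 30 in the second):

pred = torch.tensor([[0, 0, 1, 2, 1, 2],
                     [0, 1, 2, 2, 1, 0]])
gt = [['car', 'car', 'bike', 'bike', 'person', 'person'],
      ['bus', 'bus', 'cat', 'sofa', 'cat', 'sofa']]
rand_index(pred, gt)  # tensor([0.7333, 0.6000])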
Example #21
    def forward(self, best_hyp_indices, best_word_indices, finished,
                scores_accumulated, lengths, reference_lengths, *factor_args):

        # Reorder fixed-size beam data according to best_hyp_indices (ascending)
        finished = finished.index_select(0, best_hyp_indices)
        lengths = lengths.index_select(0, best_hyp_indices)
        reference_lengths = reference_lengths.index_select(0, best_hyp_indices)

        # Normalize hypotheses that JUST finished
        all_finished = pt.logical_or(best_word_indices == self.pad_id,
                                     best_word_indices == self.eos_id)
        newly_finished = pt.logical_xor(all_finished, finished).unsqueeze(1)
        scores_accumulated = pt.where(
            newly_finished,
            self._scorer(scores_accumulated, lengths, reference_lengths),
            scores_accumulated)

        # Recompute finished. Hypotheses are finished if they are extended with <pad> or <eos>
        finished = pt.logical_or(best_word_indices == self.pad_id,
                                 best_word_indices == self.eos_id)

        best_word_indices = best_word_indices.unsqueeze(1)

        # Traced modules do not allow optional return values or None, but lists. We return
        # primary scores and optional factor scores therefore in a list.
        scores = [scores_accumulated]  # type: List[pt.Tensor]
        if self.expect_factors:
            factors, factor_scores_accumulated = factor_args
            # factors: (batch*beam, num_secondary_factors, 2)
            f_sorted = factors.index_select(0, best_hyp_indices)
            factor_scores, factor_indices = f_sorted[:, :, 0], f_sorted[:, :, 1]
            # updated_factor_scores: (batch*beam, num_secondary_factors)
            updated_factor_scores = factor_scores_accumulated.index_select(
                0, best_hyp_indices) + factor_scores
            # Concatenate sorted secondary target factors to best_word_indices. Shape: (batch*beam, num_factors)
            best_word_indices = pt.cat(
                (best_word_indices, factor_indices.int()), dim=1)
            scores.append(updated_factor_scores)

        return best_word_indices, finished, scores, lengths, reference_lengths
Example #22
def iou(outputs: torch.Tensor, labels: torch.Tensor, batch_size=1):
    outputs = outputs.argmax(dim=1, keepdim=True)  # dim is classes
    height = list(outputs.shape)[2]
    width = list(outputs.shape)[3]
    # class 20 not used in eval, so copy first 19 channels to new tensor
    top_pred = torch.zeros(batch_size, 19, height, width).cuda()  # assume batch size 1
    labels_19 = torch.zeros(batch_size, 19, height, width).cuda()
    for chan in range(0, 19):
        top_pred[:, chan, :, :] = outputs[:, 0, :, :] == chan
        labels_19[:, chan, :, :] = labels[:, chan, :, :]

    labels_19 = labels_19.int()
    top_pred = top_pred.int()
    SMOOTH = 1e-6
    intersection = torch.mul(top_pred, labels_19).int().sum((2, 3))  # both 1 -> 1
    union = intersection + torch.logical_xor(top_pred, labels_19).int().sum((2, 3))  # 1 in either -> 1
    iou = (intersection + SMOOTH) / (union + SMOOTH)  # smooth the division to avoid 0/0
    # iou is score for every class in every batch
    # thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10  # This is equal to comparing with thresolds
    ret = iou.mean().item()  # averages across whole batch
    return ret
Example #23
    def _calculate_val_loss(self, start_hat, end_hat, start, end, token_type_ids, attention_mask):

        # mask out paddings, [cls]/[sep] is already cut
        mask = torch.logical_xor(token_type_ids[:, 1:101], attention_mask[:, 1:101])
        mask = torch.logical_not(mask).unsqueeze(2).repeat(1, 1, 20)

        start_hat = start_hat.masked_fill_(mask, float("-inf"))
        end_hat = end_hat.masked_fill_(mask, float("-inf"))

        loss_start = self.criterion(start_hat, start.long())
        loss_end = self.criterion(end_hat, end.long())

        total_loss = loss_start + loss_end
        # the answer should lie in [0, max_len]; cross entropy yields nan/inf for out-of-range classes
        if math.isnan(total_loss) or math.isinf(total_loss):
            print(start)
            print(end)
            print(loss_start)
            print(loss_end)
            sys.exit()

        return total_loss
Example #24
    def update_weight(self, pred, target, weight, avg_factor):
        """Update the weight according to targets."""
        if weight is None:
            weight = target.new_ones(target.size())

        invalid_inds = weight <= 0
        target[invalid_inds] = -1
        pos_inds = target == 1
        neg_inds = target == 0

        if self.pos_margin > 0:
            pred[pos_inds] -= self.pos_margin
        if self.neg_margin > 0:
            pred[neg_inds] -= self.neg_margin
        pred = torch.clamp(pred, min=0, max=1)

        num_pos = int((target == 1).sum())
        num_neg = int((target == 0).sum())
        if self.neg_pos_ub > 0 and num_neg / (num_pos + 1e-6) > self.neg_pos_ub:
            num_neg = num_pos * self.neg_pos_ub
            neg_idx = torch.nonzero(target == 0, as_tuple=False)

            if self.hard_mining:
                costs = l2_loss(pred, target,
                                reduction='none')[neg_idx[:, 0],
                                                  neg_idx[:, 1]].detach()
                neg_idx = neg_idx[costs.topk(num_neg)[1], :]
            else:
                neg_idx = self.random_choice(neg_idx, num_neg)

            new_neg_inds = neg_inds.new_zeros(neg_inds.size()).bool()
            new_neg_inds[neg_idx[:, 0], neg_idx[:, 1]] = True

            # negatives that were not sampled (in neg_inds but not new_neg_inds) get zero weight
            invalid_neg_inds = torch.logical_xor(neg_inds, new_neg_inds)
            weight[invalid_neg_inds] = 0

        avg_factor = (weight > 0).sum()
        return pred, weight, avg_factor
Example #25
    def peak_finder_loss(self, logit=None, labels=None):

        if logit is None:
            return None
        batch_size = labels.size()[0]
        loss_penalty = 10000

        ascend = torch.tensor([0, 1, -1],
                              requires_grad=False,
                              dtype=torch.float32)
        descend = torch.tensor([-1, 1, 0],
                               requires_grad=False,
                               dtype=torch.float32)

        if torch.cuda.is_available():
            ascend = ascend.cuda()
            descend = descend.cuda()

        max = F.relu(
            F.conv1d(labels.view(batch_size, 1, -1),
                     ascend.view(1, 1, -1),
                     bias=None,
                     stride=1,
                     padding=1))
        min = F.relu(
            F.conv1d(labels.view(batch_size, 1, -1),
                     descend.view(1, 1, -1),
                     bias=None,
                     stride=1,
                     padding=1))
        zeros = torch.mul(max, min).squeeze()
        # xor with an all-zero tensor maps nonzero entries to True: a boolean peak mask
        zeros = torch.logical_xor(zeros, torch.zeros_like(zeros))
        loss = torch.mean(
            torch.mean(torch.mul(torch.abs(logit - labels), zeros), 1))
        loss = loss * 100
        return loss
Example #26
def calc_signed_distance(pred_label, target_label):
    # Get the boundaries of the labels
    pred_bounds = get_segmentation_boundaries(pred_label)
    target_bounds = get_segmentation_boundaries(target_label)

    # Get the boundary pixels that are within the target segmentation
    internal = torch.logical_and(pred_bounds.data, target_label.data)
    external = torch.logical_and(torch.logical_xor(pred_bounds.data, target_label.data), pred_bounds.data)

    sign_vol = pred_bounds.clone()
    sign_vol.data[internal] = -1.0
    sign_vol.data[external] = 1.0

    # Get the indices of the boundary pixels
    signs = sign_vol.data[pred_bounds.data != 0.0]
    pred_inds = torch.nonzero(pred_bounds.data.squeeze(), as_tuple=False).float()
    target_inds = torch.nonzero(target_bounds.data.squeeze(), as_tuple=False).float()

    distance_mat = ((pred_inds.permute(1, 0).unsqueeze(0) - target_inds.unsqueeze(2)) ** 2).sum(1).sqrt()
    min_dist_vector = distance_mat.min(dim=0)[0]
    signed_distance_vector = signs * min_dist_vector

    return signed_distance_vector
Example #27
File: math_ops.py Project: malfet/pytorch
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
Example #28
File: __init__.py Project: struss/ONE-1
 def forward(self, inputs):
     return torch.logical_xor(inputs[0], inputs[1])
Example #29
def subtraction(tensor1, tensor2):
    # boolean set difference: True where tensor1 is True and tensor2 is not
    left = torch.logical_xor(tensor1, tensor2)
    return torch.logical_and(tensor1, left)
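
A quick sanity check of the identity (a XOR b) AND a == a AND NOT b, on made-up booleans:

a = torch.tensor([True, True, False, False])
b = torch.tensor([True, False, True, False])
subtraction(a, b)  # tensor([False,  True, False, False]), i.e. a & ~b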
Example #30
def get_item_mask(og_interactions: torch.Tensor, masked_interactions: torch.Tensor) -> torch.Tensor:
    # positions where the original and masked interaction tensors differ
    return torch.logical_xor(og_interactions, masked_interactions)
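
Since masking only ever flips entries of the original, the XOR recovers exactly the masked-out positions; toy tensors for illustration:

og = torch.tensor([True, True, False, True])
masked = torch.tensor([True, False, False, True])
get_item_mask(og, masked)  # tensor([False,  True, False, False]): item 1 was masked out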