    def fill_hetero_edges(self, encoded_edges, x, cell_x, start, end,
                          volume_id):
        """
        Fill the heterogeneous edges with the corresponding encoders
        """
        for encoder, combo in zip(self.edge_encoders, self.all_combos):
            model_0 = self.hparams["model_ids"][combo[0]]
            model_1 = self.hparams["model_ids"][combo[1]]
            vol_ids_0 = torch.tensor(model_0["volume_ids"],
                                     device=encoded_edges.device)
            vol_ids_1 = torch.tensor(model_1["volume_ids"],
                                     device=encoded_edges.device)

            # select edges whose two endpoints lie in this pair of volumes
            vol_edge_mask = (torch.isin(volume_id[start], vol_ids_0)
                             & torch.isin(volume_id[end], vol_ids_1))

            # encode the concatenated node and cell features of both endpoints
            encoded_edges[vol_edge_mask] = encoder(
                torch.cat([
                    x[start[vol_edge_mask], :model_0["num_features"]],
                    cell_x[start[vol_edge_mask]],
                    x[end[vol_edge_mask], :model_1["num_features"]],
                    cell_x[end[vol_edge_mask]],
                ], dim=-1))

        return encoded_edges
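For orientation, a sketch of the hparams layout this indexing implies; the keys come from the example above, while the concrete values and the note about all_combos are illustrative assumptions rather than details from the source project.

# Hypothetical hparams layout implied by the lookups above (values made up)
hparams = {
    "hidden": 128,
    "model_ids": [
        {"volume_ids": [7, 8, 9], "num_features": 3},
        {"volume_ids": [12, 13, 14], "num_features": 2},
    ],
}
# self.all_combos would then be pairs of indices into model_ids, one edge
# encoder per pair, e.g. [(0, 0), (0, 1), (1, 0), (1, 1)].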
Example #2
    def fill_hetero_edges(self, input_node_features, start, end, volume_id):
        """
        Fill the heterogeneous edges with the corresponding encoders
        """

        features_to_fill = torch.empty(
            (start.shape[0], self.hparams["hidden"])).to(start.device)

        for encoder, combo in zip(self.edge_encoders, self.all_combos):
            model_0 = self.hparams["model_ids"][combo[0]]
            model_1 = self.hparams["model_ids"][combo[1]]
            vol_ids_0 = torch.tensor(model_0["volume_ids"],
                                     device=features_to_fill.device)
            vol_ids_1 = torch.tensor(model_1["volume_ids"],
                                     device=features_to_fill.device)

            # select edges whose two endpoints lie in this pair of volumes
            vol_edge_mask = (torch.isin(volume_id[start], vol_ids_0)
                             & torch.isin(volume_id[end], vol_ids_1))

            # concatenate the node features of both endpoints and encode them
            features_to_encode = torch.cat([
                input_node_features[start[vol_edge_mask]],
                input_node_features[end[vol_edge_mask]]
            ], dim=-1)

            features_to_fill[vol_edge_mask] = encoder(features_to_encode)

        return features_to_fill
Example #3
    def test_isin_different_devices(self, device, dtype):
        a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
        b = torch.arange(3, 30, device='cpu', dtype=dtype)
        with self.assertRaises(RuntimeError):
            torch.isin(a, b)

        c = torch.arange(6, device='cpu', dtype=dtype).reshape([2, 3])
        d = torch.arange(3, 30, device=device, dtype=dtype)
        with self.assertRaises(RuntimeError):
            torch.isin(c, d)
Example #4
 def assert_isin_equal(a, b):
     # Nested inside a TestCase method in the original test; `self` below
     # refers to the enclosing test case.
     # Compare to the numpy reference implementation.
     x = torch.isin(a, b)
     a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)
     b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)
     y = np.isin(a, b)
     self.assertEqual(x, y)
Example #5
File: math_ops.py Project: malfet/pytorch
 def comparison_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     return (
         torch.allclose(a, b),
         torch.argsort(a),
         torch.eq(a, b),
         torch.equal(a, b),
         torch.ge(a, b),
         torch.greater_equal(a, b),
         torch.gt(a, b),
         torch.greater(a, b),
         torch.isclose(a, b),
         torch.isfinite(a),
         torch.isin(a, b),
         torch.isinf(a),
         torch.isposinf(a),
         torch.isneginf(a),
         torch.isnan(a),
         torch.isreal(a),
         torch.kthvalue(a, 1),
         torch.le(a, b),
         torch.less_equal(a, b),
         torch.lt(a, b),
         torch.less(a, b),
         torch.maximum(a, b),
         torch.minimum(a, b),
         torch.fmax(a, b),
         torch.fmin(a, b),
         torch.ne(a, b),
         torch.not_equal(a, b),
         torch.sort(a),
         torch.topk(a, 1),
         torch.msort(a),
     )
Example #6
File: array.py Project: Nic-Ma/MONAI
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(
                f"{self.__class__} can not handle data of type {type(img)}.")

        if isinstance(img, torch.Tensor):
            img = convert_to_tensor(img, track_meta=get_track_meta())
            img_ = convert_to_tensor(img, track_meta=False)
            if hasattr(torch, "isin"):  # `isin` is new in torch 1.10.0
                appl_lbls = torch.as_tensor(self.applied_labels,
                                            device=img_.device)
                out = torch.where(torch.isin(img_, appl_lbls), img_,
                                  torch.tensor(0.0).to(img_))
                return convert_to_dst_type(out, dst=img)[0]
            out: NdarrayOrTensor = self(
                img_.detach().cpu().numpy())  # type: ignore
            out = convert_to_dst_type(out, img)[0]  # type: ignore
            return out
        return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
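As a quick illustration of what this filter computes, here is a minimal, self-contained sketch of the torch.isin branch with an assumed applied_labels of (1, 3); the values are made up.

import torch

img = torch.tensor([[0, 1, 2],
                    [3, 4, 1]])
applied_labels = torch.tensor([1, 3])
# keep only the applied labels, zero out everything else
out = torch.where(torch.isin(img, applied_labels), img, torch.zeros_like(img))
print(out)
# tensor([[0, 1, 0],
#         [3, 0, 1]])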
Example #7
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")

        if isinstance(img, torch.Tensor):
            if hasattr(torch, "isin"):
                appl_lbls = torch.as_tensor(self.applied_labels, device=img.device)
                return torch.where(torch.isin(img, appl_lbls), img, 0)
            else:
                out = self(img.detach().cpu().numpy())
                out, *_ = convert_to_dst_type(out, img)
                return out
        return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
Example #8
def save_scores(detector: NoveltyDetector,
                output_folder,
                patch=False,
                quad_full_image=False):
    (_, valid_loader, test_loader), class_to_idx = polycraft_dataloaders(
        patch=patch,
        include_novel=True,
        ret_class_to_idx=True,
        shuffle=False,
        quad_full_image=quad_full_image)
    normal_targets = torch.Tensor(
        [class_to_idx[c] for c in data_const.NORMAL_CLASSES])
    idx_to_class = {v: k for k, v in class_to_idx.items()}
    for split in ["valid", "test"]:
        loader = valid_loader if split == "valid" else test_loader
        # collect scores, novelty labels with 1 as novel, and targets
        novel_score = torch.Tensor([])
        novel_true = torch.Tensor([])
        targets = torch.Tensor([])
        for data, target in loader:
            novel_score = torch.hstack(
                [novel_score, detector.novelty_score(data).cpu()])
            novel_true = torch.hstack(
                [novel_true, (~torch.isin(target, normal_targets)).long()])
            targets = torch.hstack([targets, target])
        # convert targets to names
        classes = np.array([idx_to_class[target.item()] for target in targets])
        # output data
        folder_path = Path(output_folder)
        folder_path.mkdir(exist_ok=True, parents=True)
        torch.save(novel_score, folder_path / f"{split}_novel_score.pt")
        torch.save(novel_true, folder_path / f"{split}_novel_true.pt")
        np.save(folder_path / f"{split}_classes.npy", classes)
Example #9
File: utils.py Project: wxwilcke/mrgcn
def sliceSparseCOO(t, idx):
    row, col = t._indices()[:,
                            torch.where(torch.isin(t._indices()[1], idx))[0]]

    col_index_map = {int(j): i for i, j in enumerate(idx)}
    col = torch.LongTensor([col_index_map[int(i)] for i in col])

    return torch.sparse_coo_tensor(torch.vstack([row, col]),
                                   torch.ones(len(col), dtype=torch.float32),
                                   size=[t.shape[0], len(idx)])
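A small, hypothetical usage of sliceSparseCOO for context; the input is coalesced because _indices() is called on it, and the values below are made up.

import torch

indices = torch.tensor([[0, 1, 2, 2],
                        [1, 3, 0, 3]])
t = torch.sparse_coo_tensor(indices, torch.ones(4), size=(3, 4)).coalesce()

idx = torch.tensor([1, 3])       # keep only columns 1 and 3
sliced = sliceSparseCOO(t, idx)  # kept columns are renumbered to 0 and 1
print(sliced.to_dense())
# tensor([[1., 0.],
#         [0., 1.],
#         [0., 1.]])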
Example #10
 def test_isin_different_dtypes(self, device):
     supported_types = all_types() if device == 'cpu' else all_types_and(torch.half)
     for mult in [1, 10]:
         for assume_unique in [False, True]:
             for dtype1, dtype2 in product(supported_types, supported_types):
                 a = torch.tensor([1, 2, 3], device=device, dtype=dtype1)
                 b = torch.tensor([3, 4, 5] * mult, device=device, dtype=dtype2)
                 ec = torch.tensor([False, False, True], device=device)
                 c = torch.isin(a, b, assume_unique=assume_unique)
                 self.assertEqual(c, ec)
Example #11
 def fill_hetero_nodes(self, input_node_features, volume_id):
     """
     Fill the heterogeneous nodes with the corresponding encoders
     """
     features_to_fill = torch.empty((input_node_features.shape[0], self.hparams["hidden"])).to(input_node_features.device)
     
     for encoder, model in zip(self.node_encoders, self.hparams["model_ids"]):
         node_id_mask = torch.isin(volume_id, torch.tensor(model["volume_ids"]).to(input_node_features.device))
         features_to_fill[node_id_mask] = encoder(input_node_features[node_id_mask])
     
     return features_to_fill
Example #12
    def fill_hetero_nodes(self, encoded_nodes, x, volume_id):
        """
        Fill the heterogeneous nodes with the corresponding encoders
        """
        for encoder, model in zip(self.node_encoders,
                                  self.hparams["model_ids"]):
            node_id_mask = torch.isin(
                volume_id,
                torch.tensor(model["volume_ids"]).to(x.device))
            encoded_nodes[node_id_mask] = checkpoint(
                encoder, x[node_id_mask, :model["num_features"]])

        return encoded_nodes
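The checkpoint call in this variant presumably refers to PyTorch's gradient checkpointing helper (an assumption; the snippet does not show the import). A minimal sketch of that usage:

import torch
from torch.utils.checkpoint import checkpoint

encoder = torch.nn.Linear(3, 8)
features = torch.randn(5, 3, requires_grad=True)
# checkpoint(fn, *args) runs fn without storing its intermediate activations
# and recomputes them during the backward pass, trading compute for memory
encoded = checkpoint(encoder, features)
encoded.sum().backward()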
Example #13
def get_unique_signal_segments(labels, pids, signal_pids):

    labels_unique, labels_inverse, labels_counts = labels.unique(
        return_counts=True, return_inverse=True)

    segments_pids = torch.stack([labels, pids])
    is_signal = torch.isin(
        pids, pids[signal_pids]) & (labels_counts[labels_inverse] >= 3)

    signal_segments_pids = segments_pids[:, is_signal]
    unique_signal_segments_pids = signal_segments_pids.unique(dim=1)

    return signal_segments_pids, unique_signal_segments_pids
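A hypothetical call for orientation (values invented): labels are per-hit segment labels, pids are per-hit particle ids, and signal_pids indexes the hits whose particle id counts as signal; only hits whose segment has at least 3 hits are kept.

import torch

labels = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2, 2])          # segment label per hit
pids = torch.tensor([11, 11, 11, 13, 13, 11, 11, 11, 22])   # particle id per hit
signal_pids = torch.tensor([0])  # hit 0 carries a signal particle id (11)

segments, unique_segments = get_unique_signal_segments(labels, pids, signal_pids)
print(unique_segments)
# tensor([[ 0,  2],
#         [11, 11]])  -> segments 0 and 2 both belong to signal pid 11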
Example #14
def _compute_variance_term_scatter(cluster_means,
                                   embeddings,
                                   target,
                                   norm,
                                   delta_var,
                                   instance_sizes,
                                   ignore_labels=None):
    assert cluster_means.shape[1] == embeddings.shape[1]
    ndim = embeddings.ndim - 2
    assert ndim in (2, 3), f"{ndim}"
    n_instances = cluster_means.shape[0]

    # compute the spatial mean and instance fields by scattering with the target tensor
    cluster_means_spatial = cluster_means[target]
    instance_sizes_spatial = instance_sizes[target]

    # permute the embedding dimension to axis 1
    if ndim == 2:
        cluster_means_spatial = cluster_means_spatial.permute(0, 3, 1, 2)
        dim_arg = (1, 2)
    else:
        cluster_means_spatial = cluster_means_spatial.permute(0, 4, 1, 2, 3)
        dim_arg = (1, 2, 3)
    assert embeddings.shape == cluster_means_spatial.shape

    # compute the variance
    variance = torch.norm(embeddings - cluster_means_spatial, norm, dim=1)

    # apply the ignore labels (if given)
    if ignore_labels is not None:
        assert isinstance(ignore_labels, list)
        # mask out the positions whose target label is in the ignore list
        mask = torch.ones_like(variance)
        mask[torch.isin(target,
                        torch.tensor(ignore_labels).to(mask.device))] = 0
        variance *= mask
        # decrease number of instances
        n_instances -= len(ignore_labels)
        # if there are only ignore labels in the target return 0
        if n_instances == 0:
            return 0.0

    # hinge the variance
    variance = torch.clamp(variance - delta_var, min=0)**2
    assert variance.shape == instance_sizes_spatial.shape

    # normalize the variance by instance sizes and number of instances and sum it up
    variance = torch.sum(variance / instance_sizes_spatial,
                         dim=dim_arg) / n_instances
    return variance
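For reference, a standalone sketch of the ignore-label masking pattern used above; the tensors are illustrative only.

import torch

target = torch.tensor([[0, 1, 2],
                       [2, 0, 1]])    # per-pixel instance labels
variance = torch.rand(2, 3)           # per-pixel variance term
ignore_labels = [0]

mask = torch.ones_like(variance)
mask[torch.isin(target, torch.tensor(ignore_labels))] = 0
variance = variance * mask            # ignored labels contribute nothing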
Example #15
 def forward(self):
     a = torch.tensor(0)
     b = torch.tensor(1)
     return len((
         torch.allclose(a, b),
         torch.argsort(a),
         torch.eq(a, b),
         torch.eq(a, 1),
         torch.equal(a, b),
         torch.ge(a, b),
         torch.ge(a, 1),
         torch.greater_equal(a, b),
         torch.greater_equal(a, 1),
         torch.gt(a, b),
         torch.gt(a, 1),
         torch.greater(a, b),
         torch.isclose(a, b),
         torch.isfinite(a),
         torch.isin(a, b),
         torch.isinf(a),
         torch.isposinf(a),
         torch.isneginf(a),
         torch.isnan(a),
         torch.isreal(a),
         torch.kthvalue(a, 1),
         torch.le(a, b),
         torch.le(a, 1),
         torch.less_equal(a, b),
         torch.lt(a, b),
         torch.lt(a, 1),
         torch.less(a, b),
         torch.maximum(a, b),
         torch.minimum(a, b),
         torch.fmax(a, b),
         torch.fmin(a, b),
         torch.ne(a, b),
         torch.ne(a, 1),
         torch.not_equal(a, b),
         torch.sort(a),
         torch.topk(a, 1),
         torch.msort(a),
     ))
Example #16
def _get_triple_mask(
    ids: Collection[int],
    triples: MappedTriples,
    columns: Union[int, Collection[int]],
    invert: bool = False,
    max_id: Optional[int] = None,
) -> torch.BoolTensor:
    # normalize input
    triples = triples[:, columns]
    if isinstance(columns, int):
        columns = [columns]
    mask = torch.isin(
        elements=triples,
        test_elements=torch.as_tensor(list(ids), dtype=torch.long),
        assume_unique=False,
        invert=invert,
    )
    if len(columns) > 1:
        mask = mask.all(dim=-1)
    return mask
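A minimal, hypothetical call for context; pykeen's MappedTriples is essentially a LongTensor of shape (n, 3), and the values here are made up.

import torch

triples = torch.tensor([[0, 0, 1],
                        [1, 1, 2],
                        [2, 0, 3]])   # (head, relation, tail) rows

# triples whose head entity (column 0) is in {0, 2}
print(_get_triple_mask(ids={0, 2}, triples=triples, columns=0))
# tensor([ True, False,  True])

# triples whose head and tail entities are both in {0, 1, 2}
print(_get_triple_mask(ids={0, 1, 2}, triples=triples, columns=[0, 2]))
# tensor([ True,  True, False])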
Example #17
    def test_isin(self, device, dtype):
        def assert_isin_equal(a, b):
            # Compare to the numpy reference implementation.
            x = torch.isin(a, b)
            a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)
            b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)
            y = np.isin(a, b)
            self.assertEqual(x, y)

        # multi-dim tensor, multi-dim tensor
        a = torch.arange(24, device=device, dtype=dtype).reshape([2, 3, 4])
        b = torch.tensor([[10, 20, 30], [0, 1, 3], [11, 22, 33]], device=device, dtype=dtype)
        assert_isin_equal(a, b)

        # zero-dim tensor
        zero_d = torch.tensor(3, device=device, dtype=dtype)
        assert_isin_equal(zero_d, b)
        assert_isin_equal(a, zero_d)
        assert_isin_equal(zero_d, zero_d)

        # empty tensor
        empty = torch.tensor([], device=device, dtype=dtype)
        assert_isin_equal(empty, b)
        assert_isin_equal(a, empty)
        assert_isin_equal(empty, empty)

        # scalar
        assert_isin_equal(a, 6)
        assert_isin_equal(5, b)

        def define_expected(lst, invert=False):
            expected = torch.tensor(lst, device=device)
            if invert:
                expected = expected.logical_not()
            return expected

        # Adapted from numpy's in1d tests
        for mult in [1, 10]:
            for invert in [False, True]:
                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a[0] = 8
                ec = define_expected([False, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a[0], a[3] = 4, 8
                ec = define_expected([True, False, True, False], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 3, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([False, True, False, True, True, True, True, True, True,
                                      False, True, False, False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                b = torch.tensor([2, 3, 4] * mult + [5, 5, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([True, True, True, True, True, True, True, True, True, True,
                                      True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 7, 1, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 2] * mult, device=device, dtype=dtype)
                ec = define_expected([False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                # multi-dimensional input case using sort-based algo
                for assume_unique in [False, True]:
                    a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
                    b = torch.arange(3, 30, device=device, dtype=dtype)
                    ec = define_expected([[False, False, False], [True, True, True]], invert=invert)
                    c = torch.isin(a, b, invert=invert, assume_unique=assume_unique)
                    self.assertEqual(c, ec)