Example #1
 def test_factory_type_inference(self):
     t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=torch.float32))
     self.assertEqual(torch.float32, t.dtype)
     t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=torch.float64))
     self.assertEqual(torch.float64, t.dtype)
     t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
     self.assertEqual(torch.int64, t.dtype)
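A quick standalone sketch of the same inference rules (interactive use, not part of the source test): the factory takes its dtype from the values it is given unless an explicit dtype= overrides it.

import torch

t = torch.sparse_coo_tensor([[0], [2]], [1.0])                        # inferred: torch.float32
t64 = torch.sparse_coo_tensor([[0], [2]], [1], dtype=torch.float64)   # explicit override
print(t.dtype, t64.dtype)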
Example #2
    def test_factory_copy(self):
        # both correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float64)
        sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64)
        self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
        self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())

        # only indices correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float32)
        sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64)
        self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
        self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())

        # only values correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float64)
        sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64)
        self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
        self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())

        # neither correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float32)
        sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64)
        self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
        self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
Example #3
 def test_factory(self):
     default_size = torch.Size([1, 3])
     size = torch.Size([3, 3])
     for include_size in [True, False]:
         for use_tensor_idx in [True, False]:
             for use_tensor_val in [True, False]:
                 for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
                     # have to include size with cuda sparse tensors
                     include_size = include_size or use_cuda
                     dtype = torch.float64
                     long_dtype = torch.int64
                     device = torch.device('cpu') if not use_cuda else torch.device(torch.cuda.device_count() - 1)
                     indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
                     values = torch.tensor([1.], dtype=dtype) if use_tensor_val else 1.
                     if include_size:
                         sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
                                                                 device=device, requires_grad=True)
                     else:
                         sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
                                                                 device=device, requires_grad=True)
                     self.assertEqual(indices, sparse_tensor._indices())
                     self.assertEqual(values, sparse_tensor._values())
                     self.assertEqual(size if include_size else default_size, sparse_tensor.size())
                     self.assertEqual(dtype, sparse_tensor.dtype)
                     if use_cuda:
                         self.assertEqual(device, sparse_tensor._values().device)
                     self.assertEqual(True, sparse_tensor.requires_grad)
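A condensed sketch of the factory variants the loops above exercise (made-up interactive use): indices and values may be plain Python sequences or tensors, and the size may be inferred from the largest index in each sparse dimension.

import torch

s1 = torch.sparse_coo_tensor(([0], [2]), [1.0])              # size inferred as (1, 3)
s2 = torch.sparse_coo_tensor(torch.tensor([[0], [2]]),
                             torch.tensor([1.0]), (3, 3))    # size given explicitly
print(s1.size(), s2.size())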
Example #4
 def test_factory_device_type_inference(self):
     # exercise every combination of CPU/CUDA placement for indices, values, and the result device
     shape = (1, 3)
     for indices_device in ['cuda', 'cpu']:
         for values_device in ['cuda', 'cpu']:
             for sparse_device in ['cuda', 'cpu', None]:
                 t = torch.sparse_coo_tensor(torch.tensor(([0], [2]), device=indices_device),
                                             torch.tensor([1.], device=values_device),
                                             (1, 3), device=sparse_device)
                 should_be_cuda = sparse_device == 'cuda' or (sparse_device is None and values_device == 'cuda')
                 self.assertEqual(should_be_cuda, t.is_cuda)
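The rule those assertions encode, as a standalone sketch (requires a CUDA build; the tensors are made up): an explicit device= wins, otherwise the result lands where values lives.

import torch

t = torch.sparse_coo_tensor(torch.tensor([[0], [2]], device='cuda'),
                            torch.tensor([1.], device='cuda'), (1, 3))
assert t.is_cuda   # no device= given, so it is inherited from values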
Example #5
# Note: relies on names defined elsewhere in the original module:
# `_use_shared_memory`, `numpy_type_map`, `int_classes`, `string_classes`,
# plus `re`, `collections`, and `sp` (scipy.sparse).
def _default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: _default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [_default_collate(samples) for samples in transposed]
    elif 'scipy.sparse' in str(elem_type):
        data = sp.vstack(batch)
        i = torch.LongTensor(data.nonzero())
        v = torch.Tensor(data.data)
        shape = (len(batch), batch[0].shape[1])
        output = torch.sparse_coo_tensor(i, v, shape)
        return output

    raise TypeError((error_msg.format(type(batch[0]))))
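A minimal sketch of the scipy.sparse branch above (assumes scipy is installed and that the module-level sp alias the snippet relies on is in scope; the batch contents are made up):

import scipy.sparse

batch = [scipy.sparse.coo_matrix([[0., 1., 0.]]) for _ in range(4)]
collated = _default_collate(batch)   # -> 4x3 torch sparse COO tensor
print(collated.to_dense())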
Example #6
def block_diag_sparse(a: T.Tensor, dense=False):
    """
    Creates a sparse block diagonal matrix from the provided array.
    Given the input tensor of size ``(n, r, c)``, the output will have
    the matrices of the last two indices arranged on the diagonal::

        [[a[0], 0, 0],
         [0, a[1], 0],
         [0, 0, a[2]]]

    :param a:
        a tensor of size ``(n, r, c)``.
    :param dense:
        whether to return a dense matrix.
        Default: ``False``.
    :return:
        a tensor with `a[0]`, `a[1]`, `a[2]`, ... on the diagonal.
        Has the same dtype as `a`.

    Notes
    -----
    This function is for square matrices only. For general cases,
    use :func:`~neuralnet_pytorch.utils.block_diag`.

    See Also
    --------
    :func:`~neuralnet_pytorch.utils.block_diag`

    Examples
    --------

    >>> from neuralnet_pytorch.utils import block_diag_sparse
    >>> import numpy as np
    >>> a = T.arange(3 * 2 * 4).view(3, 2, 4)
    >>> block_diag_sparse(a)
    tensor(indices=tensor([[ 0,  0,  0,  0,  1,  1,  1,  1,  2,  2,  2,  2,  3,  3,
                             3,  3,  4,  4,  4,  4,  5,  5,  5,  5],
                           [ 0,  1,  2,  3,  0,  1,  2,  3,  4,  5,  6,  7,  4,  5,
                             6,  7,  8,  9, 10, 11,  8,  9, 10, 11]]),
           values=tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
                          14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
           size=(6, 12), nnz=24, layout=torch.sparse_coo)
    >>> block_diag_sparse(a, dense=True)
    tensor([[ 0,  1,  2,  3,  0,  0,  0,  0,  0,  0,  0,  0],
            [ 4,  5,  6,  7,  0,  0,  0,  0,  0,  0,  0,  0],
            [ 0,  0,  0,  0,  8,  9, 10, 11,  0,  0,  0,  0],
            [ 0,  0,  0,  0, 12, 13, 14, 15,  0,  0,  0,  0],
            [ 0,  0,  0,  0,  0,  0,  0,  0, 16, 17, 18, 19],
            [ 0,  0,  0,  0,  0,  0,  0,  0, 20, 21, 22, 23]])
    """
    assert len(a.shape) == 3, \
        'Input tensor must have 3 dimensions with the last two being matrices, got {}'.format(len(a.shape))

    n, r, c = a.shape
    y = T.arange(r)
    x = T.arange(c)
    yy, xx = T.meshgrid(y, x)

    xxs = T.stack([xx] * n)
    yys = T.stack([yy] * n)
    transl_x = T.arange(n) * c
    transl_y = T.arange(n) * r
    xxs_transl = xxs + transl_x[..., None, None]
    yys_transl = yys + transl_y[..., None, None]

    x_flat = xxs_transl.flatten()
    y_flat = yys_transl.flatten()
    indices = T.stack((y_flat, x_flat))

    a_sp = T.sparse_coo_tensor(indices.long(), a.flatten(),
                               size=T.Size((n * r, n * c)), dtype=a.dtype)
    return a_sp.to_dense() if dense else a_sp
Example #7
 def test_is_nonzero(self):
     self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,)).is_nonzero())
     self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,)).is_nonzero())
     self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1)).is_nonzero())
     self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,)).is_nonzero())
     self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,)).is_nonzero())
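The last two assertions lean on coalescing: duplicate indices are summed before the single-element check, so (-1., 1.) at the same index nets out to zero. A quick sketch of what is_nonzero() actually sees:

import torch

t = torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,))
print(t.coalesce())   # the two entries at index 0 sum to 0.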
Example #8
 def forward(ctx, indices, values, shape, b):
     assert not indices.requires_grad
     a = torch.sparse_coo_tensor(indices, values, shape)
     ctx.save_for_backward(a, b)
     ctx.N = shape[0]
     return torch.matmul(a, b)
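A hedged sketch of what the matching backward could look like (the class name and the assumption that only values and b need gradients are mine, not from the source):

import torch

class SparseDenseMM(torch.autograd.Function):
    @staticmethod
    def forward(ctx, indices, values, shape, b):
        a = torch.sparse_coo_tensor(indices, values, shape)
        ctx.save_for_backward(a, b)
        return torch.matmul(a, b)

    @staticmethod
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors
        grad_values = grad_b = None
        if ctx.needs_input_grad[1]:
            # entry (i, j) of `a` contributes <grad_output[i], b[j]> to its value's grad
            idx = a._indices()
            grad_values = (grad_output[idx[0]] * b[idx[1]]).sum(-1)
        if ctx.needs_input_grad[3]:
            grad_b = torch.sparse.mm(a.t(), grad_output)
        return None, grad_values, None, grad_b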
Example #9
def check_for_deadlock():
    indices = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
    values = torch.FloatTensor([3, 4, 5])
    tensor = torch.sparse_coo_tensor(indices, values, torch.Size([2, 4]))
    return tensor
Example #10
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == len(self.anchor_generators)

        anchor_list, _ = self.get_anchors(featmap_sizes, img_metas)
        anchors = [torch.cat(anchor) for anchor in anchor_list]

        # concatenate each level
        cls_scores = [
            cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1,
                                            self.cls_out_channels)
            for cls in cls_scores
        ]
        bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)
            for bbox_pred in bbox_preds
        ]
        cls_scores = torch.cat(cls_scores, dim=1)
        bbox_preds = torch.cat(bbox_preds, dim=1)

        cls_prob = torch.sigmoid(cls_scores)
        box_prob = []
        num_pos = 0
        positive_losses = []
        for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,
                bbox_preds_) in enumerate(
                    zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):
            gt_labels_ -= 1

            with torch.no_grad():
                # box_localization: a_{j}^{loc}, shape: [j, 4]
                pred_boxes = delta2bbox(anchors_, bbox_preds_,
                                        self.target_means, self.target_stds)

                # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
                object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)

                # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
                t1 = self.bbox_thr
                t2 = object_box_iou.max(
                    dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
                object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp(
                    min=0, max=1)

                # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
                num_obj = gt_labels_.size(0)
                indices = torch.stack(
                    [torch.arange(num_obj).type_as(gt_labels_), gt_labels_],
                    dim=0)
                object_cls_box_prob = torch.sparse_coo_tensor(
                    indices, object_box_prob)

                # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j]
                """
                from "start" to "end" implement:
                image_box_iou = torch.sparse.max(object_cls_box_prob,
                                                 dim=0).t()

                """
                # start
                box_cls_prob = torch.sparse.sum(object_cls_box_prob,
                                                dim=0).to_dense()

                indices = torch.nonzero(box_cls_prob).t_()
                if indices.numel() == 0:
                    image_box_prob = torch.zeros(
                        anchors_.size(0),
                        self.cls_out_channels).type_as(object_box_prob)
                else:
                    nonzero_box_prob = torch.where(
                        (gt_labels_.unsqueeze(dim=-1) == indices[0]),
                        object_box_prob[:, indices[1]],
                        torch.tensor(
                            [0]).type_as(object_box_prob)).max(dim=0).values

                    # upmap to shape [j, c]
                    image_box_prob = torch.sparse_coo_tensor(
                        indices.flip([0]),
                        nonzero_box_prob,
                        size=(anchors_.size(0),
                              self.cls_out_channels)).to_dense()
                # end

                box_prob.append(image_box_prob)

            # construct bags for objects
            match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)
            _, matched = torch.topk(match_quality_matrix,
                                    self.pre_anchor_topk,
                                    dim=1,
                                    sorted=False)
            del match_quality_matrix

            # matched_cls_prob: P_{ij}^{cls}
            matched_cls_prob = torch.gather(
                cls_prob_[matched], 2,
                gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
                                                 1)).squeeze(2)

            # matched_box_prob: P_{ij}^{loc}
            matched_anchors = anchors_[matched]
            matched_object_targets = bbox2delta(
                matched_anchors,
                gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors),
                self.target_means, self.target_stds)
            loss_bbox = self.loss_bbox(bbox_preds_[matched],
                                       matched_object_targets,
                                       reduction_override='none').sum(-1)
            matched_box_prob = torch.exp(-loss_bbox)

            # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
            num_pos += len(gt_bboxes_)
            positive_losses.append(
                self.positive_bag_loss(matched_cls_prob, matched_box_prob))
        positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)

        # box_prob: P{a_{j} \in A_{+}}
        box_prob = torch.stack(box_prob, dim=0)

        # negative_loss:
        # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
        negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
            1, num_pos * self.pre_anchor_topk)

        losses = {
            'positive_bag_loss': positive_loss,
            'negative_bag_loss': negative_loss
        }
        return losses
Example #11
    def test_cuda_array_interface(self):
        """torch.Tensor exposes __cuda_array_interface__ for cuda tensors.

        An object t is considered a cuda-tensor if:
            hasattr(t, '__cuda_array_interface__')

        A cuda-tensor provides a tensor description dict:
            shape: (integer, ...) Tensor shape.
            strides: (integer, ...) Tensor strides, in bytes.
            typestr: (str) A numpy-style typestr.
            data: (int, boolean) A (data_ptr, read-only) tuple.
            version: (int) Version 0

        See:
        https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
        """

        types = [
            torch.DoubleTensor,
            torch.FloatTensor,
            torch.HalfTensor,
            torch.LongTensor,
            torch.IntTensor,
            torch.ShortTensor,
            torch.CharTensor,
            torch.ByteTensor,
        ]
        dtypes = [
            numpy.float64,
            numpy.float32,
            numpy.float16,
            numpy.int64,
            numpy.int32,
            numpy.int16,
            numpy.int8,
            numpy.uint8,
        ]
        for tp, npt in zip(types, dtypes):

            # CPU tensors do not implement the interface.
            cput = tp(10)

            self.assertFalse(hasattr(cput, "__cuda_array_interface__"))
            self.assertRaises(AttributeError, lambda: cput.__cuda_array_interface__)

            # Sparse CPU/CUDA tensors do not implement the interface
            if tp not in (torch.HalfTensor,):
                indices_t = torch.empty(1, cput.size(0), dtype=torch.long).clamp_(min=0)
                sparse_t = torch.sparse_coo_tensor(indices_t, cput)

                self.assertFalse(hasattr(sparse_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_t.__cuda_array_interface__
                )

                sparse_cuda_t = torch.sparse_coo_tensor(indices_t, cput).cuda()

                self.assertFalse(hasattr(sparse_cuda_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_cuda_t.__cuda_array_interface__
                )

            # CUDA tensors have the attribute and v0 interface
            cudat = tp(10).cuda()

            self.assertTrue(hasattr(cudat, "__cuda_array_interface__"))

            ar_dict = cudat.__cuda_array_interface__

            self.assertEqual(
                set(ar_dict.keys()), {"shape", "strides", "typestr", "data", "version"}
            )

            self.assertEqual(ar_dict["shape"], (10,))
            self.assertEqual(ar_dict["strides"], (cudat.storage().element_size(),))
            # typestr from numpy, cuda-native little-endian
            self.assertEqual(ar_dict["typestr"], numpy.dtype(npt).newbyteorder("<").str)
            self.assertEqual(ar_dict["data"], (cudat.data_ptr(), False))
            self.assertEqual(ar_dict["version"], 0)
Example #12
def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)
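A minimal usage sketch (the gradient and the scaling are made up; this mirrors how Adagrad-style steps rebuild a sparse update with the gradient's sparsity pattern):

import torch

grad = torch.sparse_coo_tensor([[0, 2]], [1.0, 3.0], (4,)).coalesce()
scaled = grad._values() / 10.0                         # some per-nnz update term
update = _make_sparse(grad, grad._indices(), scaled)   # same indices as grad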
Example #13
def engine(
        config_id='naive3',
        device_id=3,
        model_idx=4,
        scaffolds_file='data-center/test.smi',
        batch_size=500,
        np=mp.cpu_count(),
):
    device = torch.device(f'cuda:{device_id}')
    model_ckpt = path.join(path.dirname(__file__), 'ckpt', config_id,
                           f'model_{model_idx}_ckpt.ckpt')
    model_dic_loc = path.join(path.dirname(__file__), 'ckpt', config_id,
                              'modle_dic.json')
    if path.exists(model_dic_loc):
        with open(model_dic_loc) as f:
            model_dic = json.load(f)
    else:
        model_dic = dict(
            num_in_feat=43,
            num_c_feat=8,
            num_embeddings=8,
            casual_hidden_sizes=[16, 32],
            num_botnec_feat=72,  # 16 x 4
            num_k_feat=24,  # 16
            num_dense_layers=20,
            num_out_feat=268,
            num_z_feat=10,
            activation='elu',
            use_cuda=True)
    # print(model_ckpt)
    model = GraphInf(**model_dic)
    model.load_state_dict(torch.load(model_ckpt))
    print(device_id)
    model.to(device)
    model.eval()

    dataloader = ComLoader(original_scaffolds_file=scaffolds_file,
                           batch_size=batch_size,
                           num_workers=1)

    all_num_valid = 0
    all_num_recon = 0
    events_loc = f'eval_configs/{config_id}/'
    if not path.exists(events_loc):
        makedirs(events_loc)

    with SummaryWriter(events_loc) as writer:
        step = 0
        with open(f'eval_configs/{config_id}_records.txt', 'w') as f:
            for batch in ipb(dataloader,
                             desc="step",
                             total=dataloader.num_id_block):
                (block, nums_nodes, nums_edges, seg_ids, bond_info_all,
                 nodes_o, nodes_c) = batch
                num_N = sum(nums_nodes)
                num_E = sum(nums_edges)

                values = torch.ones(num_E)

                s_adj = torch.sparse_coo_tensor(bond_info_all.T, values,
                                                torch.Size([num_N,
                                                            num_N])).to(device)

                s_nfeat = torch.from_numpy(nodes_o).to(device)
                c_nfeat = torch.from_numpy(nodes_c).to(device)

                x_inf, mu2, var2 = model.inf(c_nfeat, s_adj)
                x_inf, mu2, var2 = (x_inf.cpu().detach(), mu2.cpu().detach(),
                                    var2.cpu().detach())
                x_recon, mu1, var1 = model.reconstrcut(s_nfeat, c_nfeat, s_adj)
                x_recon, mu1, var1 = (x_recon.cpu().detach(),
                                      mu1.cpu().detach(), var1.cpu().detach())

                seg_ids = torch.from_numpy(seg_ids)

                MSE, KL = loss_func(x_recon, s_nfeat, mu1, var1, mu2, var2,
                                    seg_ids)
                loss = MSE + KL
                writer.add_scalar(f'loss', loss.cpu().item(), step)
                writer.add_scalar(f'recon_loss', MSE.cpu().item(), step)
                writer.add_scalar(f'KL', KL.cpu().item(), step)

                ls_x_inf = torch.split(x_inf, nums_nodes)
                ls_x_recon = torch.split(x_recon, nums_nodes)
                ls_mols_inf = Parallel(n_jobs=np, backend='multiprocessing')(
                    delayed(get_mol_from_array)(ls_x_inf[i], block[i], True,
                                                False)
                    for i in range(len(block)))
                ls_mols_recon = Parallel(n_jobs=np, backend='multiprocessing')(
                    delayed(get_mol_from_array)(ls_x_recon[i], block[i], True,
                                                True)
                    for i in range(len(block)))
                num_valid = sum(x is not None for x in ls_mols_inf)
                num_recon = sum(ls_mols_recon[i] == block[i]
                                for i in range(len(block)))
                all_num_valid += num_valid
                all_num_recon += num_recon
                f.write(
                    str(num_valid) + '\t' + str(num_recon) + '\t' +
                    str(len(ls_mols_inf)) + '\n')
                f.flush()
                step += 1

    with open(f'eval_configs/{config_id}.txt', 'w') as f:
        f.write(str(all_num_valid) + '\t')
        f.write(str(all_num_recon))
Example #14
def sparse_getitem(sparse, idxs):
    """
    """
    if not isinstance(idxs, tuple):
        idxs = (idxs, )

    if not sparse.ndimension() <= 2:
        raise RuntimeError("Must be a 1d or 2d sparse tensor")

    if len(idxs) > sparse.ndimension():
        raise RuntimeError("Invalid index for %d-order tensor" %
                           sparse.ndimension())

    indices = sparse._indices()
    values = sparse._values()
    size = list(sparse.size())

    for i, idx in list(enumerate(idxs))[::-1]:
        if isinstance(idx, int):
            del size[i]
            mask = indices[i].eq(idx)
            if sum(mask):
                new_indices = torch.zeros(indices.size(0) - 1,
                                          sum(mask),
                                          dtype=indices.dtype,
                                          device=indices.device)
                for j in range(indices.size(0)):
                    if i > j:
                        new_indices[j].copy_(indices[j][mask])
                    elif i < j:
                        new_indices[j - 1].copy_(indices[j][mask])
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0) - 1, 1).zero_()
                values.resize_(1).zero_()

            if not len(size):
                return sum(values)

        elif isinstance(idx, slice):
            start, stop, step = idx.indices(size[i])
            size = list(size[:i]) + [stop - start] + list(size[i + 1:])
            if step != 1:
                raise RuntimeError("Slicing with step is not supported")
            mask = indices[i].lt(stop) * indices[i].ge(start)
            if sum(mask):
                new_indices = torch.zeros(indices.size(0),
                                          sum(mask),
                                          dtype=indices.dtype,
                                          device=indices.device)
                for j in range(indices.size(0)):
                    new_indices[j].copy_(indices[j][mask])
                new_indices[i].sub_(start)
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0), 1).zero_()
                values.resize_(1).zero_()

        else:
            raise RuntimeError("Unknown index type")

    return torch.sparse_coo_tensor(indices,
                                   values,
                                   torch.Size(size),
                                   dtype=values.dtype,
                                   device=values.device)
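A small usage sketch (the matrix is made up):

import torch

s = torch.sparse_coo_tensor([[0, 1, 2], [0, 1, 2]], [1., 2., 3.], (3, 3))
row0 = sparse_getitem(s, 0)                            # sparse 1-d slice, size (3,)
block = sparse_getitem(s, (slice(0, 2), slice(0, 2)))  # sparse top-left 2x2 block
print(block.to_dense())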
Example #15
def oned_partition(rank, size, inputs, adj_matrix, data, features, classes,
                   device):
    node_count = inputs.size(0)
    n_per_proc = math.ceil(float(node_count) / size)

    am_partitions = None
    am_pbyp = None

    inputs = inputs.to(torch.device("cpu"))
    adj_matrix = adj_matrix.to(torch.device("cpu"))

    # Compute the adj_matrix and inputs partitions for this process
    # TODO: Maybe I do want grad here. Unsure.
    with torch.no_grad():
        # Column partitions
        am_partitions, vtx_indices = split_coo(adj_matrix, node_count,
                                               n_per_proc, 1)

        proc_node_count = vtx_indices[rank + 1] - vtx_indices[rank]
        am_pbyp, _ = split_coo(am_partitions[rank], node_count, n_per_proc, 0)
        for i in range(len(am_pbyp)):
            if i == size - 1:
                last_node_count = vtx_indices[i + 1] - vtx_indices[i]
                am_pbyp[i] = torch.sparse_coo_tensor(
                    am_pbyp[i],
                    torch.ones(am_pbyp[i].size(1)),
                    size=(last_node_count, proc_node_count),
                    requires_grad=False)

                am_pbyp[i] = scale_elements(adj_matrix, am_pbyp[i], node_count,
                                            vtx_indices[i], vtx_indices[rank])
            else:
                am_pbyp[i] = torch.sparse_coo_tensor(
                    am_pbyp[i],
                    torch.ones(am_pbyp[i].size(1)),
                    size=(n_per_proc, proc_node_count),
                    requires_grad=False)

                am_pbyp[i] = scale_elements(adj_matrix, am_pbyp[i], node_count,
                                            vtx_indices[i], vtx_indices[rank])

        for i in range(len(am_partitions)):
            proc_node_count = vtx_indices[i + 1] - vtx_indices[i]
            am_partitions[i] = torch.sparse_coo_tensor(
                am_partitions[i],
                torch.ones(am_partitions[i].size(1)),
                size=(node_count, proc_node_count),
                requires_grad=False)
            am_partitions[i] = scale_elements(adj_matrix, am_partitions[i],
                                              node_count, 0, vtx_indices[i])

        input_partitions = torch.split(inputs,
                                       math.ceil(float(inputs.size(0)) / size),
                                       dim=0)

        adj_matrix_loc = am_partitions[rank]
        inputs_loc = input_partitions[rank]

    print(f"rank: {rank} adj_matrix_loc.size: {adj_matrix_loc.size()}",
          flush=True)
    print(f"rank: {rank} inputs.size: {inputs.size()}", flush=True)
    return inputs_loc, adj_matrix_loc, am_pbyp
Example #16
def scale_elements(adj_matrix, adj_part, node_count, row_vtx, col_vtx):
    if not normalization:
        return adj_part

    # Scale each edge (u, v) by 1 / (sqrt(u) * sqrt(v))
    # indices = adj_part._indices()
    # values = adj_part._values()

    # deg_map = dict()
    # for i in range(adj_part._nnz()):
    #     u = indices[0][i] + row_vtx
    #     v = indices[1][i] + col_vtx

    #     if u.item() in deg_map:
    #         degu = deg_map[u.item()]
    #     else:
    #         degu = (adj_matrix[0] == u).sum().item()
    #         deg_map[u.item()] = degu

    #     if v.item() in deg_map:
    #         degv = deg_map[v.item()]
    #     else:
    #         degv = (adj_matrix[0] == v).sum().item()
    #         deg_map[v.item()] = degv

    #     values[i] = values[i] / (math.sqrt(degu) * math.sqrt(degv))

    adj_part = adj_part.coalesce()
    deg = torch.histc(adj_matrix[0].double(), bins=node_count)
    deg = deg.pow(-0.5)

    row_len = adj_part.size(0)
    col_len = adj_part.size(1)

    dleft = torch.sparse_coo_tensor(
        [np.arange(0, row_len).tolist(),
         np.arange(0, row_len).tolist()],
        deg[row_vtx:(row_vtx + row_len)].float(),
        size=(row_len, row_len),
        requires_grad=False,
        device=torch.device("cpu"))

    dright = torch.sparse_coo_tensor(
        [np.arange(0, col_len).tolist(),
         np.arange(0, col_len).tolist()],
        deg[col_vtx:(col_vtx + col_len)].float(),
        size=(col_len, col_len),
        requires_grad=False,
        device=torch.device("cpu"))
    # adj_part = torch.sparse.mm(torch.sparse.mm(dleft, adj_part), dright)
    ad_ind, ad_val = torch_sparse.spspmm(adj_part._indices(),
                                         adj_part._values(), dright._indices(),
                                         dright._values(), adj_part.size(0),
                                         adj_part.size(1), dright.size(1))

    adj_part_ind, adj_part_val = torch_sparse.spspmm(dleft._indices(),
                                                     dleft._values(), ad_ind,
                                                     ad_val, dleft.size(0),
                                                     dleft.size(1),
                                                     adj_part.size(1))

    adj_part = torch.sparse_coo_tensor(adj_part_ind,
                                       adj_part_val,
                                       size=(adj_part.size(0),
                                             adj_part.size(1)),
                                       requires_grad=False,
                                       device=torch.device("cpu"))

    return adj_part
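For intuition, a dense sketch of the same symmetric normalization A_hat = D^{-1/2} A D^{-1/2}, with degrees taken from the global edge list via torch.histc exactly as above (the graph is made up):

import torch

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])   # COO edge list
A = torch.zeros(3, 3)
A[edge_index[0], edge_index[1]] = 1.0
deg = torch.histc(edge_index[0].double(), bins=3).pow(-0.5).float()
A_hat = torch.diag(deg) @ A @ torch.diag(deg)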
Example #17
 def foo():
     return torch.sparse_coo_tensor((2, 2), dtype=torch.double)
Example #18
# torch.device identifies the storage device: the name of a CPU or CUDA GPU
# (one of a tensor's three attributes)

# Tensor attributes --- sparse tensors
#
# torch.sparse_coo_tensor()
# COO format: the coordinates of the non-zero elements

# indices = torch.tensor([[0, 1, 1], [2, 0, 2]])
# values = torch.tensor([3, 4, 5], dtype=torch.float32)
# x = torch.sparse_coo_tensor(indices, values, [2, 4])

dev = torch.device('cpu')
# dev = torch.device('cuda')
a = torch.tensor([2, 2], device=dev, dtype=torch.float32)
print(a)


# a sparse tensor must specify its indices and values explicitly
i = torch.tensor([[0, 1, 2], [0, 1, 2]])  # indices (coordinates)
v = torch.tensor([1, 2, 3])  # values
b = torch.sparse_coo_tensor(i, v, (4, 4))
print(b)



i = torch.tensor([[0, 1, 2], [0, 1, 2]])  # indices (coordinates)
v = torch.tensor([1, 2, 3])  # values
c = torch.sparse_coo_tensor(i, v, (4, 4), dtype=torch.float32, device=dev).to_dense()  # convert sparse to dense
print(c)
Example #19
 def forward(ctx, indices, values, shape, b):
     a = torch.sparse_coo_tensor(indices, values, shape)
     ctx.rowsize = shape[0]
     ctx.save_for_backward(a, b)
     return torch.mm(a.cpu(), b).cuda()
Example #20
def pca_lowrank(A, q=None, center=True, niter=2):
    # type: (Tensor, Optional[int], bool, int) -> Tuple[Tensor, Tensor, Tensor]
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

                - :math:`A` is a data matrix with ``m`` samples and
                  ``n`` features

                - the :math:`V` columns represent the principal directions

                - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
                  :math:`A^T A / (m - 1)` which is the covariance of
                  ``A`` when ``center=True`` is provided.

                - ``matmul(A, V[:, :k])`` projects data to the first k
                  principal components

    .. note:: Different from the standard SVD, the size of returned
              matrices depend on the specified rank and q
              values as follows:

                - :math:`U` is m x q matrix

                - :math:`S` is q-vector

                - :math:`V` is n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator

    Args:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
                           :math:`A`. By default, ``q = min(6, m,
                           n)``.

        center (bool, optional): if True, center the input tensor,
                                 otherwise, assume that the input is
                                 centered.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """

    if not torch.jit.is_scripting():
        if type(A) is not torch.Tensor and has_torch_function((A, )):
            return handle_torch_function(pca_lowrank, (A, ),
                                         A,
                                         q=q,
                                         center=center,
                                         niter=niter)

    (m, n) = A.shape[-2:]

    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError('q(={}) must be non-negative integer'
                         ' and not greater than min(m, n)={}'.format(
                             q, min(m, n)))
    if not (niter >= 0):
        raise ValueError(
            'niter(={}) must be non-negative integer'.format(niter))

    dtype = _utils.get_floating_dtype(A)

    if not center:
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError(
                'pca_lowrank input is expected to be 2-dimensional tensor')
        c = torch.sparse.sum(A, dim=(-2, )) / m
        # reshape c
        column_indices = c.indices()[0]
        indices = torch.zeros(2,
                              len(column_indices),
                              dtype=column_indices.dtype,
                              device=column_indices.device)
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(indices,
                                      c.values(), (n, 1),
                                      dtype=dtype,
                                      device=A.device)

        ones_m1_t = torch.ones(A.shape[:-2] + (1, m),
                               dtype=dtype,
                               device=A.device)
        M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        C = A.mean(dim=(-2, ), keepdim=True)
        return _svd_lowrank(A - C, q, niter=niter, M=None)
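A brief usage sketch of the docstring's notes (synthetic data; the seed is reset for repeatability as the note suggests):

import torch

torch.manual_seed(0)
A = torch.randn(100, 5) @ torch.randn(5, 20)   # ~rank-5 data: 100 samples, 20 features
U, S, V = torch.pca_lowrank(A, q=5)
projected = A @ V[:, :2]                       # project onto first two principal components
explained = S ** 2 / (A.shape[0] - 1)          # eigenvalues of the covariance of A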
Example #21
 def _deserialize_torch_tensor(data):
     if isinstance(data, tuple):
         return torch.sparse_coo_tensor(data[0], data[1], data[2])
     else:
         return torch.from_numpy(data)
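A hedged sketch of the serializer this implies on the sending side (the tuple layout is inferred from the deserializer above; coalescing first is my assumption):

def _serialize_torch_tensor(tensor):
    if tensor.is_sparse:
        tensor = tensor.coalesce()
        return (tensor._indices(), tensor._values(), tuple(tensor.shape))
    return tensor.numpy()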
Example #22
    def test_conversion_errors(self):
        """Numba properly detects array interface for tensor.Tensor variants."""

        # CPU tensors are not cuda arrays.
        cput = torch.arange(100)

        self.assertFalse(numba.cuda.is_cuda_array(cput))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cput)

        # Sparse tensors are not cuda arrays, regardless of device.
        sparset = torch.sparse_coo_tensor(cput[None, :], cput)

        self.assertFalse(numba.cuda.is_cuda_array(sparset))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparset)

        sparse_cuda_t = sparset.cuda()

        self.assertFalse(numba.cuda.is_cuda_array(sparse_cuda_t))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparse_cuda_t)

        # Device-status overrides gradient status.
        # CPU+gradient isn't a cuda array.
        cpu_gradt = torch.zeros(100).requires_grad_(True)

        self.assertFalse(numba.cuda.is_cuda_array(cpu_gradt))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cpu_gradt)

        # CUDA+gradient raises a RuntimeError on check or conversion.
        #
        # Use of hasattr for interface detection causes interface change in
        # python2; it swallows all exceptions not just AttributeError.
        cuda_gradt = torch.zeros(100).requires_grad_(True).cuda()

        if sys.version_info.major > 2:
            # 3+, conversion raises RuntimeError
            with self.assertRaises(RuntimeError):
                numba.cuda.is_cuda_array(cuda_gradt)
            with self.assertRaises(RuntimeError):
                numba.cuda.as_cuda_array(cuda_gradt)
        else:
            # 2, allow either RuntimeError on access or non-implementing
            # behavior to future-proof against potential changes in numba.
            try:
                was_cuda_array = numba.cuda.is_cuda_array(cuda_gradt)
                was_runtime_error = False
            except RuntimeError:
                was_cuda_array = False
                was_runtime_error = True

            self.assertFalse(was_cuda_array)

            if not was_runtime_error:
                with self.assertRaises(TypeError):
                    numba.cuda.as_cuda_array(cuda_gradt)
            else:
                with self.assertRaises(RuntimeError):
                    numba.cuda.as_cuda_array(cuda_gradt)
Example #23
def test_permute_structure(pdb_id, offset):
    # Reference structure (first chain from provided PDB)
    structure = kmbio.PDB.load(f"rcsb://{pdb_id}.cif")
    while len(list(structure.chains)) > 1:
        structure[0]._children.popitem()
    df, pairs, distances = get_distances(structure)
    num_atoms = len(df)
    num_offset_atoms = len(df[df["residue_idx"] < offset])

    # Permuted structure
    structure_p = permute_structure(structure, offset)
    _, pairs_p, distances_p = get_distances(structure_p)

    # === Test that `permute_structure` works ===

    # Permuted adjacency from reference structure
    pairs_p_ref = permute_adjacency_dense(pairs, num_atoms, num_offset_atoms)
    pairs_p_ref.sort(axis=1)
    pairs_distances_p_ref = np.c_[pairs_p_ref, distances]
    pairs_distances_p_ref.sort(axis=0)

    # Permuted adjacency from permuted structure
    pairs_distances_p = np.c_[pairs_p, distances_p]
    pairs_distances_p.sort(axis=0)

    assert (pairs_distances_p_ref == pairs_distances_p).all()

    # === Test that `permute_sequence` works ===

    # Test permute_sequence
    seq = structure_tools.get_chain_sequence(next(structure.chains))
    seq_array = seq_to_array(seq.encode("ascii"))
    seq_array_p = torch.sparse_coo_tensor(*permute_sequence(
        seq_array._indices(), seq_array._values(), offset),
                                          size=seq_array.size())
    seq_permutted = array_to_seq(seq_array_p.to_dense())
    seq_permutted_ = structure_tools.get_chain_sequence(
        next(structure_p.chains))
    assert seq_permutted == seq_permutted_

    # === Test that `permute_adjacency` works ===

    # Permuted adjacency from permuted structure
    pairs_p = np.r_[pairs_p, pairs_p[:, ::-1]]
    adj_permutted_ = sparse.coo_matrix(
        (np.ones(pairs_p.shape[0]), (pairs_p[:, 0], pairs_p[:, 1])))
    assert np.allclose(adj_permutted_.todense(), adj_permutted_.todense().T)
    assert adj_permutted_.max() == 1

    # Permuted adjacency from reference structure
    pairs_sym = np.r_[pairs, pairs[:, ::-1]]
    adj = sparse.coo_matrix(
        (np.ones(pairs_sym.shape[0]), (pairs_sym[:, 0], pairs_sym[:, 1])))
    adj_permutted = permute_adjacency(adj, num_offset_atoms)
    assert np.allclose(adj_permutted.todense(), adj_permutted.todense().T)
    assert adj_permutted.max() == 1
    assert (adj_permutted_.todense() == adj_permutted.todense()).all()

    # Permuted adjacency from reference structure (dense)
    pairs_sym_p_ref = permute_adjacency_dense(pairs_sym, num_atoms,
                                              num_offset_atoms)
    adj_permutted_2 = sparse.coo_matrix(
        (np.ones(pairs_sym_p_ref.shape[0]), (pairs_sym_p_ref[:, 0],
                                             pairs_sym_p_ref[:, 1])))
    assert np.allclose(adj_permutted_2.todense(), adj_permutted_2.todense().T)
    assert adj_permutted_2.max() == 1
    assert (adj_permutted_.todense() == adj_permutted_2.todense()).all()
Example #24
 def forward(self,
             coo,
             numbers,
             grad=False,
             normalize=None,
             sparse_tensor=True):
     species = torch.unique(numbers, sorted=True)
     dim0 = len(species)**2
     bcasted = torch.broadcast_tensors(species[None, ], species[:, None])
     ab = torch.cat([_.reshape(1, -1) for _ in bcasted])  # alpha, beta
     xyz = coo / self.unit
     d = xyz.pow(2).sum(dim=-1).sqrt()
     n = 2 * torch.arange(self.nmax + 1).type(xyz.type())
     r, dr = self.radial(self.unit * d)
     dr = self.unit * dr
     f = (r * d[None]**n[:, None])
     Y = self.ylm(xyz, grad=grad)
     if grad:
         Y, dY = Y
     ff = f[:, None, None] * Y[None]
     i = torch.arange(r.size(0))
     c = []
     for num in species:
         t = torch.index_select(ff, -1, i[numbers == num])
         c += [t.sum(dim=-1)]
     c = torch.stack(c)
     nnp = c[None, :, None, ] * c[:, None, :, None]
     p = (nnp * self.Yr).sum(dim=-1) + (nnp * self.Yi).sum(dim=-2)
     if grad:
         df = dr * d[None]**n[:, None] + r * n[:, None] * d[None]**(
             n[:, None] - 1)
         df = df[..., None] * xyz / d[:, None]
         dc = (df[:, None, None] * Y[None, ..., None] +
               f[:, None, None, :, None] * dY[None])
         dc = torch.stack([(numbers == num).type(r.type())[:, None] * dc
                           for num in species])
         dnnp = (c[None, :, None, ..., None, None] * dc[:, None, :, None] +
                 dc[None, :, None, ] * c[:, None, :, None, ..., None, None])
         dp = ((dnnp * self.Yr[..., None, None]).sum(dim=-3) +
               (dnnp * self.Yi[..., None, None]).sum(dim=-4))
         p, dp = p * self.nnl, dp * self.nnl[..., None, None] / self.unit
         if (normalize if normalize else self.normalize):
             norm = p.norm() + torch.finfo().eps
             p = p / norm
             dp = dp / norm
             dp = dp - p[..., None, None] * (p[..., None, None] *
                                             dp).sum(dim=(0, 1, 2, 3, 4))
         p = p.view(dim0, *self._shape)
         dp = dp.view(dim0, *self._shape, *xyz.size())
         if sparse_tensor:
             p = torch.sparse_coo_tensor(ab, p, size=self._size)
             dp = torch.sparse_coo_tensor(ab,
                                          dp,
                                          size=(*self._size, *xyz.size()))
             return p, dp
         else:
             return ab, p, self._size, dp, (*self._size, *xyz.size())
     else:
         p = p * self.nnl
         if (normalize if normalize else self.normalize):
             norm = p.norm() + torch.finfo().eps
             p = p / norm
         if sparse_tensor:
             p = torch.sparse_coo_tensor(ab,
                                         p.view(dim0, *self._shape),
                                         size=self._size)
             return p
         else:
             return ab, p.view(dim0, *self._shape), self._size
Example #25
    def _get_snake_addition(self,
                            pathing: torch.Tensor,
                            exception_on_failure: bool) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
        """pathing is a nx1xhxw tensor containing filled locations"""
        n = pathing.shape[0]
        l = self.initial_snake_length - 1
        successfully_spawned = torch.zeros(n, dtype=torch.uint8, device=self.device)

        # Expand pathing because you can't put a snake right next to another snake
        t0 = time()
        pathing = (F.conv2d(
            pathing.to(dtype=self.dtype),
            torch.ones((1, 1, 3, 3), dtype=self.dtype, device=self.device),
            padding=1
        ) > EPS).byte()
        # Remove boundaries
        pathing[:, :, :l, :] = 1
        pathing[:, :, :, :l] = 1
        pathing[:, :, -l:, :] = 1
        pathing[:, :, :, -l:] = 1
        available_locations = ~pathing
        self._log(f'Respawn location calculation: {1000 * (time() - t0)}ms')

        any_available_locations = available_locations.view(n, -1).max(dim=1)[0].byte()
        if exception_on_failure:
            # If there are no available locations for a snake, raise an exception
            if torch.any(~any_available_locations):
                raise RuntimeError('There is no available locations to create snake!')

        successfully_spawned |= any_available_locations

        t0 = time()
        body_seed_indices = drop_duplicates(torch.nonzero(available_locations), 0)
        # Body seeds is a tensor that contains all zeros except where a snake will be spawned
        # Shape: (n, 1, self.size, self.size)
        body_seeds = torch.sparse_coo_tensor(
            body_seed_indices.t(), torch.ones(len(body_seed_indices)), available_locations.shape,
            device=self.device, dtype=self.dtype
        )
        self._log(f'Choose spawn locations: {1000 * (time() - t0)}ms')

        t0 = time()
        # Choose random starting directions
        random_directions = torch.randint(4, (n,), device=self.device)
        random_directions_onehot = torch.empty((n, 4), dtype=self.dtype, device=self.device)
        random_directions_onehot.zero_()
        random_directions_onehot.scatter_(1, random_directions.unsqueeze(-1), 1)
        self._log(f'Choose spawn directions: {1000 * (time() - t0)}ms')

        t0 = time()
        # Create bodies
        new_bodies = torch.einsum('bchw,bc->bhw', [
            F.conv2d(
                body_seeds.to_dense(),
                LENGTH_3_SNAKES.to(self.device).to(dtype=self.dtype),
                padding=1
            ),
            random_directions_onehot
        ]).unsqueeze(1)
        self._log(f'Create bodies: {1000 * (time() - t0)}ms')

        t0 = time()
        # Create heads at end of bodies
        snake_sizes = new_bodies.view(n, -1).max(dim=1)[0]
        # Only create heads where there is a snake. This catches an edge case where there is no room
        # for a snake to spawn and hence snake size == bodies everywhere (as bodies is all 0)
        snake_sizes[snake_sizes == 0] = -1
        snake_size_mask = snake_sizes[:, None, None, None].expand((n, 1, self.size, self.size))
        new_heads = (new_bodies == snake_size_mask).to(dtype=self.dtype)
        self._log(f'Create heads: {1000 * (time() - t0)}ms')

        return new_bodies, new_heads, random_directions, successfully_spawned
Example #26
 def _just_create_matrix(self, lbl_wrd):
     mat = torch.sparse_coo_tensor(lbl_wrd[0], lbl_wrd[1], lbl_wrd[2])
     # the caller guarantees sorted, duplicate-free indices, so flag the
     # tensor as coalesced instead of performing a coalesce pass
     mat._coalesced_(True)
     return mat
Example #27
    def _add_snake(self,
                   envs: torch.Tensor,
                   snake_channel: int,
                   exception_on_failure: bool) -> (torch.Tensor, torch.Tensor, torch.Tensor):
        """Adds a snake in a certain channel to environments.

        Args:
            envs: Tensor representing the environments.
            snake_channel: Which snake to add
            exception_on_failure: If True then raise an exception if there is no available spawning locations
        """
        l = self.initial_snake_length - 1
        n = envs.shape[0]

        successfully_spawned = torch.zeros(n, dtype=torch.uint8, device=self.device)

        occupied_locations = envs.sum(dim=1, keepdim=True) > EPS
        # Expand this because you can't put a snake right next to another snake
        occupied_locations = (F.conv2d(
            occupied_locations.to(dtype=self.dtype),
            torch.ones((1, 1, 3, 3), dtype=self.dtype, device=self.device),
            padding=1
        ) > EPS).byte()

        available_locations = (envs.sum(dim=1, keepdim=True) < EPS) & ~occupied_locations

        # Remove boundaries
        available_locations[:, :, :l, :] = 0
        available_locations[:, :, :, :l] = 0
        available_locations[:, :, -l:, :] = 0
        available_locations[:, :, :, -l:] = 0

        any_available_locations = available_locations.view(n, -1).max(dim=1)[0].byte()
        if exception_on_failure:
            # If there are no available locations for a snake, raise an exception
            if torch.any(~any_available_locations):
                raise RuntimeError('There is no available locations to create snake!')

        successfully_spawned |= any_available_locations

        body_seed_indices = drop_duplicates(torch.nonzero(available_locations), 0)
        # Body seeds is a tensor that contains all zeros except where a snake will be spawned
        # Shape: (n, 1, self.size, self.size)
        body_seeds = torch.sparse_coo_tensor(
            body_seed_indices.t(), torch.ones(len(body_seed_indices)), available_locations.shape,
            device=self.device, dtype=self.dtype
        )

        # Choose random starting directions
        random_directions = torch.randint(4, (n,), device=self.device)
        random_directions_onehot = torch.empty((n, 4), dtype=self.dtype, device=self.device)
        random_directions_onehot.zero_()
        random_directions_onehot.scatter_(1, random_directions.unsqueeze(-1), 1)

        # Create bodies
        bodies = torch.einsum('bchw,bc->bhw', [
            F.conv2d(
                body_seeds.to_dense(),
                LENGTH_3_SNAKES.to(self.device).to(dtype=self.dtype),
                padding=1
            ),
            random_directions_onehot
        ])
        envs[:, 2*(snake_channel+1), :, :] = bodies
        # envs[:, self.body_channels[snake_channel], :, :] = bodies

        # Create heads at end of bodies
        snake_sizes = envs[:, 2*(snake_channel+1), :].view(n, -1).max(dim=1)[0]
        # Only create heads where there is a snake. This catches an edge case where there is no room
        # for a snake to spawn and hence snake size == bodies everywhere (as bodies is all 0)
        snake_sizes[snake_sizes == 0] = -1
        snake_size_mask = snake_sizes[:, None, None].expand((n, self.size, self.size))
        envs[:, 2*(snake_channel+1) - 1, :, :] = (bodies == snake_size_mask).to(dtype=self.dtype)

        # Start tracking head positions and orientations
        new_positions = torch.zeros((n, 3), dtype=torch.long, device=self.device)
        new_positions[:, 0] = random_directions
        heads = envs[:, 2*(snake_channel+1) - 1, :, :]
        locations = torch.nonzero(heads)[:, 1:]
        dones = ~heads.view(n, -1).sum(dim=1).gt(EPS)
        new_positions[~dones, 1:] = \
            torch.nonzero(envs[:, 2*(snake_channel+1) - 1, :, :])[:, 1:]

        return envs, successfully_spawned, new_positions
Example #28
def spmm_adj(indices, values, shape, b):
    adj = torch.sparse_coo_tensor(indices=indices, values=values, size=shape)
    return torch.spmm(adj, b)
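A minimal usage sketch (a made-up 3-node graph; values act as edge weights):

import torch

indices = torch.tensor([[0, 1], [1, 2]])   # edges 0->1 and 1->2
values = torch.tensor([1.0, 1.0])
features = torch.randn(3, 4)
out = spmm_adj(indices, values, (3, 3), features)   # aggregate neighbor features
print(out.shape)                                    # torch.Size([3, 4])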
Example #29
    def loss(self,
             cls_scores,
             bbox_preds,
             dir_cls_preds,
             gt_bboxes,
             gt_labels,
             input_metas,
             gt_bboxes_ignore=None):
        """Calculate loss of FreeAnchor head.

        Args:
            cls_scores (list[torch.Tensor]): Classification scores of
                different samples.
            bbox_preds (list[torch.Tensor]): Box predictions of
                different samples
            dir_cls_preds (list[torch.Tensor]): Direction predictions of
                different samples
            gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): Ground truth boxes.
            gt_labels (list[torch.Tensor]): Ground truth labels.
            input_metas (list[dict]): List of input meta information.
            gt_bboxes_ignore (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth boxes that should be ignored. Defaults to None.

        Returns:
            dict[str, torch.Tensor]: Loss items.

                - positive_bag_loss (torch.Tensor): Loss of positive samples.
                - negative_bag_loss (torch.Tensor): Loss of negative samples.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        anchor_list = self.get_anchors(featmap_sizes, input_metas)
        anchors = [torch.cat(anchor) for anchor in anchor_list]

        # concatenate each level
        cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(
                cls_score.size(0), -1, self.num_classes)
            for cls_score in cls_scores
        ]
        bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(
                bbox_pred.size(0), -1, self.box_code_size)
            for bbox_pred in bbox_preds
        ]
        dir_cls_preds = [
            dir_cls_pred.permute(0, 2, 3,
                                 1).reshape(dir_cls_pred.size(0), -1, 2)
            for dir_cls_pred in dir_cls_preds
        ]

        cls_scores = torch.cat(cls_scores, dim=1)
        bbox_preds = torch.cat(bbox_preds, dim=1)
        dir_cls_preds = torch.cat(dir_cls_preds, dim=1)

        cls_prob = torch.sigmoid(cls_scores)
        box_prob = []
        num_pos = 0
        positive_losses = []
        for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_,
                dir_cls_preds_) in enumerate(
                    zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds,
                        dir_cls_preds)):

            gt_bboxes_ = gt_bboxes_.tensor.to(anchors_.device)

            with torch.no_grad():
                # box_localization: a_{j}^{loc}, shape: [j, 4]
                pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)

                # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
                object_box_iou = bbox_overlaps_nearest_3d(
                    gt_bboxes_, pred_boxes)

                # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
                t1 = self.bbox_thr
                t2 = object_box_iou.max(
                    dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
                object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp(
                    min=0, max=1)

                # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
                num_obj = gt_labels_.size(0)
                indices = torch.stack(
                    [torch.arange(num_obj).type_as(gt_labels_), gt_labels_],
                    dim=0)

                object_cls_box_prob = torch.sparse_coo_tensor(
                    indices, object_box_prob)

                # image_box_prob: P{a_{j} \in A_{+}}, shape: [j, c]
                """
                The block between "start" and "end" implements the missing
                sparse-max reduction:

                image_box_prob = torch.sparse.max(object_cls_box_prob,
                                                  dim=0).t()
                """
                # start
                box_cls_prob = torch.sparse.sum(
                    object_cls_box_prob, dim=0).to_dense()

                indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()
                if indices.numel() == 0:
                    image_box_prob = torch.zeros(
                        anchors_.size(0),
                        self.num_classes).type_as(object_box_prob)
                else:
                    nonzero_box_prob = torch.where(
                        (gt_labels_.unsqueeze(dim=-1) == indices[0]),
                        object_box_prob[:, indices[1]],
                        torch.tensor(
                            [0]).type_as(object_box_prob)).max(dim=0).values

                    # unmap back to shape [j, c]
                    image_box_prob = torch.sparse_coo_tensor(
                        indices.flip([0]),
                        nonzero_box_prob,
                        size=(anchors_.size(0), self.num_classes)).to_dense()
                # end

                box_prob.append(image_box_prob)

            # construct bags for objects
            match_quality_matrix = bbox_overlaps_nearest_3d(
                gt_bboxes_, anchors_)
            _, matched = torch.topk(
                match_quality_matrix,
                self.pre_anchor_topk,
                dim=1,
                sorted=False)
            del match_quality_matrix

            # matched_cls_prob: P_{ij}^{cls}
            matched_cls_prob = torch.gather(
                cls_prob_[matched], 2,
                gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
                                                 1)).squeeze(2)

            # matched_box_prob: P_{ij}^{loc}
            matched_anchors = anchors_[matched]
            matched_object_targets = self.bbox_coder.encode(
                matched_anchors,
                gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))

            # direction classification loss
            loss_dir = None
            if self.use_direction_classifier:
                # also calculate direction prob: P_{ij}^{dir}
                matched_dir_targets = get_direction_target(
                    matched_anchors,
                    matched_object_targets,
                    self.dir_offset,
                    one_hot=False)
                loss_dir = self.loss_dir(
                    dir_cls_preds_[matched].transpose(-2, -1),
                    matched_dir_targets,
                    reduction_override='none')

            # generate bbox weights
            if self.diff_rad_by_sin:
                bbox_preds_[matched], matched_object_targets = \
                    self.add_sin_difference(
                        bbox_preds_[matched], matched_object_targets)
            bbox_weights = matched_anchors.new_ones(matched_anchors.size())
            # Using pop here is not right; check the performance impact
            code_weight = self.train_cfg.get('code_weight', None)
            if code_weight:
                bbox_weights = bbox_weights * bbox_weights.new_tensor(
                    code_weight)
            loss_bbox = self.loss_bbox(
                bbox_preds_[matched],
                matched_object_targets,
                bbox_weights,
                reduction_override='none').sum(-1)

            if loss_dir is not None:
                loss_bbox += loss_dir
            matched_box_prob = torch.exp(-loss_bbox)

            # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
            num_pos += len(gt_bboxes_)
            positive_losses.append(
                self.positive_bag_loss(matched_cls_prob, matched_box_prob))

        positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)

        # box_prob: P{a_{j} \in A_{+}}
        box_prob = torch.stack(box_prob, dim=0)

        # negative_loss:
        # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
        negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
            1, num_pos * self.pre_anchor_topk)

        losses = {
            'positive_bag_loss': positive_loss,
            'negative_bag_loss': negative_loss
        }
        return losses
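
A hedged sanity check for the "start"/"end" block in the loss above: with toy, assumed sizes, the sparse workaround should reproduce this dense per-class max (zero for classes that no ground-truth object carries):

import torch

num_obj, num_anchors, num_classes = 4, 6, 3
gt_labels_ = torch.randint(0, num_classes, (num_obj,))
object_box_prob = torch.rand(num_obj, num_anchors)

# scatter each object's row into its class plane, then max over objects
dense = torch.zeros(num_classes, num_obj, num_anchors)
dense[gt_labels_, torch.arange(num_obj)] = object_box_prob
image_box_prob_ref = dense.max(dim=1).values.t()   # shape [j, c]
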
Example #30
0
 def test_factory_empty_indices(self):
     device = 'cuda' if self.is_cuda else 'cpu'
     tensor = torch.sparse_coo_tensor([], [], torch.Size([]), device=device)
     expected_indices = torch.tensor([], dtype=torch.long, device=device)
     self.assertEqual(tensor._indices(), expected_indices)
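
For intuition, a hedged variant of the same factory call with a concrete 2-D shape instead of the scalar torch.Size([]) used in the test:

import torch

t = torch.sparse_coo_tensor(torch.empty((2, 0), dtype=torch.long),
                            torch.empty(0), (3, 3))
print(t._nnz())       # 0 stored elements
print(t.to_dense())   # a 3x3 tensor of zeros
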
Example #31
0
    def forward(self, images, embed, edges, *args, **kwargs):
        batch = images.size(0)
        n_class, word_dim = embed.size()
        feats = self.feat_net(images)
        imgnet_scores = F.softmax(self.imgnet_clf(feats), dim=1)
        feats = feats.view(batch, -1, self.feat_dim)

        if self.use_attn:
            feats_key = feats.view(-1, self.feat_dim)  # [batch*49, 2048]
            feats_key = self.key_net(feats_key).view(
                batch, -1,
                self.hid_dim).unsqueeze(1)  # [batch, 1, 49, hid_dim]

            embed_query = self.query_net(embed).repeat(batch, 1).view(
                batch, n_class, self.hid_dim,
                1)  # [batch, n_class, hid_dim, 1]

            attn = F.softmax(torch.matmul(feats_key, embed_query),
                             dim=2)  # [batch, n_class, 49, 1]

            graph_input = torch.sum(feats.unsqueeze(1) * attn,
                                    dim=2)  # [batch, class, feat_dim]
        else:
            graph_input = torch.mean(feats, dim=1, keepdim=True).repeat(
                1, n_class, 1)  # [batch, class, feat_dim]
            attn = torch.zeros([batch, n_class, feats.size(1)])

        graph_input = graph_input.view(-1, self.feat_dim)
        h0 = self.F_in(graph_input)  # [batch*class, hid_dim]

        # embed_tuple = torch.cat([embed.repeat(n_class, 1), embed.repeat(1, n_class).view(-1, self.word_dim)], dim=1)
        # prop_mat = self.F_rel(embed_tuple)   # [n_class*n_class, 1]
        # prop_mat = prop_mat * adj
        # prop_mat = prop_mat.view(1, n_class, n_class)
        # prop_mat = torch.tanh(prop_mat) # [1, n_class, n_class]

        embed_tuple = torch.cat([embed[edges[0]], embed[edges[1]]], dim=1)
        values = self.F_rel(embed_tuple).view(-1)  # [n_edges]
        values = torch.tanh(values)

        prop_mat = torch.sparse_coo_tensor(edges,
                                           values,
                                           torch.Size([n_class, n_class]),
                                           requires_grad=True).to(
                                               images.device).to_dense()

        hidden = h0
        for t in range(self.t_max):
            msg = torch.matmul(prop_mat,
                               hidden.view(batch, n_class, self.d_dim)).view(
                                   -1, self.d_dim)  # [batch*n_class, hid_dim]
            msg = torch.tanh(msg)
            hidden = self.gru_cell(msg, hidden)

        logits = self.F_out(hidden).view(batch, n_class)

        return {
            "logits": logits,
            "attention": attn,
            "imgnet_scores": imgnet_scores
        }
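
The core pattern of this forward pass, isolated as a hedged sketch with assumed sizes: scatter one learned score per edge into a dense [n_class, n_class] propagation matrix.

import torch

n_class = 4
edges = torch.tensor([[0, 1, 2], [1, 2, 3]])   # [2, n_edges] COO indices
values = torch.tanh(torch.randn(3))            # one score per edge
prop_mat = torch.sparse_coo_tensor(
    edges, values, (n_class, n_class)).to_dense()
# pairs without an edge stay exactly zero, so no message passes between them
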
Example #32
0
    def forward(self,
                cls_logits,
                labels,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):

        device = cls_logits.device
        self.n_i, self.n_c = cls_logits.size()
        # expand the labels to all their parent nodes
        target = cls_logits.new_zeros(self.n_i, self.n_c)
        # weight mask, decide which class should be ignored
        #weight_mask = cls_logits.new_zeros(self.n_i, self.n_c)

        unique_label = torch.unique(labels)

        with torch.no_grad():
            sigmoid_cls_logits = torch.sigmoid(cls_logits)
        # For each sample, if its score on an unrelated class is higher than
        # score_thr, its gradient should not be ignored; this also applies to
        # negative samples.
        high_score_inds = torch.nonzero(sigmoid_cls_logits >= self.score_thr)
        weight_mask = torch.sparse_coo_tensor(high_score_inds.t(),
                                              cls_logits.new_ones(
                                                  high_score_inds.shape[0]),
                                              size=(self.n_i, self.n_c),
                                              device=device).to_dense()

        for cls in unique_label:
            cls = cls.item()
            cls_inds = torch.nonzero(labels == cls).squeeze(1)
            if cls == 0:
                # construct target vector for background samples
                target[cls_inds, 0] = 1
                # for bg, first zero the weights of all classes
                weight_mask[cls_inds] = 0

                cls_inds_cpu = cls_inds.cpu()

                # Handle the rare categories: randomly choose 1% of the bg
                # samples to suppress rare categories
                rare_cats = self.freq_group['rare']
                rare_cats = torch.tensor(rare_cats, device=cls_logits.device)
                choose_bg_num = int(len(cls_inds) * 0.01)
                choose_bg_inds = torch.tensor(np.random.choice(
                    cls_inds_cpu, size=(choose_bg_num), replace=False),
                                              device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, rare_cats] = 1

                weight_mask[choose_bg_inds] = tmp_weight_mask

                # Handle the common categories: randomly choose 10% of the bg
                # samples to suppress common categories
                common_cats = self.freq_group['common']
                common_cats = torch.tensor(common_cats,
                                           device=cls_logits.device)
                choose_bg_num = int(len(cls_inds) * 0.1)
                choose_bg_inds = torch.tensor(np.random.choice(
                    cls_inds_cpu, size=(choose_bg_num), replace=False),
                                              device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, common_cats] = 1

                weight_mask[choose_bg_inds] = tmp_weight_mask

                # Handle the frequent categories: use all bg samples to
                # suppress frequent categories
                freq_cats = self.freq_group['freq']
                freq_cats = torch.tensor(freq_cats, device=cls_logits.device)
                choose_bg_num = int(len(cls_inds) * 1.0)
                choose_bg_inds = torch.tensor(np.random.choice(
                    cls_inds_cpu, size=(choose_bg_num), replace=False),
                                              device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, freq_cats] = 1

                weight_mask[choose_bg_inds] = tmp_weight_mask

                # Set the weight for bg to 1
                weight_mask[cls_inds, 0] = 1

            else:
                # construct target vector for foreground samples
                cur_labels = [cls]
                cur_labels = torch.tensor(cur_labels, device=cls_logits.device)
                tmp_label_vec = cls_logits.new_zeros(self.n_c)
                tmp_label_vec[cur_labels] = 1
                tmp_label_vec = tmp_label_vec.expand(cls_inds.numel(),
                                                     self.n_c)
                target[cls_inds] = tmp_label_vec
                # construct weight mask for fg samples
                tmp_weight_mask_vec = weight_mask[cls_inds]
                # set the weight for ground truth category
                tmp_weight_mask_vec[:, cur_labels] = 1

                weight_mask[cls_inds] = tmp_weight_mask_vec

        cls_loss = F.binary_cross_entropy_with_logits(cls_logits,
                                                      target.float(),
                                                      reduction='none')

        return torch.sum(weight_mask * cls_loss) / self.n_i
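
A hedged sketch of the weight-mask trick above, with toy sizes: turn the [k, 2] output of torch.nonzero into a dense 0/1 mask through a sparse COO tensor.

import torch

logits = torch.randn(5, 8)
score_thr = 0.7
high = torch.nonzero(torch.sigmoid(logits) >= score_thr)   # [k, 2]
mask = torch.sparse_coo_tensor(high.t(),
                               logits.new_ones(high.shape[0]),
                               size=logits.shape).to_dense()
# here this equals (torch.sigmoid(logits) >= score_thr).float()
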
Example #33
0
def bdsmm(sparse, dense):
    """
    Batch dense-sparse matrix multiply
    """
    # Make the batch sparse matrix into a block-diagonal matrix
    if sparse.ndimension() > 2:
        # Expand the tensors to account for broadcasting
        output_shape = _matmul_broadcast_shape(sparse.shape, dense.shape)
        expanded_sparse_shape = output_shape[:-2] + sparse.shape[-2:]
        unsqueezed_sparse_shape = [
            1 for _ in range(len(output_shape) - sparse.dim())
        ] + list(sparse.shape)
        repeat_sizes = tuple(
            output_size // sparse_size for output_size, sparse_size in zip(
                expanded_sparse_shape, unsqueezed_sparse_shape))
        sparse = sparse_repeat(sparse, *repeat_sizes)
        dense = dense.expand(*output_shape[:-2], dense.size(-2),
                             dense.size(-1))

        # Figure out how much needs to be added to the row/column indices to
        # create a block-diagonal matrix
        *batch_shape, num_rows, num_cols = sparse.shape
        batch_size = torch.Size(batch_shape).numel()
        batch_multiplication_factor = torch.tensor([
            torch.Size(batch_shape[i + 1:]).numel()
            for i in range(len(batch_shape))
        ],
                                                   dtype=torch.long,
                                                   device=sparse.device)
        if batch_multiplication_factor.is_cuda:
            batch_assignment = (sparse._indices()[:-2].float().t()
                                @ batch_multiplication_factor.float()).long()
        else:
            batch_assignment = sparse._indices()[:-2].t(
            ) @ batch_multiplication_factor

        # Create block-diagonal sparse tensor
        indices = sparse._indices()[-2:].clone()
        indices[0].add_(batch_assignment, alpha=num_rows)
        indices[1].add_(batch_assignment, alpha=num_cols)
        sparse_2d = torch.sparse_coo_tensor(
            indices,
            sparse._values(),
            torch.Size((batch_size * num_rows, batch_size * num_cols)),
            dtype=sparse._values().dtype,
            device=sparse._values().device,
        )

        dense_2d = dense.contiguous().view(batch_size * num_cols, -1)
        res = torch.dsmm(sparse_2d, dense_2d)
        res = res.view(*batch_shape, num_rows, -1)
        return res

    elif dense.dim() > 2:
        *batch_shape, num_rows, num_cols = dense.size()
        batch_size = torch.Size(batch_shape).numel()
        dense = dense.view(batch_size, num_rows, num_cols)
        res = torch.dsmm(
            sparse,
            dense.transpose(0, 1).contiguous().view(-1, batch_size * num_cols))
        res = res.view(-1, batch_size, num_cols)
        res = res.transpose(0, 1).contiguous().view(*batch_shape, -1, num_cols)
        return res

    else:
        return torch.dsmm(sparse, dense)
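
A hedged sketch of just the index-offset step bdsmm relies on, with assumed toy sizes: shifting each batch's row/column indices by b * n folds a [batch, n, n] sparse tensor into one block-diagonal [batch*n, batch*n] matrix.

import torch

batch, n = 2, 3
idx = torch.tensor([[0, 0, 1, 1],    # batch index of each stored element
                    [0, 2, 1, 2],    # row
                    [1, 0, 2, 0]])   # col
vals = torch.ones(4)
rows = idx[1] + idx[0] * n           # shift rows down into block b
cols = idx[2] + idx[0] * n           # shift cols right into block b
block_diag = torch.sparse_coo_tensor(
    torch.stack([rows, cols]), vals, (batch * n, batch * n))
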
Example #34
0
def torch_sparse_matrix(row, column, data):
    shape = [4, 10]
    indices = [list(row), list(column)]
    return torch.sparse_coo_tensor(indices, data, shape)
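
A hedged usage sketch; the fixed [4, 10] shape is the example's own, while the inputs below are assumptions:

import torch

m = torch_sparse_matrix(row=[0, 1, 3], column=[2, 5, 9],
                        data=torch.tensor([1.0, 2.0, 3.0]))
print(m.to_dense().shape)   # torch.Size([4, 10])
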
Example #35
0
    def forward(self,
                images,
                embed,
                edges,
                n_unseen=None,
                n_coco=64,
                imgnet_idx=None,
                edge_weights=None):
        batch = images.size(0)
        n_class, word_dim = embed.size()
        feats = self.feat_net(images)
        imgnet_scores = self.imgnet_clf(feats)
        imgnet_scores = F.softmax(imgnet_scores[:, imgnet_idx], dim=1)
        feats = feats.view(batch, -1, self.feat_dim)

        embed_coco = embed[:n_coco]
        embed_imgnet = embed[n_coco:]

        if self.use_attn:
            feats_key = feats.view(-1, self.feat_dim)  # [batch*49, 2048]
            feats_key = self.key_net(feats_key).view(
                batch, -1,
                self.hid_dim).unsqueeze(1)  # [batch, 1, 49, hid_dim]

            embed_query = self.query_net(embed_coco).repeat(batch, 1).view(
                batch, n_coco, self.hid_dim, 1)  # [batch, n_class, hid_dim, 1]

            attn = F.softmax(torch.matmul(feats_key, embed_query),
                             dim=2)  # [batch, n_class, 49, 1]

            graph_input = torch.sum(feats.unsqueeze(1) * attn,
                                    dim=2)  # [batch, class, feat_dim]
        else:
            graph_input = torch.mean(feats, dim=1, keepdim=True).repeat(
                1, n_coco, 1)  # [batch, class, feat_dim]
            attn = torch.zeros([batch, n_class, feats.size(1)])

        graph_input = torch.cat(
            [graph_input,
             embed_coco.unsqueeze(0).repeat(batch, 1, 1)],
            dim=2)  # [batch, class, feat_dim+word_dim]
        graph_input = graph_input.view(batch * n_coco,
                                       -1)  # [batch*class, feat_dim]

        hc = self.F_in(graph_input).view(batch, n_coco,
                                         -1)  # [batch, class_coco, d_dim]

        X, Xp, mu, log_sigma, hp = self.pos_VAE(
            imgnet_scores, embed_imgnet)  # [batch, class_imgnet, d_dim]
        hp = hp.view(batch, embed_imgnet.size(0),
                     -1)  # [batch, class_coco, d_dim]

        h0 = torch.cat([hc, hp], dim=1)  # [batch, class, d_dim]

        embed_tuple = torch.cat([embed[edges[0]], embed[edges[1]]], dim=1)
        values = self.F_rel(embed_tuple).view(-1)  # [n_edges]
        # values = torch.sigmoid(values)

        prop_mat = torch.sparse_coo_tensor(edges,
                                           values,
                                           torch.Size([n_class, n_class]),
                                           requires_grad=True).to(
                                               images.device).to_dense()
        if edge_weights is not None:
            prop_mat = prop_mat * edge_weights

        # don't let messages pass back to the imagenet classes
        prop_mat[n_coco:] = 0

        if self.topK > 0:
            # keep only the top-K imagenet scores per image
            idx = torch.argsort(
                imgnet_scores, dim=1, descending=True)[:, :self.topK]
            masks = torch.zeros_like(imgnet_scores).scatter_(
                1, idx, 1).unsqueeze(1)  # [batch, 1, n_imgnet]
            prop_mat = prop_mat.unsqueeze(0).repeat(batch, 1, 1)
            prop_mat[:, :n_coco, n_coco:] *= masks

        prop_mat = torch.softmax(prop_mat, dim=1)

        if self.self_cyc:
            eye = torch.eye(n_class,
                            requires_grad=False).float().to(images.device)
            prop_mat = (1.0 - eye) * prop_mat + eye

        if self.normalize:
            prop_mat = self.normalize_A(prop_mat)

        # if n_unseen is not None:
        #     prop_mat = self.adjust_unseen_mask(prop_mat, n_unseen)

        if self.gnn == "ggnn":
            # prop_mat = torch.tanh(prop_mat)
            hidden = h0.view(-1, self.d_dim)
            for t in range(self.t_max):
                msg = torch.matmul(
                    prop_mat, hidden.view(batch, n_class, self.d_dim)).view(
                        -1, self.d_dim)  # [batch*n_class, hid_dim]
                hidden = self.gru_cell(msg, hidden)
        else:
            hidden = h0
            for i, gcn in enumerate(self.gcn):
                if i < len(self.gcn) - 1:
                    hidden = self.activation(gcn(hidden, prop_mat))
                else:
                    hidden = gcn(hidden, prop_mat)

        logits = self.F_out(hidden).view(batch, n_class)

        return {
            "logits": logits,
            "attention": attn,
            "imgnet_scores": imgnet_scores.detach(),
            "VAE": (X, Xp, mu, log_sigma)
        }
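
The self-cycle trick above, isolated as a hedged sketch with an assumed size: zero the learned diagonal and pin it to 1 so each node always keeps its own state.

import torch

n_class = 4
prop_mat = torch.softmax(torch.randn(n_class, n_class), dim=1)
eye = torch.eye(n_class)
prop_mat = (1.0 - eye) * prop_mat + eye   # off-diagonal kept, diagonal = 1
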
Example #36
0
def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt != 'coo':
        raise TypeError('PyTorch backend only supports the COO format, '
                        'but got %s.' % fmt)
    spmat = th.sparse_coo_tensor(index[1], data, shape)
    return spmat, None
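
A hedged usage sketch, assuming the module-level alias import torch as th used by the snippet:

import torch as th

data = th.tensor([1.0, 2.0])
coo_idx = th.tensor([[0, 1], [1, 2]])   # [2, nnz] row/col indices
spmat, _ = sparse_matrix(data, ('coo', coo_idx), (3, 3))
print(spmat.to_dense())
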
Example #37
0
def scipy_sparse_to_pytorch_sparse(sp_input):
    indices = np.array(sp_input.nonzero())
    return torch.sparse_coo_tensor(torch.LongTensor(indices),
                                   torch.Tensor(sp_input.data),
                                   sp_input.shape,
                                   dtype=torch.float32)
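
A hedged usage sketch, assuming scipy is installed:

import numpy as np
import scipy.sparse as sp
import torch

csr = sp.csr_matrix(np.array([[0.0, 1.5], [2.0, 0.0]]))
t = scipy_sparse_to_pytorch_sparse(csr)
print(t.to_dense())   # tensor([[0.0000, 1.5000], [2.0000, 0.0000]])
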