Example #1
    def __init__(self,
                 weight: sparse.FloatTensor,
                 bias,
                 mask,
                 transpose=False):
        super(SparseLinear, self).__init__()
        if not weight.is_sparse:
            raise ValueError("Weight must be sparse")
        elif weight._nnz() > 0 and not weight.is_coalesced():
            raise ValueError("Weight must be coalesced")

        self.transpose = transpose

        self.in_features = weight.size(1)
        self.out_features = weight.size(0)
        self.mask = mask.clone()

        # in order to add to optimizer
        self.weight = Parameter(weight.data.clone(), requires_grad=False)
        # Don't move after creation to make it a leaf
        self.dense_weight_placeholder = Parameter(
            torch.empty(size=self.weight.size(), device=self.weight.device))
        self.dense_weight_placeholder.is_placeholder = True

        # create links
        self.weight.dense = self.dense_weight_placeholder
        self.weight.mask = self.mask
        self.weight.is_sparse_param = True

        if bias is None:
            self.register_parameter('bias', None)
        else:
            assert bias.size() == torch.Size((weight.size(0), 1))
            self.bias = Parameter(bias.data.clone())
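
A minimal usage sketch for the constructor above (hypothetical; it assumes the surrounding class is named SparseLinear, as the super() call suggests, and that torch and Parameter are imported):

# Hypothetical usage sketch, not part of the original module.
import torch

out_features, in_features = 4, 8
dense = torch.randn(out_features, in_features)
mask = (torch.rand(out_features, in_features) < 0.25).float()  # keep roughly 25% of entries
weight = (dense * mask).to_sparse().coalesce()                 # sparse and coalesced, as the checks require
bias = torch.zeros(out_features, 1)                            # shape (out_features, 1) per the assert

layer = SparseLinear(weight, bias, mask)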
Example #2
def convert_sparse_matrix_to_sparse_tensor(X):
    coo = X.tocoo()
    coo = coo.astype('float32')  # astype returns a new matrix; keep the result
    indices = np.mat([coo.row, coo.col]).transpose()

    a = FloatTensor(torch.LongTensor(indices.T.astype('int32')),
                    torch.FloatTensor(coo.data), torch.Size(coo.shape))
    a.requires_grad = False
    return a.cuda()
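
A usage sketch for the converter above, assuming FloatTensor refers to torch.sparse.FloatTensor and a CUDA device is available (the function ends with .cuda()):

# Hypothetical usage sketch.
import numpy as np
import scipy.sparse as sp

X = sp.random(100, 50, density=0.05, format='csr', dtype=np.float32)
t = convert_sparse_matrix_to_sparse_tensor(X)   # sparse CUDA tensor with X's entries
print(t.shape, t._nnz())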
Example #3
 def _sparse_masked_select_abs(self, sparse_tensor: sparse.FloatTensor,
                               thr):
     indices = sparse_tensor._indices()
     values = sparse_tensor._values()
     prune_mask = torch.abs(values) >= thr
     return torch.sparse_coo_tensor(
         indices=indices.masked_select(prune_mask).reshape(2, -1),
         values=values.masked_select(prune_mask),
         size=[self.out_features, self.in_features]).coalesce()
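
The same magnitude-based pruning can be written as a standalone function; the version below is only an illustrative sketch, not the original class method:

# Standalone sketch of the pruning step above (assumes a 2-D sparse COO tensor).
import torch

def sparse_masked_select_abs(sparse_tensor, thr):
    st = sparse_tensor.coalesce()
    indices, values = st.indices(), st.values()
    keep = values.abs() >= thr                       # keep entries with |value| >= threshold
    return torch.sparse_coo_tensor(indices[:, keep], values[keep],
                                   size=st.shape).coalesce()

pruned = sparse_masked_select_abs(torch.randn(4, 4).to_sparse(), 0.5)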
Example #4
    def save_sparse_tensor(self, artifact_type: str, params: Dict[str, Any], sparse_tensor: FloatTensor) -> str:
        """Saves an artifact.

        Parameters
        ----------
        artifact_type : str
            Identifier of artifact type.
        params : Dict[str, Any]
            Parameters identifying the artifact's provenance.
        sparse_tensor : FloatTensor
            The actual artifact to be stored.

        Returns
        -------
        str
            File storage location.
        """
        artifact = {'edge_idx': sparse_tensor.indices().cpu(),
                    'edge_weight': sparse_tensor.values().cpu(),
                    'shape': sparse_tensor.shape}
        return self.save_artifact(artifact_type, params, artifact)
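
A matching load-side helper can rebuild the sparse tensor from the stored dictionary; load_sparse_tensor below is hypothetical and only mirrors the keys written by save_sparse_tensor:

# Hypothetical counterpart to save_sparse_tensor.
import torch

def load_sparse_tensor(artifact):
    return torch.sparse_coo_tensor(artifact['edge_idx'],
                                   artifact['edge_weight'],
                                   size=artifact['shape']).coalesce()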
Example #5
    def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'FloatTensor':
        """
        Sparse NdArray constructor for FloatTensor.

        :param indices: the indices of the sparse array
        :param values: the values of the sparse array
        :param shape: the shape of the sparse array
        :return: FloatTensor
        """
        return FloatTensor(torch.LongTensor(indices).T if self.transpose_indices else torch.LongTensor(indices),
                           torch.FloatTensor(values),
                           torch.Size(shape))
Example #6
 def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray',
                        shape: List[int]) -> 'FloatTensor':
     return FloatTensor(
         torch.LongTensor(indices).T
         if self.transpose_indices else torch.LongTensor(indices),
         torch.FloatTensor(values), torch.Size(shape))
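
Both constructors above transpose the index matrix when transpose_indices is set, presumably because the indices arrive as (nnz, 2) row/column pairs; a direct equivalent looks like this (illustrative only, assuming torch.sparse.FloatTensor is the FloatTensor in use):

# Hypothetical direct construction matching the transpose_indices=True path.
import numpy as np
import torch

indices = np.array([[0, 0], [1, 2], [2, 1]])               # (nnz, 2) row/col pairs
values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
shape = [3, 3]

t = torch.sparse.FloatTensor(torch.LongTensor(indices).T,  # transpose to (2, nnz)
                             torch.FloatTensor(values),
                             torch.Size(shape))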
Example #7
    def prepare_batch(self, batch, istest=False):

        volatile = True if istest else False
        # logging.info("istest is %s and volatile is %s", istest, volatile)

        l_batch, l_lengths, \
        r_batch, r_lengths, \
        doc_bow_batch, \
        truewid_descvec_batch, \
        types_batch, \
        coherence_batch, \
        wids_batch, wid_cprobs_batch, nocands_mask_batch = batch

        # Do not mess with type casting here. torch.from_numpy is SLOW
        l_batch = MyTensor(l_batch)
        l_lengths = torch.LongTensor(l_lengths)
        r_batch = MyTensor(r_batch)
        r_lengths = torch.LongTensor(r_lengths)
        wids_batch = torch.LongTensor(wids_batch)
        # nocands_mask_batch = MyTensor(nocands_mask_batch)

        if self.args["usecoh"]:
            batch_rcs, batch_vals, shape = coherence_batch
            batch_rcs = torch.LongTensor(batch_rcs)
            batch_vals = MyTensor(batch_vals)
            coherence_batch = MySparseTensor(batch_rcs.t(), batch_vals,
                                             torch.Size(shape))

        if self.args["usedesc"]:
            truewid_descvec_batch = MyTensor(truewid_descvec_batch)
        if self.args["usetype"]:
            types_batch = MyTensor(types_batch)

        if self.args["cuda"]:
            devid = self.args["device_id"]
            l_batch = l_batch.cuda(device=devid)
            l_lengths = l_lengths.cuda(device=devid)
            r_batch = r_batch.cuda(device=devid)
            r_lengths = r_lengths.cuda(device=devid)
            wids_batch = wids_batch.cuda(device=devid)
            # nocands_mask_batch = nocands_mask_batch.cuda(device=devid)
            if self.args["usecoh"]:
                coherence_batch = coherence_batch.cuda(device=devid)
            if self.args["usedesc"]:
                truewid_descvec_batch = truewid_descvec_batch.cuda(
                    device=devid)
            if self.args["usetype"]:
                types_batch = types_batch.cuda(device=devid)

        l_batch, l_lengths = V(l_batch,
                               volatile=volatile), V(l_lengths,
                                                     volatile=volatile)
        r_batch, r_lengths = V(r_batch,
                               volatile=volatile), V(r_lengths,
                                                     volatile=volatile)
        wids_batch = V(wids_batch, volatile=volatile)
        # nocands_mask_batch = V(nocands_mask_batch)
        if self.args["usecoh"]:
            coherence_batch = V(coherence_batch, volatile=volatile)
        if self.args["usedesc"]:
            truewid_descvec_batch = V(truewid_descvec_batch, volatile=volatile)
        if self.args["usetype"]:
            types_batch = V(types_batch, volatile=volatile)

        batch = l_batch, l_lengths, \
                r_batch, r_lengths, \
                truewid_descvec_batch, \
                types_batch, \
                coherence_batch, \
                wids_batch, wid_cprobs_batch, nocands_mask_batch
        return batch
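
For the coherence features, the snippet assembles a sparse batch from row/column indices, values, and a shape; a minimal construction of that kind is sketched below (MyTensor and MySparseTensor are presumably aliases for torch.FloatTensor and torch.sparse.FloatTensor, which is an assumption, not something the snippet confirms):

# Illustrative construction of a coherence-style sparse batch.
import torch

batch_rcs = torch.LongTensor([[0, 1], [1, 0], [2, 2]])     # (nnz, 2) row/col pairs
batch_vals = torch.FloatTensor([0.5, 0.25, 1.0])
shape = (3, 3)

coherence = torch.sparse.FloatTensor(batch_rcs.t(), batch_vals, torch.Size(shape))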