Example #1
File: pcp.py Project: dfm/ketu
def _svd(method, X, rank, tol, **args):
    rank = min(rank, np.min(X.shape))
    if method == "approximate":
        return fbpca.pca(X, k=rank, raw=True, **args)
    elif method == "exact":
        return np.linalg.svd(X, full_matrices=False, **args)
    elif method == "sparse":
        if rank >= np.min(X.shape):
            return np.linalg.svd(X, full_matrices=False)
        u, s, v = svds(X, k=rank, tol=tol)
        u, s, v = u[:, ::-1], s[::-1], v[::-1, :]
        return u, s, v
    raise ValueError("invalid SVD method")
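A minimal usage sketch of this dispatcher, assuming the module-level imports it relies on (numpy as np, fbpca, and scipy.sparse.linalg.svds); the random matrix below is only for illustration:

import numpy as np

X = np.random.randn(200, 50)
# Every branch returns a (U, s, V) triplet; "approximate" and "sparse" truncate
# to `rank` components, while "exact" returns the full thin SVD.
U, s, V = _svd("approximate", X, rank=10, tol=1e-8)
print(U.shape, s.shape, V.shape)  # (200, 10) (10,) (10, 50)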
Example #2
    def __init__(self, X, **kwargs):
        """Perform the Singular Value Decomposition (SVD) of a matrix `X` of shape `(n, p)`.

        Args:
            X (matrix): The matrix on which to perform the SVD. `X` can be a `pandas.DataFrame`,
                however the SVD will be faster with a pure `numpy` matrix which can be extracted
                from a `pandas.DataFrame` thanks to the `values` property.
            kwargs: see http://fbpca.readthedocs.io/en/latest/#fbpca.pca.

        Returns:
            matrix: The left eigenvectors of shape `(n, k)`, usually denoted `U`.
            array: The singular values (square roots of the eigenvalues) of shape `(k,)`, usually
                denoted `s`.
            matrix: The right eigenvectors of shape `(k, p)`, usually denoted `V`.
        """
        self.U, self.s, self.V = fbpca.pca(X, **kwargs)
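A minimal sketch of the decomposition this constructor wraps, assuming a plain numpy matrix and fbpca installed; the rank-k reconstruction at the end only illustrates how U, s, and V fit together:

import numpy as np
import fbpca

X = np.random.randn(100, 20)   # (n, p) input matrix
U, s, V = fbpca.pca(X, k=5)    # U: (n, k), s: (k,), V: (k, p)
# By default fbpca subtracts the mean row before the SVD, so the rank-k
# product approximates the centered matrix.
Xc = X - X.mean(axis=0)
print(np.linalg.norm(Xc - U @ np.diag(s) @ V))  # residual of the rank-5 fit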
Example #3
File: basis.py Project: dfm/ketu
def build(lc_pattern, outfile, nbasis=500):
    pool = Pool()

    print("Loading light curves...")
    lcs = []
    fns = glob.glob(lc_pattern)
    lcs = np.array([y for y in pool.map(load_data, fns) if y is not None])
    print("Found {0} light curves...".format(len(lcs)))

    # Normalize the data.
    mu = np.median(lcs, axis=1)
    X = lcs - mu[:, None]

    # Run PCA.
    print("Running PCA...")
    strt = time.time()
    _, _, basis = fbpca.pca(X, k=nbasis, raw=True)
    print("Took {0:.1f} seconds".format(time.time() - strt))

    print(np.any(np.isnan(basis)), np.any(np.isinf(basis)))

    # Compute the prior.
    print("Computing the empirical 'prior'...")
    factor = cho_factor(np.dot(basis, basis.T))
    Y = 1e3 * (lcs / np.median(lcs, axis=1)[:, None] - 1)
    Y[~np.isfinite(Y)] = 0.0  # WTF?!
    weights = cho_solve(factor, np.dot(basis, Y.T))
    weights = np.concatenate((weights, -weights), axis=1)
    basis *= np.sqrt(np.median(weights**2, axis=1))[:, None]

    # Update the light curve files with the corrected CDPP values.
    print("Updating light curve files...")
    K_0 = np.dot(basis.T, basis)
    results = np.array(
        [v for v in pool.map(partial(update_file, K_0), fns) if v is not None],
        dtype=[("epicid", int), ("best_cdpp6", float)]
    )

    # Save the basis.
    print("Saving to {0}...".format(outfile))
    with h5py.File(outfile, "w") as f:
        f.create_dataset("basis", data=basis, compression="gzip")
        f.create_dataset("cdpp", data=results, compression="gzip")
Example #4
def svd_timing(X, n_comps, n_iter, n_oversamples,
               power_iteration_normalizer='auto', method=None):
    """
    Measure time for decomposition
    """
    print("... running SVD ...")
    if method != 'fbpca':
        gc.collect()
        t0 = time()
        U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
                                  power_iteration_normalizer,
                                  random_state=random_state, transpose=False)
        call_time = time() - t0
    else:
        gc.collect()
        t0 = time()
        # There is a different convention for l here
        U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
                             l=n_oversamples+n_comps)
        call_time = time() - t0

    return U, mu, V, call_time
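An illustrative comparison of the two back-ends on a random matrix (this is not the benchmark harness itself; the oversampling convention follows the comment in the code above):

import numpy as np
import fbpca
from sklearn.utils.extmath import randomized_svd

X = np.random.randn(2000, 500)
U1, s1, V1 = randomized_svd(X, n_components=50, n_oversamples=10, n_iter=4,
                            random_state=0)
# fbpca expects the total sketch size l = n_components + n_oversamples
U2, s2, V2 = fbpca.pca(X, k=50, raw=True, n_iter=4, l=50 + 10)
print(np.max(np.abs(s1 - s2) / s1))  # relative disagreement of singular values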
Example #5
def svd_test(singular_value_number, n_classes, bottlenecks_t1, ground_truth_t1,
             test_bottlenecks, test_ground_truth, bottlenecks_t2,
             ground_truth_t2, bottlenecks_v, ground_truth_v):
    bottleneck_input = tf.placeholder(tf.float32,
                                      [None, singular_value_number],
                                      name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32, [None, n_classes],
                                        name='GroundTruthInput')
    # Define one fully-connected layer to solve the new image classification problem.
    # The trained Inception-v3 model has already abstracted the raw images into feature vectors that are easy to classify, so a more complex network is not needed for this new task.
    with tf.name_scope('final_training_ops'):
        weights = tf.Variable(
            tf.truncated_normal([singular_value_number, n_classes],
                                stddev=0.001))
        biases = tf.Variable(tf.zeros([n_classes]))
        logits = tf.matmul(bottleneck_input, weights) + biases
        final_tensor = tf.nn.softmax(logits)
    # Define the cross-entropy loss function
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=ground_truth_input)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
        cross_entropy_mean)
    # Compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(final_tensor, 1),
                                      tf.argmax(ground_truth_input, 1))
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    bottlenecks_t1_array = np.array(bottlenecks_t1)
    (u, sigma, vt) = pca(bottlenecks_t1_array, singular_value_number)
    v = vt.transpose()

    bottlenecks_after_svd_t2 = np.dot(bottlenecks_t2, v)
    bottlenecks_after_svd_v = np.dot(bottlenecks_v, v)
    test_bottlenecks_after_svd = np.dot(test_bottlenecks, v)

    with tf.Session() as sess2:
        tf.global_variables_initializer().run()
        # Training loop
        for i in range(STEPS):
            # Fetch one batch of training data on each step
            train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
                bottlenecks_after_svd_t2, ground_truth_t2)
            sess2.run(train_step,
                      feed_dict={
                          bottleneck_input: train_bottlenecks,
                          ground_truth_input: train_ground_truth
                      })
            # Evaluate accuracy on the validation set.
            if i % 200 == 0 or i + 1 == STEPS:
                validation_bottlenecks, validation_ground_truth = get_random_cached_bottlenecks(
                    bottlenecks_after_svd_v, ground_truth_v)
                validation_accuracy = sess2.run(evaluation_step,
                                                feed_dict={
                                                    bottleneck_input:
                                                    validation_bottlenecks,
                                                    ground_truth_input:
                                                    validation_ground_truth
                                                })
                print(
                    'Step %d: Validation accuracy on random sampled %d examples = %.1f%%'
                    % (i, BATCH, validation_accuracy * 100))

    # Evaluate accuracy on the final test data
        test_accuracy = sess2.run(evaluation_step,
                                  feed_dict={
                                      bottleneck_input:
                                      test_bottlenecks_after_svd,
                                      ground_truth_input: test_ground_truth
                                  })
    print('Final test accuracy (svd %d) = %.1f%%' %
          (singular_value_number, test_accuracy * 100))
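A minimal sketch of the SVD projection step used above, with random stand-ins for the Inception bottleneck vectors (the `pca` import from fbpca is assumed, as in the original module):

import numpy as np
from fbpca import pca

bottlenecks = np.random.randn(1000, 2048)  # stand-in bottleneck features
k = 128                                    # singular_value_number
u, sigma, vt = pca(bottlenecks, k)         # same call convention as above
v = vt.transpose()                         # (2048, k) projection matrix
projected = np.dot(bottlenecks, v)         # (1000, k) reduced features
print(projected.shape)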
Example #6
    def correct(self,
                aperture_mask=None,
                cadence_mask=None,
                gp_timescale=30,
                use_gp=True,
                pld_order=2,
                n_pca_terms=10,
                pld_aperture_mask=None):
        r"""Returns a PLD systematics-corrected LightCurve.

        Parameters
        ----------
        aperture_mask : array-like, 'pipeline', 'all', 'threshold', or None
            A boolean array describing the aperture such that `True` means
            that the pixel will be used to generate the raw flux light curve.
            If `None` or 'all' are passed, all pixels will be used.
            If 'pipeline' is passed, the mask suggested by the official pipeline
            will be returned.
            If 'threshold' is passed, all pixels brighter than 3-sigma above
            the median flux will be used.
        cadence_mask : array-like
            A mask that will be applied to the cadences prior to constructing
            the detrending model. For example, you can pass a boolean array
            of length `n_cadences` where `True` means that the cadence will be
            included in the noise model. You may also pass an array of indices.
            This option enables signals of interest (e.g. planet transits)
            to be excluded from the noise model, which will prevent over-fitting.
            By default, no cadences will be masked.
        gp_timescale : float
            Gaussian Process time scale length term (`tau`) used to define
            length of fit variability in days.
        use_gp : boolean
            Option to turn GP fitting on or off.  You would typically only set
            this to False to speed up the correction (at the cost of precision),
            or if you suspect the presence of systematic noise at long timescales.
        pld_order : int
            The order of Pixel Level De-correlation to be performed. First order
            (`n=1`) uses only the pixel fluxes to construct the design matrix.
            Higher order populates the design matrix with columns constructed
            from the products of pixel fluxes.
        n_pca_terms : int
            Number of terms added to the design matrix from each order of PLD
            when performing Principal Component Analysis for models higher than
            first order. Increasing this value may provide higher precision at
            the expense of computational time.
        pld_aperture_mask : array-like, 'pipeline', 'all', 'threshold', or None
            A boolean array describing the aperture such that `True` means
            that the pixel will be used when selecting the PLD basis vectors.
            If `None` or `all` are passed in, all pixels will be used.
            If 'pipeline' is passed, the mask suggested by the official pipeline
            will be returned.
            If 'threshold' is passed, all pixels brighter than 3-sigma above
            the median flux will be used.

        Returns
        -------
        corrected_lightcurve : `~lightkurve.lightcurve.LightCurve`
            Returns a corrected lightcurve object. Depending on the input, the
            returned object will be a `KeplerLightCurve`, `TessLightCurve`, or
            general `LightCurve` object.
        """
        if use_gp:
            # Verify optional dependency
            try:
                import celerite
            except ImportError:
                log.error("PLD uses the `celerite` Python package. "
                          "See the installation instructions at "
                          "https://docs.lightkurve.org/about/install.html. "
                          "`use_gp` has been set to `False`.")
                use_gp = False

        # Parse the aperture mask to accept strings etc.
        aperture = self.tpf._parse_aperture_mask(aperture_mask)

        # generate flux light curve from desired pixels
        lc = self.tpf.to_lightcurve(aperture_mask=aperture)
        rawflux = lc.flux
        rawflux_err = lc.flux_err

        # create nan mask
        nanmask = np.isfinite(self.time)
        nanmask &= np.isfinite(rawflux)
        nanmask &= np.isfinite(rawflux_err)
        nanmask &= np.abs(rawflux_err) > 1e-12

        # mask out nan values
        rawflux = rawflux[nanmask]
        rawflux_err = rawflux_err[nanmask]
        self.flux = self.flux[nanmask]
        self.flux_err = self.flux_err[nanmask]
        self.time = self.time[nanmask]

        # parse the PLD aperture mask
        pld_pixel_mask = self.tpf._parse_aperture_mask(pld_aperture_mask)

        # find pixel bounds of aperture on tpf
        xmin, xmax = min(np.where(pld_pixel_mask)[0]), max(
            np.where(pld_pixel_mask)[0])
        ymin, ymax = min(np.where(pld_pixel_mask)[1]), max(
            np.where(pld_pixel_mask)[1])

        # crop data cube to include only desired pixels
        # this is required for superstamps to ensure matrix is invertible
        flux_crop = self.flux[:, xmin:xmax + 1, ymin:ymax + 1]
        flux_err_crop = self.flux_err[:, xmin:xmax + 1, ymin:ymax + 1]
        aperture_crop = pld_pixel_mask[xmin:xmax + 1, ymin:ymax + 1]

        # calculate errors (ignore warnings related to zero or negative errors)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            flux_err = np.nansum(flux_err_crop[:, aperture_crop]**2,
                                 axis=1)**0.5

        # first order PLD design matrix
        pld_flux = flux_crop[:, aperture_crop]
        f1 = np.reshape(pld_flux, (len(pld_flux), -1))
        X1 = f1 / np.nansum(pld_flux, axis=-1)[:, None]
        # No NaN pixels
        X1 = X1[:, np.isfinite(X1).all(axis=0)]

        # higher order PLD design matrices
        X_sections = [np.ones((len(flux_crop), 1)), X1]
        for i in range(2, pld_order + 1):
            f2 = np.product(list(multichoose(X1.T, pld_order)), axis=1).T
            try:
                # We use an optional dependency for very fast PCA (fbpca).
                # If the import fails we will fall back on using the slower `np.linalg.svd`
                from fbpca import pca
                components, _, _ = pca(f2, n_pca_terms)
            except ImportError:
                log.error("PLD uses the `fbpca` package. You can pip install "
                          "with `pip install fbpca`. Using `np.linalg.svd` "
                          "instead.")
                components, _, _ = np.linalg.svd(f2)
            X_n = components[:, :n_pca_terms]
            X_sections.append(X_n)

        # Create the design matrix X by stacking X1 and higher order components, and
        # adding a column vector of 1s for numerical stability (see Luger et al.).
        # X has shape (n_cadences, 1 + n_components_first + n_components_higher_order)
        X = np.concatenate(X_sections, axis=1)

        # set default transit mask
        if cadence_mask is None:
            cadence_mask = np.ones_like(lc.time, dtype=bool)
        M = lambda x: x[cadence_mask[nanmask]]

        # mask transits in design matrix
        MX = M(X)

        if use_gp:
            # We use a Gaussian Process to model the long term trend.
            # We do this by estimating the long term trend y by applying the
            # preliminary PLD model defined above and subtracting it from the raw light curve.
            # The "in transit" cadences are masked out in this step to prevent the
            # long term approximation from over-fitting the transits.
            XTX = np.dot(MX.T, MX)
            XTX[np.diag_indices_from(XTX)] += 1e-8
            XTy = np.dot(MX.T, M(rawflux))
            y = M(rawflux) - np.dot(MX, np.linalg.solve(XTX, XTy))

            # Estimate the amplitude parameter of a Matern-3/2 kernel GP
            # by computing the standard deviation of y.
            amp = np.nanstd(y)
            tau = gp_timescale  # tau is a user-defined parameter
            # set up gaussian process using celerite
            # we use a Matern-3/2 kernel for its flexibility and non-periodicity
            kernel = celerite.terms.Matern32Term(np.log(amp), np.log(tau))
            gp = celerite.GP(kernel)
            gp.compute(M(self.time), M(rawflux_err))

            # compute the coefficients C on the basis vectors;
            # the PLD design matrix will be dotted with C to solve for the noise model.
            A = np.dot(MX.T, gp.apply_inverse(MX))
            B = np.dot(MX.T, gp.apply_inverse(M(rawflux)[:, None])[:, 0])

        else:
            # compute the coefficients C on the basis vectors;
            # the PLD design matrix will be dotted with C to solve for the noise model.
            ivar = 1.0 / M(rawflux_err)**2  # inverse variance
            A = np.dot(MX.T, MX * ivar[:, None])
            B = np.dot(MX.T, M(rawflux) * ivar)

        # apply prior to design matrix weights for numerical stability
        A[np.diag_indices_from(A)] += 1e-8
        C = np.linalg.solve(A, B)

        # compute detrended light curve
        model = np.dot(X, C)
        self.detrended_flux = rawflux - (model - np.nanmean(model))

        # Create and return a new LightCurve object with the corrected flux
        corrected_lc = lc.copy()[nanmask]
        corrected_lc.flux = self.detrended_flux
        corrected_lc.flux_err = flux_err
        return corrected_lc
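A hedged usage sketch, assuming this `correct` method belongs to a PLD corrector built from a Kepler/K2 target pixel file, as in older lightkurve releases; the search target is only a placeholder:

import lightkurve as lk
from lightkurve.correctors import PLDCorrector

# Hypothetical target; any Kepler/K2 target pixel file would do.
tpf = lk.search_targetpixelfile("K2-18").download()
corrector = PLDCorrector(tpf)
# Parameters as documented above; use_gp=False avoids the optional celerite dependency.
corrected_lc = corrector.correct(pld_order=2, n_pca_terms=10, use_gp=False)
corrected_lc.plot()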
Example #7
def reduce_dimensionality(X, dim_red_k=100):
    k = min((dim_red_k, X.shape[0], X.shape[1]))
    U, s, Vt = pca(X, k=k)  # Automatically centers.
    return U[:, :k] * s[:k]
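A hypothetical call on random data (fbpca's `pca`, imported as `pca` in the original module, centers the input by default, as the comment above notes):

import numpy as np

X = np.random.randn(500, 300)                      # samples by features
X_dimred = reduce_dimensionality(X, dim_red_k=50)
print(X_dimred.shape)                              # (500, 50): samples in PC space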
Example #8
    
if __name__ == '__main__':
    datasets, genes_list, n_cells = load_names(data_names, norm=False)

    for name, dataset, genes in zip(data_names, datasets, genes_list):

        name = name.split('/')[-1]

        X = normalize(dataset)

        gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
                   if s >= 500 ]
        X = X[gt_idx]
        
        k = DIMRED
        U, s, Vt = pca(X, k=k)
        X_dimred = U[:, :k] * s[:k]

        viz_genes = [
            #'CD74', 'JUNB', 'B2M',
            'CD14', 'CD68',
            #'PF4',
            #'HBB',
            #'CD19',
        ]

        from ample import gs, uniform
        samp_idx = gs(X_dimred, 20000, replace=False)

        adata = AnnData(X=X_dimred[samp_idx, :])
        sc.pp.neighbors(adata, use_rep='X')
Example #9
def mmp(matrix_train, embedded_matrix=np.empty((0)), mode_dim=5, key_dim=3,
        batch_size=32, optimizer="Adam", learning_rate=0.001, normalize=True,
        iteration=4, epoch=20, lamb=100, rank=200, corruption=0.5, fb=False,
        seed=1, root=1, alpha=1, return_model=False, **unused):
    """
    PureSVD algorithm
    :param matrix_train: rating matrix
    :param embedded_matrix: item or user embedding matrix(side info)
    :param iteration: number of random SVD iterations
    :param rank: SVD top K eigenvalue ranks
    :param fb: facebook package or sklearn package. boolean
    :param seed: Random initialization seed
    :param unused: args that not applicable for this algorithm
    :return:
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    if embedded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embedded_matrix.T))
    progress.subsection("Create PMI matrix")
    pmi_matrix = get_pmi_matrix(matrix_input, root)
    progress.subsection("Randomized SVD")
    start_time = time.time()
    if fb:
        P, sigma, Qt = pca(pmi_matrix,
                           k=rank,
                           n_iter=iteration,
                           raw=True)
    else:
        P, sigma, Qt = randomized_svd(pmi_matrix,
                                      n_components=rank,
                                      n_iter=iteration,
                                      power_iteration_normalizer='QR',
                                      random_state=seed)
    Q = Qt.T*np.sqrt(sigma)
    # TODO: Verify this. Seems better with this.
    if normalize:
        Q = (Q - np.mean(Q)) / np.std(Q)

    # Type has to match with Tensorflow graph implementation which uses float32
    if isinstance(Q[0][0], np.float64):
        Q = np.float32(Q)

    model = MultiModesPreferenceEstimation(input_dim=matrix_train.shape[1],
                                           embed_dim=rank,
                                           mode_dim=mode_dim,
                                           key_dim=key_dim,
                                           batch_size=batch_size,
                                           alpha=alpha,
                                           lamb=lamb,
                                           learning_rate=learning_rate,
                                           optimizer=Optimizer[optimizer],
                                           item_embeddings=Q)
    model.train_model(matrix_train, corruption, epoch)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    if return_model:
        return model

    RQ = model.get_RQ(matrix_input)
    Y = model.get_Y()
    #Bias = model.get_Bias()
    model.sess.close()
    tf.reset_default_graph()
    return RQ, Y.T, None
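A reduced sketch of how the item embedding Q is formed by the randomized-SVD branch above (a random non-negative matrix stands in for the PMI matrix, and the TensorFlow model is omitted):

import numpy as np
from fbpca import pca

pmi_matrix = np.random.rand(1000, 400)   # stand-in for get_pmi_matrix output
P, sigma, Qt = pca(pmi_matrix, k=50, n_iter=4, raw=True)
Q = Qt.T * np.sqrt(sigma)                # (400, 50) item embeddings
Q = (Q - np.mean(Q)) / np.std(Q)         # optional normalization, as above
print(Q.shape)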
Example #10
    mat = mat.append(matAll.loc[(matAll["type"] == "tissue"), ])
    mat["cluster"] = 2
    mat.loc[mat['tissue.cancer'] == can, "cluster"] = 1
    N_samples = len(mat["tissue.cancer"])

    # remove unnecessary fields
    matt = mat.copy()
    matt.drop("batch", axis=1, inplace=True)
    matt.drop("type", axis=1, inplace=True)
    matt.drop("cluster", axis=1, inplace=True)
    matt.drop("tissue.cancer", axis=1, inplace=True)
    matt.set_index('dataset', inplace=True)
    mattm = matt.values

    # compute the PCs - necessary input for the sketching
    U, s, Vt = pca(mattm, k=100)
    X_dimred = U[:, :100] * s[:100]

    # sketch
    N = int(N_samples * sk_sz)  # Number of samples to obtain from the dataset
    sketch_index = gs(X_dimred, N, replace=False)
    X_sketch = X_dimred[sketch_index]

    # get the samples selected in the sketch and output
    reduced = pd.DataFrame(X_sketch)
    pca_out = pd.DataFrame(X_dimred)
    pca_out["dataset"] = list(matt.index)
    red_with_labs = pd.merge(pca_out,
                             reduced,
                             how="inner",
                             on=list(reduced.columns.values))
Example #11
def latent_scatter(var_unk_pred, y_unk_pred, acquisition, **kwargs):
    chems = kwargs['chems']
    chem2feature = kwargs['chem2feature']
    idx_obs = kwargs['idx_obs']
    idx_unk = kwargs['idx_unk']
    regress_type = kwargs['regress_type']
    prot_target = kwargs['prot_target']

    chem_idx_obs = sorted(set([i for i, _ in idx_obs]))
    chem_idx_unk = sorted(set([i for i, _ in idx_unk]))

    feature_obs = np.array([chem2feature[chems[i]] for i in chem_idx_obs])
    feature_unk = np.array([chem2feature[chems[i]] for i in chem_idx_unk])

    from sklearn.neighbors import NearestNeighbors
    nbrs = NearestNeighbors(n_neighbors=1).fit(feature_obs)
    dist = np.ravel(nbrs.kneighbors(feature_unk)[0])
    print('Distance Spearman rho = {}, P = {}'.format(
        *ss.spearmanr(dist, var_unk_pred)))
    print('Distance Pearson r = {}, P = {}'.format(
        *ss.pearsonr(dist, var_unk_pred)))

    X = np.vstack([feature_obs, feature_unk])
    labels = np.concatenate(
        [np.zeros(len(chem_idx_obs)),
         np.ones(len(chem_idx_unk))])
    sidx = np.argsort(-var_unk_pred)

    from fbpca import pca
    U, s, Vt = pca(
        X,
        k=3,
    )
    X_pca = U * s

    from umap import UMAP
    um = UMAP(
        n_neighbors=15,
        min_dist=0.5,
        n_components=2,
        metric='euclidean',
    )
    X_umap = um.fit_transform(X)

    from MulticoreTSNE import MulticoreTSNE as TSNE
    tsne = TSNE(
        n_components=2,
        n_jobs=20,
    )
    X_tsne = tsne.fit_transform(X)

    if prot_target is None:
        suffix = ''
    else:
        suffix = '_' + prot_target

    for name, coords in zip(
        ['pca', 'umap', 'tsne'],
        [X_pca, X_umap, X_tsne],
    ):
        plt.figure()
        sns.scatterplot(
            x=coords[labels == 1, 0],
            y=coords[labels == 1, 1],
            color='blue',
            alpha=0.1,
        )
        plt.scatter(
            x=coords[labels == 0, 0],
            y=coords[labels == 0, 1],
            color='orange',
            alpha=1.0,
            marker='x',
            linewidths=10,
        )
        plt.savefig('figures/latent_scatter_{}_ypred_{}{}.png'.format(
            name, regress_type, suffix),
                    dpi=300)
        plt.close()

        plt.figure()
        plt.scatter(x=coords[labels == 1, 0],
                    y=coords[labels == 1, 1],
                    c=ss.rankdata(var_unk_pred),
                    alpha=0.1,
                    cmap='coolwarm')
        plt.savefig('figures/latent_scatter_{}_var_{}{}.png'.format(
            name, regress_type, suffix),
                    dpi=300)
        plt.close()

        plt.figure()
        plt.scatter(x=coords[labels == 1, 0],
                    y=coords[labels == 1, 1],
                    c=-acquisition,
                    alpha=0.1,
                    cmap='hot')
        plt.savefig('figures/latent_scatter_{}_acq_{}{}.png'.format(
            name, regress_type, suffix),
                    dpi=300)
        plt.close()
Example #12
def first_svd_comp_fb(h, n_comps=1):
    u,s,v = fbpca.pca(h,k = n_comps, raw = True)

    comps = [(np.sqrt(s[i])*u[:,i],np.sqrt(s[i])*v[i,:]) for i in range(n_comps)]
    return comps
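A toy check of the returned components (random input; because `raw=True` skips centering, the outer product of the first pair is the leading rank-1 term of `h`):

import numpy as np
import fbpca

h = np.random.randn(50, 40)
a, b = first_svd_comp_fb(h, n_comps=1)[0]
rank1 = np.outer(a, b)   # (sqrt(s0)*u0)(sqrt(s0)*v0)^T == s0 * u0 v0^T
print(rank1.shape)       # (50, 40)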
Example #13
def core_scf_routine(
    mods_selected,
    features_selected,
    settings,
    metas,
    gxc_hvftrs,
    ps,
    drop_npcs,
    cross_mod_distance_measure,
    knn,
    relaxation,
    n_cca,
    npc,
    output_pcX_all,
    output_cells_all,
    output_imputed_data_format,
):
    """smooth within modality, impute across modalities, and construct a joint PC matrix
    """
    # GENE * CELL !!!!
    smoothed_features = collections.OrderedDict()
    logging.info("Smoothing within modalities...")
    for mod in mods_selected:
        ti = time.time()
        if settings[mod].mod_category == 'mc':
            _df = gxc_hvftrs[mod]
        else:
            _mat = gxc_hvftrs[mod].data.todense()
            _df = pd.DataFrame(
                _mat,
                index=gxc_hvftrs[mod].gene,
                columns=gxc_hvftrs[mod].cell,
            )
        npc = min(len(metas[mod]), npc)
        k_smooth = min(len(metas[mod]), 30)
        ka = 5
        if k_smooth >= 2 * ka:
            mat_smoothed, mat_knn = smooth_in_modality(
                _df,
                _df,
                k=k_smooth,
                ka=ka,
                npc=npc,
                p=ps[settings[mod].mod_category],
                drop_npc=drop_npcs[settings[mod].mod_category])
            smoothed_features[mod] = mat_smoothed
        else:
            smoothed_features[mod] = _df
        logging.info("{}: {}".format(mod, time.time() - ti))
    # delete
    del gxc_hvftrs[mod]

    # construct a joint matrix (PCA)
    logging.info("Constructing a joint matrix...")
    cells_all = np.hstack([metas[mod].index.values
                           for mod in mods_selected])  # cell (all mods)
    pcX_all = []
    for mod_y in features_selected:  ## to
        logging.info("Imputing into {} space...".format(mod_y))
        # get all_features
        X = []
        for mod_x in mods_selected:
            logging.info("for {} cells...".format(mod_x))
            if mod_x == mod_y:
                smoothed_yy = smoothed_features[
                    mod_y].T  # gene by cell !!! VERY IMPORTANT
                X.append(smoothed_yy)
            else:
                # impute x cells y space
                smoothed_features_x = smoothed_features[mod_x]
                smoothed_features_y = smoothed_features[mod_y]
                if cross_mod_distance_measure == 'correlation':
                    imputed_xy = impute_1pair(
                        mod_x,
                        mod_y,
                        smoothed_features_x,
                        smoothed_features_y,
                        settings,
                        knn=knn,
                        relaxation=relaxation,
                        impute_j=False,
                    )
                elif cross_mod_distance_measure == 'cca':
                    imputed_xy = impute_1pair_cca(
                        mod_x,
                        mod_y,
                        smoothed_features_x,
                        smoothed_features_y,
                        settings,
                        knn=knn,
                        relaxation=relaxation,
                        n_cca=n_cca,
                        impute_j=False,
                    )
                else:
                    raise ValueError("Choose from correlation and cca")
                X.append(imputed_xy)
        X = np.vstack(X)  # cell (all mods) by gene (mod_y)
        # save X (imputed counts)
        np.save(output_imputed_data_format.format(mod_y), X)
        # PCA
        U, s, V = fbpca.pca(X, npc)
        del X
        pcX = U.dot(np.diag(s))
        # normalize PCs
        sigma = np.sqrt(np.sum(s * s) / (pcX.shape[0] * pcX.shape[1]))
        pcX = pcX / sigma
        pcX_all.append(pcX)

    pcX_all = np.hstack(pcX_all)
    # save pcX_all
    np.save(output_pcX_all, pcX_all)
    np.save(output_cells_all, cells_all)
    logging.info("Saved output to: {}".format(output_pcX_all))
    logging.info("Saved output to: {}".format(output_cells_all))
    return pcX_all, cells_all
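A reduced sketch of the per-modality PCA and PC normalization applied to each imputed matrix above (a random stand-in for the cell-by-gene matrix X; the imputation and smoothing steps are omitted):

import numpy as np
import fbpca

X = np.random.randn(800, 2000)   # cells (all mods) by genes (mod_y)
npc = 50
U, s, V = fbpca.pca(X, npc)
pcX = U.dot(np.diag(s))          # cells in PC space
sigma = np.sqrt(np.sum(s * s) / (pcX.shape[0] * pcX.shape[1]))
pcX = pcX / sigma                # normalize the overall PC scale
print(pcX.shape)                 # (800, 50)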
Example #14
def smooth_in_modality(counts_matrix,
                       norm_counts_matrix,
                       k,
                       ka,
                       npc=100,
                       sigma=1.0,
                       p=0.1,
                       drop_npc=0):
    """Smooth a data matrix
    
    Arguments:
        - counts_matrix (pandas dataframe, feature by cell)
        - norm_counts_matrix (pandas dataframe, feature by cell) log10(CPM+1)
        - k (number of nearest neighbors)
    Return:
        - smoothed cells_matrix (pandas dataframe)
        - markov affinity matrix
    """
    # from sklearn.neighbors import NearestNeighbors
    import fbpca
    import clst_utils

    assert counts_matrix.shape[1] == norm_counts_matrix.shape[1]

    c = norm_counts_matrix.columns.values
    N = len(c)

    # reduce dimension fast version
    U, s, Vt = fbpca.pca(norm_counts_matrix.T.values, k=npc)
    pcs = U.dot(np.diag(s))
    if drop_npc != 0:
        pcs = pcs[:, drop_npc:]

    # get k nearest neighbor distances fast version
    inds, dists = clst_utils.gen_knn_annoy(pcs,
                                           k,
                                           form='list',
                                           metric='euclidean',
                                           n_trees=10,
                                           search_k=-1,
                                           verbose=True,
                                           include_distances=True)

    # remove itself
    dists = dists[:, 1:]
    inds = inds[:, 1:]

    # normalize by ka's distance
    dists = (dists / (dists[:, ka].reshape(-1, 1)))

    # gaussian kernel
    adjs = np.exp(-((dists**2) / (sigma**2)))

    # construct a sparse matrix
    cols = np.ravel(inds)
    rows = np.repeat(np.arange(N), k - 1)  # remove itself
    vals = np.ravel(adjs)
    A = sparse.csr_matrix((vals, (rows, cols)), shape=(N, N))

    # Symmetrize A (union of connection)
    A = A + A.T

    # normalization fast (A is now a weight matrix excluding itself)
    degrees = A.sum(axis=1)
    A = sparse.diags(1.0 / np.ravel(degrees)).dot(A)

    # include itself
    eye = sparse.identity(N)
    A = p * eye + (1 - p) * A

    # smooth fast (future?)
    counts_matrix_smoothed = pd.DataFrame((A.dot(counts_matrix.T)).T,
                                          columns=counts_matrix.columns,
                                          index=counts_matrix.index)
    return counts_matrix_smoothed, A
Example #15
def impute_1pair_cca(
    mod_i,
    mod_j,
    smoothed_features_i,
    smoothed_features_j,
    settings,
    knn,
    relaxation,
    n_cca,
    output_knn_mat_ij='',
    output_knn_mat_ji='',
    impute_j=True,
):
    """
    """
    # set up
    direct_i, direct_j = settings[mod_i].mod_direction, settings[
        mod_j].mod_direction

    mat_ii = smoothed_features_i.T  # cell in mod i; gene in mod i
    mat_jj = smoothed_features_j.T  # cell in mod j; gene in mod j

    genes_i = mat_ii.columns.values
    genes_j = mat_jj.columns.values
    genes_common = np.intersect1d(genes_i, genes_j)

    cells_i = mat_ii.index.values
    cells_j = mat_jj.index.values

    ## CCA euclidean distance
    # normalize the feature matrix
    X = mat_ii[genes_common].T.apply(
        basic_utils.zscore,
        axis=0) * direct_i  # gene by cell, zscore across genes
    Y = mat_jj[genes_common].T.apply(basic_utils.zscore, axis=0) * direct_j
    U, s, Vt = fbpca.pca(X.T.values.dot(Y.values), k=n_cca)
    del X, Y

    mat_norm_i = pd.DataFrame(U, index=mat_ii.index)
    maxk_i = int((len(cells_j) / len(cells_i)) * knn *
                 relaxation) + 1  # max number of NN a cell in i can get
    mat_norm_j = pd.DataFrame(Vt.T, index=mat_jj.index)
    maxk_j = int((len(cells_i) / len(cells_j)) * knn *
                 relaxation) + 1  # max number of NN a cell in j can get

    if impute_j:
        # knn_i and knn_j
        # j <- i for each j, get kNN in i
        knn_ji = get_constrained_knn(mat_norm_j,
                                     mat_norm_i,
                                     knn=knn,
                                     k_saturate=maxk_i,
                                     metric='euclidean')
        mat_knn_ji = sparse_adj_to_mat(knn_ji, len(cells_j), len(cells_i))

        if output_knn_mat_ji:
            sparse.save_npz(output_knn_mat_ji, mat_knn_ji)

        # normalize
        degrees_j = np.ravel(mat_knn_ji.sum(
            axis=1))  # for each cell in j, how many cells in i it connects to
        mat_knn_ji = sparse.diags(1.0 / (degrees_j + 1e-7)).dot(mat_knn_ji)

        # imputation both across and within modality
        mat_ji = mat_knn_ji.dot(mat_ii)  # cell in mod j, gene in mod i

    # i <- j
    knn_ij = get_constrained_knn(mat_norm_i,
                                 mat_norm_j,
                                 knn=knn,
                                 k_saturate=maxk_j,
                                 metric='euclidean')
    mat_knn_ij = sparse_adj_to_mat(knn_ij, len(cells_i), len(cells_j))

    if output_knn_mat_ij:
        sparse.save_npz(output_knn_mat_ij, mat_knn_ij)

    degrees_i = np.ravel(mat_knn_ij.sum(
        axis=1))  # for each cell in i, how many cells in j it connects to
    mat_knn_ij = sparse.diags(1.0 / (degrees_i + 1e-7)).dot(mat_knn_ij)

    mat_ij = mat_knn_ij.dot(mat_jj)  # cell in mod i, gene in mod j

    if impute_j:
        return mat_ij, mat_ji
    else:
        return mat_ij
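A toy sketch of the cross-covariance SVD at the heart of the CCA-style matching above (random stand-ins for the z-scored gene-by-cell matrices):

import numpy as np
import fbpca

X = np.random.randn(500, 80)    # genes_common x cells in modality i
Y = np.random.randn(500, 120)   # genes_common x cells in modality j
U, s, Vt = fbpca.pca(X.T.dot(Y), k=10)
# U embeds the cells of modality i and Vt.T the cells of modality j in a shared space
print(U.shape, Vt.T.shape)      # (80, 10) (120, 10)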
Example #16
def svd(matrix, k):
    ''' Provide a wrapper for the chosen SVD routine. '''
    U, s, V = pca(matrix, k=k)
    return U, s, V
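A trivial usage sketch of the wrapper (random matrix; `pca` is fbpca.pca, as imported in the original module):

import numpy as np

matrix = np.random.randn(300, 100)
U, s, V = svd(matrix, k=20)
print(U.shape, s.shape, V.shape)  # (300, 20) (20,) (20, 100)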
Example #17
def plot(X, title, labels, bold=None):
    plot_clusters(X, labels)
    if bold:
        plot_clusters(X[bold], labels[bold], s=20)
    plt.title(title)
    plt.savefig('{}.png'.format(title))


if __name__ == '__main__':
    datasets, genes_list, n_cells = load_names(data_names, norm=False)
    datasets, genes = merge_datasets(datasets, genes_list)
    X = vstack(datasets)

    k = DIMRED
    U, s, Vt = pca(normalize(X), k=k)
    X_dimred = U[:, :k] * s[:k]

    labels = (open('data/cell_labels/jurkat_293t_99_1_clusters.txt').read().
              rstrip().split())
    le = LabelEncoder().fit(labels)
    cell_labels = le.transform(labels)

    experiments(
        X_dimred,
        NAMESPACE,
        cell_labels=cell_labels,
        cell_exp_ratio=True,
        #spectral_nmi=True, louvain_ami=True,
        #rare=True,
        #rare_label=le.transform(['293t'])[0],
Example #18
def reconstruct(full_data_input_file,
                tracking_nodes_file,
                keyfile,
                output_file,
                singleframe=True,
                basis_file=None,
                target_position_file=None):

    n = 5
    k = 5

    if isinstance(basis_file, (list, tuple)):
        basis_file = basis_file[0]

    print("Extracting full simulation data")
    full_data_input = Input(full_data_input_file, basis_file,
                            target_position_file)
    coordinates_data, displacement_data, full_id_data, time_data = full_data_input.extract_main(
    )
    # Stores the original number of basis vectors
    full_node_num = coordinates_data.shape[0]

    print("Extracting tracking point ids and weighting functions")
    tracking_node_data = Input(tracking_nodes_file, basis_file)
    tracking_id_data, tracking_node_list, weights = tracking_node_data.extract_tracking_points_and_weights(
    )
    sort_tracking = np.argsort(tracking_id_data)
    tracking_id_data = tracking_id_data[sort_tracking]
    tracking_node_list = tracking_node_list[sort_tracking]

    print("Extracting target position data")
    if target_position_file is not None:
        target_data_input = Input(target_position_file, basis_file)
        target_data, target_id_data, target_velocity_data, target_time_data =\
        target_data_input.extract_simple()
        sort_target = np.argsort(target_id_data)
        target_data = target_data.reshape((-1, 3))
        target_data = target_data[sort_target, :]
        target_data = target_data.reshape((-1, 1))
    else:
        target_data, tracking_ids = append_tracking_point_rows(
            displacement_data[:, -1].reshape((-1, 1)), full_id_data,
            tracking_node_list)
        target_data = target_data[tracking_ids, :]
        target_pkl_data = [tracking_id_data, target_data]
        print("Pickling target data")
        with open(output_file.rsplit(".", 1)[0] + "_td.pkl", "wb") as f:
            pickle.dump(target_pkl_data, f)

    if basis_file is None:
        print("Calculating basis vectors from full data")
        (V, s, Vt) = fbpca.pca(displacement_data, k=k, n_iter=n, raw=True)
        print("Storing calculated basis vectors as .pkl")
        with open("basis_vectors.pkl", "wb") as f:
            pickle.dump(V, f)
    else:
        # Assumes bases are already rearranged
        print("Extracting reduced basis")
        basis_file_data = Input(basis_file, None, False, False)
        V = basis_file_data.extract_basis()
    ### Xsection mapping averages the tracking_nodes nodes to create new nodes for SVD
    V, target_node_indices = append_tracking_point_rows(
        V, full_id_data, tracking_node_list, weights)
    ### Reconstruction of matrix based on SVD
    A_r = reducedOrderApproximation(V, target_node_indices, target_data,
                                    coordinates_data)

    output_keyfile = Input(keyfile, basis_file, target_position_file)

    # if full_data_input.dataTypeHandle.type == "key":
    inputdeck = output_keyfile.dataTypeHandle.get_deck()
    # inputdeck.modify_nodes(full_id_data, A_r)
    inputdeck.modify_nodes(full_id_data, A_r)
    inputdeck.write_inputdeck(newfilename=output_file)
    print("Writing to Output File")
Example #19
embedding_type = "end"
PATH = "../data/distance_data/" + embedding_type + "/"
MAX_DIM = 30000
MAX_DIM_Y = 10000
D = np.load(PATH + "distance.npy")
print("Distance matrix loaded")
D_small = D[:MAX_DIM, :MAX_DIM_Y]
print("Normalize")
D_small = D_small / np.max(D_small)
print("Nonlinearize")
# A non-linearity is used to emphasize words with high similarity
D_small = -(D_small**0.25) + 1

print("Calculate SVD...")
U, s, Va = pca(D_small, k=64, raw=True, n_iter=15, l=None)
print(U)
print("DONE!")

err = diffsnorm(D_small, U, s, Va)
print(err)

print("Print embedding vector lengths...")
for i in range(len(U)):
    print(np.sum(U[i, :] * U[i, :])**0.5)

I = np.identity(len(D_small))[:len(D_small), :len(D_small[0])]
D_nondiagonal = D_small + I * 8.0

min_index = np.unravel_index(D_nondiagonal.argmin(), D_nondiagonal.shape)
print(min_index, D[min_index[0], min_index[1]])
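A self-contained sketch of the same rank-k fit and fbpca.diffsnorm error check on a small random matrix (the data paths above are specific to the original project and are not reproduced here):

import numpy as np
from fbpca import pca, diffsnorm

D = np.random.rand(300, 200)
D = -(D ** 0.25) + 1                  # same non-linearity as above
U, s, Va = pca(D, k=16, raw=True, n_iter=15)
print(diffsnorm(D, U, s, Va))         # spectral-norm error of the rank-16 fit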