Example #1
    def from_signatures(cls,
                        signatures,
                        standardize=False,
                        center=True,
                        use_median=True,
                        cluster_signatures=True,
                        signature_cluster_metric='correlation',
                        cluster_samples=True,
                        sample_cluster_metric='euclidean',
                        cluster_method='average'):
        """Generate a GO-PCA signature matrix from individual signatures.

        The GO-PCA signature matrix contains the expression levels of all
        generated signatures (rows) across all samples (columns) in the
        analysis. See the documentation of the `GOPCASignature` class for
        details on how signature expression levels are calculated.

        Parameters
        ----------
        signatures: Iterable of `GOPCASignature`
            The signatures generated.
        standardize: bool, optional
            Whether to standardize the signature expression values. [False]
        center: bool, optional
            Whether to center the signature expression values. [True]
        use_median: bool, optional
            Whether to use the median (instead of the mean) when centering. [True]
        cluster_signatures: bool, optional
            Whether to hierarchically cluster the signatures (rows). [True]
        signature_cluster_metric: str, optional
            The distance metric used for clustering the signatures. ['correlation']
        cluster_samples: bool, optional
            Whether to hierarchically cluster the samples (columns). [True]
        sample_cluster_metric: str, optional
            The distance metric used for clustering the samples. ['euclidean']
        cluster_method: str, optional
            The linkage method used for hierarchical clustering. ['average']
        """
        assert isinstance(signatures, Iterable)
        assert isinstance(standardize, bool)
        assert isinstance(center, bool)
        assert isinstance(use_median, bool)
        assert isinstance(cluster_signatures, bool)
        assert isinstance(cluster_samples, bool)

        ### generate the expression matrix
        matrix = ExpMatrix(
            pd.concat([
                sig.get_expression(standardize=standardize,
                                   center=center,
                                   use_median=use_median) for sig in signatures
            ],
                      axis=1).T)
        matrix.genes.name = 'Signatures'
        matrix.samples.name = 'Samples'

        if matrix.p == 1:
            cluster_signatures = False
            cluster_samples = False

        ### clustering
        if cluster_signatures:
            # cluster signatures
            matrix = cluster.cluster_genes(matrix,
                                           metric=signature_cluster_metric,
                                           method=cluster_method)

        if cluster_samples:
            # cluster samples
            matrix = cluster.cluster_samples(matrix,
                                             metric=sample_cluster_metric,
                                             method=cluster_method)

        return cls(matrix)
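
# --- Usage sketch (added for illustration; not part of the original source). ---
# Assuming the enclosing class is `GOPCASignatureMatrix` (as suggested by
# Example #3) and `signatures` is an iterable of `GOPCASignature` objects from
# a finished GO-PCA run, the classmethod above could be called as follows.
def build_signature_matrix(signatures):
    sig_matrix = GOPCASignatureMatrix.from_signatures(
        signatures,
        standardize=False,        # keep centered (not z-scored) expression levels
        cluster_signatures=True,  # hierarchically cluster the signature rows
        cluster_samples=True)     # hierarchically cluster the sample columns
    return sig_matrix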
Example #2
def main(args=None):

    vinfo = sys.version_info
    if not (vinfo >= (2, 7)):
        raise SystemError('Python interpreter version >= 2.7 required, '
                          'found %d.%d instead.' % (vinfo.major, vinfo.minor))

    if args is None:
        parser = get_argument_parser()
        args = parser.parse_args()

    gopca_file = args.gopca_file
    output_file = args.output_file

    #sig_max_len = args.sig_max_len
    #sig_reverse_order = args.sig_reverse_order

    #sample_cluster_metric = args.sample_cluster_metric
    #no_sample_clustering = args.no_sample_clustering

    # configure root logger
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose
    logger = misc.get_logger(log_file=log_file, quiet=quiet,
                             verbose=verbose)

    sig_matrix = util.read_gopca_result(gopca_file)

    sig_labels = [sig.get_label(include_id=False)
                  for sig in sig_matrix.signatures]

    matrix = ExpMatrix(genes=sig_labels, samples=sig_matrix.samples,
                       X=sig_matrix.X)
    matrix.index.name = 'Signatures'

    exp_logger = logging.getLogger(expression.__name__)
    exp_logger.setLevel(logging.WARNING)
    matrix.write_tsv(output_file)
    exp_logger.setLevel(logging.NOTSET)
    logger.info('Wrote %d x %d signature matrix to "%s".',
                matrix.p, matrix.n, output_file)

    return 0
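
# --- Usage sketch (illustrative; not part of the original source). ---
# main() accepts a pre-parsed argument object, so the script can also be driven
# programmatically. The attribute names below mirror those accessed in main()
# above; the file paths are placeholders supplied by the caller.
import argparse

def extract_signature_matrix(gopca_file, output_file):
    args = argparse.Namespace(
        gopca_file=gopca_file,    # pickled GO-PCA result file
        output_file=output_file,  # destination TSV for the signature matrix
        log_file=None,
        quiet=False,
        verbose=False)
    return main(args)  # returns 0 on success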
Example #3
    def get_heatmap(self,
                    sig_matrix=None,
                    standardize=False,
                    center=True,
                    use_median=True,
                    include_id=False,
                    include_stats=True,
                    include_pval=True,
                    cluster_genes=True,
                    gene_cluster_metric='correlation',
                    cluster_samples=True,
                    sample_cluster_metric='euclidean',
                    cluster_method='average',
                    colorbar_label=None,
                    **kwargs):
        """Generate a heatmap of the signature gene matrix."""
        # TODO: Finish docstring

        assert isinstance(cluster_genes, bool)
        assert isinstance(cluster_samples, bool)
        assert isinstance(gene_cluster_metric, (str, _oldstr))
        assert isinstance(sample_cluster_metric, (str, _oldstr))
        assert isinstance(cluster_method, (str, _oldstr))

        from . import GOPCASignatureMatrix
        if sig_matrix is not None:
            assert isinstance(sig_matrix, GOPCASignatureMatrix)

        matrix = self.matrix.copy()
        if standardize:
            matrix.standardize_genes(inplace=True)
            cb_default_label = ('Standardized expression<br>'
                                '(based on log<sub>2</sub>-scale)')
        elif center:
            matrix.center_genes(use_median=use_median, inplace=True)
            cb_default_label = 'Centered expression<br>(log<sub>2</sub>-scale)'
        else:
            cb_default_label = 'Expression<br>(log<sub>2</sub>-scale)'

        if colorbar_label is None:
            colorbar_label = cb_default_label

        # clustering
        if sig_matrix is not None:
            # user has provided a GOPCASignatureMatrix instance
            # make sure its samples match the signature's
            logger.info('Ordering samples to match order in signature matrix.')
            assert set(sig_matrix.samples) == set(self.samples.values)

            # re-arrange samples according to clustering of signature matrix
            matrix = matrix.loc[:, sig_matrix.samples]

        elif cluster_samples:
            # cluster samples (only if no signature matrix is provided)
            matrix = cluster.cluster_samples(matrix,
                                             metric=sample_cluster_metric,
                                             method=cluster_method)

        if cluster_genes:
            # cluster genes
            matrix = cluster.cluster_genes(matrix,
                                           metric=gene_cluster_metric,
                                           method=cluster_method)

        # add a "Signature"-labeled row to the top,
        # which represents the signature expression vector
        title = self.get_label(include_id=include_id,
                               include_stats=include_stats,
                               include_pval=include_pval)
        mean = np.mean(matrix.X, axis=0)
        header_row = ExpMatrix(genes=['<b>Signature</b>'],
                               samples=matrix.samples,
                               X=np.atleast_2d(mean))
        combined_matrix = pd.concat([header_row, matrix], axis=0)

        heatmap = ExpHeatmap(combined_matrix,
                             title=title,
                             colorbar_label=colorbar_label,
                             **kwargs)

        return heatmap
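
# --- Usage sketch (illustrative; not part of the original source). ---
# Assuming `sig` is a GOPCASignature and `sig_matrix` the GOPCASignatureMatrix
# from the same GO-PCA run, passing the signature matrix reuses its sample
# ordering instead of re-clustering the samples for each heatmap.
def signature_heatmap(sig, sig_matrix):
    heatmap = sig.get_heatmap(
        sig_matrix=sig_matrix,   # reuse sample order from the signature matrix
        standardize=False,
        center=True,
        use_median=True,
        cluster_genes=True)
    return heatmap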
Example #4
def my_matrix():
    genes = ['a', 'b', 'c', 'd', 'e', 'f']
    samples = ['s1', 's2', 's3']
    X = np.arange(18, dtype=np.float64).reshape(6, 3)
    matrix = ExpMatrix(genes=genes, samples=samples, X=X)
    return matrix
def main(args=None):

    vinfo = sys.version_info
    if not (vinfo >= (2, 7)):
        raise SystemError('Python interpreter version >= 2.7 required, '
                          'found %d.%d instead.' % (vinfo.major, vinfo.minor))

    if args is None:
        parser = get_argument_parser()
        args = parser.parse_args()

    expression_file = args.expression_file
    entrez2gene_file = args.entrez2gene_file
    gene_file = args.gene_file
    output_file = args.output_file

    strip_affy_suffix = args.strip_affy_suffix

    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose

    # configure root logger
    logger = misc.get_logger(log_file=log_file, quiet=quiet, verbose=verbose)

    # read data
    genome = ExpGeneTable.read_tsv(gene_file)
    matrix = ExpMatrix.read_tsv(expression_file)
    e2g = dict(misc.read_all(entrez2gene_file))

    entrez = matrix.genes

    if strip_affy_suffix:
        # remove "_at" suffix from Entrez IDs
        entrez = [e[:-3] for e in entrez]
    logger.debug(str(entrez[:3]))

    # check that Entrez IDs are unique
    assert len(entrez) == len(set(entrez))

    # convert Entrez IDs to gene names
    f = 0
    genes = []
    X = []

    # g = None
    for i, e in enumerate(entrez):
        # print e
        try:
            g = e2g[e]
        except KeyError:
            f += 1
        else:
            # check if there are multiple entrez IDs pointing to the same gene
            # assert g not in genes
            genes.append(g)
            X.append(matrix.X[i, :])
    assert len(genes) == len(set(genes))
    if f > 0:
        logger.warning(
            'Failed to convert %d / %d entrez IDs '
            'to gene symbols (%.1f%%).', f, matrix.p,
            100 * (f / float(matrix.p)))

    # filter for known protein-coding genes
    X = np.float64(X)
    p = X.shape[0]
    logger.debug(str(X.shape))
    sel = np.zeros(p, dtype=np.bool_)
    for i in range(p):
        if genes[i] in genome:
            sel[i] = True
    sel = np.nonzero(sel)[0]
    genes = [genes[i] for i in sel]
    X = X[sel, :]
    f = p - sel.size
    if f > 0:
        logger.warning(
            'Failed to find %d / %d gene symbols in list of '
            'protein-coding genes (%.1f%%)', f, p, 100 * (f / float(p)))

    # generate new matrix (this automatically sorts the genes alphabetically)
    logger.debug('Genes: %d, Samples: %d, matrix: %s', len(genes),
                 len(matrix.samples), str(X.shape))
    matrix_conv = ExpMatrix(genes=genes, samples=matrix.samples, X=X)

    # write output file
    matrix_conv.write_tsv(output_file)

    return 0
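
# --- Minimal sketch of the ID-mapping step above (illustrative toy data). ---
# Rows whose Entrez ID has no gene-symbol mapping are dropped, mirroring the
# try/except KeyError loop in main(); the dropped count corresponds to the
# "Failed to convert ..." warning.
import numpy as np

def map_entrez_to_symbols(entrez_ids, X, e2g):
    genes, rows = [], []
    for i, e in enumerate(entrez_ids):
        g = e2g.get(e)
        if g is None:
            continue  # unmapped Entrez ID: drop this row
        genes.append(g)
        rows.append(X[i, :])
    return genes, np.asarray(rows, dtype=np.float64)

# toy example: Entrez ID '999' has no mapping and is dropped
toy_e2g = {'1': 'A1BG', '2': 'A2M'}
toy_X = np.arange(9, dtype=np.float64).reshape(3, 3)
genes, X_conv = map_entrez_to_symbols(['1', '2', '999'], toy_X, toy_e2g)
# genes == ['A1BG', 'A2M']; X_conv.shape == (2, 3)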
Example #6
def rma(cdf_file,
        sample_cel_files,
        pm_probes_only=True,
        bg_correct=True,
        quantile_normalize=True,
        medianpolish=True):
    """Perform RMA on a set of samples.

    Parameters
    ----------
    cdf_file: str
        The path of the Brainarray CDF file to use.
        Note: Brainarray CDF files can be downloaded from
            http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/genomic_curated_CDF.asp
    sample_cel_files: collections.OrderedDict (str => str)
        An ordered dictionary where each key/value-pair corresponds to a
        sample. The *key* is the sample name, and the *value* is the (absolute)
        path of the corresponding CEL file. The CEL files can be gzip'ed.
    pm_probes_only: bool, optional
        Whether or not to only use PM (perfect match) probes and ignore all MM
        (mismatch) probes. [True]
    bg_correct: bool, optional
        Whether or not to apply background correction. [True]
    quantile_normalize: bool, optional
        Whether or not to apply quantile normalization. [True]
    medianpolish: bool, optional
        Whether or not to apply medianpolish. [True]

    Returns
    -------
    genes: list of str
        The list of gene names.
    samples: list of str
        The list of sample names.
    X: np.ndarray (ndim = 2, dtype = np.float32)
        The expression matrix (genes-by-samples).

    Examples
    --------
    >>> from collections import OrderedDict
    >>> import pyaffy
    >>> cdf_file = '/path/to/brainarray/cdf/HGU133Plus2_Hs_ENTREZG.cdf'
    >>> sample_cel_files = OrderedDict([
    ...     ['Sample 1', '/path/to/sample_1.CEL.gz'],
    ...     ['Sample 2', '/path/to/sample_2.CEL.gz'],
    ... ])
    >>> genes, samples, X = pyaffy.rma(cdf_file, sample_cel_files)
    """

    ### checks
    assert isinstance(cdf_file, (str, _oldstr))
    assert os.path.isfile(cdf_file), \
            'CDF file "%s" does not exist!' %(cdf_file)

    assert isinstance(sample_cel_files, collections.OrderedDict)
    for sample, cel_file in sample_cel_files.items():
        assert isinstance(sample, (str, _oldstr))
        assert isinstance(cel_file, (str, _oldstr))
        assert os.path.isfile(cel_file), \
                'CEL file "%s" does not exist!' %(cel_file)

    assert isinstance(pm_probes_only, bool)
    assert isinstance(bg_correct, bool)
    assert isinstance(quantile_normalize, bool)
    assert isinstance(medianpolish, bool)

    t00 = time.time()

    ### read CDF data
    logger.info('Parsing CDF file.')
    t0 = time.time()
    # parse the CDF file
    probe_type = 'pm'
    if not pm_probes_only:
        probe_type = 'all'
    name, num_rows, num_cols, pm_probesets = \
            parse_cdf(cdf_file, probe_type=probe_type)

    # concatenate indices of all PM probes into one long vector
    pm_sel = np.concatenate(list(pm_probesets.values()))

    t1 = time.time()
    logger.info('CDF file parsing time: %.2f s', t1 - t0)
    logger.info('CDF array design name: %s', name)
    logger.info('CDF rows / columns: %d x %d', num_rows, num_cols)

    ### read CEL data
    logger.info('Parsing CEL files...')
    t0 = time.time()
    p = pm_sel.size
    n = len(sample_cel_files)
    Y = np.empty((p, n), dtype=np.float32)

    samples = []
    sub_logger = logging.getLogger(celparser.__name__)
    sub_logger.setLevel(logging.WARNING)
    for j, (sample, cel_file) in enumerate(sample_cel_files.items()):
        logger.debug('Parsing CEL file for sample "%s": %s', sample, cel_file)
        samples.append(sample)
        y = parse_cel(cel_file)
        Y[:, j] = y[pm_sel]
    sub_logger.setLevel(logging.NOTSET)
    t1 = time.time()
    logger.info('CEL files parsing time: %.1f s.', t1 - t0)

    ### background correction
    if bg_correct:
        logger.info('Performing background correction...')
        t0 = time.time()
        Y = rma_bg_correct(Y)
        t1 = time.time()
        logger.info('Background correction time: %.1f s.', t1 - t0)
    else:
        logger.info('Skipping background correction.')

    matrix = ExpMatrix(genes=pm_sel, samples=samples, X=Y)

    ### quantile normalization
    if quantile_normalize:
        logger.info('Performing quantile normalization...')
        t0 = time.time()
        matrix = qnorm(matrix)
        t1 = time.time()
        logger.info('Quantile normalization time: %.1f s.', t1 - t0)
    else:
        logger.info('Skipping quantile normalization.')

    ### convert intensities to log2-scale
    Y = np.log2(matrix.values)

    ### probeset summarization (with or without median polish)
    method = 'with'
    if not medianpolish:
        method = 'without'
    logger.info('Summarizing probeset intensities (%s medianpolish)...', method)

    t0 = time.time()
    p = len(pm_probesets)
    n = Y.shape[1]
    X = np.empty((p, n), dtype=np.float32)
    cur = 0
    num_converged = 0
    genes = []
    for i, (gene_id, probes) in enumerate(pm_probesets.items()):
        genes.append(gene_id)

        if medianpolish:
            #X_sub = np.ascontiguousarray(Y[cur:(cur + probes.size),:])
            X_sub = Y[cur:(cur + probes.size), :]
            _, row_eff, col_eff, global_eff, converged, num_iter = medpolish(
                X_sub, copy=False)
            X[i, :] = col_eff + global_eff
            if converged:
                num_converged += 1

        else:
            # simply use median across probes
            X[i, :] = np.median(Y[cur:(cur + probes.size), :], axis=0)
            #X[i,:] = np.ma.median(X_sub, axis = 0)

        cur += probes.size

    t1 = time.time()
    logger.info('Probeset summarization time: %.2f s.', t1 - t0)

    if medianpolish:
        logger.debug('Converged: %d / %d (%.1f%%)', num_converged, p,
                     100 * (num_converged / float(p)))

    ### report total time
    t11 = time.time()
    logger.info('Total RMA time: %.1f s.', t11 - t00)

    ### sort alphabetically by gene name
    a = np.lexsort([genes])
    genes = [genes[i] for i in a]
    X = X[a, :]

    return genes, samples, X
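
# --- Follow-up sketch (illustrative; not part of pyaffy itself). ---
# The (genes, samples, X) triple returned by rma() can be wrapped into an
# ExpMatrix, as done in the other examples here, and written to a TSV file.
# The CDF/CEL paths passed in are placeholders supplied by the caller.
def rma_to_tsv(cdf_file, sample_cel_files, output_file):
    genes, samples, X = rma(cdf_file, sample_cel_files)
    matrix = ExpMatrix(genes=genes, samples=samples, X=X)
    matrix.write_tsv(output_file)
    return matrix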
Example #7
def my_matrix(my_gene_names, my_samples, my_X):
    #genes = ['a', 'b', 'c', 'd']
    #samples = ['s1', 's2', 's3']
    # X = np.arange(12, dtype=np.float64).reshape(4, 3)
    matrix = ExpMatrix(genes=my_gene_names, samples=my_samples, X=my_X)
    return matrix
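
# --- Toy instantiation (illustrative; mirrors the fixed-value helper in
# Example #4, but with the gene names, sample names, and data passed in). ---
import numpy as np

toy = my_matrix(['a', 'b', 'c', 'd'],
                ['s1', 's2', 's3'],
                np.arange(12, dtype=np.float64).reshape(4, 3))
# toy is an ExpMatrix with 4 genes (rows) and 3 samples (columns)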