Example #1
def write_gene_model_results(data, organ, strain, path, params, metrics, smoothExprs, bDEGUp, 
                             bDEGDown, xTestInterp, xTestDEG):  
    """
    Write all the results from fitting gene_model() and calling differential expression to disk
    This is a helper function to shorten code in the main script

    Arguments
    =========
    data - clean pandas data frame of the microarray data
    organ - Blood/Spleen 
    strain - AS/CB
    path - a structured dictionary of paths returned by config
    params - numpy array of [rbfVar, rbfScale, noiseVar]
    metrics - numpy array of [maxLogFC, SNR, score, netLogFC, rank]
    smoothExprs - numpy array of interpolated (smoothed) time-series expression
    bDEGUp/bDEGDown - boolean numpy arrays flagging up-/down-regulated probes per test day
    xTestInterp - numpy array of time points corresponding to the columns of smoothExprs
    xTestDEG - numpy array of days at which differential expression is called
    
    Returns
    =========
    None - data saved to disk

    """     
    # Write GP params to disk
    df = pd.DataFrame(data=params, columns=['rbfVar', 'rbfScale', 'noiseVar'])
    df['ProbeID'] = data['ProbeID']; df['Symbol'] = data['Symbol']
    df.to_csv(os.path.join(path['GPFit']['Params'], organ + strain + '.csv'), index=False)
    
    # Write GP time-series metrics to disk
    df = pd.DataFrame(data=metrics, columns=['maxLogFC', 'SNR', 'score', 'netLogFC', 'rank'])
    df['ProbeID'] = data['ProbeID']; df['Symbol'] = data['Symbol']
    df.to_csv(os.path.join(path['GPFit']['Metrics'], organ + strain + '.csv'), index=False)
    
    # Write smooth expression to disk
    df = pd.DataFrame(data=smoothExprs, columns=list(map(str, xTestInterp.flatten())))
    df['ProbeID'] = data['ProbeID']; df['Symbol'] = data['Symbol']
    df.to_csv(os.path.join(path['GPFit']['SmoothExprs'], organ + strain + '.csv'), index=False)
    
    # Write DEG results to disk
    colName = ['Day%d' % j for j in xTestDEG]  
    # Up regulated
    df = pd.DataFrame(data=bDEGUp, columns=colName)
    df['ProbeID'] = data['ProbeID']; df['Symbol'] = data['Symbol']
    df.to_csv(os.path.join(path['GPFit']['DEG'], 'b' + organ + strain + 'Up.csv'), index=False)        
    # Down regulated    
    df = pd.DataFrame(data=bDEGDown, columns=colName)
    df['ProbeID'] = data['ProbeID']; df['Symbol'] = data['Symbol']
    df.to_csv(os.path.join(path['GPFit']['DEG'], 'b' + organ + strain + 'Down.csv'), index=False)  
    
    # Get gene list per day and store those
    geneUp = []
    geneDown = []        
    for j in range(bDEGUp.shape[1]):
        geneUp.append(data.loc[bDEGUp[:, j], 'Symbol'])  
        geneDown.append(data.loc[bDEGDown[:, j], 'Symbol']) 
        
    # Write gene lists to csv
    io.write_list_to_csv(os.path.join(path['GPFit']['DEG'], organ + strain + "Up.csv"), colName, geneUp)
    io.write_list_to_csv(os.path.join(path['GPFit']['DEG'], organ + strain + "Down.csv"), colName, geneDown)
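The io.write_list_to_csv helper used above is assumed to write ragged per-day (or per-cluster) lists as CSV columns of unequal length. A minimal sketch under that assumption, padding shorter columns with blanks:

import csv
from itertools import zip_longest

def write_list_to_csv(filePath, header, listOfLists):
    """Hypothetical sketch of io.write_list_to_csv: one CSV column per entry of
    listOfLists, padded with blanks so every row has the same width."""
    with open(filePath, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for row in zip_longest(*listOfLists, fillvalue=''):
            writer.writerow(row)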
Example #2
def write_MOHGP_results(organ, strain, path, fit, geneID):
    """
    A helper function akin to 'write_gene_model_results' to save MOHGP results to disk

    Arguments
    =========
    organ - Blood/Spleen
    strain - AS/CB
    path - a structured dictionary of paths returned by config
    fit - a fitted Mixture of Hierarchical Gaussian Processes (MOHGP) model
    geneID - a pandas data frame of ProbeID/Symbol rows ordered to match the rows of fit
            NOTE: this order is different from the probesToCluster order
        
    Returns
    =========
    None - data saved to disk

    """
    # Extract the cluster assigned to each probe
    clustNum = np.argmax(fit.phi, axis=1) + 1 # cluster number
    clustName = [strain + '_' + organ[:2] + '_%02d' % i for i in clustNum] # cluster name
    geneID['Cluster'] = clustName # add to data frame
    
    # Extract the gene and probe list
    geneList = []; probeList = []; header = []
    for name in np.unique(clustName):
        bWant = geneID['Cluster'] == name
        geneList.append(list(geneID.loc[bWant, 'Symbol']))
        probeList.append(list(geneID.loc[bWant, 'ProbeID']))
        header.append(name)
   
    # Save to disk
    io.write_list_to_csv(os.path.join(path['Clust']['GeneList'], organ + strain + '.csv'), 
                         header, geneList)
    io.write_list_to_csv(os.path.join(path['Clust']['ProbeList'], organ + strain + '.csv'), 
                         header, probeList) # Probe list 
    
    # Save model and standard plot    
    io.save_pickle(os.path.join(path['Clust']['Model'], organ + strain + ".pickle"), fit)
    io.save_pdf(os.path.join(path['Clust']['Plot'], organ + strain + ".pdf"), standard_plot(fit))
    
    # Compute cluster predictions for xTest where xTest is taken from SmoothExprs
    data = pd.read_csv(os.path.join(path['GPFit']['SmoothExprs'], organ + strain + ".csv"))  
    xTest = data.drop(['ProbeID', 'Symbol'], axis=1).columns.values.astype('float64')[:, None]
    mu, var = fit.predict_components(xTest) # Compute posterior mean and posterior variance
    # Write to disk (mu row ordering is biggest to smallest cluster)
    df = pd.DataFrame(data=np.array(mu), columns=list(map(str, xTest.flatten())))
    df['Cluster'] = header # header = cluster name
    df.to_csv(os.path.join(path['Clust']['Centres'], organ + strain + '.csv'), index=False)
    clustCentre = df # for readability
    
    # Merge smooth expression data frame with gene ID
    smoothExprs = pd.merge(geneID, data, how='left', on=['ProbeID', 'Symbol'])
    
    # Produce alternate plot
    hFig = alternate_plot(smoothExprs, clustCentre, config.COL[organ])    
    io.save_pdf(os.path.join(path['Clust']['Plot'], organ + strain + '2.pdf'), hFig)
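For reference, the hard cluster assignment above comes from the posterior responsibility matrix fit.phi (one row per probe, one column per cluster). A self-contained illustration of the argmax-plus-naming step, using a made-up 3x3 responsibility matrix:

import numpy as np

# Illustrative responsibilities only; a real fit.phi has one row per clustered probe
phi = np.array([[0.9, 0.1, 0.0],
                [0.2, 0.7, 0.1],
                [0.1, 0.1, 0.8]])
clustNum = np.argmax(phi, axis=1) + 1             # 1-based hard assignment
clustName = ['AS_Bl_%02d' % i for i in clustNum]  # e.g. strain='AS', organ='Blood'
print(clustName)                                  # ['AS_Bl_01', 'AS_Bl_02', 'AS_Bl_03']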
Example #3
def fit_plot_save(k, smoothExprs, day, probeID, geneSymbol, organ, strain, path):
    """
    Fit k-means, plot and save results

    Arguments
    =========
    k - number of clusters
    smoothExprs - gene expression matrix, rows = genes, columns = days
    day - numpy array of days corresponding to the columns of smoothExprs
    probeID - array of probe IDs, one per row of smoothExprs
    geneSymbol - array of gene symbols, one per row of smoothExprs
    organ - Blood/Spleen
    strain - AS/CB
    path - a structured dictionary of paths returned by config
    
    Returns
    =========
    None - results are plotted and saved

    """
    model = KMeans(n_clusters=k)
    model.fit(smoothExprs)
    clustCentre = model.cluster_centers_
    # Plot results
    plot_silhouette(silhouette_samples(smoothExprs, model.labels_), model.labels_)
    clust.multi_plot(smoothExprs, clustCentre, day, model.labels_)
    # Hierarchical clustering
    # Ward + Euclidean
    header = ["Cluster%i" % label for label in np.unique(model.labels_)]    
    hclust = hc.linkage(clustCentre, method='ward', metric='euclidean')
    plt.figure(); plt.title("Hclust() Ward + Euclidean")
    hc.dendrogram(hclust, color_threshold=0.0, labels=header)
    #seed=101
    #embedding = tsne.tsne(smoothExprs, no_dims = 3, initial_dims = 20, perplexity = 30.0, seed=seed) # low dimensional embedding
    #tsne.plot(embedding, model.labels_)

    # Save model 
    io.save_pickle(os.path.join(path['Clust']['Model'], organ + strain + ".pickle"), model)
    # Save Gene/Probe List    
    geneList = clust.get_gene_list(model.labels_, geneSymbol)
    probeList = clust.get_gene_list(model.labels_, probeID)
    io.write_list_to_csv(os.path.join(path['Clust']['GeneList'], organ + strain + ".csv"), header, geneList) # Gene list 
    io.write_list_to_csv(os.path.join(path['Clust']['ProbeList'], organ + strain + ".csv"), header, probeList) # Probe list    
    # Save Cluster "centres"    
    dataMatrix = np.hstack((np.array(header)[:, None], clustCentre)) 
    header = list(itertools.chain.from_iterable([["Cluster"], list(day)]))    
    io.write_to_csv(os.path.join(path['Clust']['Centres'], organ + strain + ".csv"), header, dataMatrix) # Cluster "centres"   
    # Save Alternate plot     
    hFig = clust.multi_plot(smoothExprs, clustCentre, day, model.labels_)    
    io.save_pdf(os.path.join(path['Clust']['Plot'], organ + strain + "2.pdf"), hFig) # Plot 
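The clust.get_gene_list helper used above is assumed to group gene symbols (or probe IDs) by cluster label, returning one list per cluster in ascending label order. A minimal sketch under that assumption:

import numpy as np

def get_gene_list(labels, geneSymbol):
    """Hypothetical sketch of clust.get_gene_list: one list of symbols per
    unique cluster label, in ascending label order."""
    labels = np.asarray(labels)
    geneSymbol = np.asarray(geneSymbol)
    return [list(geneSymbol[labels == label]) for label in np.unique(labels)]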
Example #4
def merge(organ, strain, groupToMerge, groupLabel, originalLabel, path):
    """
    Merge clusters (modules) into larger groups and save the merged gene/probe
    lists, cluster centres and plots to disk

    Arguments
    =========
    organ - Blood/Spleen
    strain - AS/CB
    groupToMerge - list of lists of original cluster numbers to merge e.g. [[1, 2], [3]]
    groupLabel - list of unique labels for the merged groups e.g. ["A", "B"]
    originalLabel - numpy array of original cluster labels, one per clustered probe
    path - a structured dictionary of paths returned by config

    Returns
    =========
    newLabel - numpy array of merged group labels ("A", "B", "C", ...) aligned with originalLabel

    """
    # Load gene/probe list
    oldGeneList = pandas.read_csv(os.path.join(path['Clust']['GeneList'], organ + strain + ".csv"), sep=",")
    oldProbeList = pandas.read_csv(os.path.join(path['Clust']['ProbeList'], organ + strain + ".csv"), sep=",")     
    # Initialise vars
    NGroup = len(groupToMerge)
    newLabel = originalLabel.astype(object) # object dtype so numeric labels can be overwritten with string group labels
    newGeneList = []
    newProbeList = []        
    for iGroup in xrange(NGroup):
        tempGeneList = []
        tempProbeList = []
        for label in groupToMerge[iGroup]:
            newLabel[originalLabel == label] = groupLabel[iGroup]  
            bWant = ~pandas.isnull(oldGeneList['Cluster' + str(label)]) # some entries could be NaN 
            tempGeneList.append(np.array(oldGeneList['Cluster' + str(label)][bWant]))
            tempProbeList.append(np.array(oldProbeList['Cluster' + str(label)][bWant]))
        newGeneList.append(list(itertools.chain.from_iterable(tempGeneList)))
        newProbeList.append(list(itertools.chain.from_iterable(tempProbeList)))
    
    # Save Gene/Probe List    
    header = ["Cluster%s" % label for label in groupLabel]
    io.write_list_to_csv(os.path.join(path['ClustMerge']['GeneList'], organ + strain + ".csv"), header, newGeneList)
    io.write_list_to_csv(os.path.join(path['ClustMerge']['ProbeList'], organ + strain + ".csv"), header, newProbeList)      
    
    # Retrieve old clust centres
    data = pandas.read_csv(os.path.join(path['Clust']['Centres'], organ + strain + ".csv"), sep=",")  
    #oldClustCentre = data.values[:, 1:] # pick only the centres
    day = data.columns.values[1:].astype('float')      
    
    # Get smooth exprs 
    data = pandas.read_csv(os.path.join(path['GPFit']['SmoothExprs'], organ + strain + ".csv"), sep=",")
    #bSelect = top_ranked(organ, strain, len(originalLabel), path) # 29/03/16 not applicable anymore as I'm choosing COMMON gene sets using clust.common_ranked()   
    allProbeID = np.array(data['ProbeID'])
    wantedProbeID = np.array(list(itertools.chain.from_iterable(newProbeList)))
    bSelect = np.array([allProbeID[i] in wantedProbeID for i in xrange(len(allProbeID))]) # boolean vector: is each probe in the merged (wanted) probe set?
    smoothExprs = data.drop(['ProbeID', 'Symbol'], axis=1).values.astype('float')[bSelect, :] # select expression columns by name, robust to column order
   
    # Get the new label of each old cluster centre, e.g. 1, 2, 3, 4, 5 --> 'A', 'B', 'A', 'C', 'B'
    # Rather clumsy - a dictionary mapping would be cleaner (see the sketch after this function)
    newLabelOldClustCentre = np.empty((len(np.unique(originalLabel))), dtype='str')
    for iGroup in xrange(NGroup):
        for label in groupToMerge[iGroup]: # I know that label is numeric, else it would fail
            newLabelOldClustCentre[label-1] = groupLabel[iGroup] # -1 as "I" start counting from 1
    
    # Naively take the mean
    clustCentre = np.empty((len(groupLabel), len(day)))
    for i, label in enumerate(groupLabel):
        clustCentre[i, :] = np.mean(smoothExprs[newLabel==label, :], axis=0)
    
#    #Using GPR was creating numerical issues so now (naively) I'm taking the mean         
#    #Compute clust centres (should research into doing this "properly" i.e using MOHGP, but coz for now I'm only
#    #interested in gene symbols it should be fine) 
#    clustCentre = np.empty((len(groupLabel), len(day)))
#    for i, label in enumerate(np.unique(newLabel)):
#        thisClustCentre = oldClustCentre[newLabelOldClustCentre == label, :]        
#        if sum(newLabelOldClustCentre == label) == 1:
#            clustCentre[i, :] = thisClustCentre # no need to GPR
#        else:         
#            xTrain = np.tile(day, sum(newLabelOldClustCentre == label)).flatten()[:, None]
#            yTrain = thisClustCentre.flatten()[:, None]
#            fit = gpr.fit(xTrain, yTrain)        
#            mu, var = fit.predict(day[:, None]) 
#            clustCentre[i, :] = mu.T
          
    # Save Cluster "centres"    
    dataMatrix = np.hstack((np.array(header)[:, None], clustCentre)) # using header from Save Gene/Probe List
    header = list(itertools.chain.from_iterable([["Cluster"], list(day)]))    
    io.write_to_csv(os.path.join(path['ClustMerge']['Centres'], organ + strain + ".csv"), header, dataMatrix) # Cluster "centres"   
    # Save Alternate plot         
    hFig = multi_plot(smoothExprs, clustCentre, day, newLabel)    
    io.save_pdf(os.path.join(path['ClustMerge']['Plot'], organ + strain + "2.pdf"), hFig) # Plot  
    
    return newLabel    
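As noted in the comments above, the old-to-new label re-mapping can be expressed more cleanly with a dictionary. A minimal sketch of that alternative, using illustrative groupToMerge/groupLabel values:

import numpy as np

# Illustrative values: old clusters 1 and 2 merge into 'A', cluster 3 becomes 'B'
groupToMerge = [[1, 2], [3]]
groupLabel = ['A', 'B']
labelMap = {old: new for group, new in zip(groupToMerge, groupLabel) for old in group}
originalLabel = np.array([1, 3, 2, 1, 3])
newLabel = np.array([labelMap[label] for label in originalLabel])
print(newLabel)  # ['A' 'B' 'A' 'A' 'B']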
Example #5
def MOHGP(probesToCluster, organ, strain, prefix, K, alpha, path, seed=0):
    """
    Fit a Mixture of Hierarchical Gaussian Processes (MOHGP) to the selected probes
    and save the model, gene/probe lists, cluster centres and plots to disk

    Arguments
    =========
    probesToCluster - a set of unique probeIDs to cluster
    organ - blood/spleen
    strain - AS/CB
    prefix - all/common/only
    K - init no. of clusters
    alpha - concentration parameter/strength parameter of the Dirichlet Process Prior
    path - dictionary with all results paths
    seed - to reproduce results due to multiple local optima
    
    Returns
    =========
    None - a Mixture of Hierarchical Gaussian Process model is fitted and saved to disk

    """
    # To reproduce results
    np.random.seed(seed) 
    # Load gene expression data
    data = pandas.read_csv(os.path.join(path['RawData']['Log2FC'], organ + strain + ".csv"), sep=",") # read data
    probeID = np.array(data['ProbeID'])        
    yTrain = data.values[:, 2:].astype('float') # 45,281 genes x S samples              
    xTrain = np.floor(data.columns.values[2:].astype('float64'))[:, None] # floor to get int 0, 0, 2, 2, ..., 12    
    # Subset the data by keeping only probesToCluster
    bWant = np.array([probeID[i] in probesToCluster for i in xrange(len(probeID))]) # simply creates a vector of T, F, T        
    yTrain = yTrain[bWant, :]         
    probeID = np.array(data['ProbeID'][bWant])
    geneSymbol = np.array(data['GeneSymbol'][bWant])
    
    # MOHGP fitting
    # Define the covariance functions for the hierarchical GP structure
    # The model of any cluster of genes has a hierarchical structure, with the unknown cluster-specific 
    # mean drawn from a GP, and then each gene in that cluster being drawn from a GP with said unknown mean function.
    # Covariance function for the latent function that describes EACH cluster. 
    covFunCluster = GPy.kern.RBF(input_dim=1, variance=np.var(yTrain.ravel()), lengthscale=LENGTHSCALE) 
    # Covariance function that describes how EACH time-course (gene) deviates from the cluster
    covFunGene = GPy.kern.RBF(input_dim=1, variance=np.var(yTrain.ravel())/10, lengthscale=LENGTHSCALE) + \
                 GPy.kern.White(1, variance=NOISE_VARIANCE)
    # Set-up the clustering problem NB: For large alpha P resembles Po (i.e the base distribution)
    fit = GPclust.MOHGP(X=xTrain, kernF=covFunCluster, kernY=covFunGene, Y=yTrain, K=K, prior_Z='DP', alpha=alpha)   
    # Constrain lengthscales (to avoid very short lengthscales as per Topa et al. (2012) on arXiv)
    fit.rbf.lengthscale.constrain_bounded(LOWER_BOUND_LENGTHSCALE, UPPER_BOUND_LENGTHSCALE, warning=False)
    fit.add.rbf.lengthscale.constrain_bounded(LOWER_BOUND_LENGTHSCALE, UPPER_BOUND_LENGTHSCALE, warning=False)    
    fit.hyperparam_opt_interval = 1000 # how often to optimize the hyperparameters
    # Optimise hyperparameters
    fit.optimize()
    fit.systematic_splits(verbose=False)
    # Name and reorder fit    
    fit.name = prefix + organ + strain
    fit.reorder()
    labels = np.argmax(fit.phi, axis=1) + 1 # cluster number    
    
    # Compute cluster prediction for xTest where xTest is taken from SmoothExprs
    data = pandas.read_csv(os.path.join(path['GPFit']['SmoothExprs'], organ + strain + ".csv"), sep=",") # read data    
    smoothExprs = data.drop(['ProbeID', 'Symbol'], axis=1).values.astype('float')[bWant, :] # select expression columns by name, robust to column order
    xTest = data.drop(['ProbeID', 'Symbol'], axis=1).columns.values.astype('float64')[:, None]
    mu, var = fit.predict_components(xTest)
    clustCentre = np.empty((len(mu), xTest.shape[0]))
    for iClust in xrange(len(mu)):
        clustCentre[iClust, :] = mu[iClust]
    
    # Save model and plot    
    io.save_pickle(os.path.join(path['Clust']['Model'], prefix + organ + strain + ".pickle"), fit)
    io.save_pdf(os.path.join(path['Clust']['Plot'], prefix + organ + strain + ".pdf"), plot(fit))
    # Save Gene/Probe List    
    geneList = get_gene_list(labels, geneSymbol)
    probeList = get_gene_list(labels, probeID)
    header = ["Cluster%i" % label for label in np.unique(labels)]
    io.write_list_to_csv(os.path.join(path['Clust']['GeneList'], prefix + organ + strain + ".csv"), header, geneList) # Gene list 
    io.write_list_to_csv(os.path.join(path['Clust']['ProbeList'], prefix + organ + strain + ".csv"), header, probeList) # Probe list    
    # Save Cluster "centres"    
    dataMatrix = np.hstack((np.array(header)[:, None], clustCentre)) 
    header = list(itertools.chain.from_iterable([["Cluster"], list(xTest.ravel())]))    
    io.write_to_csv(os.path.join(path['Clust']['Centres'], prefix + organ + strain + ".csv"), header, dataMatrix) # Cluster "centres"   
    # Save Alternate plot     
    hFig = multi_plot(smoothExprs, clustCentre, xTest, labels)    
    io.save_pdf(os.path.join(path['Clust']['Plot'], prefix + organ + strain + "2.pdf"), hFig) # Plot 
    # Word cloud
    #vis.word_cloud_plot(organ, strain, prefix, path)
    # Heatmap
    vis.heatmap_plot_by_clusters(organ, strain, prefix, path)
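A hedged usage sketch of the function above; the probe IDs, K and alpha are illustrative values only, and path is assumed to be the structured dictionary returned by config:

# Hypothetical probe IDs and settings, for illustration only
probesToCluster = {'ILMN_0000001', 'ILMN_0000002', 'ILMN_0000003'}
MOHGP(probesToCluster, organ='Blood', strain='AS', prefix='common',
      K=10, alpha=1.0, path=path, seed=0)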