Code example #1
0
File: util.py  Project: smdabdoub/find
def isolateClusters(selection, datasetName):
    """
    Create a new data set from a selection of clusters.
    
    @type selection: list
    @param selection: The indices of the clusters to be merged.
    @type datasetName: str
    @param datasetName: The display name for the new data set.
        If empty, the current data set's display name is reused.
    """
    # Nothing to do for an empty selection.
    if not selection:
        return
    currFCData = DataStore.getCurrentDataSet()
    if not datasetName:
        # fall back to the parent data set's display name
        datasetName = currFCData.displayname
    # partition the rows of the current data set by cluster assignment
    clusters = separate(currFCData.data, currFCData.getCurrentClustering())
    # merge selected clusters into a single data array
    newData = dh.mergeData(selection, clusters)
    # assign new data set to the store, linked to its parent by ID
    newFCData = FacsData('', currFCData.labels, newData, parent=currFCData.ID)
    newFCData.displayname = datasetName
    newFCData.selDims = currFCData.selDims
    
    # add basic text annotations recording provenance and size
    textAnn = {'parent': currFCData.displayname}
    textAnn['events'] = len(newFCData.data)
    newFCData.annotations['text'] = textAnn
    
    DataStore.add(newFCData)
Code example #2
0
File: bakker_schut.py  Project: smdabdoub/find
def merge(limit, ids, clusters, dist=None, minpair=None, newID=None):
    """
    Take a set of clusters and iteratively merge them together based on some 
    distance metric until the lower limit is reached.
    
    :@type limit: int
    :@param limit: The target number of clusters.
    :@type ids: list or array
    :@param ids: A list containing the cluster ID of each row of data.
    :@type clusters: list or dict
    :@param clusters: A list of arrays (or a dict mapping cluster ID to
        array), each containing the rows of data belonging to one cluster.
    :@type dist: 2D array
    :@param dist: The pairwise cluster distance matrix carried over from the
        previous recursive step, or None on the first call.
    :@type minpair: tuple
    :@param minpair: The pair of cluster IDs merged in the previous step.
    :@type newID: int
    :@param newID: The ID assigned to the cluster created in the previous step.
    """
    # Normalize the cluster collection to a dict keyed by cluster ID so
    # clusters can be deleted/added by ID during merging.
    if not isinstance(clusters, dict):
        clusters = dict(enumerate(clusters))

    # Base case: few enough clusters remain; compact the (possibly sparse)
    # cluster IDs down to a contiguous 0..k-1 range and return.
    if len(clusters) <= limit:
        unique = np.unique(ids)
        remap = dict(zip(unique, range(len(unique))))
        # reassign cluster ids to a contiguous range
        for i, cid in enumerate(ids):
            ids[i] = remap[cid]
        return ids

    # Calculate the distance matrix from scratch on the first pass; on later
    # passes incrementally update it for the cluster created last step.
    if dist is None:
        dist, minpair = distMatrix(clusters)
    else:
        dist, minpair = updateDistMatrix(clusters, dist, minpair, newID)

    # combine the two most similar clusters
    merged = dh.mergeData(minpair, clusters)

    # Remove the merged pair and register the combined cluster under a
    # fresh ID guaranteed not to collide with any existing one.
    for cid in minpair:
        del clusters[cid]
    newID = np.max(ids) + 1
    clusters[newID] = merged

    # Reassign rows that belonged to either merged cluster to the new ID.
    for i, cid in enumerate(ids):
        if cid in minpair:
            ids[i] = newID

    return merge(limit, ids, clusters, dist, minpair, newID)