Code example #1
0
def cp_colocalization(r, first, second, threshold=0.15, costes=False):
    """Compute colocalization statistics between two image channels.

    Returns a list ``[overlap, K1, K2, M1, M2, RWC1, RWC2]`` and, when
    ``costes`` is True, two extra Costes coefficients appended at the end.
    Returns ``np.nan`` when the first channel has no positive pixels.

    References:
    http://www.scian.cl/archivos/uploads/1417893511.1674 starting at slide 35
    Singan et al. (2011) "Dual channel rank-based intensity weighting for
    quantitative co-localization of microscopy images",
    BMC Bioinformatics, 12:407.
    """
    ch1 = masked(r, first).astype(float)
    ch2 = masked(r, second).astype(float)

    positive = ch1 > 0
    if positive.sum() == 0:
        # no signal in the first channel -- nothing to measure
        return np.nan

    ch1 = ch1[positive]
    ch2 = ch2[positive]

    # per-channel intensity thresholds as a fraction of each channel maximum
    t1 = threshold * ch1.max()
    t2 = threshold * ch2.max()
    total1 = ch1[ch1 > t1].sum()
    total2 = ch2[ch2 > t2].sum()

    # pixels above threshold in BOTH channels
    both = (ch1 > t1) & (ch2 > t2)
    a = ch1[both]
    b = ch2[both]

    cross = (a * b).sum()
    overlap = cross / np.sqrt((a**2).sum() * (b**2).sum())
    k1 = cross / (a**2).sum()
    k2 = cross / (b**2).sum()

    # Manders coefficients: fraction of above-threshold intensity that overlaps
    m1 = a.sum() / total1
    m2 = b.sum() / total2

    # rank-weighted colocalization (Singan et al. 2011)
    ranks1 = rankdata(ch1, method='dense')
    ranks2 = rankdata(ch2, method='dense')
    top_rank = max(ranks1.max(), ranks2.max())
    weight = ((top_rank - abs(ranks1 - ranks2)) / top_rank)[both]
    rwc1 = (a * weight).sum() / total1
    rwc2 = (b * weight).sum() / total2

    results = [overlap, k1, k2, m1, m2, rwc1, rwc2]

    if costes:
        # automatic thresholds via the Costes method (project helper)
        c1_thresh, c2_thresh = costes_threshold(ch1, ch2)
        above_both = (ch1 > c1_thresh) & (ch2 > c2_thresh)
        results.append(ch1[above_both].sum() / ch1[ch1 > c1_thresh].sum())
        results.append(ch2[above_both].sum() / ch2[ch2 > c2_thresh].sum())

    return results
Code example #2
0
def lstsq_slope(r, first, second):
    """Least-squares slope of channel `second` regressed on channel `first`.

    Fits ``B ~ slope * A + intercept`` over the pixels where the first
    channel is positive and returns the slope; returns ``np.nan`` when the
    first channel has no positive pixels.
    """
    x = masked(r, first)
    y = masked(r, second)

    keep = x > 0
    if keep.sum() == 0:
        return np.nan

    x = x[keep]
    y = y[keep]

    # design matrix [x, 1] for an affine fit; [0][0] picks out the slope
    design = np.vstack([x, np.ones(len(x))]).T
    return np.linalg.lstsq(design, y, rcond=-1)[0][0]
Code example #3
0
    return np.multiply(image,mask)

def mahotas_zernike(r, channel, radius=9, degree=9):
    """Zernike moment features of the masked bounding-box image of `channel`.

    Parameters
    ----------
    r : region object passed through to `masked_rect`
    channel : int
        Channel index to extract.
    radius, degree : int, optional
        Radius and maximum degree of the Zernike polynomials. The defaults
        (9, 9) reproduce the previously hard-coded behavior, so existing
        callers are unaffected.

    Returns
    -------
    Feature vector produced by ``mahotas.features.zernike_moments``.
    """
    image = masked_rect(r, channel)
    # mahotas expects an integer image; cast as before
    return mahotas.features.zernike_moments(image.astype(np.uint32),
                                            radius=radius, degree=degree)

def mahotas_pftas(r, channel):
    """Parameter-free threshold adjacency statistics (PFTAS) for `channel`.

    Reported to be at least as good as Haralick/Zernike features and much
    faster:
    https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-8-110
    """
    rect = masked_rect(r, channel)
    return mahotas.features.pftas(rect.astype(np.uint32))

features_nuclear = {
    'dapi_nuclear_min': lambda r: np.min(masked(r,0)),
    'dapi_nuclear_25': lambda r: np.percentile(masked(r, 0),25),
    'dapi_nuclear_mean': lambda r: masked(r, 0).mean(),
    'dapi_nuclear_median': lambda r: np.median(masked(r, 0)),
    'dapi_nuclear_75': lambda r: np.percentile(masked(r, 0),75),
    'dapi_nuclear_max'   : lambda r: masked(r, 0).max(),
    'dapi_nuclear_int'   : lambda r: masked(r, 0).sum(),
    'dapi_nuclear_sd': lambda r: np.std(masked(r,0)),
    'dapi_nuclear_mad': lambda r: median_absolute_deviation(masked(r,0),scale=1),
    # 'dapi_zernike_nuclear': lambda r: mahotas_zernike(r,0),
    # 'dapi_pftas_nuclear': lambda r: mahotas_pftas(r,0),
    'dm1a_nuclear_min': lambda r: np.min(masked(r,1)),
    'dm1a_nuclear_25': lambda r: np.percentile(masked(r, 1),25),
    'dm1a_nuclear_mean' : lambda r: masked(r, 1).mean(),
    'dm1a_nuclear_median' : lambda r: np.median(masked(r, 1)),
    'dm1a_nuclear_75': lambda r: np.percentile(masked(r, 1),75),