Example #1
import time

import numpy as np


# Assumes the project-local USER (parameter) and VIS (progress display)
# modules, plus the _GetFilters/_ApplyFilter helpers defined alongside.
def _Preprocess2(code, img, ker):
    stpwch = time.time()
    VIS._ProgressBar(0,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='Initializing...')

    ## Initialization ##
    sz = np.shape(img)[2:]  # Get the true size of the image #

    # Inflated size #
    sz_off = int(
        np.ceil(2 * np.max([USER.PRE_BG, USER.PRE_NS, USER.PRE_LT]) /
                USER.RES[0]))  # 2 sigma #
    sz_ = (sz[0] + 2 * sz_off, sz[1] + 2 * sz_off)

    # Construct filters for both sizes - ker will use the original sizes #
    h_bg, h_ns, h_lt = _GetFilters(sz)
    h_bg_, h_ns_, h_lt_ = _GetFilters(sz_)

    # Place the image in the inflated array #
    im = np.zeros([*np.shape(img)[:2], *sz_])
    im[:, :, sz_off:-sz_off, :][:, :, :, sz_off:-sz_off] = img

    # Smooth out the edges #
    for i in range(sz_off + 1):
        fwd = (np.sin((np.pi / 2) * (i / sz_off)) + 1) / 2
        rwd = 1 - fwd

        im[:, :, i, :] = im[:, :, sz_off, :] * fwd + im[:, :, -sz_off - 2, :] * rwd
        im[:, :, :, i] = im[:, :, :, sz_off] * fwd + im[:, :, :, -sz_off - 2] * rwd
        im[:, :, -i - 1, :] = im[:, :, sz_off, :] * rwd + im[:, :, -sz_off - 2, :] * fwd
        im[:, :, :, -i - 1] = im[:, :, :, sz_off] * rwd + im[:, :, :, -sz_off - 2] * fwd

    ## Filtering ##
    # Kernel #
    VIS._ProgressBar(1,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='Filtering Kernel...')

    ker_bg = ker - _ApplyFilter(ker, h_bg)
    ker_ns = _ApplyFilter(ker_bg, h_ns)
    ker_ = ker_ns * np.ptp(ker) / np.ptp(ker_ns)  # Renormalize #

    # Inflated Image #
    VIS._ProgressBar(2,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='BG Subtraction...  ')

    img_bg = im - _ApplyFilter(im, h_bg_)
    VIS._ProgressBar(7,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='Noise Suppression...')

    img_ns = _ApplyFilter(img_bg, h_ns_)
    VIS._ProgressBar(12,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='Temporal Smoothing...')

    if ((np.shape(img)[0] > USER.PRE_TS // 10 + 1) and (USER.PRE_TS > 0)):
        for f in range(np.shape(img)[0]):  # Temporal smoothing #
            lft = np.maximum(0, f - USER.PRE_TS // 2)
            rgt = np.minimum(f + USER.PRE_TS // 2, np.shape(img)[0])
            img_ts = np.mean(img_ns[lft:rgt, ...], axis=0)
            img_ns[f] = img_ns[f] - img_ts * np.ptp(img_ts) / np.ptp(img_ns[f])
            VIS._ProgressBar(12 + 4 * f / np.shape(img)[0],
                             20,
                             prefix='(%s):\t%8.3f sec' %
                             (code, time.time() - stpwch),
                             suffix='Temporal Smoothing...')

    img_ = img_ns * np.ptp(img) / np.ptp(img_ns)  # Renormalize #
    img_ = img_[:, :, sz_off:-sz_off, :][:, :, :, sz_off:-sz_off]  # Truncate #

    ## Threshold ##
    VIS._ProgressBar(16,
                     20,
                     prefix='(%s):\t%8.3f sec' % (code, time.time() - stpwch),
                     suffix='Obtaining Threshold...')

    eps_mu = _ApplyFilter(im.astype(np.float32), h_lt_)
    eps_var = _ApplyFilter(im.astype(np.float32)**2, h_lt_) - eps_mu**2

    # Compare statistics of the variance and the mean #
    rat_min = np.minimum(np.min(eps_var) / np.min(eps_mu), 1)
    rat_avg = np.minimum(np.mean(eps_var) / np.mean(eps_mu), 1)
    rat_max = np.minimum(np.max(eps_var) / np.max(eps_mu), 1)

    # Use to weight the mean #
    eps_exp = np.sqrt(rat_min**2 + rat_avg**2 + rat_max**2)
    eps_global = 0.0  #np.mean(img)**2/np.var(img)
    eps_ = np.sqrt(
        np.maximum(eps_mu**eps_exp - 1, 0) + 2 * eps_var + eps_global)
    eps_ = eps_[:, :, sz_off:-sz_off, :][:, :, :, sz_off:-sz_off]  # Truncate #

    ## Output ##
    prefix = '(%s):\t%8.3f sec' % (code, time.time() - stpwch)
    VIS._ProgressBar(20, 20, prefix=prefix, suffix='Finished!             ')

    return img_, ker_, eps_
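
The edge-smoothing loop above is easy to miss: it crossfades each margin row and column between the two opposing image edges, so the inflated array has no hard border before filtering. Below is a minimal standalone sketch of that step with the batch and channel axes dropped; the name pad_with_crossfade and the toy shapes are illustrative, not part of the original module.

import numpy as np

def pad_with_crossfade(img2d, off):
    # Embed a 2D image in an array inflated by `off` on every side, then
    # blend each margin row/column between the opposing image edges with a
    # half-sine ramp, mirroring the loop in _Preprocess2.
    h, w = img2d.shape
    out = np.zeros((h + 2 * off, w + 2 * off))
    out[off:-off, off:-off] = img2d
    for i in range(off + 1):
        fwd = (np.sin((np.pi / 2) * (i / off)) + 1) / 2
        rwd = 1 - fwd
        out[i, :] = out[off, :] * fwd + out[-off - 2, :] * rwd
        out[:, i] = out[:, off] * fwd + out[:, -off - 2] * rwd
        out[-i - 1, :] = out[off, :] * rwd + out[-off - 2, :] * fwd
        out[:, -i - 1] = out[:, off] * rwd + out[:, -off - 2] * fwd
    return out

padded = pad_with_crossfade(np.random.rand(32, 32), off=8)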
Example #2
def _Identify(pos, wgt, img, *, code, vis=False):
	## Initialization ##
	F = len(wgt)
	clouds = list()

	# Progress #
	stpwch = time.time()
	timers = np.zeros((F))
	tru = OP._LoadTruth(code)			

	## Cluster points in each frame ##
	for f in range(F):
		if(len(wgt[f]) == 0): continue	# Check if there are any points in this frame #

		# Create a point cloud based on the points #
		pts = np.concatenate([pos[f], wgt[f][:,None]], axis=1)

		# Segment the point cloud to find clusters #
		cloud = PointCloud(pts, seg=True)

		# Weight threshold: discard low-weight clusters before separating #
		clust = _CloudThr(cloud.clust, vis=vis)
		clust = _Separate(clust)

		# Append new clusters to the batch #
		clouds.extend(clust)

		## Visualize ##
		"""
		if(vis):
			pts = np.zeros([0, 3]);
			for p in range(len(tru)):
				if(f in tru[p].frm):
					idx = np.nonzero(f == tru[p].frm)[0]
					pts = np.concatenate((pts, tru[p].res[idx,:]), axis=0)

			plt.figure()
			if(USER.KER_Z > 1):
				ax = plt.axes(position=[0,0,1,1], projection='3d')
				imclr = np.repeat((img/np.max(img))[f,0,:,:,None], 3, axis=2)
				xx, yy = np.meshgrid(range(np.shape(img)[2]), range(np.shape(img)[3]))
				ax.plot_surface(xx*USER.RES[0], yy*USER.RES[1], 0*xx, facecolors=imclr, rstride=8, cstride=8, zorder=-100000)
				for c in clust:
					ax.plot(c.abs[:,0], c.abs[:,1], c.abs[:,2], marker='o', linewidth=0, zorder=1000)
			else:
				ax = plt.axes(position=[0,0,1,1])
				ax.imshow(img[f,0,:,:], cmap='gray')
				for c in clust:
					ax.scatter(c.pos[:,0], c.pos[:,1], s=400*c.wgt, c='b')
				ax.plot(pts[:,0], pts[:,1], color='r', marker='o', linewidth=0, fillstyle='none')
			plt.show()
		"""

		# Progress Display #
		timers[f] = time.time() - stpwch
		if(f > 0):
			prefix = '(%s):\t%8.3f sec' % (code, timers[f])
			suffix = '(Remain: %5.0f sec)' % ((F-(f+1)) * np.mean(np.diff(timers[timers > 0])))
			VIS._ProgressBar(f+1, F, prefix=prefix, suffix=suffix)

	## Output ##
	return clouds
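
PointCloud(pts, seg=True) performs the actual segmentation, and its internals are not part of this listing. As a rough stand-in under that assumption, a density-based clustering such as scikit-learn's DBSCAN groups the per-frame detections in the same spirit; rad and min_pts are invented parameters, not values from the original code.

import numpy as np
from sklearn.cluster import DBSCAN

def cluster_frame(pos_f, wgt_f, rad=2.0, min_pts=3):
    # Rough stand-in for PointCloud(pts, seg=True): append the weights as a
    # fifth column, then group detections by spatial proximity.
    if len(wgt_f) == 0:
        return []
    pts = np.concatenate([pos_f, wgt_f[:, None]], axis=1)  # (x, y, z, t, w)
    labels = DBSCAN(eps=rad, min_samples=min_pts).fit_predict(pts[:, :3])
    return [pts[labels == k] for k in range(labels.max() + 1)]

clusters = cluster_frame(np.random.rand(50, 4) * 10, np.random.rand(50))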
Example #3
def _Stitch(traj, *, code=''):
	## Initialize ##
	P = len(traj)
	links = np.zeros((P, P))

	## Determine Candidates ##
	for p in range(P-1):
		for q in range(p+1, P):
			# Frame distance #
			dist_f = np.abs(traj[q].tail.frm[0] - traj[p].head.frm[0])
			if(USER.TRK_TOL < dist_f): continue

			# l2 distance #
			dist_l2 = np.sum((traj[p].head.m1a - traj[q].tail.m1a)**2)
			if(dist_f * USER.TRK_RAD**2 < dist_l2): continue

			# If they've passed, consider them #
			links[p,q] = np.sqrt(dist_l2)

	## Evaluate Candidates ##
	pairs = np.array(np.nonzero(links)).T
	Q = len(np.unique(pairs[:,0]))
	stpwch = time.time()
	timers = np.zeros((Q))

	for p in np.unique(pairs[:,0]):
		# Initialize #
		ord = 2								# Order of polynomial to fit #
		eps = list()						# Maximum tolerance #
		err = [list(), list(), list()]		# Error for each dimension #
		betas = list()						# Fitted coefficients, one per candidate #

		cand = pairs[pairs[:,0] == p, 1]	# Find all candidates to this base particle #

		# Evaluate Candidates #
		for c in cand:	# <idx in traj>
			# Get points close to the ends of each trajectory #
			rng_p = traj[p].head.frm[0] - traj[p].json[:,-2] <= USER.TRK_LEN
			rng_c = traj[c].json[:,-2] - traj[p].head.frm[0] <= USER.TRK_LEN + USER.TRK_TOL
			pts = np.concatenate((traj[p].json[rng_p,:], traj[c].json[rng_c,:]), axis=0)

			# Evaluate bounds #
			dom = np.power.outer(pts[:,-2], np.arange(ord+1))
			eps_p = np.max(np.std(traj[p].json[rng_p,:3], axis=0))
			eps_c = np.max(np.std(traj[c].json[rng_c,:3], axis=0))
			eps.append(eps_p + eps_c)

			# Random Sample Consensus #
			beta = np.zeros((ord+1, 3))
			for d in range(3):
				beta[:,d] = _RANSAC(pts[:,-2], pts[:,d], pts[:,-1], ord=ord)
				err[d].append(np.std(dom @ beta[:,d] - pts[:,d]))
			betas.append(beta)
		best = np.argmax(eps / np.max(err, axis=0))
		beta = betas[best]					# Use the fit of the winning candidate, not the last one evaluated #

		# Merge base with best candidate #
		if(np.max(err, axis=0)[best] < eps[best]):
			# Initialze #
			cloud_base = traj[p].cloud
			cloud_cand = traj[cand[best]].cloud
			cloud_merge = list()

			frm_base = np.array([c.frm[0] for c in cloud_base])
			frm_cand = np.array([c.frm[0] for c in cloud_cand])
			frm_merge = np.union1d(frm_base, frm_cand)

			# Merge the base into the best candidate #
			for f in range(int(min(frm_merge)), int(max(frm_merge)+1)):
				if((f in frm_base) and (f in frm_cand)):		# Compound both clouds #
					c_base = cloud_base[np.nonzero(f == frm_base)[0][0]]
					c_cand = cloud_cand[np.nonzero(f == frm_cand)[0][0]]
					c_total = PC(np.concatenate([c_base.pts, c_cand.pts], axis=0))
					cloud_merge.append(c_total)
				elif((f in frm_base) and (f not in frm_cand)):	# Take from base #
					cloud_merge.append(cloud_base[np.nonzero(f == frm_base)[0][0]])
				elif((f not in frm_base) and (f in frm_cand)):	# Take from cand #
					cloud_merge.append(cloud_cand[np.nonzero(f == frm_cand)[0][0]])
				else:											# Gap frame: fill from the fitted model #
					X = np.power.outer(f, np.arange(ord+1))
					pt = [0, 0, 0, f, np.mean(np.concatenate([np.atleast_1d(c.wgt) for c in cloud_merge]))]
					for d in range(3):
						pt[d] = X @ beta[:,d]
					cloud_merge.append(PC(np.array(pt)[None,:]))	# PC expects an Nx5 array, as above #

			# Discard the base and alter the candidate #
			# Note: by modifying only the past of the best candidate, we allow that candidate to be the base for another candidate particle
			traj[p].removed = True
			traj[cand[best]] = Particle(cloud_merge, hist=[traj[cand[best]].rho, traj[cand[best]].phi])

		# Progress Bar #
		q = np.where(np.unique(pairs[:,0]) == p)[0][0]
		timers[q] = time.time() - stpwch
		if(sum(timers > 0) > 1):
			prefix = '(%s):\t%8.3f sec' % (code, timers[q])
			suffix = '(Remain: %5.0f sec)' % ((Q-(q+1)) * np.mean(np.diff(timers[timers > 0])))
			VIS._ProgressBar(1, 1, q, Q+1, prefix=prefix, suffix=suffix)

	## Output ##
	return [traj[p] for p in range(P) if not traj[p].removed]
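
The two candidate gates at the top of _Stitch reduce to a single predicate: the frame gap must not exceed USER.TRK_TOL, and the squared distance between head and tail must not exceed the gap times USER.TRK_RAD squared. A restatement as a standalone function, with tol and rad as hypothetical stand-ins for those settings:

import numpy as np

def can_link(head_frm, head_pos, tail_frm, tail_pos, tol, rad):
    # A candidate's tail must begin within `tol` frames of the base's head,
    # and within a squared distance that grows with the frame gap.
    gap = abs(tail_frm - head_frm)
    if gap > tol:
        return False
    return np.sum((np.asarray(tail_pos) - np.asarray(head_pos))**2) <= gap * rad**2

print(can_link(10, [0, 0, 0], 12, [1.0, 1.0, 0.0], tol=5, rad=1.5))  # True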
Example #4
def _Track(clust, img, *, code='', step=1, vis=False):
	# Go frame by frame #
	frames = np.unique(np.concatenate([clust[c].frames for c in range(len(clust))]))
	F = len(frames)
	C = len(clust)

	stpwch = time.time()
	timers = np.zeros((F))

	traj = list()
	for f in range(0, F, step):
		# Get all clusters belonging to this frame #
		clust_f = [clust[c] for c in range(C) if frames[f] in clust[c].frm]
		Cf = len(clust_f)
		
		# Candidate cluster centers (independent of the trajectory index) #
		cand_acen = np.array([clust_f[c].m1a for c in range(Cf)])

		# Go through each trajectory to see if any match #
		for p in range(len(traj)):
			if(not traj[p].active): continue

			# Identify clusters near the head of the trajectory #
			sqdist = np.sum((traj[p].head.m1a - cand_acen)**2, axis=1)
			cand_idx = np.nonzero(sqdist < USER.TRK_RAD**2)[0]

			if(len(cand_idx) == 0): continue
			cand = [clust_f[c] for c in cand_idx]

			# Link! #
			traj_, best = traj[p].Link(cand)

			if(traj_ is not None):
				clust_f[cand_idx[best]].linked = True
				traj[p] = traj_
		
		# Deactivate stagnant particles #
		for p in range(len(traj)):
			if(not traj[p].active): continue
			traj[p].active = (frames[f] - traj[p].head.frm[0] < USER.TRK_TOL)
		
		# Detect merge events #
		emptyclust = list()
		for p in range(len(traj)-1):
			if(not traj[p].active): continue
			
			for q in range(p+1, len(traj)):
				if(not traj[q].active): continue
				if(traj[p].head.frm[0] != traj[q].head.frm[0]): continue	# Require same frame #

				# Check locality #
				if(np.sum((traj[p].head.ares - traj[q].head.ares)**2) < (2*USER.APR_RAD)**2):
					# They are basically the same particle at this point, but check if the recent histories align #
					rho_sim = np.allclose(traj[p].rho[:,:USER.TRK_MIN], traj[q].rho[:,:USER.TRK_MIN], atol=1E-3)
					if(not rho_sim): continue

					phi_sim = np.allclose(traj[p].phi[:,:USER.TRK_MIN,:], traj[q].phi[:,:USER.TRK_MIN,:], atol=1E-3)
					if(not phi_sim): continue

					# We have an overlap! Absorb the smaller one (q) #
					intersection = np.intersect1d(traj[p].frm, traj[q].frm)
					valid_frames = [f for f in traj[q].frm if f not in intersection]
					if(len(valid_frames) == 0):	emptyclust.append(q);	continue

					q_clouds = [traj[q].cloud[c] for c in range(traj[q].num) if traj[q].cloud[c].frm[0] in valid_frames]
					traj[q] = Particle(q_clouds)
					traj[q].active = False
		for e in np.sort(np.unique(emptyclust))[::-1]:
			traj.remove(traj[e])
		
		# Particle cleanup #
		bad = [p for p in range(len(traj)) if(not traj[p].active and len(traj[p].cloud) < USER.TRK_MIN)]
		for b in bad[::-1]:
			traj.remove(traj[b])

		# Create a new particle for each unlinked cluster #
		traj.extend([Particle([clust_f[c]]) for c in range(Cf) if not clust_f[c].linked])

		# Progress Display #
		timers[f] = time.time() - stpwch
		if(f > 1):
			prefix = '(%s):\t%8.3f sec' % (code, timers[f])
			suffix = '(Remain: %5.0f sec)' % ((F-(f+1)) * np.mean(np.diff(timers[timers > 0])))
			VIS._ProgressBar(f+1, F, prefix=prefix, suffix=suffix)

	## Output ##
	return traj
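
The linking step inside the frame loop is a radius query around each trajectory head. A minimal sketch of that gate, with rad standing in for USER.TRK_RAD and the sort by distance added purely for illustration:

import numpy as np

def candidates_near_head(head_center, cluster_centers, rad):
    # Return indices of clusters within `rad` of the trajectory head,
    # nearest first, mirroring the sqdist < USER.TRK_RAD**2 test above.
    centers = np.asarray(cluster_centers)
    sqdist = np.sum((centers - np.asarray(head_center))**2, axis=1)
    idx = np.nonzero(sqdist < rad**2)[0]
    return idx[np.argsort(sqdist[idx])]

print(candidates_near_head([0, 0, 0], np.random.rand(10, 3) * 4, rad=2.0))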
Example #5
def TEST_LIMITS(img, ker, eps, *, code='', visual=False):
    # Test runs for limitations only - 64x64! #
    ## Initialize ##
    F = np.shape(img)[0]
    Z = np.shape(img)[1]
    Y = np.shape(img)[2]
    X = np.shape(img)[3]

    pos = [None] * F
    wgt = [None] * F
    H_A, H_S = _IDFilters([Z * np.shape(ker)[1], Y, X])

    tru = OP._LoadTruth(code)
    error = np.full([int(USER.REC_ITER // 3), F, 3], np.nan)

    # Progress #
    stpwch = time.time()
    timers = np.zeros((F))
    t_remain = np.nan

    ## Recover Emitter Positions ##
    admm = ADMM(ker)
    for f in np.arange(F):
        psi_f = np.zeros((np.shape(ker)[0], Z * np.shape(ker)[1], Y, X))
        for z in range(Z):
            pb = (code, f, F, z, Z, 0, 1, 0, 1,
                  timers[f - (1 if (f > 0) else 0)], t_remain)
            zrng = [z * USER.KER_Z, (z + 1) * USER.KER_Z]

            # Split the image into each plane #
            img_ = img[f, z, :, :] / eps[f, z, :, :]
            eps_ = eps[f, z, :, :] / np.max(eps)

            # Obtain the point clouds per frame #
            psi_f[:, zrng[0]:zrng[1], ...], error = admm.Recover(img_,
                                                                 code=code,
                                                                 pb=pb,
                                                                 vis=visual,
                                                                 error=error)

        # Identify points in the cloud #
        Psi_f = npf.fftn(psi_f)
        psi_a = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
        psi_s = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))

        # Determine where the smaller blur is bigger than the larger one #
        lhs = psi_a
        rhs = psi_s * (1 + 1 / eps_) + np.mean(psi_f)
        idx = np.nonzero(lhs > rhs)
        pos[f] = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T + f]).T
        wgt[f] = np.round(np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

        # Progress Display #
        timers[f] = time.time() - stpwch
        if (sum(timers > 0) > 1):
            t_remain = (F - (f + 1)) * np.mean(np.diff(timers[timers > 0]))
            prefix = '(%s):\t%8.3f sec' % (code, timers[f])
            suffix = '(Remain: %5.0f sec)' % (t_remain)
            VIS._ProgressBar(f + 1, F, prefix=prefix, suffix=suffix)

    #if(error is not None):
    spi.savemat(OP.FOLD_MAT + code + ' error.mat', {'error': error})
    print(code + ' done!')
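
The lhs > rhs test keeps voxels where a narrow blur of the recovered volume exceeds a wide blur by a noise-scaled margin; _IDFilters builds H_A and H_S in the Fourier domain, which this listing does not show. A spatial-domain sketch of the same idea, with Gaussian widths chosen arbitrarily as stand-ins:

import numpy as np
from scipy.ndimage import gaussian_filter

def detect_points(psi, eps, sig_a=1.0, sig_s=2.0):
    # Keep voxels where the narrow blur exceeds the wide blur by a
    # noise-scaled margin; sig_a/sig_s are assumed stand-ins for H_A/H_S.
    psi_a = gaussian_filter(psi, sig_a)
    psi_s = gaussian_filter(psi, sig_s)
    return np.nonzero(psi_a > psi_s * (1 + 1 / eps) + np.mean(psi))

idx = detect_points(np.random.rand(64, 64), eps=0.5)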
Example #6
    def Recover(self,
                xi,
                *,
                code='',
                pb=('', 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
                vis=False,
                error=None,
                err=None):
        ## Initialization ##
        # Hard-code the random seed to make it reproducible #
        np.random.seed(0)

        # Pass xi into the goal image `y`, but pass it through the selection matrix `sig` first #
        y = np.zeros((np.shape(self.sig)[0], np.size(xi)))
        y[0, ...] = xi.reshape(np.size(xi), order="F")
        self.y[:] = np.reshape(self.sig @ y, self.sz, order="F")
        self.y_ = self.y.copy()

        # Reinitialize psi and the slack variables #
        self.Psi[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_0[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_1[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_2[:] = fftw.zeros_aligned(self.sz, dtype='complex64')

        # Localization error #
        if (vis or (error is not None)):
            pts = np.zeros([0, 3])
            if (error is not None):
                tru = OP._LoadMot(code)
                pts = tru[tru[:, :, 3] == pb[1], :]
                pts[:, [0, 1]] = pts[:, [1, 0]]  # Swap X & Y #
                H_A, H_S = _IDFilters(self.sz[1:])
            else:
                tru = OP._LoadTruth(code)
                for p in range(len(tru)):
                    if (pb[1] in tru[p].frm):
                        f = np.nonzero(pb[1] == tru[p].frm)[0]
                        pts = np.concatenate((pts, tru[p].res[f, :]), axis=0)

        ## Iterate Through ADMM ##
        stpwch = time.time()
        timers = np.zeros((USER.REC_ITER))
        for i in range(USER.REC_ITER):
            # Separate the result of this iteration from the previous solution `self.Psi`.  This allows us to build Psi incrementally, modularizing the regularization.
            Psi = fftw.zeros_aligned(self.sz, dtype='complex64')

            # Perform Regularizations #
            Psi = self.Reg_Accuracy(Psi, i)
            Psi = self.Reg_Sparcity(Psi, 1)  # alt. weight: np.minimum(np.maximum(2*i/USER.REC_ITER, 1/2), 3/2) #
            Psi = self.Reg_Temporal(Psi)

            # Copy in the new result #
            self.Psi[:] = Psi.copy()

            # Alert us if we have an issue! #
            if (np.any(np.isnan(Psi))): raise ValueError("Psi has NaN values!")

            # Visualization #
            if (vis and (np.mod(i, USER.REC_ITER // 20) == 0)):
                self.BT_Psi()
                plt.clf()
                plt.gca(position=[0, 0, 1, 1])
                plt.imshow(np.log10(
                    npf.fftshift(np.sum(np.abs(self.psi), axis=(0, 1)))),
                           cmap='gray')
                plt.plot(pts[:, 0],
                         pts[:, 1],
                         color='r',
                         marker='o',
                         linewidth=0,
                         fillstyle='none')
                plt.clim(-3, 0)
                plt.draw()
                plt.pause(0.1)
            if ((error is not None) and (np.mod(i, 3) == 0) and (i > 60)):
                # Get psi #
                self.BT_Psi()

                # Find where psi is important #
                psi_f = npf.fftshift(np.abs(self.psi), axes=(-2, -1))

                # Identify points in the cloud #
                Psi_f = npf.fftn(psi_f)
                psi_a = np.real_if_close(
                    npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
                psi_s = np.real_if_close(
                    npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))
                lhs = psi_a
                rhs = psi_s * (1 + 1) + np.mean(psi_f)
                idx = np.nonzero(lhs > rhs)
                pos = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T]).T
                wgt = np.round(
                    np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

                if (0 < len(wgt) < 10000):
                    # Attempt a triangulation #
                    # Create a point cloud based on the points #
                    pnts = np.concatenate([pos, wgt[:, None]], axis=1)

                    # Segment the point cloud to find clusters #
                    cloud = PointCloud(pnts, seg=True)

                    # Weight threshold: discard low-weight clusters before separating #
                    clust = _CloudThr(cloud.clust)
                    clust = _Separate(clust)

                    if (len(clust) == 0): continue

                    # Evaluate the average minimum error per particle #
                    dist_x = np.zeros([np.shape(pts)[0], len(clust)])
                    dist_y = np.zeros([np.shape(pts)[0], len(clust)])
                    dist_z = np.zeros([np.shape(pts)[0], len(clust)])

                    # Evaluate the distance between each point and all clusters #
                    for c in range(len(clust)):
                        diff = (pts[:, :3] - clust[c].res) * [
                            *USER.RES, USER.DOF[0] / USER.KER_Z
                        ]
                        dist_x[:, c] = np.abs(diff[:, 0])
                        dist_y[:, c] = np.abs(diff[:, 1])
                        dist_z[:, c] = np.abs(diff[:, 2])

                    # Get the minimum error per cluster, average over all particles #
                    error[int(i // 3), pb[1], 0] = np.mean(np.min(dist_x, 1))  # X
                    error[int(i // 3), pb[1], 1] = np.mean(np.min(dist_y, 1))  # Y
                    error[int(i // 3), pb[1], 2] = np.mean(np.min(dist_z, 1))  # Z
            #if((err is not None) and (np.mod(i, 3) == 0)):
            #	err[pb(1),i,:] = ComputeError(xi)

            # Progress Bar #
            timers[i] = time.time() - stpwch
            if (i > 0):
                prefix = '(%s):\t%8.3f sec' % (pb[0], pb[-2] + timers[i])
                #suffix = '(Remain: %5.0f sec)' % (pb[-1])
                suffix = '(Remain: %3.0f:%2.0f:%2.0f)  ' % (pb[-1] // 3600,
                                                            (pb[-1] % 3600) //
                                                            60, pb[-1] % 60)
                if (pb[4] > 1):  # Show Z progress #
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=pb[3] + 1,
                                     sub_I=pb[4],
                                     prefix=prefix,
                                     suffix=suffix)
                elif (pb[6] > 1
                      or pb[8] > 1):  # Show chunked iteration progress #
                    i_ = i + 1 + (pb[5] * pb[8] + pb[7]) * USER.REC_ITER
                    I_ = pb[6] * pb[8] * USER.REC_ITER
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=i_,
                                     sub_I=I_,
                                     prefix=prefix,
                                     suffix=suffix)
                else:
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=i,
                                     sub_I=USER.REC_ITER,
                                     prefix=prefix,
                                     suffix=suffix)
        if (vis):
            plt.ioff()
            plt.show()

        ## Output ##
        self.BT_Psi()
        return np.abs(self.psi), error
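
Stripped of visualization, error bookkeeping, and the progress bar, the loop in Recover has a simple shape: each iteration rebuilds the estimate from zero by chaining the regularization steps, which keeps each step modular. The skeleton below captures only that pattern; regs is a generic stand-in for Reg_Accuracy, Reg_Sparcity, and Reg_Temporal, whose internals are not shown in this listing.

import numpy as np

def admm_skeleton(y, n_iter, regs):
    # Each iteration starts from a fresh zero array and threads it through
    # every regularizer, carrying the previous estimate alongside; this is
    # the same structure as the loop over USER.REC_ITER above.
    psi_prev = np.zeros_like(y)
    for _ in range(n_iter):
        psi = np.zeros_like(y)
        for reg in regs:
            psi = reg(psi, psi_prev, y)
        if np.any(np.isnan(psi)):
            raise ValueError("psi has NaN values!")
        psi_prev = psi
    return psi_prev

out = admm_skeleton(np.random.rand(8, 8), 10, [lambda p, prev, y: 0.5 * (prev + y)])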
Example #7
def _Recover(img, ker, eps, *, code='', step=1, vis=False):
    ## Initialize ##
    f0 = 0
    F = np.shape(img)[0]
    Z = np.shape(img)[1]
    Y = np.shape(img)[2]
    X = np.shape(img)[3]
    C = np.minimum(X, Y) if ((not USER.REC_CHUNK) or ((X <= 128) and
                                                      (Y <= 128))) else 64

    pos = [None] * F
    wgt = [None] * F
    H_A, H_S = _IDFilters([np.shape(ker)[0], Z * np.shape(ker)[1], Y, X])

    tru = OP._LoadTruth(code)

    # Progress #
    stpwch = time.time()
    timers = np.zeros((F))
    t_remain = np.nan

    # Truth #
    #error = np.full([int(USER.REC_ITER//3), F], np.nan)

    ## Recover Emitter Positions ##
    ker_ = ker[..., (Y - C) // 2:(Y + C) // 2, :]
    ker_ = ker_[..., (X - C) // 2:(X + C) // 2]
    admm = ADMM(ker_)
    for f in np.arange(F, step=step):
        psi_f = np.zeros((np.shape(ker)[0], Z * np.shape(ker)[1], Y, X))
        for z in range(Z):
            zrng = [z * USER.KER_Z, (z + 1) * USER.KER_Z]

            # Split the image into each plane, normalized by the threshold map #
            img_ = img[f + f0, z, :, :] / eps[f + f0, z, :, :]
            eps_ = eps[f + f0, z, :, :] / np.max(eps)

            # Chunk the image and obtain point clouds per frame #
            img_chunks, xrng, yrng, overlay = _Chunk(img_, C=C)
            M = np.shape(xrng)[0]
            N = np.shape(yrng)[0]
            for m in range(M):
                for n in range(N):
                    pb = (code, f, F, z, Z, m, M, n, N,
                          timers[f -
                                 (1 if (M == 1 and N == 1 and f > 0) else 0)],
                          t_remain)
                    if (np.ptp(img_chunks[n, m, ...]) > 2 * np.std(img_)):
                        psi, _ = admm.Recover(img_chunks[n, m, ...],
                                              code=code,
                                              pb=pb,
                                              vis=False)
                        psi = np.fft.fftshift(psi, axes=(-2, -1))
                        psi_f[:, zrng[0]:zrng[1],
                              yrng[n, 0]:yrng[n, 1],
                              xrng[m, 0]:xrng[m, 1]] += psi
                    timers[f] = time.time() - stpwch
            psi_f[:, zrng[0]:zrng[1], ...] /= np.maximum(overlay, 1)

        # Identify points in the cloud #
        Psi_f = npf.fftn(psi_f)
        psi_a = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
        psi_s = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))

        # Determine where the smaller blur is bigger than the larger one #
        lhs = psi_a
        rhs = psi_s * (1 + 1 / eps_) + np.mean(psi_f) * (
            1 + (USER.KER_T > 1))  #eps_ * np.std(psi_f)/np.mean(psi_f)
        idx = np.nonzero(lhs > rhs)
        pos[f] = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T + f]).T
        wgt[f] = np.round(np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

        # Visualization #
        if (vis):
            plt.figure(figsize=(15, 5))
            ax = plt.axes(position=[0, 0, 1 / 3, 0.9])
            ax.imshow(img_, cmap='gray')
            ax.set_title('Input image #%i/%i' % (f + 1, F))

            ax = plt.axes(position=[1 / 3, 0, 1 / 3, 0.9])
            ax.imshow(np.sum(psi_f, axis=(0, 1)), cmap='gray')
            ax.set_title('Deconvolution')

            ax = plt.axes(position=[2 / 3, 0, 1 / 3, 0.9])
            ax.imshow(img_, cmap='gray')
            if (len(wgt[f]) > 0):
                ax.scatter(pos[f][:, 0],
                           pos[f][:, 1],
                           s=100 * (wgt[f] / np.max(wgt[f])),
                           c='r')
            ax.set_title('Point Cloud')

            if (USER.KER_Z > 1):
                plt.figure(figsize=(6, 6))
                ax = plt.axes(projection='3d',
                              position=[-0.05, -0.07, 1.1, 1.1])
                if (len(wgt[f]) > 0):
                    ax.scatter(pos[f][:, 0],
                               pos[f][:, 1],
                               pos[f][:, 2],
                               s=100 * (wgt[f] / np.max(wgt[f])))
                ax.view_init(azim=30, elev=10)
                ax.set_xlim(0, np.shape(img)[3])
                ax.set_ylim(0, np.shape(img)[2])
                ax.set_zlim(0, USER.KER_Z)
            plt.show()

        # Progress Display #
        timers[f] = time.time() - stpwch
        if (sum(timers > 0) > 1):
            t_remain = (F - (f + 1)) * np.mean(np.diff(timers[timers > 0]))
            prefix = '(%s):\t%8.3f sec' % (code, timers[f])
            #suffix = '(Remain: %5.0f sec)' % (t_remain)
            suffix = '(Remain: %3.0f:%2.0f:%2.0f)' % (t_remain // 3600,
                                                      (t_remain % 3600) // 60,
                                                      t_remain % 60)
            VIS._ProgressBar(f + 1, F, prefix=prefix, suffix=suffix)

    #import scipy.io as spi
    #spi.savemat(OP.FOLD_MAT + code + ' error.mat', {'error': error})

    ## Output ##
    return pos, wgt
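
_Chunk and the overlay array are not included in this listing; from their use above, chunks of width C are reconstructed independently and the overlapping sums are averaged by np.maximum(overlay, 1). A one-axis sketch of such a tiling, with half-window overlap as an assumed choice:

import numpy as np

def chunk_ranges(size, C):
    # Tile a length-`size` axis into width-C windows overlapping by C//2,
    # plus a per-pixel count of how many windows cover each position
    # (the role played by the overlay normalization above).
    starts = list(range(0, max(size - C, 0) + 1, C // 2))
    if starts[-1] + C < size:
        starts.append(size - C)  # final window flush with the edge #
    rng = np.array([[s, s + C] for s in starts])
    overlay = np.zeros(size)
    for s, e in rng:
        overlay[s:e] += 1
    return rng, overlay

xrng, overlay = chunk_ranges(200, C=64)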