def distance_generation():
    filepath = "/home/zezhou/McGillResearch/2019Manuscript_Analysis/femsimulation/t4concentration/"
    os.chdir(filepath)
    # Change this to a loop to process all data
    tmp = "/home/zezhou/McGillResearch/2019Manuscript_Analysis/femsimulation/t4concentration/sampling/ecc0995_sampling.txt"
    data = np.loadtxt(tmp, skiprows=2)  # 1st column: x axis; 2nd column: y axis; 3rd column: z axis
    # Read the coordinate lattice
    x = data[:, 0]
    y = data[:, 1]
    a, b = module.ellipse_para(ecc=0.995)
    wall = np.zeros_like(x)
    # Create the depletion z profile
    for i in range(len(x)):
        xtmp = x[i]
        ytmp = y[i]
        rdis = xtmp**2 / a**2 + ytmp**2 / b**2
        if rdis > 1:
            continue

        def depletion(r):
            # r: offset to the boundary; xtmp, ytmp: current coordinate of the point of interest;
            # a, b: semi-axes of the cavity. The root satisfies
            # xtmp**2/(a - r)**2 + ytmp**2/(b - r)**2 = 1.
            return xtmp**2 * (b - r)**2 + ytmp**2 * (a - r)**2 - (a - r)**2 * (b - r)**2

        sol = root(depletion, x0=1e-5, method='lm')
        wall[i] = sol.x[0]
    np.savetxt('ecc0995_rdist.txt', wall)
    plt.plot(wall, 'r')
    plt.show()
    return
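# Illustrative sketch (not part of the original pipeline): solves the same
# depletion equation as distance_generation() for a single point, with
# hypothetical semi-axes and a hypothetical test point instead of
# module.ellipse_para() and the sampled lattice.
def _example_depletion_offset():
    from scipy.optimize import root

    a, b = 2.0, 1.0    # hypothetical semi-axes
    xp, yp = 0.5, 0.3  # hypothetical point inside the cavity

    def depletion(r):
        # Root r satisfies xp**2/(a - r)**2 + yp**2/(b - r)**2 = 1,
        # i.e. the point sits on the ellipse whose semi-axes are shrunk by r.
        return xp**2 * (b - r)**2 + yp**2 * (a - r)**2 - (a - r)**2 * (b - r)**2

    sol = root(depletion, x0=1e-5, method='lm')
    r = sol.x[0]
    # Sanity check: the shrunk-ellipse equation should evaluate to ~1 at the root.
    print(r, xp**2 / (a - r)**2 + yp**2 / (b - r)**2)
    return r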
def cav_ecc06():
    a, b = module.ellipse_para(ecc=0)
    n = 1000  # sampling pts
    t = np.linspace(0, 2 * np.pi, n)
    x = a * np.cos(t)
    y = b * np.sin(t)
    return x, y
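# Hedged usage sketch: plotting the cavity outline returned above (note that,
# as written, cav_ecc06() samples the ecc=0 parameters despite its name).
# Assumes the module-level matplotlib import (plt) used elsewhere in this file.
# x_outline, y_outline = cav_ecc06()
# plt.plot(x_outline, y_outline, 'k--')
# plt.gca().set_aspect('equal')
# plt.show()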
def wall_deplt_intver_wca(ini):
    # Plot the wall-depletion concentration
    prob = simprob_integrationver_wca(ini)
    a, b = module.ellipse_para(ecc)
    mask = (xred**2 / a**2 + yred**2 / b**2) < 1
    fig1, ax1 = plt.subplots()
    ax1.set_aspect('equal')
    t4labelsize = 14
    # tcf = ax1.tricontourf(simdata[:, 0], simdata[:, 1], A*np.exp(-rdistdata/tau), cmap='inferno')
    tcf = ax1.tricontourf(xred[mask], yred[mask], prob[mask], cmap='inferno')
    cb = fig1.colorbar(tcf, ticks=[0, 0.5, 1], shrink=0.8)
    cb.ax.tick_params(labelsize=14)
    cb.set_label('Normalized potential of \nwall-depletion',
                 fontsize=14,
                 horizontalalignment='center',
                 rotation=-90,
                 labelpad=50)
    ax1.set_xticks([-1, 0, 1])
    ax1.set_xticklabels([-1, 0, 1], fontdict={'fontsize': t4labelsize})
    ax1.set_yticks([-0.8, 0, 0.8])
    ax1.set_yticklabels([-0.8, 0, 0.8], fontdict={'fontsize': t4labelsize})
    ax1.set_xlabel('X-position ' r'$(\mu m)$', fontsize=t4labelsize + 3)
    ax1.set_ylabel('Y-position ' r'$(\mu m)$', fontsize=t4labelsize + 3)
    plt.tight_layout()
    plt.show()
    return xred, yred, prob
def potentialintegration_wca(x, y, e, sig):
    # x, y: coordinate of the point of interest
    # e: eccentricity of the cavity
    # sig: WCA length scale (sigma); the interaction is cut off at r = 2**(1/6)*sig
    a, b = module.ellipse_para(e)
    tgv = 1e12

    def intfunc(theta):
        dx = a * np.cos(theta) - x
        dy = b * np.sin(theta) - y
        r = np.sqrt(dx**2 + dy**2)
        if r < sig * 2**(1 / 6):
            out = 4 * ((sig / r)**12 - (sig / r)**6 + 1 / 4) * np.sqrt(
                a**2 * (np.sin(theta))**2 + b**2 * (np.cos(theta))**2)
        else:
            out = 0
        if out >= tgv:
            out = tgv
        return out

    # t = time.time()
    result = integrate.quad(intfunc,
                            0,
                            2 * np.pi,
                            points=np.linspace(0, 2 * np.pi, 10))
    # print('single int time:'+str(time.time()-t))
    return result[0]
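# Illustrative sketch (not part of the original pipeline): the integrand above
# uses the standard WCA form 4*((sig/r)**12 - (sig/r)**6 + 1/4), truncated at
# r = 2**(1/6)*sig, so the shifted potential goes continuously to zero at the
# cutoff. The helper below checks that with a hypothetical sigma and ignores
# the cavity geometry factor entirely.
def _example_wca_cutoff_check(sig=0.1):
    cutoff = sig * 2**(1 / 6)

    def wca(r):
        if r < cutoff:
            return 4 * ((sig / r)**12 - (sig / r)**6 + 1 / 4)
        return 0.0

    # At the cutoff the shifted potential vanishes; just inside it is small and
    # positive, and it grows steeply as r decreases.
    print(wca(cutoff), wca(0.999 * cutoff), wca(0.5 * cutoff))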
def potential_generation_testversion():
    filepath = "/home/zezhou/McGillResearch/2019Manuscript_Analysis/femsimulation/t4concentration/"
    os.chdir(filepath)
    # Change this to a loop to process all data
    tmp = "/home/zezhou/McGillResearch/2019Manuscript_Analysis/femsimulation/t4concentration/sampling/ecc08_sampling.txt"
    data = np.loadtxt(tmp, skiprows=2)  # 1st column: x axis; 2nd column: y axis; 3rd column: z axis
    # Read the coordinate lattice
    e = 0.8
    rdc = 0.6
    x = data[:, 0]
    y = data[:, 1]
    reduce_samplemask = (np.arange(0, len(x)) % samplescale == 0)  # uses the module-level samplescale
    xred = x[reduce_samplemask]
    yred = y[reduce_samplemask]
    a, b = module.ellipse_para(e)
    mask = (xred**2 / a**2 + yred**2 / b**2) < 1
    z = []
    tgv = 1
    tmp = time.time()
    fig1, ax1 = plt.subplots()
    ax1.set_aspect('equal')
    print("Start integration calculating")
    print("Current time: " + str(tmp))
    for i in range(len(xred)):
        # if i % 200 == 100:
        #     print("Step:" + str(i) + " in " + str(len(x)))
        #     print("Time taking:" + str(time.time() - tmp))
        #     amin = np.min(z)
        #     amax = np.max(z)
        #     level = np.linspace(amin, amax, 20).tolist()
        #     tcf = ax1.tricontourf(xred[:i], yred[:i], z, cmap='inferno', levels=level)
        #     plt.pause(0.01)
        if mask[i] == 0:
            z.append(tgv)
        else:
            z.append(potentialintegration(xred[i], yred[i], e, rdc))
    # os.chdir("/home/zezhou/McGillResearch/2019Manuscript_Analysis/femsimulation/walldepletion/")
    # np.savetxt('wdp_' + str(e) + '_' + str(rdc) + '_' + str(samplescale) + 'samplescale', np.array(z))
    print("Finish calculation, elapsed time: " + str(time.time() - tmp))
    # tcf = ax1.tricontourf(xred[mask], yred[mask], np.array(z)[mask], cmap='inferno', levels=level)
    # ax1.set_xticks([-1, 0, 1])
    # ax1.set_yticks([-0.8, 0, 0.8])
    # plt.tight_layout()
    # plt.show()
    return np.array(z)
def potential_generation_plotuseonly_wca(rdc):
    # Uses the module-level coordinate lattice (xred, yred, ured)
    a, b = module.ellipse_para(ecc)
    mask = (xred**2 / a**2 + yred**2 / b**2) < 1
    z = []
    tgv = 1
    tmp = time.time()
    print("Start integration calculating")
    print("Current time: " + str(tmp))
    for i in range(len(xred)):
        if mask[i] == 0:
            z.append(tgv)
        else:
            z.append(potentialintegration_wca(xred[i], yred[i], ecc, rdc))
    print("Finish calculation, elapsed time: " + str(time.time() - tmp))
    return xred, yred, ured, np.array(z)
def potentialintegration(x, y, e, rdc):
    # x, y: coordinate of the point of interest
    # theta: parametrizes the cavity boundary (a*cos(theta), b*sin(theta))
    # e: eccentricity of the cavity
    # rdc: decay length for the wall depletion
    a, b = module.ellipse_para(e)

    def intfunc(theta):
        dx = a * np.cos(theta) - x
        dy = b * np.sin(theta) - y
        return np.exp(-np.sqrt(dx**2 + dy**2) / rdc) * np.sqrt(
            a**2 * (np.sin(theta))**2 + b**2 * (np.cos(theta))**2)

    # t = time.time()
    result = integrate.quad(intfunc, 0, 2 * np.pi)
    # print('single int time:' + str(time.time() - t))
    return result[0]
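# Illustrative check (not part of the original pipeline): for a circular cavity
# (a = b = R) the kernel above has a closed form at the centre, since every
# boundary point is a distance R away and the arc-length factor is simply R,
# giving 2*pi*R*exp(-R/rdc). R and rdc below are hypothetical values and
# module.ellipse_para() is not used.
def _example_exponential_kernel_center(R=1.0, rdc=0.6):
    from scipy import integrate

    def intfunc(theta):
        dx = R * np.cos(theta) - 0.0  # point of interest at the centre
        dy = R * np.sin(theta) - 0.0
        return np.exp(-np.sqrt(dx**2 + dy**2) / rdc) * R

    numeric = integrate.quad(intfunc, 0, 2 * np.pi)[0]
    analytic = 2 * np.pi * R * np.exp(-R / rdc)
    print(numeric, analytic)  # should agree to quad's tolerance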
def potential_generation_wca(sig):
    # Uses the module-level coordinate lattice (xred, yred)
    a, b = module.ellipse_para(ecc)
    mask = (xred**2 / a**2 + yred**2 / b**2) < 1
    z = []
    tgv = 1
    tmp = time.time()
    print("Start integration calculating")
    print("Current time: " + str(tmp))
    for i in range(len(xred)):
        if mask[i] == 0:
            z.append(tgv)
        else:
            z.append(potentialintegration_wca(xred[i], yred[i], ecc, sig))
    print("Finish calculation, elapsed time: " + str(time.time() - tmp))
    # triang = tri.Triangulation(xred, yred)
    # plt.tricontourf(triang, z)
    # plt.show()
    return np.array(z)
def simprob_integrationver(x):
    A = x[0]
    rdc = x[1]
    B = np.abs(x[2])
    #######################
    tgv = 1e12  # Suppress the outside probability: the T4 potential outside the cavity is set to tgv so the particle cannot penetrate the wall
    a, b = module.ellipse_para(ecc)
    outmask = (xred**2 / a**2 + yred**2 / b**2) >= 1
    ured[outmask] = tgv
    wdp_intver = potential_generation(rdc)  # wall-depletion potential, integration version
    landscape = A * wdp_intver + B * ured
    np.savetxt('landscape_ecc0_add.txt', landscape)
    prob = np.exp(-landscape)
    prob = np.transpose(prob / np.sum(prob))
    # plt.tricontourf(xred[inmask], yred[inmask], prob[inmask], cmap='inferno')
    # plt.show()
    # plt.imshow(prob)
    # plt.show()
    return prob
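# Illustrative sketch (not part of the original pipeline): the landscape-to-
# probability step above is a plain Boltzmann weighting. With a toy landscape
# the mapping is easy to verify by hand; points assigned the large penalty tgv
# get a weight that underflows to zero, which is how the region outside the
# cavity is suppressed.
def _example_boltzmann_normalisation():
    landscape = np.array([0.0, 1.0, 2.0, 1e12])  # last entry mimics tgv
    prob = np.exp(-landscape)
    prob = prob / np.sum(prob)
    print(prob)  # ~[0.665, 0.245, 0.090, 0.0]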
nx = 633  # number of points in x direction
ny = 64   # number of points in y direction
sol_ini = [1, 0.1, 1e3]  # initial guess of the solution
wdf_pltflag = 1  # set to 1 to show the check plot of the simulation
scale = 6.25  # 6.25 pixel/um

# Real data & mesh
h = np.loadtxt(datapath)
h = np.transpose(h / np.sum(h))
x = np.loadtxt(xpath) / scale  # x edges from hist
y = np.loadtxt(ypath) / scale  # y edges from hist

# Simulation data & mesh
simdata = np.loadtxt(simpath, skiprows=2)
a, b = module.ellipse_para(ecc=ecc)
xs = np.linspace(-xsample, xsample, nx + 1)
ys = np.linspace(-ysample, ysample, ny + 1)


# Loss function definition
def lossfunc(ini):
    # Loss function defined by 2-norm integration. Can be modified.
    prob = wdf(ini)  # Input is the initial guess. Sim data loading is done manually by changing the filepath in wdf.
    prob = feedthrough(prob, xs, ys, h, x, y)
    global wdf_pltflag
    # Check plot of discrete sim result and data
    if wdf_pltflag == 1:
def swappingplt():
    jump = 0  # jump to zoom plot
    n_lines = 1  # number of trajectories shown in each axis
    if jump == 0:
        dataset = ['ecc0', 'ecc06', 'ecc08', 'ecc095', 'ecc098', 'ecc0995']
        ecc = [0, 0.6, 0.8, 0.95, 0.98, 0.995]
        # dataset = ['ecc0995']
        a = 8  # figure size
        # Create a figure and axis
        fig = plt.figure(figsize=[a, a], tight_layout=True)
        threshold = 0.3  # Pole region is 1/3 of the entire cell length
        ind = 0
        pixelratio = 6.25
        lagtime_lower = 50  # dwell time lower bound in frames (17 fps)
        lagtime_upper = 270  # dwell time upper bound in frames
        for item in dataset:
            data = tot_vec_overlay_clean[item + '_delx']  # major-axis position
            datay = tot_vec_overlay_clean[item + '_dely']
            state1 = (data > (np.max(data * threshold)))
            state2 = (data < (np.min(data * threshold)))
            import itertools

            def counts(sig):
                sig = list(sig)
                l = [(k, len(list(g))) for k, g in itertools.groupby(sig)]
                ton = []
                toff = []
                on_ind = []  # start indices of ton
                ind_temp = 0  # current index
                for x in l:
                    if x[0] == 1:
                        ton.append(x[1])
                        on_ind.append(ind_temp)
                    else:
                        toff.append(x[1])
                    ind_temp += x[1]
                return ton, on_ind

            state1_time, start_ind_s1 = counts(state1)
            state2_time, start_ind_s2 = counts(state2)
            # pick up the indices of the events inside the lag-time window
            state1_timemask = (np.array(state1_time) <= lagtime_upper) * (
                np.array(state1_time) >= lagtime_lower)
            state1_longtimeind = []
            idx = 0
            for item in state1_timemask:
                if item == True:
                    state1_longtimeind.append(
                        np.arange(start_ind_s1[idx],
                                  start_ind_s1[idx] + state1_time[idx] + 1))
                idx += 1
            state2_timemask = (np.array(state2_time) <= lagtime_upper) * (
                np.array(state2_time) >= lagtime_lower)
            state2_longtimeind = []
            idx = 0
            for item in state2_timemask:
                if item == True:
                    state2_longtimeind.append(
                        np.arange(start_ind_s2[idx],
                                  start_ind_s2[idx] + state2_time[idx] + 1))
                idx += 1
            ax = fig.add_subplot(3, 3, ind + 1)
            indstart = int(np.random.rand() * len(state1_longtimeind) * 0.8)
            for item in state1_longtimeind[indstart:indstart + n_lines]:
                ax.plot(data[item] / pixelratio, datay[item] / pixelratio)
            indstart = int(np.random.rand() * len(state2_longtimeind) * 0.8)
            for item in state2_longtimeind[indstart:indstart + n_lines]:
                ax.plot(data[item] / pixelratio, datay[item] / pixelratio)
            ax.plot(np.max(data * threshold) * np.ones(10) / pixelratio,
                    np.linspace(-2, 2, 10), 'r--')
            ax.plot(np.min(data * threshold) * np.ones(10) / pixelratio,
                    np.linspace(-2, 2, 10), 'r--')
            ax.set_xlim([-2, 2])
            ax.set_ylim([-2, 2])
            temp_ecc = ecc[ind]
            a, b = module.ellipse_para(temp_ecc)
            theta = np.linspace(0, 2 * np.pi)
            ax.plot(a * np.cos(theta) * 1.05,
                    b * np.sin(theta) * 1.05,
                    'k--',
                    label=dataset[ind])
            ax.set_xlabel(r'Position ($\mu m$)')
            ax.set_ylabel(r'Position ($\mu m$)')
            ax.legend()
            ind += 1
        plt.show()
    return
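# Illustrative sketch (not part of the original pipeline): the nested counts()
# helper above is a run-length encoding of a boolean state trace built with
# itertools.groupby. The standalone copy below shows its behaviour on a toy
# trace: the dwell lengths of the True runs and the frame index at which each
# run starts.
def _example_dwell_run_lengths():
    import itertools

    state = [False, True, True, True, False, False, True]
    runs = [(k, len(list(g))) for k, g in itertools.groupby(state)]
    ton, on_ind, ind_temp = [], [], 0
    for value, length in runs:
        if value:
            ton.append(length)   # dwell duration of this True run
            on_ind.append(ind_temp)  # frame index where the run starts
        ind_temp += length
    print(ton, on_ind)  # [3, 1] [1, 6]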
def histshortdwellevent():
    dataset = [
        'ecc0', 'ecc06', 'ecc08', 'ecc09', 'ecc095', 'ecc098', 'ecc0995'
    ]
    ecc = [0, 0.6, 0.8, 0.9, 0.95, 0.98, 0.995]
    # dataset = ['ecc0995']
    a = 8  # figure size
    # Create a figure and axis
    fig = plt.figure(figsize=[a, a], tight_layout=True)
    threshold = 0.3  # Pole region is 1/3 of the entire cell length
    ind = 0
    pixelratio = 6.25
    # Short dwell events
    dt_s1 = 0  # dwell time lower bound in frames (17 fps)
    dt_s2 = 5  # dwell time upper bound in frames
    # Long dwell events
    lagtime_lower = np.array([0.5, 0.8, 0.7, 1.1, 1.5, 2.9, 3.3]) * 17  # average long time-scale extracted from the fitting
    lagtime_upper = 270  # dwell time upper bound in frames
    for item in dataset:
        if item == 'ecc0':
            x = tot_vec_overlay_clean[item + '_delx']  # major-axis position
            y = tot_vec_overlay_clean[item + '_dely']
            os.chdir(
                '/home/zezhou/McGillResearch/2019Manuscript_Analysis/additionaldata/ecc0/'
            )
            a = 0.11 * (np.loadtxt('20210205_data_27.txt')[:, 0] -
                        np.loadtxt('20210205_data_27_ct.txt')[1])
            b = 0.11 * (np.loadtxt('20210205_data_27.txt')[:, 1] -
                        np.loadtxt('20210205_data_27_ct.txt')[0])
            mask = (np.abs(a) <= 1) * (np.abs(b) <= 1)
            x = np.append(x, 6.25 * a[mask])  # newly added data
            y = np.append(y, 6.25 * b[mask])  # newly added data
            a = 0.11 * (np.loadtxt('20210205_data_28.txt')[:, 0] -
                        np.loadtxt('20210205_data_28_ct.txt')[1])
            b = 0.11 * (np.loadtxt('20210205_data_28.txt')[:, 1] -
                        np.loadtxt('20210205_data_28_ct.txt')[0])
            mask = (np.abs(a) <= 1) * (np.abs(b) <= 1)
            x = np.append(x, 6.25 * a[mask])  # newly added data
            y = np.append(y, 6.25 * b[mask])  # newly added data
            a = 0.11 * (np.loadtxt('20210208_data_2ur.txt')[:, 0] -
                        np.loadtxt('20210208_data_2ur_ct.txt')[1])
            b = 0.11 * (np.loadtxt('20210208_data_2ur.txt')[:, 1] -
                        np.loadtxt('20210208_data_2ur_ct.txt')[0])
            mask = (np.abs(a) <= 1) * (np.abs(b) <= 1)
            x = np.append(x, 6.25 * a[mask])  # newly added data
            y = np.append(y, 6.25 * b[mask])  # newly added data
            a = 0.11 * (np.loadtxt('20210208_data_2bl.txt')[:, 0] -
                        np.loadtxt('20210208_data_2bl_ct.txt')[1])
            b = 0.11 * (np.loadtxt('20210208_data_2bl.txt')[:, 1] -
                        np.loadtxt('20210208_data_2bl_ct.txt')[0])
            mask = (np.abs(a) <= 1) * (np.abs(b) <= 1)
            x = np.append(x, 6.25 * a[mask])  # newly added data
            y = np.append(y, 6.25 * b[mask])  # newly added data
            data = x
            datay = y
        else:
            data = tot_vec_overlay_clean[item + '_delx']  # major-axis position
            datay = tot_vec_overlay_clean[item + '_dely']
        state1 = (data > (np.max(data * threshold)))
        state2 = (data < (np.min(data * threshold)))
        import itertools

        def counts(sig):
            sig = list(sig)
            l = [(k, len(list(g))) for k, g in itertools.groupby(sig)]
            ton = []
            toff = []
            on_ind = []  # start indices of ton
            ind_temp = 0  # current index
            for x in l:
                if x[0] == 1:
                    ton.append(x[1])
                    on_ind.append(ind_temp)
                else:
                    toff.append(x[1])
                ind_temp += x[1]
            return ton, on_ind

        state1_time, start_ind_s1 = counts(state1)
        state2_time, start_ind_s2 = counts(state2)
        # pick up the indices of the short lag time events
        state1_timemask = (np.array(state1_time) <= dt_s2) * (
            np.array(state1_time) >= dt_s1)
        state1_longtimeind = []
        idx = 0
        for item in state1_timemask:
            if item == True:
                state1_longtimeind.append(
                    np.arange(start_ind_s1[idx] + 1,
                              start_ind_s1[idx] + state1_time[idx] + 1))
            idx += 1
        state2_timemask = (np.array(state2_time) <= dt_s2) * (
            np.array(state2_time) >= dt_s1)
        state2_longtimeind = []
        idx = 0
        for item in state2_timemask:
            if item == True:
                state2_longtimeind.append(
                    np.arange(start_ind_s2[idx] + 1,
                              start_ind_s2[idx] + state2_time[idx] + 1))
            idx += 1
        ax = fig.add_subplot(3, 3, ind + 1)
        # merge all indices into one 1D array
        total_longind = []
        for item in state1_longtimeind:
            for number in item:
                total_longind.append(number)
        for item in state2_longtimeind:
            for number in item:
                total_longind.append(number)
        total_longind = np.array(total_longind) - 1
        ax.hist2d(data[total_longind] / pixelratio,
                  datay[total_longind] / pixelratio,
                  bins=30,
                  range=[[-2, 2], [-2, 2]])
        ax.plot(np.max(data * threshold) * np.ones(10) / pixelratio,
                np.linspace(-2, 2, 10), 'r--')
        ax.plot(np.min(data * threshold) * np.ones(10) / pixelratio,
                np.linspace(-2, 2, 10), 'r--')
        ax.set_xlim([-2, 2])
        ax.set_ylim([-2, 2])
        temp_ecc = ecc[ind]
        a, b = module.ellipse_para(temp_ecc)
        theta = np.linspace(0, 2 * np.pi)
        ax.plot(a * np.cos(theta) * 1.05,
                b * np.sin(theta) * 1.05,
                'k--',
                label=dataset[ind])
        ax.set_xlabel(r'Position ($\mu m$)', fontsize=15)
        ax.set_ylabel(r'Position ($\mu m$)', fontsize=15)
        ax.legend()
        ind += 1
    plt.show()
## Radius/Theta density plot
for i in range(len(dataset)):
    ax1.plot(r_edge[i], r_density[i] / n_datapts[i])
    ### Overlap -pi-0 with 0-pi
    tmp = theta_density[i] / n_datapts[i]  # current density
    tmp[-1] = tmp[0]  # off-set the edge (otherwise the boundary will be zero)
    ind = int(3 * len(tmp) / 4)
    # off-set the density to -pi/2--3pi/2
    tmp = np.concatenate((tmp[ind:], tmp[:ind]))
    mid_ind = int(len(tmp) / 2)
    ## prepare reverse
    tmp_swap = tmp[:mid_ind][::-1] + tmp[mid_ind:]  # adding tmp(theta) + tmp(pi - theta)
    ############################
    ax2.plot(theta_edge[i][:mid_ind] + np.pi / 2, tmp_swap)  # shift x ticks to -pi/2 to pi/2; 0 is the center
ax2.legend(legend)

## ecc0.995 region
a, b = module.ellipse_para(ecc=0.995)
print('a is: ' + str(a))
print('b is: ' + str(b))
x, y = ecc0995_tp()
a = 6.25 * a
b = 6.25 * b
r = np.linspace(0, 1, 5)
theta = np.linspace(-np.pi, np.pi, 1000)
ax3.plot(x, y, '+')
for i in r:
    ax3.plot(a * np.cos(theta) * np.sqrt(i), b * np.sin(theta) * np.sqrt(i))
plt.show()
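# Illustrative sketch (not part of the original pipeline): the theta-density
# handling above is (1) a circular shift by three quarters of the array length,
# equivalent to np.roll, and (2) a fold that adds each element of the reversed
# first half to the matching element of the second half. A toy array makes
# both steps explicit.
def _example_theta_shift_and_fold():
    tmp = np.arange(8.0)  # stand-in for a theta density
    ind = int(3 * len(tmp) / 4)  # 6
    shifted = np.concatenate((tmp[ind:], tmp[:ind]))  # [6, 7, 0, 1, 2, 3, 4, 5]
    mid = int(len(shifted) / 2)
    folded = shifted[:mid][::-1] + shifted[mid:]  # [1+2, 0+3, 7+4, 6+5] = [3, 3, 11, 11]
    print(shifted, folded)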