def singlePartAnalysis(surf, img, pxlen, thres, h_tip, ar_tip, N_part, N_pxl,
                       param_string_list, binwidth_list, bin_min_list, bin_max_list):
    # get single particles
    surf_obj_list, surf_labeled = mf.identObj(surf, thres)
    img_obj_list, img_labeled = mf.identObj(img, thres)
    mf.plotThres(surf, surf_labeled, pxlen,
                 'surface (found ' + str(len(surf_obj_list)) + ' of ' + str(N_part) +
                 ' particles, thres=' + str(thres) + 'nm)')
    mf.plotThres(img, img_labeled, pxlen,
                 'image (found ' + str(len(img_obj_list)) + ' of ' + str(N_part) +
                 ' particles, thres=' + str(thres) + 'nm)')

    surf_param_list = [calcParams(obj_i, pxlen, thres) for obj_i in surf_obj_list]
    img_param_list = [calcParams(obj_i, pxlen, thres) for obj_i in img_obj_list]

    # plot histograms of parameters
    N_param = len(surf_param_list[0])
    for i in range(N_param):  # iterate over all parameters (mean, std, etc.)
        surf_dist = [surf_param_list[j][i] for j in range(len(surf_param_list))]
        img_dist = [img_param_list[j][i] for j in range(len(img_param_list))]

        fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
        bins = np.arange(min(surf_dist + img_dist),
                         max(surf_dist + img_dist) + binwidth_list[i],
                         binwidth_list[i])
        ax1.hist(surf_dist, color='b', bins=bins, edgecolor='black', linewidth=2)
        ax2.hist(img_dist, color='r', bins=bins, edgecolor='black', linewidth=2)
        ax1.set_xlim(bin_min_list[i], bin_max_list[i])
        ax2.set_xlim(bin_min_list[i], bin_max_list[i])
        ax2.set_xlabel(param_string_list[i])
        ax1.set_ylabel('frequency')
        ax2.set_ylabel('frequency')
        ax1.set_title('surface (found ' + str(len(surf_obj_list)) + ' of ' + str(N_part) +
                      ' particles, thres=' + str(thres) + 'nm)')
        ax2.set_title('image (found ' + str(len(img_obj_list)) + ' of ' + str(N_part) +
                      ' particles, thres=' + str(thres) + 'nm)')
        fig.suptitle(r'$h_{tip}=$' + str(h_tip) + r'$nm,\ a.r.=$' + str(ar_tip) +
                     r'$,\ N_{pxl}=$' + str(N_pxl) + r'$,\ l_{px}=$' + str(pxlen) +
                     r'$nm,\ thres=$' + str(thres) + r'$nm$')
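# ---------------------------------------------------------------------------
# Hedged usage sketch for singlePartAnalysis (not part of the original script).
# It assumes the module-level imports (numpy as np, matplotlib.pyplot as plt,
# the local module mf and the helper calcParams) and a surface `surf` plus a
# dilated image `img` already in scope, e.g. generated as in the commented
# MAP/TIP/IMG block further below. The parameter names in param_string_list
# and all numeric values here are illustrative assumptions, not values from
# the original code.
#
# N_pxl, pxlen, thres = 500, 1.0, 2.0          # pixels, nm per pixel, threshold [nm]
# h_tip, ar_tip, N_part = 50, 1, 100           # tip height [nm], aspect ratio, particle count
# param_string_list = ['max height [nm]', 'volume [nm^3]', 'area [nm^2]']
# binwidth_list = [0.5, 50.0, 10.0]
# bin_min_list = [0.0, 0.0, 0.0]
# bin_max_list = [20.0, 2000.0, 400.0]
# singlePartAnalysis(surf, img, pxlen, thres, h_tip, ar_tip, N_part, N_pxl,
#                    param_string_list, binwidth_list, bin_min_list, bin_max_list)
# plt.show()
# ---------------------------------------------------------------------------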
def reconstLogNorm(z, pxlen, thres, N_part, R_mu_real, R_sigma_real):
    z_obj_list, z_labeled = mf.identObj(z, thres)
    mf.plotThres(z, z_labeled, pxlen,
                 'found ' + str(len(z_obj_list)) + ' of ' + str(N_part) +
                 ' particles, thres=' + str(thres) + 'nm')

    # radius estimated as half the lateral extent of each particle's bounding box
    R_list = [np.max(np.shape(obj_i)) * pxlen / 2 for obj_i in z_obj_list]
    R_mean = np.mean(R_list)
    R_std = np.std(R_list)
    # parameters of the underlying gaussian (log-space), recalculated from sample mean and std
    R_mu = np.log(R_mean / np.sqrt(1 + R_std**2 / R_mean**2))
    R_sigma = np.sqrt(np.log(1 + (R_std / R_mean)**2))

    x = np.linspace(R_mean - 3*R_std, R_mean + 3*R_std, 1000)
    pdf = 1 / (x * R_sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x) - R_mu)**2 / (2*R_sigma**2))
    pdf_real = 1 / (x * R_sigma_real * np.sqrt(2*np.pi)) * np.exp(-(np.log(x) - R_mu_real)**2 / (2*R_sigma_real**2))

    plt.figure()
    plt.hist(R_list, bins=12, density=True, edgecolor='black', linewidth=2,
             color='grey', alpha=0.5)
    plt.plot(x, pdf, color='r', linewidth=3.5,
             label='empirical distribution (R_mu = {0:.3f}, R_sigma = {1:.3f})'.format(R_mu, R_sigma))
    plt.plot(x, pdf_real, color='green', linewidth=3.5,
             label='real distribution (R_mu = {0:.3f}, R_sigma = {1:.3f})'.format(R_mu_real, R_sigma_real))
    plt.xlabel(r'$R_{part} [nm]$')
    plt.ylabel('frequency')
    plt.title('found ' + str(len(z_obj_list)) + ' of ' + str(N_part) +
              ' particles, thres=' + str(thres) + 'nm')
    plt.legend(loc=1)
    plt.tight_layout()

    return R_mean, R_std, R_mu, R_sigma
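# ---------------------------------------------------------------------------
# Hedged usage sketch for the moment-based reconstLogNorm above (not part of
# the original script). It assumes a dilated image `img` of lognormally
# distributed spherical particles is already in scope; the numeric values of
# the true lognormal parameters below are illustrative assumptions.
#
# R_mu_real, R_sigma_real = np.log(10.0), 0.3     # mu and sigma of ln(R) used for generation
# N_part, pxlen, thres = 50, 1.0, 2.0
# R_mean_est, R_std_est, R_mu_est, R_sigma_est = reconstLogNorm(
#     img, pxlen, thres, N_part, R_mu_real, R_sigma_real)
# print('estimated mu = {:.3f} (true {:.3f}), sigma = {:.3f} (true {:.3f})'
#       .format(R_mu_est, R_mu_real, R_sigma_est, R_sigma_real))
# ---------------------------------------------------------------------------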
#rtip=s*500
htip = hspikemax * 1.02
spikedist = 2 * np.sqrt(rtip**2 - (rtip - hspikemax)**2) + 2

#MAP-------------------------------------------------
# z=mf.genFlat(Npx)
# #z=mf.genNormNoise(z,pxlen, 2, 2)
# z=mf.genHexSpikes(z,pxlen,hspikemin,hspikemax,spikedist,rspikemin,rspikemax,
#                   0.9,0.2,par='r', xmin=spikedist/2, xmax=len(z)*pxlen-spikedist/2,
#                   ymin=spikedist/2, ymax=len(z)*pxlen-spikedist/2)
# mf.plotfalsecol(z,pxlen)
# np.savetxt('map1.dat',z, header=str(pxlen)+' '+str(Npx)+' '+str(rtip)+' '+str(s)+' \n pxlen, Npx, Rtip, s')
#
# #Tip------------------------------------------------
# # careful: h/a must be >> pxlen
# if hspikemax>rtip: print('Warning: possible spikes higher than semisphere tip')
# tip=mf.genSemisphTip(pxlen,htip,r=rtip)
# np.savetxt('tip1.dat',tip, header=str(pxlen)+' '+str(Npx)+' '+str(rtip)+' '+str(s)+' \n pxlen, Npx, Rtip, s')
# mf.plotfalsecol(tip,pxlen)
#
# #IMG------------------------------------------------
# img = mph.grey_dilation(z, structure=-tip)
# img = mf.genNormNoise(img,pxlen, 4, 2)
# np.savetxt('img1.dat',img, header=str(pxlen)+' '+str(Npx)+' '+str(rtip)+' '+str(s)+' \n pxlen, Npx, Rtip, s')
# mf.plotfalsecol(img,pxlen)
#---------------------------------------------------

img = np.loadtxt('img1.dat')
img = mph.filters.median_filter(img, 5)
img_obj_list, img_labeled, img_obj_ind = mf.identObj(img, thres, Npx_min=5)
mf.plotThres(img, img_labeled, pxlen)
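# ---------------------------------------------------------------------------
# Hedged numeric check (not part of the original script) of the spike-spacing
# rule above: a spherical tip of radius rtip that descends by hspikemax from a
# spike apex spreads laterally by 2*sqrt(rtip**2 - (rtip - hspikemax)**2), so
# spikedist appears to add a 2 nm margin on top of that footprint to keep the
# imaged spikes from merging. The values below are illustrative assumptions.
#
# rtip_demo, hspike_demo = 20.0, 5.0                       # nm
# footprint = 2 * np.sqrt(rtip_demo**2 - (rtip_demo - hspike_demo)**2)
# print('tip footprint = {:.2f} nm, chosen spacing = {:.2f} nm'
#       .format(footprint, footprint + 2))
# ---------------------------------------------------------------------------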
def reconstLogNorm(z, pxlen, R_tip, sigma_noise, thres, N_part,
                   R_median_real, R_mu_real, R_sigma_real):
    # z_obj_list, z_labeled, z_obj_ind = mf.identObj(z, thres)
    # mf.plotThres(z, z_labeled, pxlen, 'found ' + str(len(z_obj_list)) + ' of ' + str(N_part) + ' particles, thres=' + str(thres)+'nm')
    z_obj_list, z_labeled = mf.identObj_Laplace(z, thres)
    mf.plotThres(z, z_labeled, pxlen,
                 'found ' + str(len(z_obj_list)) + ' of ' + str(N_part) +
                 ' particles, thres=' + str(thres) + 'nm')

    # radius estimated as half the maximum height of each particle
    R_list = np.array([np.max(obj_i) / 2 for obj_i in z_obj_list])
    logR_list = np.log(R_list)

    # fit a gaussian to ln(R); for lognormal radii this yields mu and sigma directly
    R_mu_fit, R_sigma_fit = norm.fit(logR_list, loc=R_mu_real, scale=R_sigma_real)
    R_median_fit = np.exp(R_mu_fit)

    x = np.linspace(R_mu_fit - 3*R_sigma_fit, R_mu_fit + 3*R_sigma_fit, 1000)
    pdf_fit = norm.pdf(x, R_mu_fit, R_sigma_fit)
    pdf_real = norm.pdf(x, R_mu_real, R_sigma_real)

    plt.figure()
    plt.hist(logR_list, bins=12, density=True, edgecolor='black', linewidth=2,
             color='grey', alpha=0.5)
    plt.plot(x, pdf_fit, color='r', linewidth=3.5,
             label='empirical distribution (R_median = {0:.3f}, mu = {1:.3f}, sigma = {2:.3f})'.format(
                 R_median_fit, R_mu_fit, R_sigma_fit))
    plt.plot(x, pdf_real, color='green', linewidth=3.5,
             label='real distribution (R_median = {0:.3f}, mu = {1:.3f}, sigma = {2:.3f})'.format(
                 R_median_real, R_mu_real, R_sigma_real))
    plt.xlabel(r'$\ln(R_{part}/nm)$')
    plt.ylabel('frequency')
    plt.title('gaussian fit, ' + r'$\sigma_{noise} = $' + str(sigma_noise) + ' nm, ' +
              r'$R_{tip} = $' + str(R_tip) + ' nm, ' +
              r'$\frac{\mu_{fit} - \mu_{real}}{\mu_{real}} = $' +
              '{:.3f}'.format((R_mu_fit - R_mu_real) / R_mu_real) +
              r', $\frac{\sigma_{fit} - \sigma_{real}}{\sigma_{real}} = $' +
              '{:.3f}'.format((R_sigma_fit - R_sigma_real) / R_sigma_real))
    plt.legend(loc=1)
    plt.tight_layout()

    # standard deviation of the corresponding lognormal distribution
    R_std_fit = np.sqrt((np.exp(R_sigma_fit**2) - 1) * np.exp(2 * R_mu_fit) * np.exp(R_sigma_fit**2))

    x_log = np.linspace(R_median_fit - 3*R_std_fit, R_median_fit + 3*R_std_fit, 1000)
    pdf_log_fit = lognorm.pdf(x_log, scale=R_median_fit, s=R_sigma_fit)
    pdf_log_real = lognorm.pdf(x_log, scale=R_median_real, s=R_sigma_real)

    plt.figure()
    plt.hist(R_list, bins=12, density=True, edgecolor='black', linewidth=2,
             color='grey', alpha=0.5)
    plt.plot(x_log, pdf_log_fit, color='r', linewidth=3.5,
             label='empirical distribution (R_median = {0:.3f}, sigma = {1:.3f})'.format(
                 R_median_fit, R_sigma_fit))
    plt.plot(x_log, pdf_log_real, color='green', linewidth=3.5,
             label='real distribution (median = {0:.3f}, sigma = {1:.3f})'.format(
                 R_median_real, R_sigma_real))
    plt.xlabel(r'$R_{part} [nm]$')
    plt.ylabel('frequency')
    plt.title('lognormal reconstructed from the fit, ' + r'$\sigma_{noise} = $' + str(sigma_noise) +
              ' nm, ' + r'$R_{tip} = $' + str(R_tip) + ' nm')
    plt.legend(loc=1)
    plt.tight_layout()

    return R_median_fit, R_mu_fit, R_sigma_fit
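# ---------------------------------------------------------------------------
# Hedged usage sketch for the gaussian-fit variant of reconstLogNorm above
# (not part of the original script). It assumes `img` and `pxlen` are already
# defined as in the loading/filtering block earlier, and that the true radii
# follow a lognormal with the parameters given here; all numeric values are
# illustrative assumptions.
#
# R_mu_real = np.log(10.0)                 # mu of ln(R) used to generate the spheres
# R_sigma_real = 0.3                       # sigma of ln(R)
# R_median_real = np.exp(R_mu_real)
# R_tip, sigma_noise, thres, N_part = 20.0, 0.5, 2.0, 50
# R_median_fit, R_mu_fit, R_sigma_fit = reconstLogNorm(
#     img, pxlen, R_tip, sigma_noise, thres, N_part,
#     R_median_real, R_mu_real, R_sigma_real)
# plt.show()
# ---------------------------------------------------------------------------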