import math

def eps_eq_core(a1, a2, eps, out, prefix):
  if (hasattr(a1, "__len__")):  # traverse sequences element by element
    assert len(a1) == len(a2)
    for i in xrange(len(a1)):
      if (not eps_eq_core(a1[i], a2[i], eps, out, prefix+" ")):
        return False
    return True
  if (isinstance(a1, complex)):  # compare real and imaginary parts separately
    if (not eps_eq_core(a1.real, a2.real, eps, out, prefix+"real ")):
      return False
    if (not eps_eq_core(a1.imag, a2.imag, eps, out, prefix+"imag ")):
      return False
    return True
  ok = True
  if (a1 == 0 or a2 == 0):
    # near zero: compare absolute difference directly
    if (abs(a1-a2) > eps):
      ok = False
  else:
    # otherwise rescale both values by a common power of e before comparing
    l1 = round(math.log(abs(a1)))
    l2 = round(math.log(abs(a2)))
    m = math.exp(-max(l1, l2))
    if (abs(a1*m-a2*m) > eps):
      ok = False
  if (out is not None):
    # verbose mode: echo both values (flagging mismatches) and keep traversing
    annotation = ""
    if (not ok):
      annotation = " eps_eq ERROR"
    print >> out, prefix + str(a1) + annotation
    print >> out, prefix + str(a2) + annotation
    print >> out, prefix.rstrip()
    return True
  return ok
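# Usage sketch (illustrative values, not from the original source): compare two
# nested structures element-wise.  With out=None the result is a plain boolean;
# with a file-like out the traversal is echoed and mismatches are annotated.
import sys
a = [1.0, [2.0, 3.0 + 4.0j]]
b = [1.0, [2.0000001, 3.0 + 4.0000001j]]
print eps_eq_core(a, b, 1e-5, None, "")   # expected: True
eps_eq_core(a, b, 1e-5, sys.stdout, "")   # prints each compared pair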
def preprocess_image(h5file, flag):
  # assumes: import h5py; import math as smath; from scitbx.array_family import flex
  f = h5py.File(h5file, 'r')
  data = f['trans_data'][:]
  f.close()
  p_list = []
  for i in range(0, 11):
    for j in range(0, 11):
      value = data[i][j]
      p_list.append(value)
  v_min = 0
  p_max = max(p_list)
  p_min = min(p_list)
  print p_max, smath.log(float(p_max))
  if p_min > 0.0:
    add_value = 0
  else:
    # shift all values so the smallest one becomes 1 before taking the log
    add_value = (0 - p_min) + 1
  print "==add_value==", add_value
  image = flex.vec3_double()
  original = open(h5file.split(".")[0] + '_original.dat', 'w')
  for x in range(0, 11):
    for y in range(0, 11):
      if flag == 0:    # raw intensities
        value = data[x][y]
      elif flag == 1:  # log scale; small log values (4, 5, 6, 7, ...) are hard to tell apart
        value = smath.log(float(data[x][y]) + add_value)
      elif flag == 2:  # linear rescale to [v_min, 255]
        v_max = 255
        print "=------v_max------=", v_max
        value = round((v_max - v_min) * (data[x][y] - p_min) / (p_max - p_min) + v_min)
      else:            # linear rescale to [v_min, max/10]
        v_max = int(max(p_list) / 10)
        print "=------v_max------=", v_max
        value = round((v_max - v_min) * (data[x][y] - p_min) / (p_max - p_min) + v_min)
      image.append([x, y, value])
      print >> original, x, y, value
  original.close()
  return image
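# Usage sketch (the file name is hypothetical): flag selects the intensity
# mapping -- 0 = raw values, 1 = log scale, 2 = linear rescale to [0, 255],
# anything else = linear rescale to [0, max/10].
image = preprocess_image("transfered7.h5", 2)
print image.size()   # 11 x 11 grid -> 121 (x, y, value) triples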
def q2(self, rmax):
  a = 3.019
  b = -1.28945
  x = float(rmax)
  if x < 10:
    x = 10.0
  if x > 200:
    x = 200.0
  x = smath.log(x)
  y = a + b*x
  return smath.exp(y)
def q1(self, rmax):
  a = -3.724
  b = 1.269
  c = -0.2639
  x = float(rmax)
  if x < 10:
    x = 10.0
  if x > 200:
    x = 200.0
  x = smath.log(x)
  y = a + b*x + c*x*x
  return smath.exp(y)
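# The two methods above evaluate empirical fits in log space:
#   q2(rmax) = exp(a + b*ln(rmax))                 with rmax clamped to [10, 200]
#   q1(rmax) = exp(a + b*ln(rmax) + c*ln(rmax)^2)
# A minimal standalone sketch (plain functions instead of methods; the constants
# are copied from above, and only the standard-library math module is assumed):
import math as smath

def q1_standalone(rmax):
  x = smath.log(min(max(float(rmax), 10.0), 200.0))
  return smath.exp(-3.724 + 1.269*x - 0.2639*x*x)

def q2_standalone(rmax):
  x = smath.log(min(max(float(rmax), 10.0), 200.0))
  return smath.exp(3.019 - 1.28945*x)

for r in (10, 50, 200):
  print r, q1_standalone(r), q2_standalone(r)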
def precision_approx_equal(self, other, precision=24):
  # Use concepts from IEEE-754 to determine if the difference between
  # two numbers is within floating point error. Precision is expressed in
  # bits (single precision=24; double precision=53). Not within scope to
  # do this for double precision literals; only interested in the case
  # where the data are from a ~single precision digital-analog converter.
  if precision > 52:
    raise ValueError()
  if self == other:
    return True
  if (self > 0.) != (other > 0.):
    return False
  # compute the exponent
  import math
  T = abs(self)
  Np = math.floor(precision - math.log(T, 2))
  significand = int(T * 2**Np)
  val1 = significand/(2**Np)      # nearest floating point representation of self
  val2 = (significand+1)/(2**Np)  # next-nearest
  return abs(T - abs(other)) <= abs(val1 - val2)
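# Usage sketch (illustrative values): with the default 24-bit (single) precision,
# a difference below the 24-bit significand resolution compares equal, a larger
# one does not.  The function is written method-style, so "self" is simply the
# first number here.
a = 1.0
print precision_approx_equal(a, 1.0 + 2.0**-26)   # expected: True
print precision_approx_equal(a, 1.0 + 2.0**-20)   # expected: False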
def tst_image(n, N=100, filename=None):
  # assumes: import time; import math as smath; from scitbx.array_family import flex;
  # from scitbx import math; read_data, generate_image, generate_image2, from_I_to_C2,
  # calc_integral_triple_zernike2d and calc_Cnm_from_Inm are defined elsewhere in this module
  nmax = n
  nmax0 = 8
  nl_array0 = math.nl_array(nmax0)
  nls0 = nl_array0.nl()
  nl_array = math.nl_array(nmax)
  nls = nl_array.nl()
  if (filename is not None):
    # NOTE: this branch never defines "moments", which is used below,
    # so the test is effectively only exercised with filename=None
    image = read_data(filename)
  else:
    moments = flex.random_double(nls0.size())
    # moments = flex.double(nls.size(), 0)
    # moments[3] = 1.0
    # moments[7] = 1.0
    image = generate_image(n, moments)
    orig = open('original.dat', 'w')
    for pt in image:
      print >> orig, pt[0], pt[1], pt[2]
    orig.close()
  Nq = 100
  Nphi = 100
  c2_image = from_I_to_C2(nmax, image, Nq, Nphi)
  c2_output = open('c2.dat', 'w')
  for pt in c2_image:
    print >> c2_output, pt[0], pt[1], pt[2]
  c2_output.close()
  coef_table = calc_integral_triple_zernike2d(nmax)
  moments[0] = 0
  new_mom = moments.concatenate(flex.double(nls.size() - nls0.size(), 0))
  nmax4c2 = 2 * nmax
  Inm = math.nl_c_array(nmax4c2)
  Inm.load_coefs(nls, new_mom)
  cnm = calc_Cnm_from_Inm(Inm, coef_table, nmax4c2)
  cnm_coef = cnm.coefs()
  print "#cnm[0]", cnm_coef[0]
  # cnm_coef[0] = 0

  ### calculate 2d zernike moments for C2_image ###
  image = c2_image
  NP = int(smath.sqrt(image.size()))
  N = NP / 2
  grid_2d = math.two_d_grid(N, nmax)
  grid_2d.clean_space(image)
  grid_2d.construct_space_sum()
  zernike_2d_mom = math.two_d_zernike_moments(grid_2d, nmax)
  c2_moments = zernike_2d_mom.moments()
  # coefs = flex.real( c2_moments )
  # cnm_coef = flex.real( c2_moments )
  cnm_coef = c2_moments
  c2_reconst = generate_image2(n, cnm_coef, c2_image, Nq)
  c2_r = open('c2r.dat', 'w')
  for pt in c2_reconst:
    print >> c2_r, pt[0], pt[1], pt[2]
  c2_r.close()
  ls_score = 0
  np_tot = c2_image.size()
  for p1, p2 in zip(c2_image, c2_reconst):
    ls_score += (p1[2] - p2[2])**2.0
  print nmax, nls.size(), ls_score, np_tot * smath.log(ls_score / np_tot), "SUM"
  for nl, c2m in zip(nls, cnm_coef):
    print nl, c2m
  exit()

  # NOTE: everything below is unreachable because of the exit() above;
  # it is kept as in the original for reference.
  ### calculate 2d zernike moments for C2_image ###
  image = c2_image
  NP = int(smath.sqrt(image.size()))
  N = NP / 2
  grid_2d = math.two_d_grid(N, nmax)
  grid_2d.clean_space(image)
  grid_2d.construct_space_sum()
  tt2 = time.time()
  zernike_2d_mom = math.two_d_zernike_moments(grid_2d, nmax)
  c2_moments = zernike_2d_mom.moments()
  # coefs = flex.real( c2_moments )
  coefs = (c2_moments)
  for nl, c2m, c2mm in zip(nls, cnm_coef, coefs):
    if (nl[0] / 2 * 2 == nl[0]):  # even n only
      print c2m, c2mm
  coefs = flex.real(moments)
  nl_array.load_coefs(nls, coefs)
  for nl, c in zip(nls, moments):
    if (abs(c) < 1e-3):
      c = 0
    print nl, c
  print

  # reconstruct the image from the moments on an NP x NP grid
  reconst = flex.complex_double(NP**2, 0)
  for nl, c in zip(nls, moments):
    n = nl[0]
    l = nl[1]
    if (l > 0):
      c = c * 2.0
    rap = math.zernike_2d_polynome(n, l)
    i = 0
    for x in range(0, NP):
      x = x - N
      for y in range(0, NP):
        y = y - N
        rr = smath.sqrt(x * x + y * y) / N
        if rr > 1.0:
          value = 0.0
        else:
          tt = smath.atan2(y, x)
          value = rap.f(rr, tt)
        reconst[i] = reconst[i] + value * c
        i = i + 1
  rebuilt = open('rebuilt.dat', 'w')
  i = 0
  for x in range(0, NP):
    for y in range(0, NP):
      value = reconst[i].real
      print >> rebuilt, x, y, image[i][2], value
      i = i + 1
  rebuilt.close()
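# Usage sketch (hypothetical): with filename=None the test builds an image from
# random moments, writes original.dat, c2.dat and c2r.dat, and prints the
# least-squares score; all helper functions and scitbx modules noted above must
# be available for this to run.
tst_image(8)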
def preprocess_image(h5file):
  # assumes: import h5py; import math as smath; from scitbx.array_family import flex
  f = h5py.File(h5file, 'r')
  data = f['trans_data'][:]
  f.close()
  p_list = []
  for i in range(0, 11):
    for j in range(0, 11):
      value = data[i][j]
      p_list.append(value)
  v_min = 0
  p_max = max(p_list)
  p_min = min(p_list)
  print p_max, smath.log(float(p_max))
  if p_min > 0.0:
    add_value = 0
  else:
    # shift all values so the smallest one becomes 1 (for the log branch)
    add_value = (0 - p_min) + 1
  image = flex.vec3_double()
  original = open(h5file.split(".")[0] + '_original.dat', 'w')
  for x in range(0, 11):
    for y in range(0, 11):
      value = data[x][y]
      image.append([x, y, value])
      print >> original, x, y, value
  original.close()
  return image

def tst_2d_zernike_mom(n, l, filename):
  # image = preprocess_image(h5file)
  image = ImageToDat(filename)   # ImageToDat is defined elsewhere
  feature_matrix = []
  NP = int(smath.sqrt(image.size()))
  N = int(NP / 2)
  # print "=====", NP, N
  grid_2d = math.two_d_grid(N, nmax)
  grid_2d.clean_space(image)
  grid_2d.construct_space_sum()
  zernike_2d_mom = math.two_d_zernike_moments(grid_2d, nmax)
  moments = zernike_2d_mom.moments()
  coefs = flex.real(moments)
  nl_array = math.nl_array(nmax)
  nls = nl_array.nl()
  # nl_array.load_coefs( nls, coefs )
  # lfg = math.log_factorial_generator(nmax)
  for nl, c in zip(nls, moments):
    if (abs(c) < 1e-3):
      c = 0
    feature_matrix.append(c.real)
    # print nl, c
  return feature_matrix

def DatToMatrix(filename):
  f = open(filename)
  # res = np.zeros((500, 500), dtype=np.float)
  res = np.zeros((11, 11), dtype=np.float)
  for line in f.readlines():
    data = line.split(" ")
    x = int(data[0])
    y = int(data[1])
    value = float(data[2])
    res[x][y] = value
  f.close()
  return res

def MatrixToImage(data):
  data = data * 0.1  # rescale so the shape is easier to see
  new_im = Image.fromarray(data)
  return new_im

def H5ToMatrix(filename):
  f = h5py.File(filename, 'r')
  data = f['trans_data'][:]
  return data

def plot_h5(filename):
  data = H5ToMatrix(filename)
  im = MatrixToImage(data)
  im.show()

if __name__ == "__main__":
  # assumes: import os, sys, time; import numpy as np; from PIL import Image;
  # a local Clustering module providing hcluster() and draw_dendrogram()
  args = sys.argv[1:]
  cluster_num = args[0]   # number of clusters requested on the command line
  t_s = time.time()
  n = 2
  l = 2
  nmax = max(5, n)
  # create a list of images
  path = '/Users/wyf/Documents/SFX/NoGap'
  # path = '/Users/wyf/Documents/crop_image'
  # make sure the list is in numerical order (1, 2, 3, ... 11, ... 99, 100, ... 1000, 1001, ...),
  # since each matching node's id depends on the files' order
  files = os.listdir(path)
  files.remove('.DS_Store')
  files.sort(key=lambda x: int(x.split(".")[0].split("transfered")[1]))
  h5_list = [os.path.join(path, f) for f in files if f.endswith('7.h5')]
  # imlist = []
  # for p in range(len(h5_list)):
  #   # pylab.subplot(4, 5, p + 1)
  #   h5name = h5_list[p]
  #   mat = H5ToMatrix(h5name)
  #   im = MatrixToImage(mat)
  #   imlist.append(im)
  t1 = time.time()
  features = np.zeros([len(h5_list), 12], dtype=np.float)
  # when nmax = 5 a moment vector has 21 items, but the real part is the same for l < 0,
  # so only l >= 0 is kept: 12 items (see the sanity-check sketch after this block)
  for i, f in enumerate(h5_list):
    features[i] = tst_2d_zernike_mom(n, l, f)
  # print "features Matrix:"
  # print features
  t2 = time.time()
  print "time used for getting features matrix: ", t2 - t1

  t3 = time.time()
  tree_list = Clustering.hcluster(cluster_num, features)
  # tree_list_plot = Clustering.hcluster(cluster_num, features)
  t4 = time.time()
  print "time used for clustering: ", t4 - t3

  # print the clustering outcome and write it to a .dat file
  print "cluster outcome:"
  t5 = time.time()
  res = open(str(cluster_num) + '_ac_res.dat', 'w')
  # iterate over a copy: removing entries from tree_list while looping over it
  # directly would skip elements
  for root in list(tree_list):
    print root.get_cluster_elements()
    print >> res, root.get_cluster_elements()
    tree_list.remove(root)
  res.close()
  t6 = time.time()
  # print "time used for generating result: ", t6 - t5

  # for i in range(len(tree_list_plot)):
  #   if len(tree_list_plot[i].get_cluster_elements()) != 1:
  #     clusters = tree_list_plot[i].extract_clusters(0.2 * tree_list_plot[i].distance)
  #     Clustering.draw_dendrogram(tree_list_plot[i], imlist, filename='sunset.pdf')
  #   else:
  #     im.show(imlist[i])
  t_e = time.time()
  print "total time used: ", t_e - t_s
  print "OK"
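# Sanity-check sketch (not in the original script) for the feature-vector length
# used above: for nmax = 5 the 2D Zernike indices (n, l) with 0 <= l <= n and
# n - l even number 12 (21 when negative l is counted as well), matching the
# second dimension of the "features" array.
nmax_check = 5
count = sum(1 for nn in range(nmax_check + 1)
              for ll in range(nn + 1) if (nn - ll) % 2 == 0)
print count   # expected: 12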
def write_saxs_image(self, file_name):
  pp = open(file_name, 'w')
  for ii in range(self.np):
    for jj in range(self.np):
      # offset by 1e-12 so that empty pixels do not break the log
      print >> pp, ii, jj, smath.log(self.saxs_image[(ii, jj)] + 1e-12)
    print >> pp   # blank line between rows
  pp.close()
def write_scattering_pattern(data, np, outobj):
  for ii in range(np):
    for jj in range(np):
      print >> outobj, ii, jj, smath.log(data[(ii, jj)])
    print >> outobj
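# Usage sketch (illustrative data, not from the original source): "data" can be
# any mapping from (i, j) tuples to strictly positive intensities -- zero values
# would make the log fail, which is why write_saxs_image above adds 1e-12.
# Assumes "import math as smath" and "import sys".
import sys
demo = {(0, 0): 1.0, (0, 1): 10.0, (1, 0): 100.0, (1, 1): 1000.0}
write_scattering_pattern(demo, 2, sys.stdout)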