def ref_ali3dm_new(refdata):
    """Filter a set of reference volumes to the resolution of the best one.

    refdata layout: [numref, outdir, fscc, total_iter, varf, mask, ali50S].
    Reads vol%04d.hdf from outdir, normalizes and thresholds each volume,
    matches every volume's rotational power spectrum to that of the volume
    with the best FSC, optionally aligns all volumes against the 50S mask,
    applies an optional Fourier variance filter, and writes volf%04d.hdf.
    """
    from utilities import print_msg
    from utilities import model_circle, get_im
    from filter import filt_tanl, filt_gaussl, filt_table
    from morphology import threshold
    from fundamentals import rops_table
    from alignment import ali_nvol
    from math import sqrt
    import os
    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    varf = refdata[4]
    mask = refdata[5]
    ali50S = refdata[6]
    if fscc is None:
        # No FSC curves available: fall back to fixed filter parameters.
        flmin = 0.38
        aamin = 0.1
        idmin = 0
    else:
        # minfilt is defined elsewhere in this module; it returns the
        # minimum cut-off over all FSC curves and the index of the best
        # (highest-resolution) volume.
        flmin, aamin, idmin = minfilt(fscc)
        aamin /= 2.0
    msg = "Minimum tangent filter derived from volume %2d: cut-off frequency = %10.3f, fall-off = %10.3f\n" % (
        idmin, flmin, aamin)
    print_msg(msg)
    vol = []
    for i in xrange(numref):
        vol.append(get_im(os.path.join(outdir, "vol%04d.hdf" % total_iter), i))
        # Normalize to zero mean / unit variance under the mask, mask,
        # then clip negative densities.
        stat = Util.infomask(vol[i], mask, False)
        vol[i] -= stat[0]
        vol[i] /= stat[1]
        vol[i] *= mask
        vol[i] = threshold(vol[i])
        del stat
    # Match every other volume's rotational power spectrum to the best one.
    reftab = rops_table(vol[idmin])
    for i in xrange(numref):
        if (i != idmin):
            vtab = rops_table(vol[i])
            ftab = [None] * len(vtab)
            for j in xrange(len(vtab)):
                ftab[j] = sqrt(reftab[j] / vtab[j])
            vol[i] = filt_table(vol[i], ftab)
    if ali50S:
        # NOTE(review): reads "mask-50S.spi" from the current directory —
        # confirm the working directory when this path is enabled.
        vol = ali_nvol(vol, get_im("mask-50S.spi"))
    for i in xrange(numref):
        if (not (varf is None)):
            vol[i] = vol[i].filter_by_image(varf)
        filt_tanl(vol[i], flmin, aamin).write_image(
            os.path.join(outdir, "volf%04d.hdf" % total_iter), i)
def ref_ali3dm_new( refdata ):
    """Filter a set of reference volumes to the resolution of the best one.

    refdata layout: [numref, outdir, fscc, total_iter, varf, mask, ali50S].
    Each vol%04d.hdf is normalized and thresholded; every volume's
    rotational power spectrum is matched to the best-FSC volume's; the set
    is optionally aligned against the 50S mask and variance-filtered, then
    tangent low-pass filtered and written out as volf%04d.hdf.
    """
    from utilities import print_msg
    from utilities import model_circle, get_im
    from filter import filt_tanl, filt_gaussl, filt_table
    from morphology import threshold
    from fundamentals import rops_table
    from alignment import ali_nvol
    from math import sqrt
    import os
    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    varf = refdata[4]
    mask = refdata[5]
    ali50S = refdata[6]
    if fscc is None:
        # Fixed defaults when no FSC curves are supplied.
        flmin = 0.38
        aamin = 0.1
        idmin = 0
    else:
        # minfilt (module-level helper) yields the minimum cut-off and the
        # index of the best volume.
        flmin, aamin, idmin = minfilt( fscc )
        aamin /= 2.0
    msg = "Minimum tangent filter derived from volume %2d: cut-off frequency = %10.3f, fall-off = %10.3f\n"%(idmin, flmin, aamin)
    print_msg(msg)
    vol = []
    for i in xrange(numref):
        vol.append(get_im( os.path.join(outdir, "vol%04d.hdf"%total_iter), i ))
        # Zero-mean / unit-variance under the mask, then clip negatives.
        stat = Util.infomask( vol[i], mask, False )
        vol[i] -= stat[0]
        vol[i] /= stat[1]
        vol[i] *= mask
        vol[i] = threshold(vol[i])
        del stat
    # Power-spectrum matching against the best volume.
    reftab = rops_table( vol[idmin] )
    for i in xrange(numref):
        if(i != idmin):
            vtab = rops_table( vol[i] )
            ftab = [None]*len(vtab)
            for j in xrange(len(vtab)):
                ftab[j] = sqrt( reftab[j]/vtab[j] )
            vol[i] = filt_table( vol[i], ftab )
    if ali50S:
        vol = ali_nvol(vol, get_im( "mask-50S.spi" ))
    for i in xrange(numref):
        if(not (varf is None) ):
            vol[i] = vol[i].filter_by_image( varf )
        filt_tanl( vol[i], flmin, aamin ).write_image(
            os.path.join(outdir, "volf%04d.hdf" % total_iter), i )
def filt_vols( vols, fscs, mask3D ):
    """Filter a list of volumes to a common (minimum) resolution.

    For each volume a tanh filter is fitted to its FSC curve; the volume
    with the highest cut-off is filtered at (flmin - 0.05, aamin) and its
    rotational power spectrum becomes the target every volume is scaled
    to, after which all volumes are masked in place and returned.

    NOTE(review): aamin and idmax are only bound inside the fl<flmin /
    fl>flmax branches — assumes fit_tanh always returns fl in (−1, 1) so
    the first iteration binds both; confirm.
    """
    from math import sqrt
    from filter import fit_tanh, filt_tanl, filt_table
    from fundamentals import rops_table
    from morphology import threshold
    flmin = 1.0
    flmax = -1.0
    nvol = len(vols)
    for i in xrange(nvol):
        fl, aa = fit_tanh( fscs[i] )
        if (fl < flmin):
            flmin = fl
            aamin = aa
        if (fl > flmax):
            flmax = fl
            idmax = i
    print " Filter tanl, parameters: ",flmin-0.05, " ", aamin
    # Reference power spectrum from the best-resolved volume, slightly
    # over-filtered (cut-off lowered by 0.05).
    volmax = vols[idmax]
    volmax = filt_tanl( volmax, flmin-0.05, aamin )
    pmax = rops_table( volmax )
    for i in xrange(nvol):
        # Scale each volume's radial power to match the reference.
        ptab = rops_table( vols[i] )
        for j in xrange( len(ptab) ):
            ptab[j] = sqrt( pmax[j]/ptab[j] )
        vols[i] = filt_table( vols[i], ptab )
        #stat = Util.infomask( vols[i], mask3D, False )
        #volf -= stat[0]
        Util.mul_img( vols[i], mask3D )
        #volf = threshold( volf )
    return vols
def constant( ref_data ):
    """2D reference preparation with a fixed tangent low-pass filter.

    ref_data: [mask, center flag, raw average, fsc result].  The mean
    inside a centered circle of radius nx//2-2 is subtracted from the raw
    average, which is then filtered with filt_tanl(0.4, 0.1).  No
    centering is performed; the returned shift is always [0, 0].
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    from morphology import threshold
    from utilities import model_circle
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    cutoff, falloff = 0.4, 0.1
    size = ref_data[2].get_xsize()
    circle = model_circle(size // 2 - 2, size, size)
    # Subtract the in-circle mean in place (ref_data[2] is modified).
    mean_in_circle = Util.infomask(ref_data[2], circle, False)[0]
    ref_data[2] -= mean_in_circle
    filtered = filt_tanl(ref_data[2], cutoff, falloff)
    return filtered, [0.0, 0.0]
def reference4(ref_data):
    """3D reference preparation with a hard-coded Fourier-space filter.

    ref_data: [mask, center flag, raw average, fsc result].  The volume is
    normalized under the mask, thresholded, then low-pass filtered with a
    tangent filter (0.35/0.2) followed by a Gaussian filter (0.3); if the
    center flag is 1 it is recentred by its phase center of gravity.
    Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl, filt_gaussl
    from fundamentals import fshift, fft
    from morphology import threshold
    #print_msg("reference4\n")
    cs = [0.0] * 3
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    volf = threshold(volf)
    #Util.mul_img(volf, ref_data[0])
    #fl, aa = fit_tanh(ref_data[3])
    # NOTE(review): fl/aa are assigned but never used — the filter call
    # below hard-codes 0.35/0.2/0.3.
    fl = 0.25
    aa = 0.1
    #msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    #print_msg(msg)
    volf = fft(filt_gaussl(filt_tanl(fft(volf), 0.35, 0.2), 0.3))
    if ref_data[1] == 1:
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n" % (
            cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    return volf, cs
def julien( ref_data ):
    """2D reference preparation with a slowly opening tangent low-pass.

    ref_data: [mask, center flag, raw average, fsc result].  The global
    iteration counter wraps every 50 calls; the cut-off ramps from 0.1 by
    0.003 per call, capped at 0.4.  If the center flag is positive the
    average is centered with center_2D.  Returns (tavg, [sx, sy]).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    global ref_ali2d_counter
    ref_ali2d_counter = (ref_ali2d_counter + 1) % 50
    print_msg("ref_ali2d #%6d\n" % (ref_ali2d_counter))
    cutoff = min(0.1 + ref_ali2d_counter * 0.003, 0.4)
    falloff = 0.1
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (cutoff, falloff))
    tavg = filt_tanl(ref_data[2], cutoff, falloff)
    shifts = [0.0, 0.0]
    if ref_data[1] > 0:
        tavg, shifts[0], shifts[1] = center_2D(tavg, ref_data[1])
        print_msg("Center x = %10.3f, y = %10.3f\n" % (shifts[0], shifts[1]))
    return tavg, shifts
def constant(ref_data):
    """2D reference preparation with a fixed tangent low-pass filter.

    ref_data: [mask, center flag, raw average, fsc result].  Subtracts the
    mean measured inside a centered circle of radius nx//2-2 (modifying
    ref_data[2] in place), applies filt_tanl(0.4, 0.1), and returns
    (tavg, [0.0, 0.0]); no centering is performed.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    #print_msg("steady #%6d\n"%(ref_ali2d_counter))
    fl = 0.4
    aa = 0.1
    #msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    #print_msg(msg)
    from utilities import model_circle
    nx = ref_data[2].get_xsize()
    stat = Util.infomask(ref_data[2], model_circle(nx // 2 - 2, nx, nx), False)
    ref_data[2] -= stat[0]
    #tavg = filt_tanl(threshold(ref_data[2]), fl, aa)
    tavg = filt_tanl(ref_data[2], fl, aa)
    cs = [0.0] * 2
    return tavg, cs
def ref_ali2d_c(ref_data):
    """2D reference preparation with a ramping tangent low-pass filter.

    ref_data: [mask, center flag, raw average, fsc result].  The cut-off
    grows from 0.1 by 0.003 per iteration of the global counter, capped at
    0.4.  If the center flag is positive the average is centered with
    center_2D.  Returns (tavg, [sx, sy]).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("ref_ali2d #%6d\n" % (ref_ali2d_counter))
    freq = min(0.1 + ref_ali2d_counter * 0.003, 0.4)
    fall = 0.1
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (freq, fall))
    avg = filt_tanl(ref_data[2], freq, fall)
    center = [0.0, 0.0]
    if ref_data[1] > 0:
        avg, center[0], center[1] = center_2D(avg, ref_data[1])
        print_msg("Center x = %10.3f, y = %10.3f\n" % (center[0], center[1]))
    return avg, center
def reference4( ref_data ):
    """3D reference preparation with a hard-coded Fourier-space filter.

    ref_data: [mask, center flag, raw average, fsc result].  Normalizes
    under the mask, thresholds, applies tangent (0.35/0.2) then Gaussian
    (0.3) low-pass filters, and optionally recenters by phase center of
    gravity.  Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl, filt_gaussl
    from fundamentals import fshift, fft
    from morphology import threshold
    #print_msg("reference4\n")
    cs = [0.0]*3
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    volf = threshold(volf)
    #Util.mul_img(volf, ref_data[0])
    #fl, aa = fit_tanh(ref_data[3])
    # NOTE(review): fl/aa are unused — the filter call below hard-codes
    # its parameters.
    fl = 0.25
    aa = 0.1
    #msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    #print_msg(msg)
    volf = fft(filt_gaussl(filt_tanl(fft(volf),0.35,0.2),0.3))
    if ref_data[1] == 1:
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    return volf, cs
def ref_7grp( ref_data ):
    """Prepare the 3D reference for the 7-group refinement.

    ref_data: [mask, center flag, raw average, fsc result].  The volume is
    normalized, thresholded, masked, tangent low-pass filtered at the
    FSC-fitted cut-off, optionally centered by phase center of gravity,
    and finally sharpened with an inverse Gaussian (B-factor 10.0) filter.
    Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl, filt_gaussinv
    from fundamentals import fshift
    from morphology import threshold
    from math import sqrt
    # BUG FIX: cs was only assigned inside the centering branch (its
    # initialization was commented out), so the return below raised
    # NameError whenever ref_data[1] != 1.
    cs = [0.0]*3
    stat = Util.infomask(ref_data[2], None, False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    volf = Util.muln_img(threshold(volf), ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    if(ref_data[1] == 1):
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    # Use the named constant instead of a duplicated magic number.
    B_factor = 10.0
    volf = filt_gaussinv( volf, B_factor )
    return volf,cs
def ref_7grp(ref_data):
    """Prepare the 3D reference for the 7-group refinement.

    ref_data: [mask, center flag, raw average, fsc result].  The volume is
    normalized, thresholded, masked, tangent low-pass filtered at the
    FSC-fitted cut-off, optionally centered by phase center of gravity,
    and finally sharpened with an inverse Gaussian (B-factor 10.0) filter.
    Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl, filt_gaussinv
    from fundamentals import fshift
    from morphology import threshold
    from math import sqrt
    # BUG FIX: cs was only assigned inside the centering branch (its
    # initialization was commented out), so the return below raised
    # NameError whenever ref_data[1] != 1.
    cs = [0.0] * 3
    stat = Util.infomask(ref_data[2], None, False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    volf = Util.muln_img(threshold(volf), ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    if (ref_data[1] == 1):
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n" % (
            cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    # Use the named constant instead of a duplicated magic number.
    B_factor = 10.0
    volf = filt_gaussinv(volf, B_factor)
    return volf, cs
def helical2( ref_data ):
    """Low-pass filter and helically symmetrize the raw volume.

    ref_data[2] is the raw volume.  After mean subtraction, thresholding
    and a tangent low-pass filter (0.25/0.1), the previous symmetry
    parameters are read from 'symdoc.txt', the helios helical search is
    run, and the refined (dp, dphi) are appended back to 'symdoc.txt'.
    Returns (symmetrized volume, [0, 0, 0]).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("helical2 #%6d\n"%(ref_ali2d_counter))
    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    volf = threshold(volf)
    fl = 0.25  #0.17
    aa = 0.1
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    from utilities import read_text_file
    # Last recorded (dp, dphi) pair seeds the helical search.
    dipr = read_text_file('symdoc.txt',-1)
    # here pixel size, fract, rmax and rmin will have to be read from external text file
    from alignment import helios
    # NOTE(review): pixel size 2.175, fract 0.7, rmax 30, rmin 3 are
    # hard-coded — confirm they match the data set being refined.
    volf, dp, dphi = helios(volf, 2.175, dipr[0][-1], dipr[1][-1], 0.7, 30,3)
    print_msg("New delta z and delta phi : %s, %s\n\n"%(dp,dphi))
    # Append the refined symmetry parameters for the next iteration.
    fofo = open('symdoc.txt','a')
    fofo.write(' %12.4f %12.4f\n'%(dp,dphi))
    fofo.close()
    return volf,[0.0,0.0,0.0]
def helical(ref_data):
    """Low-pass filter the raw helical volume for the next round.

    ref_data[0] is the raw volume.  It is mean-subtracted, thresholded to
    non-negative densities, and tangent low-pass filtered at cut-off 0.45
    with fall-off 0.1.  Returns the filtered volume.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("helical #%6d\n" % (ref_ali2d_counter))
    mean = Util.infomask(ref_data[0], None, True)[0]
    vol = threshold(ref_data[0] - mean)
    # Dimensions are queried but unused (kept from the original,
    # slice-wise filtering variant).
    nx = vol.get_xsize()
    ny = vol.get_ysize()
    nz = vol.get_zsize()
    cutoff, falloff = 0.45, 0.1
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (cutoff, falloff))
    return filt_tanl(vol, cutoff, falloff)
def helical( ref_data ):
    """Low-pass filter the raw helical volume for the next round.

    ref_data[0] is the raw volume: subtract its mean, clip negative
    densities, and apply a tangent low-pass filter at cut-off 0.45 with
    fall-off 0.1.  Returns the filtered volume.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("helical #%6d\n"%(ref_ali2d_counter))
    stat = Util.infomask(ref_data[0], None, True)
    volf = ref_data[0] - stat[0]
    # nx/ny/nz are computed but unused; the slice-wise filtering that
    # needed them is disabled below.
    nx = volf.get_xsize()
    ny = volf.get_ysize()
    nz = volf.get_zsize()
    #for i in xrange(nz):
    #    volf.insert_clip(filt_tanl(volf.get_clip(Region(0,0,i,nx,ny,1)),0.4,0.1),[0,0,i])
    volf = threshold(volf)
    fl = 0.45  #0.17
    aa = 0.1
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    return volf  #,[0.,0.,0.]
def spruce_up_var_m( refdata ):
    """Variance-filter, normalize, mask and smooth a set of reference volumes.

    refdata layout: [numref, outdir, fscc, total_iter, varf, mask, ali50S].
    Each vol%04d.hdf is optionally filtered by the Fourier variance image,
    tangent low-pass filtered at the minimum FSC-derived cut-off,
    normalized, edge-mean subtracted, masked, thresholded, Gaussian
    low-pass filtered, optionally aligned to the first volume inside the
    50S mask, and written as volf%04d.hdf.
    """
    from utilities import print_msg
    from utilities import model_circle, get_im
    from filter import filt_tanl, filt_gaussl
    from morphology import threshold
    import os
    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    varf = refdata[4]
    mask = refdata[5]
    ali50S = refdata[6]
    if ali50S:
        mask_50S = get_im( "mask-50S.spi" )
    if fscc is None:
        flmin = 0.4
        aamin = 0.1
    else:
        # minfilt (module-level helper) returns the minimum cut-off over
        # all FSC curves; the self-assignment "aamin = aamin" was a no-op
        # and has been dropped.
        flmin, aamin, idmin = minfilt( fscc )
    # BUG FIX: the message referenced an undefined name "fflmin", which
    # raised NameError every time this function ran.
    msg = "Minimum tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (flmin, aamin)
    print_msg(msg)
    for i in xrange(numref):
        volf = get_im( os.path.join(outdir, "vol%04d.hdf" % total_iter), i )
        if(not (varf is None) ):
            volf = volf.filter_by_image( varf )
        volf = filt_tanl(volf, flmin, aamin)
        # Zero-mean / unit-variance under the mask.
        stat = Util.infomask(volf, mask, True)
        volf -= stat[0]
        Util.mul_scalar(volf, 1.0/stat[1])
        nx = volf.get_xsize()
        # Subtract the mean of a thin shell near the volume edge.
        stat = Util.infomask(volf, model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)
        volf -= stat[0]
        Util.mul_img( volf, mask )
        volf = threshold(volf)
        volf = filt_gaussl( volf, 0.4)
        if ali50S:
            if i == 0:
                # First volume becomes the alignment reference.
                v50S_0 = volf.copy()
                v50S_0 *= mask_50S
            else:
                from applications import ali_vol_3
                from fundamentals import rot_shift3D
                v50S_i = volf.copy()
                v50S_i *= mask_50S
                params = ali_vol_3(v50S_i, v50S_0, 10.0, 0.5, mask=mask_50S)
                volf = rot_shift3D( volf, params[0], params[1], params[2], params[3], params[4], params[5], 1.0)
        volf.write_image( os.path.join(outdir, "volf%04d.hdf" % total_iter), i )
def ref_ali3d(ref_data):
    """Prepare the 3D reference: normalize under the mask, mask, tangent
    low-pass filter at the FSC-fitted cut-off, and optionally center by
    phase center of gravity.

    ref_data: [mask, center flag, raw average, fsc result].
    Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    # "GOAL" diagnostic: masked dot product of the average with itself;
    # fl is reassigned to the fitted cut-off further below.
    fl = ref_data[2].cmp("dot", ref_data[2], {
        "negative": 0,
        "mask": ref_data[0]
    })
    print_msg("ref_ali3d Step = %5d GOAL = %10.3e\n" % (ref_ali2d_counter, fl))
    cs = [0.0] * 3
    # Historical filtering alternatives kept for reference:
    #filt = filt_from_fsc(fscc, 0.05)
    #vol  = filt_table(vol, filt)
    # here figure the filtration parameters and filter vol for the next iteration
    #fl, fh = filt_params(res)
    #vol	= filt_btwl(vol, fl, fh)
    # store the filtered reference volume
    #lk = 0
    #while(res[1][lk] >0.9 and res[0][lk]<0.25):
    #	lk+=1
    #fl = res[0][lk]
    #fh = min(fl+0.1,0.49)
    #vol = filt_btwl(vol, fl, fh)
    #fl, fh = filt_params(fscc)
    #print "fl, fh, iter",fl,fh,Iter
    #vol = filt_btwl(vol, fl, fh)
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    #volf = threshold(volf)
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.4
    #aa = 0.1
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    if ref_data[1] == 1:
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n" % (
            cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    return volf, cs
def ref_ali3d( ref_data ):
    """Prepare the 3D reference: normalize under the mask, mask, tangent
    low-pass filter at the FSC-fitted cut-off, and optionally center by
    phase center of gravity.

    ref_data: [mask, center flag, raw average, fsc result].
    Returns (filtered volume, center shifts).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    # "GOAL" diagnostic: masked dot product of the average with itself;
    # fl is reassigned to the fitted cut-off further below.
    fl = ref_data[2].cmp("dot",ref_data[2], {"negative":0, "mask":ref_data[0]} )
    print_msg("ref_ali3d Step = %5d GOAL = %10.3e\n"%(ref_ali2d_counter,fl))
    cs = [0.0]*3
    # Historical filtering alternatives kept for reference:
    #filt = filt_from_fsc(fscc, 0.05)
    #vol  = filt_table(vol, filt)
    # here figure the filtration parameters and filter vol for the next iteration
    #fl, fh = filt_params(res)
    #vol	= filt_btwl(vol, fl, fh)
    # store the filtered reference volume
    #lk = 0
    #while(res[1][lk] >0.9 and res[0][lk]<0.25):
    #	lk+=1
    #fl = res[0][lk]
    #fh = min(fl+0.1,0.49)
    #vol = filt_btwl(vol, fl, fh)
    #fl, fh = filt_params(fscc)
    #print "fl, fh, iter",fl,fh,Iter
    #vol = filt_btwl(vol, fl, fh)
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    #volf = threshold(volf)
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.4
    #aa = 0.1
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    if ref_data[1] == 1:
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    return volf, cs
def spruce_up_variance(ref_data):
    """Prepare the 3D reference using a Fourier variance filter.

    ref_data: [mask, center flag, raw volume, fsc result, 1/variance].
    The volume is (optionally) filtered by the variance image, tangent
    low-pass filtered at fixed (0.22, 0.15), normalized, edge-mean
    subtracted, masked, thresholded, and Gaussian low-pass filtered.
    Returns (filtered volume, [0, 0, 0]).
    """
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh, filt_gaussl
    from morphology import threshold
    mask = ref_data[0]
    center = ref_data[1]
    vol = ref_data[2]
    fscc = ref_data[3]
    varf = ref_data[4]
    print_msg("spruce_up with variance\n")
    cs = [0.0] * 3
    if not (varf is None):
        volf = vol.filter_by_image(varf)
    else:
        # BUG FIX: volf was left undefined when varf was None, so the
        # filt_tanl call below raised NameError; fall back to the raw
        # volume.
        volf = vol
    #fl, aa = fit_tanh(ref_data[3])
    fl = 0.22
    aa = 0.15
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    stat = Util.infomask(volf, None, True)
    volf = volf - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    from utilities import model_circle
    nx = volf.get_xsize()
    # Subtract the mean of a thin shell near the volume edge.
    stat = Util.infomask(
        volf,
        model_circle(nx // 2 - 2, nx, nx, nx) -
        model_circle(nx // 2 - 6, nx, nx, nx), True)
    volf -= stat[0]
    Util.mul_img(volf, mask)
    volf = threshold(volf)
    volf = filt_gaussl(volf, 0.4)
    return volf, cs
def ref_aliB_cone(ref_data):
    """Prepare the 3D reference for cone alignment.

    ref_data: [mask, reference power spectrum, raw average, fsc result].
    The volume is normalized, thresholded, masked, its rotational power
    spectrum matched to the supplied reference spectrum, tangent low-pass
    filtered at the FSC-fitted cut-off, and renormalized.  Returns the
    filtered volume (no centering; that branch is disabled).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    from math import sqrt
    print_msg("ref_aliB_cone\n")
    #cs = [0.0]*3
    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    volf = threshold(volf)
    Util.mul_img(volf, ref_data[0])
    from fundamentals import rops_table
    # Match the volume's radial power to the reference spectrum.
    pwem = rops_table(volf)
    ftb = []
    for idum in xrange(len(pwem)):
        ftb.append(sqrt(ref_data[1][idum] / pwem[idum]))
    from filter import filt_table
    volf = filt_table(volf, ftb)
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.41
    #aa = 0.15
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    stat = Util.infomask(volf, None, True)
    volf -= stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    # Centering disabled (ref_data[1] holds the power spectrum here):
    """
    if(ref_data[1] == 1):
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    """
    return volf
def ref_aliB_cone( ref_data ):
    """Prepare the 3D reference for cone alignment.

    ref_data: [mask, reference power spectrum, raw average, fsc result].
    Normalizes, thresholds, masks, matches the rotational power spectrum
    to the supplied reference spectrum, tangent low-pass filters at the
    FSC-fitted cut-off, and renormalizes.  Returns the filtered volume
    (the centering branch is disabled).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    from math import sqrt
    print_msg("ref_aliB_cone\n")
    #cs = [0.0]*3
    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    volf = threshold(volf)
    Util.mul_img(volf, ref_data[0])
    from fundamentals import rops_table
    # Match the volume's radial power to the reference spectrum.
    pwem = rops_table(volf)
    ftb = []
    for idum in xrange(len(pwem)):
        ftb.append(sqrt(ref_data[1][idum]/pwem[idum]))
    from filter import filt_table
    volf = filt_table(volf, ftb)
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.41
    #aa = 0.15
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    stat = Util.infomask(volf, None, True)
    volf -= stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    # Centering disabled (ref_data[1] holds the power spectrum here):
    """
    if(ref_data[1] == 1):
        cs = volf.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
        print_msg(msg)
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    """
    return volf
def spruce_up_variance( ref_data ):
    """Prepare the 3D reference using a Fourier variance filter.

    ref_data: [mask, center flag, raw volume, fsc result, 1/variance].
    The volume is (optionally) filtered by the variance image, tangent
    low-pass filtered at fixed (0.22, 0.15), normalized, edge-mean
    subtracted, masked, thresholded, and Gaussian low-pass filtered.
    Returns (filtered volume, [0, 0, 0]).
    """
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh, filt_gaussl
    from morphology import threshold
    mask = ref_data[0]
    center = ref_data[1]
    vol = ref_data[2]
    fscc = ref_data[3]
    varf = ref_data[4]
    print_msg("spruce_up with variance\n")
    cs = [0.0]*3
    if not(varf is None):
        volf = vol.filter_by_image(varf)
    else:
        # BUG FIX: volf was left undefined when varf was None, so the
        # filt_tanl call below raised NameError; fall back to the raw
        # volume.
        volf = vol
    #fl, aa = fit_tanh(ref_data[3])
    fl = 0.22
    aa = 0.15
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    stat = Util.infomask(volf, None, True)
    volf = volf - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    from utilities import model_circle
    nx = volf.get_xsize()
    # Subtract the mean of a thin shell near the volume edge.
    stat = Util.infomask(volf, model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)
    volf -= stat[0]
    Util.mul_img(volf, mask)
    volf = threshold(volf)
    volf = filt_gaussl(volf, 0.4)
    return volf, cs
def ref_ali3dm_ali_50S(refdata): from filter import fit_tanh, filt_tanl from utilities import get_im from fundamentals import rot_shift3D import os numref = refdata[0] outdir = refdata[1] fscc = refdata[2] total_iter = refdata[3] varf = refdata[4] #mask_50S = get_im( "mask-50S.spi" ) flmin = 1.0 flmax = -1.0 for iref in xrange(numref): fl, aa = fit_tanh(fscc[iref]) if (fl < flmin): flmin = fl aamin = aa if (fl > flmax): flmax = fl aamax = aa print 'iref,fl,aa: ', iref, fl, aa # filter to minimum resolution print 'flmin,aamin:', flmin, aamin for iref in xrange(numref): v = get_im(os.path.join(outdir, "vol%04d.hdf" % total_iter), iref) v = filt_tanl(v, flmin, aamin) if ali50s: from utilities import get_params3D, set_params3D, combine_params3 from applications import ali_vol_shift, ali_vol_rotate if iref == 0: v50S_ref = alivol_mask_getref(v, mask_50S) else: v = alivol_mask(v, v50S_ref, mask_50S) if not (varf is None): print 'filtering by fourier variance' v.filter_by_image(varf) v.write_image(os.path.join(outdir, "volf%04d.hdf" % total_iter), iref)
def ref_ali3dm_ali_50S( refdata ): from filter import fit_tanh, filt_tanl from utilities import get_im from fundamentals import rot_shift3D import os numref = refdata[0] outdir = refdata[1] fscc = refdata[2] total_iter = refdata[3] varf = refdata[4] #mask_50S = get_im( "mask-50S.spi" ) flmin = 1.0 flmax = -1.0 for iref in xrange(numref): fl, aa = fit_tanh( fscc[iref] ) if (fl < flmin): flmin = fl aamin = aa if (fl > flmax): flmax = fl aamax = aa print 'iref,fl,aa: ', iref, fl, aa # filter to minimum resolution print 'flmin,aamin:', flmin, aamin for iref in xrange(numref): v = get_im(os.path.join(outdir, "vol%04d.hdf"%total_iter), iref) v = filt_tanl(v, flmin, aamin) if ali50s: from utilities import get_params3D, set_params3D, combine_params3 from applications import ali_vol_shift, ali_vol_rotate if iref==0: v50S_ref = alivol_mask_getref( v, mask_50S ) else: v = alivol_mask( v, v50S_ref, mask_50S ) if not(varf is None): print 'filtering by fourier variance' v.filter_by_image( varf ) v.write_image(os.path.join(outdir, "volf%04d.hdf"%total_iter), iref)
def spruce_up(ref_data):
    """Prepare the 3D reference with B-factor sharpening.

    ref_data: [mask, center flag, raw average, fsc result].  The volume is
    normalized, thresholded, sharpened with an inverse Gaussian filter,
    edge-mean subtracted, masked, and tangent low-pass filtered at the
    FSC-fitted cut-off with a halved fall-off.  Returns
    (filtered volume, [0, 0, 0]); no centering is performed.
    """
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh
    from morphology import threshold
    print_msg("Changed4 spruce_up\n")
    cs = [0.0] * 3
    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    volf = threshold(volf)
    # Apply B-factor (inverse Gaussian sharpening).
    from filter import filt_gaussinv
    from math import sqrt
    B = 1.0 / sqrt(2. * 14.0)
    volf = filt_gaussinv(volf, B, False)
    nx = volf.get_xsize()
    from utilities import model_circle
    # Subtract the mean of a thin shell near the volume edge.
    stat = Util.infomask(
        volf,
        model_circle(nx // 2 - 2, nx, nx, nx) -
        model_circle(nx // 2 - 6, nx, nx, nx), True)
    volf -= stat[0]
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.35
    #aa = 0.1
    aa /= 2
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    return volf, cs
def ref_ali3dm(refdata): from filter import fit_tanh, filt_tanl from utilities import get_im from fundamentals import rot_shift3D import os numref = refdata[0] outdir = refdata[1] fscc = refdata[2] total_iter = refdata[3] #varf = refdata[4] mask = refdata[5] print 'filter every volume at (0.4, 0.1)' for iref in xrange(numref): v = get_im(os.path.join(outdir, "vol%04d.hdf" % total_iter), iref) v = filt_tanl(v, 0.4, 0.1) v *= mask v.write_image(os.path.join(outdir, "volf%04d.hdf" % total_iter), iref)
def ref_ali3d( ref_data ):
    """Prepare a 3D reference: normalize under the mask, mask, tangent
    low-pass filter at the FSC-fitted cut-off, normalize in place, and
    optionally center by phase center of gravity.

    ref_data: [mask, center flag, raw average, fsc result].
    Returns (filtered volume, center shifts, fitted cut-off frequency).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    # Masked dot-product diagnostic; superseded by the fitted cut-off that
    # is returned below.
    goal = ref_data[2].cmp("dot", ref_data[2], {"negative": 0, "mask": ref_data[0]})
    shifts = [0.0] * 3
    stats = Util.infomask(ref_data[2], ref_data[0], False)
    vol = ref_data[2] - stats[0]
    Util.mul_scalar(vol, 1.0 / stats[1])
    Util.mul_img(vol, ref_data[0])
    cutoff, falloff = fit_tanh(ref_data[3])
    vol = filt_tanl(vol, cutoff, falloff)
    vol.process_inplace("normalize")
    if ref_data[1] == 1:
        shifts = vol.phase_cog()
        vol = fshift(vol, -shifts[0], -shifts[1], -shifts[2])
    return vol, shifts, cutoff
def ref_ali3dm( refdata ):
    """Filter every reference volume at a fixed (0.4, 0.1) tangent
    low-pass, apply the 3D mask, and write the results as volf%04d.hdf.

    refdata layout: [numref, outdir, fscc, total_iter, varf, mask]
    (fscc and varf are unused in this variant).
    """
    from filter import fit_tanh, filt_tanl
    from utilities import get_im
    from fundamentals import rot_shift3D
    import os
    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    #varf = refdata[4]
    mask = refdata[5]
    print 'filter every volume at (0.4, 0.1)'
    for iref in xrange(numref):
        v = get_im(os.path.join(outdir, "vol%04d.hdf"%total_iter), iref)
        v = filt_tanl(v, 0.4, 0.1)
        v *= mask
        v.write_image(os.path.join(outdir, "volf%04d.hdf"%total_iter), iref)
def helical2( ref_data ):
    """Tangent low-pass filter (cut-off 0.17, fall-off 0.2) the raw
    helical volume in ref_data[0]; no normalization or masking.
    Returns the filtered volume.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from morphology import threshold
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("helical2 #%6d\n" % (ref_ali2d_counter))
    vol = ref_data[0]
    cutoff = 0.17
    falloff = 0.2
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (cutoff, falloff))
    return filt_tanl(vol, cutoff, falloff)
def steady( ref_data ):
    """2D reference preparation with a step-wise opening tangent low-pass.

    ref_data: [mask, center flag, raw average, fsc result].  The cut-off
    starts at 0.12 and rises by 0.1 every third call of the global
    counter.  Returns (filtered average, [0.0, 0.0]); no centering.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("steady #%6d\n" % (ref_ali2d_counter))
    cutoff = 0.12 + (ref_ali2d_counter // 3) * 0.1
    falloff = 0.1
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (cutoff, falloff))
    return filt_tanl(ref_data[2], cutoff, falloff), [0.0, 0.0]
def ref_ali3d(ref_data):
    """Prepare a 3D reference: normalize under the mask, mask, tangent
    low-pass filter at the FSC-fitted cut-off, normalize in place, and
    optionally center by phase center of gravity.

    ref_data: [mask, center flag, raw average, fsc result].
    Returns (filtered volume, center shifts, fitted cut-off frequency).
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    # Masked dot-product diagnostic; fl is overwritten by the fitted
    # cut-off below, which is what gets returned.
    fl = ref_data[2].cmp("dot", ref_data[2], {
        "negative": 0,
        "mask": ref_data[0]
    })
    cs = [0.0] * 3
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    volf = filt_tanl(volf, fl, aa)
    volf.process_inplace("normalize")
    if ref_data[1] == 1:
        cs = volf.phase_cog()
        volf = fshift(volf, -cs[0], -cs[1], -cs[2])
    return volf, cs, fl
def spruce_up( ref_data ):
    """Prepare the 3D reference with B-factor sharpening.

    ref_data: [mask, center flag, raw average, fsc result].  The volume is
    normalized, thresholded, sharpened with an inverse Gaussian filter,
    edge-mean subtracted, masked, and tangent low-pass filtered at the
    FSC-fitted cut-off with a halved fall-off.  Returns
    (filtered volume, [0, 0, 0]); no centering is performed.
    """
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh
    from morphology import threshold
    print_msg("Changed4 spruce_up\n")
    cs = [0.0]*3
    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0/stat[1])
    volf = threshold(volf)
    # Apply B-factor (inverse Gaussian sharpening).
    from filter import filt_gaussinv
    from math import sqrt
    B = 1.0/sqrt(2.*14.0)
    volf = filt_gaussinv(volf, B, False)
    nx = volf.get_xsize()
    from utilities import model_circle
    # Subtract the mean of a thin shell near the volume edge.
    stat = Util.infomask(volf, model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)
    volf -= stat[0]
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.35
    #aa = 0.1
    aa /= 2
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    return volf, cs
def steady(ref_data):
    """2D reference preparation with a step-wise opening tangent low-pass.

    ref_data: [mask, center flag, raw average, fsc result].  The cut-off
    starts at 0.12 and rises by 0.1 every third call of the global
    counter.  Returns (filtered average, [0.0, 0.0]); no centering.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("steady #%6d\n" % (ref_ali2d_counter))
    fl = 0.12 + (ref_ali2d_counter // 3) * 0.1
    aa = 0.1
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    tavg = filt_tanl(ref_data[2], fl, aa)
    cs = [0.0] * 2
    return tavg, cs
def helical2(ref_data):
    """Prepare the reference in helical refinement: fixed tangent low-pass filter.

    NOTE(review): the original header comment said the raw volume lives at
    ref_data[2], but the code reads ref_data[0] -- confirm which index the
    callers actually populate.

    Returns the filtered volume.
    """
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from morphology import threshold

    global ref_ali2d_counter
    ref_ali2d_counter += 1
    print_msg("helical2 #%6d\n" % (ref_ali2d_counter))
    # Fixed filter parameters (no FSC fitting here).
    cutoff = 0.17
    falloff = 0.2
    print_msg("Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (cutoff, falloff))
    return filt_tanl(ref_data[0], cutoff, falloff)
def ref_sort3d(refdata):
    """Low-pass filter (tangent, fall-off 0.1) and mask every reference volume
    of the current sort3d iteration, reading vol%04d.hdf and writing
    volf%04d.hdf in the output directory.

    refdata layout (by position):
        0 - numref; 1 - outdir; 2 - fscc (unused); 3 - total_iter;
        4 - varf (unused); 5 - mask; 6 - low-pass cut-off frequency
    """
    from filter import fit_tanh, filt_tanl
    from utilities import get_im
    from fundamentals import rot_shift3D
    import os

    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    # varf = refdata[4] is intentionally skipped
    mask = refdata[5]
    cutoff = refdata[6]

    import time
    from time import strftime, localtime
    theme = 'filter every volume at (%f, 0.1)' % cutoff
    stamp = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
    print(stamp + theme)
    # NOTE(review): this duplicates the message printed just above.
    print('filter every volume at (%f, 0.1)' % cutoff)

    src = os.path.join(outdir, "vol%04d.hdf" % total_iter)
    dst = os.path.join(outdir, "volf%04d.hdf" % total_iter)
    for iref in xrange(numref):
        vol = filt_tanl(get_im(src, iref), cutoff, 0.1)
        vol *= mask
        vol.write_image(dst, iref)
def filterlocal(ui, vi, m, falloff, myid, main_node, number_of_proc):
    """Apply a locally varying tangent low-pass filter to volume `vi`.

    MPI-collective: every rank must call this with the same main_node.
    Parameters:
        ui      - volume of per-voxel cut-off frequencies (only meaningful on
                  main_node; broadcast to all ranks)
        vi      - volume to be filtered (only meaningful on main_node)
        m       - binary mask selecting voxels to filter
        falloff - tangent filter fall-off (only meaningful on main_node)
        myid, main_node, number_of_proc - MPI rank info
    Returns the filtered volume; the full result is reduced onto main_node
    (other ranks hold partial sums).
    """
    from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv, mpi_send, mpi_recv
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
    from utilities import bcast_number_to_all, bcast_list_to_all, model_blank, bcast_EMData_to_all, reduce_EMData_to_root
    from morphology import threshold_outside
    from filter import filt_tanl
    from fundamentals import fft, fftip
    if(myid == main_node):
        nx = vi.get_xsize()
        ny = vi.get_ysize()
        nz = vi.get_zsize()
        # Round all resolution numbers to two digits so voxels can later be
        # matched against the 0.01-stepped cutoff exactly.
        for x in xrange(nx):
            for y in xrange(ny):
                for z in xrange(nz):
                    ui.set_value_at_fast( x,y,z, round(ui.get_value_at(x,y,z), 2) )
        dis = [nx,ny,nz]
    else:
        # Placeholders on non-root ranks; real values arrive via broadcast.
        falloff = 0.0
        radius = 0
        dis = [0,0,0]
    # Collective broadcasts -- call order must be identical on every rank.
    falloff = bcast_number_to_all(falloff, main_node)
    dis = bcast_list_to_all(dis, myid, source_node = main_node)
    if(myid != main_node):
        nx = int(dis[0])
        ny = int(dis[1])
        nz = int(dis[2])
        # Allocate receive buffers matching the root's volume dimensions.
        vi = model_blank(nx,ny,nz)
        ui = model_blank(nx,ny,nz)
    bcast_EMData_to_all(vi, myid, main_node)
    bcast_EMData_to_all(ui, myid, main_node)
    fftip(vi)  # volume to be filtered, kept in Fourier space
    # st = [mean, sigma, min, max] of the cut-off volume under the mask.
    st = Util.infomask(ui, m, True)
    filteredvol = model_blank(nx,ny,nz)
    cutoff = max(st[2] - 0.01,0.0)
    # Sweep cut-off frequencies in 0.01 steps from min to max.
    while(cutoff < st[3] ):
        cutoff = round(cutoff + 0.01, 2)
        # Does any masked voxel request (approximately) this cutoff?
        # Ideally, one would want to check only slices in question...
        pt = Util.infomask( threshold_outside(ui, cutoff - 0.00501, cutoff + 0.005), m, True)
        if(pt[0] != 0.0):
            # Filter the whole volume once at this cutoff, then copy only the
            # voxels whose requested cutoff matches; z-slices are distributed
            # round-robin over the MPI ranks.
            vovo = fft( filt_tanl(vi, cutoff, falloff) )
            for z in xrange(myid, nz, number_of_proc):
                for x in xrange(nx):
                    for y in xrange(ny):
                        if(m.get_value_at(x,y,z) > 0.5):
                            if(round(ui.get_value_at(x,y,z),2) == cutoff):
                                filteredvol.set_value_at_fast(x,y,z,vovo.get_value_at(x,y,z))
    mpi_barrier(MPI_COMM_WORLD)
    # Sum the per-rank partial volumes onto main_node.
    reduce_EMData_to_root(filteredvol, myid, main_node, MPI_COMM_WORLD)
    return filteredvol
def main():
    """Entry point of sxprocess.py: a multi-tool 2-D/3-D image processing
    driver.  Parses command-line options and dispatches to exactly one of the
    mutually exclusive operations (phase flip, resample, power spectrum,
    projection generation, CTF import, shift scaling, adaptive masking,
    postprocessing, stack windowing, ...).
    """
    import sys
    import os
    import math
    import random
    import pyemtbx.options
    import time
    from random import random, seed, randint
    from optparse import OptionParser

    progname = os.path.basename(sys.argv[0])
    usage = progname + """ [options] <inputfile> <outputfile>

	Generic 2-D image processing programs.

	Functionality:

	1.  Phase flip a stack of images and write output to new file:
		sxprocess.py input_stack.hdf output_stack.hdf --phase_flip

	2.  Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.
	    The window size will change accordingly.
		sxprocess input.hdf output.hdf  --changesize --ratio=0.5

	3.  Compute average power spectrum of a stack of 2D images with optional padding (option wn) with zeroes or a 3-D volume.
		sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024]

	4.  Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs:
		sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64

	5.  Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3):
		sxprocess.py  bdb:test3 class_averages_generation_3.hdf  list3_12.txt --isacgroup=12 --params=originalid

	6.  Retrieve original image numbers of images listed in ISAC output stack of averages:
		sxprocess.py  select1.hdf  ohk.txt

	7.  Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file.
	    Optionally use a tangent low-pass filter.  Also works for a stack of images, in which case the output is also a stack.
		sxprocess.py  vol.hdf ref.hdf  avol.hdf < 0.25 0.2> --adjpw
		sxprocess.py  vol.hdf pw.txt  avol.hdf < 0.25 0.2> --adjpw

	8.  Generate a 1D rotationally averaged power spectrum of an image.
		sxprocess.py  vol.hdf --rotwp=rotpw.txt
	    # Output will contain three columns:
	    (1) rotationally averaged power spectrum
	    (2) logarithm of the rotationally averaged power spectrum
	    (3) integer line number (from zero to approximately to half the image size)

	9.  Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data.
		sxprocess.py  --transfromparams=phi,theta,psi,tx,ty,tz      input.txt  output.txt
	    The output file is then imported and 3D transformed volume computed:
		sxheader.py  bdb:p  --params=xform.projection  --import=output.txt
		mpirun -np 2 sxrecons3d_n.py  bdb:p tvol.hdf --MPI
	    The reconstructed volume is in the position of the volume computed using the input.txt parameters and then
	    transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz)

	10. Import ctf parameters from the output of sxcter into windowed particle headers.
	    There are three possible input files formats:  (1) all particles are in one stack, (2 aor 3) particles are in stacks, each stack corresponds to a single micrograph.
	    In each case the particles should contain a name of the micrograph of origin stores using attribute name 'ptcl_source_image'.
	    Normally this is done by e2boxer.py during windowing.
	    Particles whose defocus or astigmatism error exceed set thresholds will be skipped, otherwise, virtual stacks with the original way preceded by G will be created.
		sxprocess.py  --input=bdb:data  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
	    # Output will be a vritual stack bdb:Gdata
		sxprocess.py  --input="bdb:directory/stacks*"  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
	    To concatenate output files:
		cd directory
		e2bdb.py . --makevstack=bdb:allparticles  --filt=G
	    IMPORTANT:  Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them.

	11. Scale 3D shifts.  The shifts in the input five columns text file with 3D orientation parameters will be DIVIDED by the scale factor
		sxprocess.py  orientationparams.txt  scaledparams.txt  scale=0.5

	12. Generate 3D mask from a given 3-D volume automatically or using threshold provided by user.

	13. Postprocess 3-D or 2-D images:
	    for 3-D volumes: calculate FSC with provided mask; weight summed volume with FSC; estimate B-factor from FSC weighted summed two volumes; apply negative B-factor to the weighted volume.
	    for 2-D images:  calculate B-factor and apply negative B-factor to 2-D images.

	14. Winow stack file -reduce size of images without changing the pixel size.

"""

    parser = OptionParser(usage,version=SPARXVERSION)
    parser.add_option("--order", action="store_true", help="Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.", default=False)
    parser.add_option("--order_lookup", action="store_true", help="Test/Debug.", default=False)
    parser.add_option("--order_metropolis", action="store_true", help="Test/Debug.", default=False)
    parser.add_option("--order_pca", action="store_true", help="Test/Debug.", default=False)
    parser.add_option("--initial", type="int", default=-1, help="Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)")
    parser.add_option("--circular", action="store_true", help="Select circular ordering (fisr image has to be similar to the last", default=False)
    parser.add_option("--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering")
    parser.add_option("--changesize", action="store_true", help="resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.", default=False)
    parser.add_option("--ratio", type="float", default=1.0, help="The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round")
    parser.add_option("--pw", action="store_true", help="compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes", default=False)
    parser.add_option("--wn", type="int", default=-1, help="Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))")
    parser.add_option("--phase_flip", action="store_true", help="Phase flip the input stack", default=False)
    parser.add_option("--makedb", metavar="param1=value1:param2=value2", type="string", action="append", help="One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0")
    parser.add_option("--generate_projections", metavar="param1=value1:param2=value2", type="string", action="append", help="Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64.")
    parser.add_option("--isacgroup", type="int", help="Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.", default=-1)
    parser.add_option("--isacselect", action="store_true", help="Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.", default=False)
    parser.add_option("--params", type="string", default=None, help="Name of header of parameter, which one depends on specific option")
    parser.add_option("--adjpw", action="store_true", help="Adjust rotationally averaged power spectrum of an image", default=False)
    parser.add_option("--rotpw", type="string", default=None, help="Name of the text file to contain rotationally averaged power spectrum of the input image.")
    parser.add_option("--transformparams", type="string", default=None, help="Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz). Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters.")

    # import ctf estimates done using cter
    parser.add_option("--input", type="string", default= None, help="Input particles.")
    parser.add_option("--importctf", type="string", default= None, help="Name of the file containing CTF parameters produced by sxcter.")
    parser.add_option("--defocuserror", type="float", default=1000000.0, help="Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent. The error is computed as (std dev defocus)/defocus*100%")
    parser.add_option("--astigmatismerror", type="float", default=360.0, help="Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees.")

    # import ctf estimates done using cter
    parser.add_option("--scale", type="float", default=-1.0, help="Divide shifts in the input 3D orientation parameters text file by the scale factor.")

    # generate adaptive mask from an given 3-D volume
    parser.add_option("--adaptive_mask", action="store_true", help="create adavptive 3-D mask from a given volume", default=False)
    parser.add_option("--nsigma", type="float", default= 1., help="number of times of sigma of the input volume to obtain the the large density cluster")
    parser.add_option("--ndilation", type="int", default= 3, help="number of times of dilation applied to the largest cluster of density")
    parser.add_option("--kernel_size", type="int", default= 11, help="convolution kernel for smoothing the edge of the mask")
    parser.add_option("--gauss_standard_dev", type="int", default= 9, help="stanadard deviation value to generate Gaussian edge")
    parser.add_option("--threshold", type="float", default= 9999., help="threshold provided by user to binarize input volume")
    parser.add_option("--ne", type="int", default= 0, help="number of times to erode the binarized input image")
    parser.add_option("--nd", type="int", default= 0, help="number of times to dilate the binarized input image")
    parser.add_option("--postprocess", action="store_true", help="postprocess unfiltered odd, even 3-D volumes",default=False)
    parser.add_option("--fsc_weighted", action="store_true", help="postprocess unfiltered odd, even 3-D volumes")
    parser.add_option("--low_pass_filter", action="store_true", default=False, help="postprocess unfiltered odd, even 3-D volumes")
    parser.add_option("--ff", type="float", default=.25, help="low pass filter stop band frequency in absolute unit")
    parser.add_option("--aa", type="float", default=.1, help="low pass filter falloff" )
    parser.add_option("--mask", type="string", help="input mask file", default=None)
    parser.add_option("--output", type="string", help="output file name", default="postprocessed.hdf")
    parser.add_option("--pixel_size", type="float", help="pixel size of the data", default=1.0)
    parser.add_option("--B_start", type="float", help="starting frequency in Angstrom for B-factor estimation", default=10.)
    parser.add_option("--FSC_cutoff", type="float", help="stop frequency in Angstrom for B-factor estimation", default=0.143)
    # NOTE(review): dest "2d" cannot be read as options.2d (invalid attribute name); this flag is never consumed below.
    parser.add_option("--2d", action="store_true", help="postprocess isac 2-D averaged images",default=False)
    parser.add_option("--window_stack", action="store_true", help="window stack images using a smaller window size", default=False)
    parser.add_option("--box", type="int", default= 0, help="the new window size ")

    (options, args) = parser.parse_args()

    global_def.BATCH = True

    if options.phase_flip:
        # --- Operation 1: phase flip each image of a stack using its own CTF header.
        nargs = len(args)
        if nargs != 2:
            print "must provide name of input and output file!"
            return
        from EMAN2 import Processor
        instack = args[0]
        outstack = args[1]
        nima = EMUtil.get_image_count(instack)
        from filter import filt_ctf
        for i in xrange(nima):
            img = EMData()
            img.read_image(instack, i)
            try:
                ctf = img.get_attr('ctf')
            except:
                print "no ctf information in input stack! Exiting..."
                return
            dopad = True
            sign = 1
            binary = 1  # phase flip
            assert img.get_ysize() > 1
            # NOTE(review): `dict` shadows the builtin.
            dict = ctf.to_dict()
            dz = dict["defocus"]
            cs = dict["cs"]
            voltage = dict["voltage"]
            pixel_size = dict["apix"]
            b_factor = dict["bfactor"]
            ampcont = dict["ampcont"]
            dza = dict["dfdiff"]
            azz = dict["dfang"]
            if dopad and not img.is_complex():
                ip = 1
            else:
                ip = 0
            params = {"filter_type": Processor.fourier_filter_types.CTF_, "defocus": dz, "Cs": cs, "voltage": voltage, "Pixel_size": pixel_size, "B_factor": b_factor, "amp_contrast": ampcont, "dopad": ip, "binary": binary, "sign": sign, "dza": dza, "azz": azz}
            tmp = Processor.EMFourierFilter(img, params)
            tmp.set_attr_dict({"ctf": ctf})
            tmp.write_image(outstack, i)

    elif options.changesize:
        # --- Operation 2: resample every image of a stack by options.ratio.
        nargs = len(args)
        if nargs != 2:
            ERROR("must provide name of input and output file!", "change size", 1)
            return
        from utilities import get_im
        instack = args[0]
        outstack = args[1]
        sub_rate = float(options.ratio)
        nima = EMUtil.get_image_count(instack)
        from fundamentals import resample
        for i in xrange(nima):
            resample(get_im(instack, i), sub_rate).write_image(outstack, i)

    elif options.isacgroup > -1:
        # --- Operation 5: write original image ids of one ISAC group to a text file.
        nargs = len(args)
        if nargs != 3:
            ERROR("Three files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        instack = args[0]
        m = get_im(args[1], int(options.isacgroup)).get_attr("members")
        l = []
        for k in m:
            l.append(int(get_im(args[0], k).get_attr(options.params)))
        from utilities import write_text_file
        write_text_file(l, args[2])

    elif options.isacselect:
        # --- Operation 6: collect members of all ISAC averages, sorted, to a text file.
        nargs = len(args)
        if nargs != 2:
            ERROR("Two files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        nima = EMUtil.get_image_count(args[0])
        m = []
        for k in xrange(nima):
            m += get_im(args[0], k).get_attr("members")
        m.sort()
        from utilities import write_text_file
        write_text_file(m, args[1])

    elif options.pw:
        # --- Operation 3: rotational power spectrum (3-D) or averaged periodogram (2-D stack).
        nargs = len(args)
        if nargs < 2:
            ERROR("must provide name of input and output file!", "pw", 1)
            return
        from utilities import get_im, write_text_file
        from fundamentals import rops_table
        d = get_im(args[0])
        ndim = d.get_ndim()
        if ndim == 3:
            pw = rops_table(d)
            write_text_file(pw, args[1])
        else:
            nx = d.get_xsize()
            ny = d.get_ysize()
            if nargs == 3:
                mask = get_im(args[2])
            wn = int(options.wn)
            if wn == -1:
                wn = max(nx, ny)
            else:
                if (wn < nx) or (wn < ny):
                    ERROR("window size cannot be smaller than the image size", "pw", 1)
            n = EMUtil.get_image_count(args[0])
            from utilities import model_blank, model_circle, pad
            from EMAN2 import periodogram
            p = model_blank(wn, wn)
            for i in xrange(n):
                d = get_im(args[0], i)
                if nargs == 3:
                    d *= mask
                st = Util.infomask(d, None, True)
                d -= st[0]  # remove the mean before padding
                p += periodogram(pad(d, wn, wn, 1, 0.))
            p /= n
            p.write_image(args[1])

    elif options.adjpw:
        # --- Operation 7: match rotational power spectrum of a stack to a target.
        if len(args) < 3:
            ERROR("filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)", "adjpw", 1)
            return
        img_stack = args[0]
        from math import sqrt
        from fundamentals import rops_table, fft
        from utilities import read_text_file, get_im
        from filter import filt_tanl, filt_table
        if (args[1][-3:] == 'txt'):
            rops_dst = read_text_file(args[1])
        else:
            rops_dst = rops_table(get_im(args[1]))
        out_stack = args[2]
        if (len(args) > 4):
            fl = float(args[3])
            aa = float(args[4])
        else:
            fl = -1.0
            aa = 0.0
        nimage = EMUtil.get_image_count(img_stack)
        for i in xrange(nimage):
            img = fft(get_im(img_stack, i))
            rops_src = rops_table(img)
            assert len(rops_dst) == len(rops_src)
            table = [0.0] * len(rops_dst)
            for j in xrange(len(rops_dst)):
                table[j] = sqrt(rops_dst[j] / rops_src[j])
            if (fl > 0.0):
                img = filt_tanl(img, fl, aa)
            img = fft(filt_table(img, table))
            img.write_image(out_stack, i)

    elif options.rotpw != None:
        # --- Operation 8: 1-D rotational power spectrum + its log + line index.
        if len(args) != 1:
            ERROR("Only one input permitted", "rotpw", 1)
            return
        from utilities import write_text_file, get_im
        from fundamentals import rops_table
        from math import log10
        t = rops_table(get_im(args[0]))
        x = range(len(t))
        r = [0.0] * len(x)
        for i in x:
            r[i] = log10(t[i])
        write_text_file([t, r, x], options.rotpw)

    elif options.transformparams != None:
        # --- Operation 9: apply a six-parameter 3-D transform to projection parameters.
        if len(args) != 2:
            ERROR("Please provide names of input and output files with orientation parameters", "transformparams", 1)
            return
        from utilities import read_text_row, write_text_row
        transf = [0.0] * 6
        spl = options.transformparams.split(',')
        for i in xrange(len(spl)):
            transf[i] = float(spl[i])
        write_text_row(rotate_shift_params(read_text_row(args[0]), transf), args[1])

    elif options.makedb != None:
        # --- Store gaussian-boxer parameters into the e2boxer cache database.
        nargs = len(args)
        if nargs != 1:
            print "must provide exactly one argument denoting database key under which the input params will be stored"
            return
        dbkey = args[0]
        print "database key under which params will be stored: ", dbkey
        gbdb = js_open_dict("e2boxercache/gauss_box_DB.json")
        parmstr = 'dummy:' + options.makedb[0]
        (processorname, param_dict) = parsemodopt(parmstr)
        dbdict = {}
        for pkey in param_dict:
            # Boolean-valued keys arrive as the strings 'True'/'False'.
            if (pkey == 'invert_contrast') or (pkey == 'use_variance'):
                if param_dict[pkey] == 'True':
                    dbdict[pkey] = True
                else:
                    dbdict[pkey] = False
            else:
                dbdict[pkey] = param_dict[pkey]
        gbdb[dbkey] = dbdict

    elif options.generate_projections:
        # --- Operation 4: synthesize projections and micrographs from a model volume.
        nargs = len(args)
        if nargs != 3:
            ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\
            "sxprocess - generate projections",1)
            return
        inpstr = args[0]
        outstk = args[1]
        micpref = args[2]
        parmstr = 'dummy:' + options.generate_projections[0]
        (processorname, param_dict) = parsemodopt(parmstr)
        parm_CTF = False
        parm_format = 'bdb'
        parm_apix = 2.5
        if 'CTF' in param_dict:
            if param_dict['CTF'] == 'True':
                parm_CTF = True
        if 'format' in param_dict:
            parm_format = param_dict['format']
        if 'apix' in param_dict:
            parm_apix = float(param_dict['apix'])
        boxsize = 64
        if 'boxsize' in param_dict:
            boxsize = int(param_dict['boxsize'])
        print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize
        # Simulation noise/scale defaults, each overridable from param_dict.
        scale_mult = 2500
        sigma_add = 1.5
        sigma_proj = 30.0
        sigma2_proj = 17.5
        sigma_gauss = 0.3
        sigma_mic = 30.0
        sigma2_mic = 17.5
        sigma_gauss_mic = 0.3
        if 'scale_mult' in param_dict:
            scale_mult = float(param_dict['scale_mult'])
        if 'sigma_add' in param_dict:
            sigma_add = float(param_dict['sigma_add'])
        if 'sigma_proj' in param_dict:
            sigma_proj = float(param_dict['sigma_proj'])
        if 'sigma2_proj' in param_dict:
            sigma2_proj = float(param_dict['sigma2_proj'])
        if 'sigma_gauss' in param_dict:
            sigma_gauss = float(param_dict['sigma_gauss'])
        if 'sigma_mic' in param_dict:
            sigma_mic = float(param_dict['sigma_mic'])
        if 'sigma2_mic' in param_dict:
            sigma2_mic = float(param_dict['sigma2_mic'])
        if 'sigma_gauss_mic' in param_dict:
            sigma_gauss_mic = float(param_dict['sigma_gauss_mic'])
        from filter import filt_gaussl, filt_ctf
        from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank,pad,model_gauss_noise,set_params2D, set_params_proj
        from projection import prep_vol,prgs
        # Fixed seed so the generated test data is reproducible.
        seed(14567)
        delta = 29
        angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S")
        nangle = len(angles)
        modelvol = []
        nvlms = EMUtil.get_image_count(inpstr)
        from utilities import get_im
        for k in xrange(nvlms):
            modelvol.append(get_im(inpstr,k))
        nx = modelvol[0].get_xsize()
        if nx != boxsize:
            ERROR("Requested box dimension does not match dimension of the input model.", \
            "sxprocess - generate projections",1)
        nvol = 10
        volfts = [[] for k in xrange(nvlms)]
        # Create nvol perturbed copies of each model volume (random gaussian blob added).
        for k in xrange(nvlms):
            for i in xrange(nvol):
                sigma = sigma_add + random()  # 1.5-2.5
                addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma, sigma, 38, 38, 40 )
                scale = scale_mult * (0.5+random())
                vf, kb = prep_vol(modelvol[k] + scale*addon)
                volfts[k].append(vf)
        del vf, modelvol
        if parm_format == "bdb":
            stack_data = "bdb:"+outstk
            delete_bdb(stack_data)
        else:
            stack_data = outstk + ".hdf"
        Cs = 2.0
        pixel = parm_apix
        voltage = 120.0
        ampcont = 10.0
        ibd = 4096/2-boxsize
        iprj = 0
        width = 240
        xstart = 8 + boxsize/2
        ystart = 8 + boxsize/2
        rowlen = 17
        from random import randint
        params = []
        # One micrograph per defocus value.
        for idef in xrange(3, 8):
            irow = 0
            icol = 0
            mic = model_blank(4096, 4096)
            defocus = idef * 0.5#0.2
            if parm_CTF:
                astampl = defocus * 0.15
                astangl = 50.0
                ctf = generate_ctf([defocus, Cs, voltage, pixel, ampcont, 0.0, astampl, astangl])
            for i in xrange(nangle):
                for k in xrange(12):
                    # Randomly perturb the base orientation and shift.
                    dphi = 8.0*(random()-0.5)
                    dtht = 8.0*(random()-0.5)
                    psi = 360.0*random()
                    phi = angles[i][0]+dphi
                    tht = angles[i][1]+dtht
                    s2x = 4.0*(random()-0.5)
                    s2y = 4.0*(random()-0.5)
                    params.append([phi, tht, psi, s2x, s2y])
                    ivol = iprj % nvol
                    #imgsrc = randint(0,nvlms-1)
                    imgsrc = iprj % nvlms
                    proj = prgs(volfts[imgsrc][ivol], kb, [phi, tht, psi, -s2x, -s2y])
                    # Paste the clean projection into the micrograph grid.
                    x = xstart + irow * width
                    y = ystart + icol * width
                    mic += pad(proj, 4096, 4096, 1, 0.0, x-2048, y-2048, 0)
                    proj = proj + model_gauss_noise( sigma_proj, nx, nx )
                    if parm_CTF:
                        proj = filt_ctf(proj, ctf)
                        proj.set_attr_dict({"ctf":ctf, "ctf_applied":0})
                    proj = proj + filt_gaussl(model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss)
                    proj.set_attr("origimgsrc",imgsrc)
                    proj.set_attr("test_id", iprj)
                    # flags describing the status of the image (1 = true, 0 = false)
                    set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0])
                    set_params_proj(proj, [phi, tht, psi, s2x, s2y])
                    proj.write_image(stack_data, iprj)
                    icol += 1
                    if icol == rowlen:
                        icol = 0
                        irow += 1
                    iprj += 1
            mic += model_gauss_noise(sigma_mic,4096,4096)
            if parm_CTF:
                #apply CTF
                mic = filt_ctf(mic, ctf)
            mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096), sigma_gauss_mic)
            mic.write_image(micpref + "%1d.hdf" % (idef-3), 0)
        drop_spider_doc("params.txt", params)

    elif options.importctf != None:
        # --- Operation 10: import sxcter CTF parameters into particle headers.
        print ' IMPORTCTF '
        from utilities import read_text_row,write_text_row
        from random import randint
        import subprocess
        grpfile = 'groupid%04d'%randint(1000,9999)
        ctfpfile = 'ctfpfile%04d'%randint(1000,9999)
        cterr = [options.defocuserror/100.0, options.astigmatismerror]
        ctfs = read_text_row(options.importctf)
        for kk in xrange(len(ctfs)):
            root,name = os.path.split(ctfs[kk][-1])
            ctfs[kk][-1] = name[:-4]
        if(options.input[:4] != 'bdb:'):
            ERROR('Sorry, only bdb files implemented','importctf',1)
        d = options.input[4:]
        #try:  str = d.index('*')
        #except: str = -1
        from string import split
        import glob
        uu = os.path.split(d)
        uu = os.path.join(uu[0],'EMAN2DB',uu[1]+'.bdb')
        flist = glob.glob(uu)
        for i in xrange(len(flist)):
            root,name = os.path.split(flist[i])
            root = root[:-7]
            name = name[:-4]
            fil = 'bdb:'+os.path.join(root,name)
            sourcemic = EMUtil.get_all_attributes(fil,'ptcl_source_image')
            nn = len(sourcemic)
            gctfp = []
            groupid = []
            for kk in xrange(nn):
                junk,name2 = os.path.split(sourcemic[kk])
                name2 = name2[:-4]
                ctfp = [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
                for ll in xrange(len(ctfs)):
                    if(name2 == ctfs[ll][-1]):
                        # found correct
                        if(ctfs[ll][8]/ctfs[ll][0] <= cterr[0]):
                            # acceptable defocus error
                            ctfp = ctfs[ll][:8]
                            if(ctfs[ll][10] > cterr[1] ):
                                # error of astigmatism exceed the threshold, set astigmatism to zero.
                                ctfp[6] = 0.0
                                ctfp[7] = 0.0
                            gctfp.append(ctfp)
                            groupid.append(kk)
                        break
            if(len(groupid) > 0):
                write_text_row(groupid, grpfile)
                write_text_row(gctfp, ctfpfile)
                cmd = "{} {} {} {}".format('e2bdb.py',fil,'--makevstack=bdb:'+root+'G'+name,'--list='+grpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
                cmd = "{} {} {} {}".format('sxheader.py','bdb:'+root+'G'+name,'--params=ctf','--import='+ctfpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
            else:
                print ' >>> Group ',name,' skipped.'
        cmd = "{} {} {}".format("rm -f",grpfile,ctfpfile)
        subprocess.call(cmd, shell=True)

    elif options.scale > 0.0:
        # --- Operation 11: divide 3-D shift columns (3 and 4) by the scale factor.
        from utilities import read_text_row,write_text_row
        scale = options.scale
        nargs = len(args)
        if nargs != 2:
            print "Please provide names of input and output file!"
            return
        p = read_text_row(args[0])
        for i in xrange(len(p)):
            p[i][3] /= scale
            p[i][4] /= scale
        write_text_row(p, args[1])

    elif options.adaptive_mask:
        # --- Operation 12: build a 3-D mask, adaptively or from a user threshold.
        from utilities import get_im
        from morphology import adaptive_mask, binarize, erosion, dilation
        nsigma = options.nsigma
        ndilation = options.ndilation
        kernel_size = options.kernel_size
        gauss_standard_dev = options.gauss_standard_dev
        nargs = len(args)
        if nargs == 0:
            print " Create 3D mask from a given volume, either automatically or from the user provided threshold."
        elif nargs > 2:
            print "Too many inputs are given, try again!"
            return
        else:
            inputvol = get_im(args[0])
            input_path, input_file_name = os.path.split(args[0])
            input_file_name_root,ext = os.path.splitext(input_file_name)
            if nargs == 2:
                mask_file_name = args[1]
            else:
                mask_file_name = "adaptive_mask_for_"+input_file_name_root+".hdf"  # Only hdf file is output.
            if options.threshold != 9999.:
                # User-specified threshold: binarize, then erode/dilate as requested.
                mask3d = binarize(inputvol, options.threshold)
                for i in xrange(options.ne):
                    mask3d = erosion(mask3d)
                for i in xrange(options.nd):
                    mask3d = dilation(mask3d)
            else:
                mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size, gauss_standard_dev)
            mask3d.write_image(mask_file_name)

    elif options.postprocess:
        # --- Operation 13: B-factor sharpening (2-D stack) or FSC-weighted
        # --- sharpening of two half-volumes (3-D).
        from utilities import get_im
        from fundamentals import rot_avg_table
        from morphology import compute_bfactor,power
        from statistics import fsc
        from filter import filt_table, filt_gaussinv
        from EMAN2 import periodogram
        e1 = get_im(args[0],0)
        if e1.get_zsize() == 1:
            # 2-D stack path.
            # NOTE(review): pixel_size and sqrt are only bound in the 3-D branch
            # below; this path would raise NameError as written -- confirm.
            # NOTE(review): e1.write_image(options.output) carries no index, so
            # each iteration overwrites the same output file -- confirm intent.
            nimage = EMUtil.get_image_count(args[0])
            if options.mask != None:
                m = get_im(options.mask)
            else:
                m = None
            for i in xrange(nimage):
                e1 = get_im(args[0],i)
                if m:
                    e1 *= m
                guinerline = rot_avg_table(power(periodogram(e1),.5))
                freq_max = 1/(2.*pixel_size)
                freq_min = 1./options.B_start
                b,junk = compute_bfactor(guinerline, freq_min, freq_max, pixel_size)
                tmp = b/pixel_size**2
                sigma_of_inverse = sqrt(2./tmp)
                e1 = filt_gaussinv(e1,sigma_of_inverse)
                if options.low_pass_filter:
                    from filter import filt_tanl
                    e1 = filt_tanl(e1,options.ff, options.aa)
                e1.write_image(options.output)
        else:
            # 3-D half-volume path.
            nargs = len(args)
            e1 = get_im(args[0])
            if nargs > 1:
                e2 = get_im(args[1])
            if options.mask != None:
                m = get_im(options.mask)
            else:
                m = None
            pixel_size = options.pixel_size
            from math import sqrt
            if m != None:
                e1 *= m
                if nargs > 1:
                    e2 *= m
            if options.fsc_weighted:
                frc = fsc(e1,e2,1)
                ## FSC is done on masked two images
                #### FSC weighting sqrt((2.*fsc)/(1+fsc));
                fil = len(frc[1])*[None]
                for i in xrange(len(fil)):
                    if frc[1][i] >= options.FSC_cutoff:
                        tmp = frc[1][i]
                    else:
                        tmp = 0.0
                    fil[i] = sqrt(2.*tmp/(1.+tmp))
            if nargs > 1:
                e1 += e2
            if options.fsc_weighted:
                e1 = filt_table(e1,fil)
            # Estimate the B-factor from the Guinier line and apply its negative.
            guinerline = rot_avg_table(power(periodogram(e1),.5))
            freq_max = 1/(2.*pixel_size)
            freq_min = 1./options.B_start
            b,junk = compute_bfactor(guinerline, freq_min, freq_max, pixel_size)
            tmp = b/pixel_size**2
            sigma_of_inverse = sqrt(2./tmp)
            e1 = filt_gaussinv(e1,sigma_of_inverse)
            if options.low_pass_filter:
                from filter import filt_tanl
                e1 = filt_tanl(e1,options.ff, options.aa)
            e1.write_image(options.output)

    elif options.window_stack:
        # --- Operation 14: window every image of a stack to options.box.
        nargs = len(args)
        if nargs == 0:
            print " Reduce image size of a stack"
            return
        else:
            output_stack_name = None
            inputstack = args[0]
            if nargs == 2:
                output_stack_name = args[1]
            input_path,input_file_name = os.path.split(inputstack)
            input_file_name_root,ext = os.path.splitext(input_file_name)
            if input_file_name_root[0:3] == "bdb":
                stack_is_bdb = True
            else:
                stack_is_bdb = False
            if output_stack_name is None:
                if stack_is_bdb:
                    output_stack_name = "bdb:reduced_"+input_file_name_root[4:]
                else:
                    output_stack_name = "reduced_"+input_file_name_root+".hdf"  # Only hdf file is output.
            nimage = EMUtil.get_image_count(inputstack)
            from fundamentals import window2d
            for i in xrange(nimage):
                image = EMData()
                image.read_image(inputstack,i)
                w = window2d(image,options.box,options.box)
                w.write_image(output_stack_name,i)

    else:
        ERROR("Please provide option name","sxprocess.py",1)
def main():
    """
    MPI-parallel driver for iterative 3-D sorting (classification) of a
    projection stack (sxsort3d-style).

    Positional command-line arguments: stack, masterdir, optional 3-D mask.
    Every MPI rank executes this function; rank ``main_node`` (0) does the
    logging, directory creation and file I/O, and results are distributed to
    the other ranks with broadcasts.  The overall flow visible here:

      1. parse options, strip MPI launcher flags from argv;
      2. build the ``Constants`` and ``Tracker`` state dictionaries;
      3. determine image size / pixel size on the main node and broadcast;
      4. create the master directory and a local bdb vstack of the data;
      5. reconstruct two half-set ("chunk") volumes to estimate resolution
         and derive the low-pass filter;
      6. generation 0: independent EQ-Kmeans runs, two-way comparison of the
         partitions, Kmref refinement of the stable classes;
      7. repeat per generation on the unaccounted particles while at least
         two groups remain, optionally reconstructing the leftover set;
      8. finalize MPI.

    Side effects: creates directories and many image/text files under
    masterdir, spawns external programs (e2bdb.py, sxcpy.py, sxheader.py,
    mkdir) via os.system/cmdexecute, writes logs.  Returns nothing.
    """
    from logger import Logger, BaseLogger_Files
    # Strip the MPI process-manager bookkeeping flags '-p4pg'/'-p4wd'
    # (each consumes its following value) before option parsing.
    arglist = []
    i = 0
    while( i < len(sys.argv) ):
        if sys.argv[i]=='-p4pg':
            i = i+2
        elif sys.argv[i]=='-p4wd':
            i = i+2
        else:
            arglist.append( sys.argv[i] )
            i = i+1
    progname = os.path.basename(arglist[0])
    usage = progname + " stack outdir <mask> --focus=3Dmask --radius=outer_radius --delta=angular_step" +\
    "--an=angular_neighborhood --maxit=max_iter --CTF --sym=c1 --function=user_function --independent=indenpendent_runs --number_of_images_per_group=number_of_images_per_group --low_pass_frequency=.25 --seed=random_seed"
    parser = OptionParser(usage,version=SPARXVERSION)
    parser.add_option("--focus", type ="string", default ='', help="bineary 3D mask for focused clustering ")
    parser.add_option("--ir", type = "int", default =1, help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option("--radius", type = "int", default =-1, help="particle radius in pixel for rotational correlation <nx-1 (set to the radius of the particle)")
    parser.add_option("--maxit", type = "int", default =25, help="maximum number of iteration")
    parser.add_option("--rs", type = "int", default =1, help="step between rings in rotational correlation >0 (set to 1)" )
    parser.add_option("--xr", type ="string", default ='1', help="range for translation search in x direction, search is +/-xr ")
    parser.add_option("--yr", type ="string", default ='-1', help="range for translation search in y direction, search is +/-yr (default = same as xr)")
    parser.add_option("--ts", type ="string", default ='0.25', help="step size of the translation search in both directions direction, search is -xr, -xr+ts, 0, xr-ts, xr ")
    parser.add_option("--delta", type ="string", default ='2', help="angular step of reference projections")
    parser.add_option("--an", type ="string", default ='-1', help="angular neighborhood for local searches")
    parser.add_option("--center", type ="int", default =0, help="0 - if you do not want the volume to be centered, 1 - center the volume using cog (default=0)")
    parser.add_option("--nassign", type ="int", default =1, help="number of reassignment iterations performed for each angular step (set to 3) ")
    parser.add_option("--nrefine", type ="int", default =0, help="number of alignment iterations performed for each angular step (set to 0)")
    parser.add_option("--CTF", action ="store_true", default =False, help="do CTF correction during clustring")
    parser.add_option("--stoprnct", type ="float", default =3.0, help="Minimum percentage of assignment change to stop the program")
    parser.add_option("--sym", type ="string", default ='c1', help="symmetry of the structure ")
    parser.add_option("--function", type ="string", default ='do_volume_mrk05', help="name of the reference preparation function")
    parser.add_option("--independent", type ="int", default = 3, help="number of independent run")
    parser.add_option("--number_of_images_per_group", type ="int", default =1000, help="number of groups")
    parser.add_option("--low_pass_filter", type ="float", default =-1.0, help="absolute frequency of low-pass filter for 3d sorting on the original image size" )
    parser.add_option("--nxinit", type ="int", default =64, help="initial image size for sorting" )
    parser.add_option("--unaccounted", action ="store_true", default =False, help="reconstruct the unaccounted images")
    parser.add_option("--seed", type ="int", default =-1, help="random seed for create initial random assignment for EQ Kmeans")
    parser.add_option("--smallest_group", type ="int", default =500, help="minimum members for identified group")
    parser.add_option("--sausage", action ="store_true", default =False, help="way of filter volume")
    parser.add_option("--chunkdir", type ="string", default ='', help="chunkdir for computing margin of error")
    parser.add_option("--PWadjustment", type ="string", default ='', help="1-D power spectrum of PDB file used for EM volume power spectrum correction")
    parser.add_option("--protein_shape", type ="string", default ='g', help="protein shape. It defines protein preferred orientation angles. Currently it has g and f two types ")
    parser.add_option("--upscale", type ="float", default =0.5, help=" scaling parameter to adjust the power spectrum of EM volumes")
    parser.add_option("--wn", type ="int", default =0, help="optimal window size for data processing")
    parser.add_option("--interpolation", type ="string", default ="4nn", help="3-d reconstruction interpolation method, two options trl and 4nn")
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 1 or len(args) > 4:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    else:
        # Optional third positional argument is the 3-D mask file.
        if len(args)>2:
            mask_file = args[2]
        else:
            mask_file = None
        orgstack =args[0]
        masterdir =args[1]
        global_def.BATCH = True
        #---initialize MPI related variables
        # NOTE: mpi_bcast is listed twice in this import; harmless duplicate.
        from mpi import mpi_init, mpi_comm_size, MPI_COMM_WORLD, mpi_comm_rank,mpi_barrier,mpi_bcast, mpi_bcast, MPI_INT,MPI_CHAR
        sys.argv = mpi_init(len(sys.argv),sys.argv)
        nproc = mpi_comm_size(MPI_COMM_WORLD)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        mpi_comm = MPI_COMM_WORLD
        main_node= 0
        # import some utilities
        from utilities import get_im,bcast_number_to_all,cmdexecute,write_text_file,read_text_file,wrap_mpi_bcast, get_params_proj, write_text_row
        from applications import recons3d_n_MPI, mref_ali3d_MPI, Kmref_ali3d_MPI
        from statistics import k_means_match_clusters_asg_new,k_means_stab_bbenum
        from applications import mref_ali3d_EQ_Kmeans, ali3d_mref_Kmeans_MPI
        # Create the main log file (file-backed logger on the main node only;
        # all other ranks carry None and must not call log_main.add).
        from logger import Logger,BaseLogger_Files
        if myid ==main_node:
            log_main=Logger(BaseLogger_Files())
            log_main.prefix = masterdir+"/"
        else:
            log_main =None
        #--- fill input parameters into dictionary named after Constants
        # Constants holds the run-invariant settings; Tracker (below) wraps it
        # together with the mutable per-iteration state.
        Constants ={}
        Constants["stack"] = args[0]
        Constants["masterdir"] = masterdir
        Constants["mask3D"] = mask_file
        Constants["focus3Dmask"] = options.focus
        Constants["indep_runs"] = options.independent
        Constants["stoprnct"] = options.stoprnct
        Constants["number_of_images_per_group"] = options.number_of_images_per_group
        Constants["CTF"] = options.CTF
        Constants["maxit"] = options.maxit
        Constants["ir"] = options.ir
        Constants["radius"] = options.radius
        Constants["nassign"] = options.nassign
        Constants["rs"] = options.rs
        Constants["xr"] = options.xr
        Constants["yr"] = options.yr
        Constants["ts"] = options.ts
        Constants["delta"] = options.delta
        Constants["an"] = options.an
        Constants["sym"] = options.sym
        Constants["center"] = options.center
        Constants["nrefine"] = options.nrefine
        #Constants["fourvar"] = options.fourvar
        Constants["user_func"] = options.function
        Constants["low_pass_filter"] = options.low_pass_filter # enforced low_pass_filter
        #Constants["debug"] = options.debug
        Constants["main_log_prefix"] = args[1]
        #Constants["importali3d"] = options.importali3d
        Constants["myid"] = myid
        Constants["main_node"] = main_node
        Constants["nproc"] = nproc
        Constants["log_main"] = log_main
        Constants["nxinit"] = options.nxinit
        Constants["unaccounted"] = options.unaccounted
        Constants["seed"] = options.seed
        Constants["smallest_group"] = options.smallest_group
        Constants["sausage"] = options.sausage
        Constants["chunkdir"] = options.chunkdir
        Constants["PWadjustment"] = options.PWadjustment
        Constants["upscale"] = options.upscale
        Constants["wn"] = options.wn
        Constants["3d-interpolation"] = options.interpolation
        Constants["protein_shape"] = options.protein_shape
        # -----------------------------------------------------
        # Create and initialize Tracker dictionary with input options
        Tracker = {}
        Tracker["constants"] = Constants
        Tracker["maxit"] = Tracker["constants"]["maxit"]
        Tracker["radius"] = Tracker["constants"]["radius"]
        #Tracker["xr"] = ""
        #Tracker["yr"] = "-1" # Do not change!
        #Tracker["ts"] = 1
        #Tracker["an"] = "-1"
        #Tracker["delta"] = "2.0"
        #Tracker["zoom"] = True
        #Tracker["nsoft"] = 0
        #Tracker["local"] = False
        #Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]
        Tracker["upscale"] = Tracker["constants"]["upscale"]
        #Tracker["upscale"] = 0.5
        Tracker["applyctf"] = False # Should the data be premultiplied by the CTF. Set to False for local continuous.
        #Tracker["refvol"] = None
        Tracker["nxinit"] = Tracker["constants"]["nxinit"]
        #Tracker["nxstep"] = 32
        Tracker["icurrentres"] = -1
        #Tracker["ireachedres"] = -1
        #Tracker["lowpass"] = 0.4
        #Tracker["falloff"] = 0.2
        #Tracker["inires"] = options.inires # Now in A, convert to absolute before using
        Tracker["fuse_freq"] = 50 # Now in A, convert to absolute before using
        #Tracker["delpreviousmax"] = False
        #Tracker["anger"] = -1.0
        #Tracker["shifter"] = -1.0
        #Tracker["saturatecrit"] = 0.95
        #Tracker["pixercutoff"] = 2.0
        #Tracker["directory"] = ""
        #Tracker["previousoutputdir"] = ""
        #Tracker["eliminated-outliers"] = False
        #Tracker["mainiteration"] = 0
        #Tracker["movedback"] = False
        #Tracker["state"] = Tracker["constants"]["states"][0]
        #Tracker["global_resolution"] =0.0
        Tracker["orgstack"] = orgstack
        #--------------------------------------------------------------------
        # import from utilities
        from utilities import sample_down_1D_curve,get_initial_ID,remove_small_groups,print_upper_triangular_matrix,print_a_line_with_timestamp
        from utilities import print_dict,get_resolution_mrk01,partition_to_groups,partition_independent_runs,get_outliers
        from utilities import merge_groups, save_alist, margin_of_error, get_margin_of_error, do_two_way_comparison, select_two_runs, get_ali3d_params
        from utilities import counting_projections, unload_dict, load_dict, get_stat_proj, create_random_list, get_number_of_groups, recons_mref
        from utilities import apply_low_pass_filter, get_groups_from_partition, get_number_of_groups, get_complementary_elements_total, update_full_dict
        from utilities import count_chunk_members, set_filter_parameters_from_adjusted_fsc, adjust_fsc_down, get_two_chunks_from_stack
        ####------------------------------------------------------------------
        # Get the pixel size; if none, set to 1.0, and the original image size
        from utilities import get_shrink_data_huang
        if(myid == main_node):
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            print(line+"Initialization of 3-D sorting")
            a = get_im(orgstack)
            nnxo = a.get_xsize()
            if( Tracker["nxinit"] > nnxo ):
                # nnxo = -1 signals the failure to the other ranks after the
                # broadcast below.
                ERROR("Image size less than minimum permitted $d"%Tracker["nxinit"],"sxsort3d.py",1)
                nnxo = -1
            else:
                if Tracker["constants"]["CTF"]:
                    # Take the pixel size from the CTF header of the first image.
                    i = a.get_attr('ctf')
                    pixel_size = i.apix
                    fq = pixel_size/Tracker["fuse_freq"]
                else:
                    pixel_size = 1.0
                    # No pixel size, fusing computed as 5 Fourier pixels
                    fq = 5.0/nnxo
                del a
        else:
            nnxo = 0
            fq = 0.0
            pixel_size = 1.0
        # Broadcast main-node values so every rank agrees; abort on all ranks
        # if the size check above failed.
        nnxo = bcast_number_to_all(nnxo, source_node = main_node)
        if( nnxo < 0 ):
            mpi_finalize()
            exit()
        pixel_size = bcast_number_to_all(pixel_size, source_node = main_node)
        fq = bcast_number_to_all(fq, source_node = main_node)
        if Tracker["constants"]["wn"]==0:
            Tracker["constants"]["nnxo"] = nnxo
        else:
            # A user-provided window size overrides the measured image size.
            Tracker["constants"]["nnxo"] = Tracker["constants"]["wn"]
            nnxo = Tracker["constants"]["nnxo"]
        Tracker["constants"]["pixel_size"] = pixel_size
        Tracker["fuse_freq"] = fq
        del fq, nnxo, pixel_size
        # Default radius fills the box minus a 2-pixel margin; reject radii
        # that would not fit the (possibly windowed) image.
        if(Tracker["constants"]["radius"] < 1):
            Tracker["constants"]["radius"] = Tracker["constants"]["nnxo"]//2-2
        elif((2*Tracker["constants"]["radius"] +2) > Tracker["constants"]["nnxo"]):
            ERROR("Particle radius set too large!","sxsort3d.py",1,myid)
        ####-----------------------------------------------------------------------------------------
        # Master directory
        if myid == main_node:
            if masterdir =="":
                # No directory given: create a timestamped default.
                timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
                masterdir ="master_sort3d"+timestring
            li =len(masterdir)
            cmd="{} {}".format("mkdir", masterdir)
            os.system(cmd)
        else:
            li=0
        # Broadcast the (possibly auto-generated) directory name length, then
        # the name itself as a character array.
        li = mpi_bcast(li,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
        if li>0:
            masterdir = mpi_bcast(masterdir,li,MPI_CHAR,main_node,MPI_COMM_WORLD)
            import string
            # mpi_bcast returns a sequence of characters; join it back into a
            # Python 2 str (string.join with empty separator).
            masterdir = string.join(masterdir,"")
        if myid ==main_node:
            print_dict(Tracker["constants"],"Permanent settings of 3-D sorting program")
        ######### create a vstack from input stack to the local stack in masterdir
        # stack name set to default
        Tracker["constants"]["stack"] = "bdb:"+masterdir+"/rdata"
        Tracker["constants"]["ali3d"] = os.path.join(masterdir, "ali3d_init.txt")
        Tracker["constants"]["ctf_params"] = os.path.join(masterdir, "ctf_params.txt")
        Tracker["constants"]["partstack"] = Tracker["constants"]["ali3d"] # also serves for refinement
        if myid == main_node:
            total_stack = EMUtil.get_image_count(Tracker["orgstack"])
        else:
            total_stack = 0
        total_stack = bcast_number_to_all(total_stack, source_node = main_node)
        mpi_barrier(MPI_COMM_WORLD)
        from time import sleep
        # Non-main ranks poll until the master directory appears on the
        # (shared) filesystem.
        while not os.path.exists(masterdir):
            print "Node ",myid," waiting..."
            sleep(5)
        mpi_barrier(MPI_COMM_WORLD)
        if myid == main_node:
            log_main.add("Sphire sort3d ")
            log_main.add("the sort3d master directory is "+masterdir)
        #####
        ###----------------------------------------------------------------------------------
        # Initial data analysis and handle two chunk files
        from random import shuffle
        # Compute the resolution
        #### make chunkdir dictionary for computing margin of error
        import user_functions
        user_func = user_functions.factory[Tracker["constants"]["user_func"]]
        chunk_dict = {}
        chunk_list = []
        # Read the two half-set particle-ID lists on the main node and
        # broadcast them (wrap_mpi_bcast handles arbitrary Python objects).
        if myid == main_node:
            chunk_one = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk0.txt"))
            chunk_two = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk1.txt"))
        else:
            chunk_one = 0
            chunk_two = 0
        chunk_one = wrap_mpi_bcast(chunk_one, main_node)
        chunk_two = wrap_mpi_bcast(chunk_two, main_node)
        mpi_barrier(MPI_COMM_WORLD)
        ######################## Read/write bdb: data on main node ############################
        # Copy the input stack into the local vstack and export alignment and
        # CTF parameters via external SPARX command-line tools.
        if myid==main_node:
            if(orgstack[:4] == "bdb:"):
                cmd = "{} {} {}".format("e2bdb.py", orgstack,"--makevstack="+Tracker["constants"]["stack"])
            else:
                cmd = "{} {} {}".format("sxcpy.py", orgstack, Tracker["constants"]["stack"])
            cmdexecute(cmd)
            cmd = "{} {} {}".format("sxheader.py --params=xform.projection", "--export="+Tracker["constants"]["ali3d"],orgstack)
            cmdexecute(cmd)
            cmd = "{} {} {}".format("sxheader.py --params=ctf", "--export="+Tracker["constants"]["ctf_params"],orgstack)
            cmdexecute(cmd)
        mpi_barrier(MPI_COMM_WORLD)
        ########-----------------------------------------------------------------------------
        Tracker["total_stack"] = total_stack
        Tracker["constants"]["total_stack"] = total_stack
        Tracker["shrinkage"] = float(Tracker["nxinit"])/Tracker["constants"]["nnxo"]
        # Working radius scaled down to the shrunken image size.
        Tracker["radius"] = Tracker["constants"]["radius"]*Tracker["shrinkage"]
        if Tracker["constants"]["mask3D"]:
            Tracker["mask3D"] = os.path.join(masterdir,"smask.hdf")
        else:
            Tracker["mask3D"] = None
        if Tracker["constants"]["focus3Dmask"]:
            Tracker["focus3D"] = os.path.join(masterdir,"sfocus.hdf")
        else:
            Tracker["focus3D"] = None
        if myid == main_node:
            # Shrink the user masks to the working image size and store them
            # in the master directory.
            if Tracker["constants"]["mask3D"]:
                mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["mask3D"])
                mask_3D.write_image(Tracker["mask3D"])
            if Tracker["constants"]["focus3Dmask"]:
                mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["focus3Dmask"])
                st = Util.infomask(mask_3D, None, True)
                # An all-zero focus mask would silence everything; abort.
                if( st[0] == 0.0 ):
                    ERROR("sxrsort3d","incorrect focused mask, after binarize all values zero",1)
                mask_3D.write_image(Tracker["focus3D"])
                del mask_3D
        if Tracker["constants"]["PWadjustment"] !='':
            # Precompute the reference power spectrum resampled to the working
            # size; PW_dict maps image size -> spectrum file to use.
            PW_dict = {}
            nxinit_pwsp = sample_down_1D_curve(Tracker["constants"]["nxinit"],Tracker["constants"]["nnxo"],Tracker["constants"]["PWadjustment"])
            Tracker["nxinit_PW"] = os.path.join(masterdir,"spwp.txt")
            if myid == main_node:
                write_text_file(nxinit_pwsp,Tracker["nxinit_PW"])
            PW_dict[Tracker["constants"]["nnxo"]] = Tracker["constants"]["PWadjustment"]
            PW_dict[Tracker["constants"]["nxinit"]] = Tracker["nxinit_PW"]
            Tracker["PW_dict"] = PW_dict
        mpi_barrier(MPI_COMM_WORLD)
        #-----------------------From two chunks to FSC, and low pass filter-----------------------------------------###
        # chunk_dict: particle ID -> half-set index (0 or 1).
        for element in chunk_one:
            chunk_dict[element] = 0
        for element in chunk_two:
            chunk_dict[element] = 1
        chunk_list =[chunk_one, chunk_two]
        Tracker["chunk_dict"] = chunk_dict
        Tracker["P_chunk0"] = len(chunk_one)/float(total_stack)
        Tracker["P_chunk1"] = len(chunk_two)/float(total_stack)
        ### create two volumes to estimate resolution
        if myid == main_node:
            for index in xrange(2):
                write_text_file(chunk_list[index],os.path.join(masterdir,"chunk%01d.txt"%index))
        mpi_barrier(MPI_COMM_WORLD)
        vols = []
        for index in xrange(2):
            data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"], os.path.join(masterdir,"chunk%01d.txt"%index), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift=True)
            vol = recons3d_4nn_ctf_MPI(myid=myid, prjlist=data,symmetry=Tracker["constants"]["sym"], finfo=None)
            if myid == main_node:
                vol.write_image(os.path.join(masterdir, "vol%d.hdf"%index))
            vols.append(vol)
            mpi_barrier(MPI_COMM_WORLD)
        if myid ==main_node:
            low_pass, falloff,currentres = get_resolution_mrk01(vols,Tracker["constants"]["radius"],Tracker["constants"]["nxinit"],masterdir,Tracker["mask3D"])
            # Cap the measured resolution at the user-enforced filter value.
            if low_pass >Tracker["constants"]["low_pass_filter"]:
                low_pass= Tracker["constants"]["low_pass_filter"]
        else:
            low_pass =0.0
            falloff =0.0
            currentres =0.0
        # NOTE(review): the return values of these three broadcasts are
        # discarded, so non-main ranks keep their 0.0 placeholders for
        # currentres/low_pass/falloff below — likely should be
        # `currentres = bcast_number_to_all(...)` etc.; confirm intent.
        bcast_number_to_all(currentres,source_node = main_node)
        bcast_number_to_all(low_pass,source_node = main_node)
        bcast_number_to_all(falloff,source_node = main_node)
        Tracker["currentres"] = currentres
        Tracker["falloff"] = falloff
        if Tracker["constants"]["low_pass_filter"] ==-1.0:
            Tracker["low_pass_filter"] = min(.45,low_pass/Tracker["shrinkage"]) # no better than .45
        else:
            Tracker["low_pass_filter"] = min(.45,Tracker["constants"]["low_pass_filter"]/Tracker["shrinkage"])
        Tracker["lowpass"] = Tracker["low_pass_filter"]
        # Fixed fall-off overrides the value measured above.
        Tracker["falloff"] =.1
        Tracker["global_fsc"] = os.path.join(masterdir, "fsc.txt")
        ############################################################################################
        if myid == main_node:
            log_main.add("The command-line inputs are as following:")
            log_main.add("**********************************************************")
        for a in sys.argv:
            if myid == main_node:log_main.add(a)
        if myid == main_node:
            log_main.add("number of cpus used in this run is %d"%Tracker["constants"]["nproc"])
            log_main.add("**********************************************************")
        from filter import filt_tanl
        ### START 3-D sorting
        if myid ==main_node:
            log_main.add("----------3-D sorting program------- ")
            log_main.add("current resolution %6.3f for images of original size in terms of absolute frequency"%Tracker["currentres"])
            log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["currentres"]/Tracker["shrinkage"]))
            log_main.add("the user provided enforced low_pass_filter is %f"%Tracker["constants"]["low_pass_filter"])
            #log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["constants"]["low_pass_filter"]))
            # Write low-pass filtered copies of the two chunk volumes.
            for index in xrange(2):
                filt_tanl(get_im(os.path.join(masterdir,"vol%01d.hdf"%index)), Tracker["low_pass_filter"],Tracker["falloff"]).write_image(os.path.join(masterdir, "volf%01d.hdf"%index))
        mpi_barrier(MPI_COMM_WORLD)
        from utilities import get_input_from_string
        delta = get_input_from_string(Tracker["constants"]["delta"])
        delta = delta[0]
        from utilities import even_angles
        n_angles = even_angles(delta, 0, 180)
        this_ali3d = Tracker["constants"]["ali3d"]
        # Angular coverage statistics of the input alignment parameters.
        sampled = get_stat_proj(Tracker,delta,this_ali3d)
        if myid ==main_node:
            nc = 0
            for a in sampled:
                if len(sampled[a])>0:
                    nc += 1
            log_main.add("total sampled direction %10d at angle step %6.3f"%(len(n_angles), delta))
            log_main.add("captured sampled directions %10d percentage covered by data %6.3f"%(nc,float(nc)/len(n_angles)*100))
        number_of_images_per_group = Tracker["constants"]["number_of_images_per_group"]
        if myid ==main_node:
            log_main.add("user provided number_of_images_per_group %d"%number_of_images_per_group)
        Tracker["number_of_images_per_group"] = number_of_images_per_group
        number_of_groups = get_number_of_groups(total_stack,number_of_images_per_group)
        Tracker["number_of_groups"] = number_of_groups
        # ---------------- Generation 0: sort the full data set ----------------
        generation =0
        partition_dict ={}
        full_dict ={}
        workdir =os.path.join(masterdir,"generation%03d"%generation)
        Tracker["this_dir"] = workdir
        if myid ==main_node:
            log_main.add("---- generation %5d"%generation)
            log_main.add("number of images per group is set as %d"%number_of_images_per_group)
            log_main.add("the initial number of groups is %10d "%number_of_groups)
            cmd="{} {}".format("mkdir",workdir)
            os.system(cmd)
        mpi_barrier(MPI_COMM_WORLD)
        list_to_be_processed = range(Tracker["constants"]["total_stack"])
        Tracker["this_data_list"] = list_to_be_processed
        create_random_list(Tracker)
        #################################
        # full_dict: particle index -> global ID (identity at generation 0).
        full_dict ={}
        for iptl in xrange(Tracker["constants"]["total_stack"]):
            full_dict[iptl] = iptl
        Tracker["full_ID_dict"] = full_dict
        #################################
        # Independent EQ-Kmeans runs, each from a different random assignment.
        for indep_run in xrange(Tracker["constants"]["indep_runs"]):
            Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run]
            ref_vol = recons_mref(Tracker)
            if myid == main_node:
                log_main.add("independent run %10d"%indep_run)
            mpi_barrier(MPI_COMM_WORLD)
            Tracker["this_data_list"] = list_to_be_processed
            Tracker["total_stack"] = len(Tracker["this_data_list"])
            Tracker["this_particle_text_file"] = os.path.join(workdir,"independent_list_%03d.txt"%indep_run) # for get_shrink_data
            if myid == main_node:
                write_text_file(Tracker["this_data_list"], Tracker["this_particle_text_file"])
            mpi_barrier(MPI_COMM_WORLD)
            outdir = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run)
            ref_vol = apply_low_pass_filter(ref_vol,Tracker)
            mref_ali3d_EQ_Kmeans(ref_vol, outdir, Tracker["this_particle_text_file"], Tracker)
            partition_dict[indep_run]=Tracker["this_partition"]
        Tracker["partition_dict"] = partition_dict
        Tracker["total_stack"] = len(Tracker["this_data_list"])
        Tracker["this_total_stack"] = Tracker["total_stack"]
        ###############################
        # Find members stable across the independent partitions.
        do_two_way_comparison(Tracker)
        ###############################
        ref_vol_list = []
        from time import sleep
        number_of_ref_class = []
        for igrp in xrange(len(Tracker["two_way_stable_member"])):
            Tracker["this_data_list"] = Tracker["two_way_stable_member"][igrp]
            Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp)
            if myid == main_node:
                write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"])
            data,old_shifts = get_shrink_data_huang(Tracker,Tracker["nxinit"], Tracker["this_data_list_file"], Tracker["constants"]["partstack"], myid, main_node, nproc, preshift = True)
            volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo = None)
            ref_vol_list.append(volref)
            number_of_ref_class.append(len(Tracker["this_data_list"]))
            if myid == main_node:
                log_main.add("group %d members %d "%(igrp,len(Tracker["this_data_list"])))
        Tracker["number_of_ref_class"] = number_of_ref_class
        nx_of_image = ref_vol_list[0].get_xsize()
        if Tracker["constants"]["PWadjustment"]:
            Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
        else:
            Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"] # no PW adjustment
        if myid == main_node:
            # Post-process each stable-class volume with the user function
            # (refdata layout: [volume, Tracker, myid, nproc]).
            for iref in xrange(len(ref_vol_list)):
                refdata = [None]*4
                refdata[0] = ref_vol_list[iref]
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                volref.write_image(os.path.join(workdir,"volf_stable.hdf"),iref)
        mpi_barrier(MPI_COMM_WORLD)
        Tracker["this_data_list"] = Tracker["this_accounted_list"]
        outdir = os.path.join(workdir,"Kmref")
        empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker)
        # NOTE(review): get_complementary_elements is not in the visible
        # utilities import lists (only get_complementary_elements_total is);
        # verify it is imported elsewhere in this module.
        Tracker["this_unaccounted_list"] = get_complementary_elements(list_to_be_processed,final_list)
        if myid == main_node:
            log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"]))
            write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"])
        update_full_dict(Tracker["this_unaccounted_list"], Tracker)
        #######################################
        # Reconstruct each resolved class at the original image size.
        number_of_groups = len(res_groups)
        vol_list = []
        number_of_ref_class = []
        for igrp in xrange(number_of_groups):
            data,old_shifts = get_shrink_data_huang(Tracker, Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
            volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo=None)
            vol_list.append(volref)
            if( myid == main_node ):
                npergroup = len(read_text_file(os.path.join(outdir,"Class%d.txt"%igrp)))
            else:
                npergroup = 0
            npergroup = bcast_number_to_all(npergroup, main_node )
            number_of_ref_class.append(npergroup)
        Tracker["number_of_ref_class"] = number_of_ref_class
        mpi_barrier(MPI_COMM_WORLD)
        nx_of_image = vol_list[0].get_xsize()
        if Tracker["constants"]["PWadjustment"]:
            Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image]
        else:
            Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"]
        if myid == main_node:
            for ivol in xrange(len(vol_list)):
                refdata =[None]*4
                refdata[0] = vol_list[ivol]
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                volref.write_image(os.path.join(workdir,"volf_of_Classes.hdf"),ivol)
            log_main.add("number of unaccounted particles %10d"%len(Tracker["this_unaccounted_list"]))
            log_main.add("number of accounted particles %10d"%len(Tracker["this_accounted_list"]))
        Tracker["this_data_list"] = Tracker["this_unaccounted_list"] # reset parameters for the next round calculation
        Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
        Tracker["this_total_stack"] = Tracker["total_stack"]
        number_of_groups = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group)
        Tracker["number_of_groups"] = number_of_groups
        # ---------------- Subsequent generations on the unaccounted set ----------------
        while number_of_groups >= 2 :
            generation +=1
            partition_dict ={}
            workdir =os.path.join(masterdir,"generation%03d"%generation)
            Tracker["this_dir"] = workdir
            if myid ==main_node:
                log_main.add("*********************************************")
                log_main.add("----- generation %5d "%generation)
                log_main.add("number of images per group is set as %10d "%number_of_images_per_group)
                log_main.add("the number of groups is %10d "%number_of_groups)
                log_main.add(" number of particles for clustering is %10d"%Tracker["total_stack"])
                cmd ="{} {}".format("mkdir",workdir)
                os.system(cmd)
            mpi_barrier(MPI_COMM_WORLD)
            create_random_list(Tracker)
            for indep_run in xrange(Tracker["constants"]["indep_runs"]):
                Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run]
                ref_vol = recons_mref(Tracker)
                if myid == main_node:
                    log_main.add("independent run %10d"%indep_run)
                outdir = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run)
                Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
                #ref_vol=apply_low_pass_filter(ref_vol,Tracker)
                mref_ali3d_EQ_Kmeans(ref_vol,outdir,Tracker["this_unaccounted_text"],Tracker)
                partition_dict[indep_run] = Tracker["this_partition"]
                Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
                Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
                Tracker["partition_dict"] = partition_dict
                Tracker["this_total_stack"] = Tracker["total_stack"]
            total_list_of_this_run = Tracker["this_unaccounted_list"]
            ###############################
            do_two_way_comparison(Tracker)
            ###############################
            ref_vol_list = []
            number_of_ref_class = []
            for igrp in xrange(len(Tracker["two_way_stable_member"])):
                Tracker["this_data_list"] = Tracker["two_way_stable_member"][igrp]
                Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp)
                if myid == main_node:
                    write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"])
                mpi_barrier(MPI_COMM_WORLD)
                data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"],Tracker["this_data_list_file"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
                volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
                #volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
                # NOTE(review): 'iref' here is left over from the earlier
                # post-processing loop (this loop's index is 'igrp') — looks
                # like a stale index that overwrites the same slot; confirm.
                if myid == main_node:volref.write_image(os.path.join(workdir,"vol_stable.hdf"),iref)
                #volref = resample(volref,Tracker["shrinkage"])
                ref_vol_list.append(volref)
                number_of_ref_class.append(len(Tracker["this_data_list"]))
                mpi_barrier(MPI_COMM_WORLD)
            Tracker["number_of_ref_class"] = number_of_ref_class
            Tracker["this_data_list"] = Tracker["this_accounted_list"]
            outdir = os.path.join(workdir,"Kmref")
            empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker)
            # calculate the 3-D structure of original image size for each group
            number_of_groups = len(res_groups)
            Tracker["this_unaccounted_list"] = get_complementary_elements(total_list_of_this_run,final_list)
            if myid == main_node:
                log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"]))
                write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"])
            mpi_barrier(MPI_COMM_WORLD)
            update_full_dict(Tracker["this_unaccounted_list"],Tracker)
            vol_list = []
            for igrp in xrange(number_of_groups):
                data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"], myid, main_node, nproc,preshift = True)
                volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
                vol_list.append(volref)
            mpi_barrier(MPI_COMM_WORLD)
            # NOTE(review): sizes are taken from ref_vol_list (nxinit-sized)
            # while vol_list was reconstructed at nnxo — verify which size the
            # PW_dict lookup should use here.
            nx_of_image=ref_vol_list[0].get_xsize()
            if Tracker["constants"]["PWadjustment"]:
                Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
            else:
                Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]
            if myid == main_node:
                for ivol in xrange(len(vol_list)):
                    refdata = [None]*4
                    refdata[0] = vol_list[ivol]
                    refdata[1] = Tracker
                    refdata[2] = Tracker["constants"]["myid"]
                    refdata[3] = Tracker["constants"]["nproc"]
                    volref = user_func(refdata)
                    volref.write_image(os.path.join(workdir, "volf_of_Classes.hdf"),ivol)
                log_main.add("number of unaccounted particles %10d"%len(Tracker["this_unaccounted_list"]))
                log_main.add("number of accounted particles %10d"%len(Tracker["this_accounted_list"]))
            del vol_list
            mpi_barrier(MPI_COMM_WORLD)
            number_of_groups = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group)
            Tracker["number_of_groups"] = number_of_groups
            Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
            Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
        # Optionally reconstruct and post-process the particles that never
        # settled into a stable group.
        if Tracker["constants"]["unaccounted"]:
            data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"],Tracker["this_unaccounted_text"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
            volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
            nx_of_image = volref.get_xsize()
            if Tracker["constants"]["PWadjustment"]:
                Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image]
            else:
                Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"]
            if( myid == main_node ):
                refdata = [None]*4
                refdata[0] = volref
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                #volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
                volref.write_image(os.path.join(workdir,"volf_unaccounted.hdf"))
        # Finish program
        if myid ==main_node:
            log_main.add("sxsort3d finishes")
        mpi_barrier(MPI_COMM_WORLD)
        from mpi import mpi_finalize
        mpi_finalize()
        exit()
def do_volume_mrk02(ref_data):
    """Reconstruct, mask, power-spectrum-adjust and filter a 3-D reference volume.

    Collective MPI user function: every rank in the communicator must call it.

    ref_data : list
        0 - data: either a list of projections (scattered between cpus) or an
            already reconstructed volume.  If a volume, reconstruction is skipped
            and only the volume processing is done.
        1 - Tracker dictionary (constants, filter settings, PW adjustment, ...)
        2 - iteration number (used only for the commented-out debug write)
        3 - mpi communicator (None -> MPI_COMM_WORLD)

    Returns the processed volume, broadcast so it is the same on all cpus.
    """
    from EMAN2 import Util
    from mpi import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
    from filter import filt_table
    from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
    from utilities import bcast_EMData_to_all, bcast_number_to_all, model_blank
    from fundamentals import rops_table, fftip, fft
    import types
    # Retrieve the function specific input arguments from ref_data
    data = ref_data[0]
    Tracker = ref_data[1]
    iter = ref_data[2]  # NOTE: shadows builtin `iter`; kept for compatibility
    mpi_comm = ref_data[3]
    # # For DEBUG
    # print "Type of data %s" % (type(data))
    # print "Type of Tracker %s" % (type(Tracker))
    # print "Type of iter %s" % (type(iter))
    # print "Type of mpi_comm %s" % (type(mpi_comm))
    if(mpi_comm == None): mpi_comm = MPI_COMM_WORLD
    myid = mpi_comm_rank(mpi_comm)
    nproc = mpi_comm_size(mpi_comm)
    # Local (resolution-dependent) filtering is optional; absent key means off.
    try: local_filter = Tracker["local_filter"]
    except: local_filter = False
    #=========================================================================
    # volume reconstruction (collective): a list means raw projections
    if( type(data) == types.ListType ):
        if Tracker["constants"]["CTF"]:
            vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
                    symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
        else:
            vol = recons3d_4nn_MPI(myid, data,\
                    symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
    else:
        vol = data
    # All volume processing below happens on the main rank only; the result is
    # broadcast at the end.
    if myid == 0:
        from morphology import threshold
        from filter import filt_tanl, filt_btwl
        from utilities import model_circle, get_im
        import types
        nx = vol.get_xsize()
        # Build / load / rescale the 3-D mask, depending on mask3D setting.
        if(Tracker["constants"]["mask3D"] == None):
            # spherical mask with radius rescaled from original size nnxo to nx
            mask3D = model_circle(int(Tracker["constants"]["radius"]*float(nx)/float(Tracker["constants"]["nnxo"])+0.5), nx, nx, nx)
        elif(Tracker["constants"]["mask3D"] == "auto"):
            from utilities import adaptive_mask
            mask3D = adaptive_mask(vol)
        else:
            if( type(Tracker["constants"]["mask3D"]) == types.StringType ): mask3D = get_im(Tracker["constants"]["mask3D"])
            else: mask3D = (Tracker["constants"]["mask3D"]).copy()
            nxm = mask3D.get_xsize()
            if( nx != nxm):
                # rescale the user mask to the current volume size
                from fundamentals import rot_shift3D
                mask3D = Util.window(rot_shift3D(mask3D,scale=float(nx)/float(nxm)),nx,nx,nx)
                nxm = mask3D.get_xsize()
                assert(nx == nxm)
        # Normalize under the mask, clip negatives, apply mask.
        stat = Util.infomask(vol, mask3D, False)
        vol -= stat[0]
        Util.mul_scalar(vol, 1.0/stat[1])
        vol = threshold(vol)
        Util.mul_img(vol, mask3D)
        if( Tracker["PWadjustment"] ):
            # Adjust rotational power spectrum toward the reference table read
            # from the PWadjustment text file, raised to the "upscale" power.
            from utilities import read_text_file, write_text_file
            rt = read_text_file( Tracker["PWadjustment"] )
            fftip(vol)
            ro = rops_table(vol)
            # Here unless I am mistaken it is enough to take the beginning of the reference pw.
            for i in xrange(1,len(ro)): ro[i] = (rt[i]/ro[i])**Tracker["upscale"]
            #write_text_file(rops_table(filt_table( vol, ro),1),"foo.txt")
            if Tracker["constants"]["sausage"]:
                # boost two Gaussian bands (around 10A and ~4.65A in 1/pixel_size units)
                ny = vol.get_ysize()
                y = float(ny)
                from math import exp
                for i in xrange(len(ro)): ro[i] *= \
                    (1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))
            if local_filter:
                # skip low-pass filtration
                vol = fft( filt_table( vol, ro) )
            else:
                if( type(Tracker["lowpass"]) == types.ListType ):
                    vol = fft( filt_table( filt_table(vol, Tracker["lowpass"]), ro) )
                else:
                    vol = fft( filt_table( filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"]), ro) )
            del ro
        else:
            if Tracker["constants"]["sausage"]:
                ny = vol.get_ysize()
                y = float(ny)
                ro = [0.0]*(ny//2+2)
                from math import exp
                for i in xrange(len(ro)): ro[i] = \
                    (1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))
                fftip(vol)
                # NOTE(review): filt_table's return value is discarded here, so
                # this call looks like a no-op on vol — confirm intended.
                filt_table(vol, ro)
                del ro
            if not local_filter:
                if( type(Tracker["lowpass"]) == types.ListType ):
                    vol = filt_table(vol, Tracker["lowpass"])
                else:
                    vol = filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"])
            if Tracker["constants"]["sausage"]: vol = fft(vol)
    if local_filter:
        # Local resolution filtering is collective: all ranks take part in
        # filterlocal, so sizes/masks are broadcast first.
        from morphology import binarize
        if(myid == 0): nx = mask3D.get_xsize()
        else: nx = 0
        nx = bcast_number_to_all(nx, source_node = 0)
        # only main processor needs the two input volumes
        if(myid == 0):
            mask = binarize(mask3D, 0.5)
            locres = get_im(Tracker["local_filter"])
            lx = locres.get_xsize()
            if(lx != nx):
                if(lx < nx):
                    # decimate volume/mask to the local-resolution map size
                    from fundamentals import fdecimate, rot_shift3D
                    mask = Util.window(rot_shift3D(mask,scale=float(lx)/float(nx)),lx,lx,lx)
                    vol = fdecimate(vol, lx,lx,lx)
                else:
                    ERROR("local filter cannot be larger than input volume","user function",1)
            stat = Util.infomask(vol, mask, False)
            vol -= stat[0]
            Util.mul_scalar(vol, 1.0/stat[1])
        else:
            lx = 0
            locres = model_blank(1,1,1)
            vol = model_blank(1,1,1)
        lx = bcast_number_to_all(lx, source_node = 0)
        if( myid != 0 ): mask = model_blank(lx,lx,lx)
        bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
        from filter import filterlocal
        vol = filterlocal( locres, vol, mask, Tracker["falloff"], myid, 0, nproc)
        if myid == 0:
            if(lx < nx):
                # pad back to the full volume size after decimation
                from fundamentals import fpol
                vol = fpol(vol, nx,nx,nx)
            vol = threshold(vol)
            vol = filt_btwl(vol, 0.38, 0.5)# This will have to be corrected.
            Util.mul_img(vol, mask3D)
            del mask3D
            # vol.write_image('toto%03d.hdf'%iter)
        else:
            vol = model_blank(nx,nx,nx)
    else:
        if myid == 0:
            #from utilities import write_text_file
            #write_text_file(rops_table(vol,1),"goo.txt")
            # final normalization / threshold / Butterworth low-pass / mask
            stat = Util.infomask(vol, mask3D, False)
            vol -= stat[0]
            Util.mul_scalar(vol, 1.0/stat[1])
            vol = threshold(vol)
            vol = filt_btwl(vol, 0.38, 0.5)# This will have to be corrected.
            Util.mul_img(vol, mask3D)
            del mask3D
            # vol.write_image('toto%03d.hdf'%iter)
    # broadcast volume
    bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
    #=========================================================================
    return vol
def main():
    """Driver for sxcompute_isac_avg: sharpen/adjust ISAC 2-D class averages.

    Collective MPI program.  Reads an ISAC run output directory, recomputes
    the class averages from the original-size particles (optionally with CTF
    correction and local 2-D alignment), adjusts their power spectra
    (analytic model / given 1-D PW / B-factor / none), low-pass filters
    them, writes the results and parameter files into the output directory,
    and finally orders the averages with sxchains.py.
    """
    from optparse import OptionParser
    from global_def import SPARXVERSION
    from EMAN2 import EMData
    from logger import Logger, BaseLogger_Files
    import sys, os, time
    global Tracker, Blockdata
    from global_def import ERROR
    progname = os.path.basename(sys.argv[0])
    usage = progname + " --output_dir=output_dir --isac_dir=output_dir_of_isac "
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--pw_adjustment", type="string", default='analytical_model', \
        help="adjust power spectrum of 2-D averages to an analytic model. Other opions: no_adjustment; bfactor; a text file of 1D rotationally averaged PW")
    #### Four options for --pw_adjustment:
    # 1> analytical_model(default);
    # 2> no_adjustment;
    # 3> bfactor;
    # 4> adjust_to_given_pw2(user has to provide a text file that contains 1D rotationally averaged PW)
    # options in common
    parser.add_option("--isac_dir", type="string", default='', help="ISAC run output directory, input directory for this command")
    parser.add_option("--output_dir", type="string", default='', help="output directory where computed averages are saved")
    parser.add_option("--pixel_size", type="float", default=-1.0, help="pixel_size of raw images. one can put 1.0 in case of negative stain data")
    parser.add_option("--fl", type="float", default=-1.0, help="low pass filter, = -1.0, not applied; =0.0, using FH1 (initial resolution), = 1.0 using FH2 (resolution after local alignment), or user provided value in absolute freqency [0.0:0.5]")
    parser.add_option("--stack", type="string", default="", help="data stack used in ISAC")
    parser.add_option("--radius", type="int", default=-1, help="radius")
    parser.add_option("--xr", type="float", default=-1.0, help="local alignment search range")
    #parser.add_option("--ts", type ="float", default =1.0, help= "local alignment search step")
    parser.add_option("--fh", type="float", default=-1.0, help="local alignment high frequencies limit")
    #parser.add_option("--maxit", type ="int", default =5, help= "local alignment iterations")
    parser.add_option("--navg", type="int", default=1000000, help="number of aveages")
    parser.add_option("--local_alignment", action="store_true", default=False, help="do local alignment")
    parser.add_option("--noctf", action="store_true", default=False, help="no ctf correction, useful for negative stained data. always ctf for cryo data")
    parser.add_option("--B_start", type="float", default=45.0, help="start frequency (Angstrom) of power spectrum for B_factor estimation")
    parser.add_option("--Bfactor", type="float", default=-1.0, help="User defined bactors (e.g. 25.0[A^2]). By default, the program automatically estimates B-factor. ")
    (options, args) = parser.parse_args(sys.argv[1:])
    # Decode the single pw_adjustment option into four mutually exclusive flags.
    adjust_to_analytic_model = False
    adjust_to_given_pw2 = False
    B_enhance = False
    no_adjustment = False
    if options.pw_adjustment == 'analytical_model': adjust_to_analytic_model = True
    elif options.pw_adjustment == 'no_adjustment': no_adjustment = True
    elif options.pw_adjustment == 'bfactor': B_enhance = True
    else: adjust_to_given_pw2 = True  # any other value is treated as a PW text file path
    from utilities import get_im, bcast_number_to_all, write_text_file, read_text_file, wrap_mpi_bcast, write_text_row
    from utilities import cmdexecute
    from filter import filt_tanl
    from logger import Logger, BaseLogger_Files
    import user_functions
    import string
    from string import split, atoi, atof
    import json
    mpi_init(0, [])
    nproc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    Blockdata = {}
    # MPI stuff: global layout of ranks, per-node shared communicators, etc.
    Blockdata["nproc"] = nproc
    Blockdata["myid"] = myid
    Blockdata["main_node"] = 0
    Blockdata["shared_comm"] = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL)
    Blockdata["myid_on_node"] = mpi_comm_rank(Blockdata["shared_comm"])
    Blockdata["no_of_processes_per_group"] = mpi_comm_size(Blockdata["shared_comm"])
    masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, Blockdata["main_node"] == Blockdata["myid_on_node"], Blockdata["myid_on_node"])
    Blockdata["color"], Blockdata["no_of_groups"], balanced_processor_load_on_nodes = get_colors_and_subsets(Blockdata["main_node"], MPI_COMM_WORLD, Blockdata["myid"], \
        Blockdata["shared_comm"], Blockdata["myid_on_node"], masters_from_groups_vs_everything_else_comm)
    # We need two nodes for processing of volumes
    Blockdata["node_volume"] = [Blockdata["no_of_groups"] - 3, Blockdata["no_of_groups"] - 2, Blockdata["no_of_groups"] - 1]  # For 3D stuff take three last nodes
    # We need two CPUs for processing of volumes, they are taken to be main CPUs on each volume
    # We have to send the two myids to all nodes so we can identify main nodes on two selected groups.
    Blockdata["nodes"] = [Blockdata["node_volume"][0]*Blockdata["no_of_processes_per_group"], Blockdata["node_volume"][1]*Blockdata["no_of_processes_per_group"], \
        Blockdata["node_volume"][2]*Blockdata["no_of_processes_per_group"]]
    # End of Blockdata: sorting requires at least three nodes, and the used number of nodes be integer times of three
    global_def.BATCH = True
    global_def.MPI = True
    if adjust_to_given_pw2:
        # Verify the user-supplied PW file exists (checked on main node, result broadcast).
        checking_flag = 0
        if (Blockdata["myid"] == Blockdata["main_node"]):
            if not os.path.exists(options.pw_adjustment): checking_flag = 1
        checking_flag = bcast_number_to_all(checking_flag, Blockdata["main_node"], MPI_COMM_WORLD)
        if checking_flag == 1:
            ERROR("User provided power spectrum does not exist", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
    Tracker = {}
    Constants = {}
    Constants["isac_dir"] = options.isac_dir
    Constants["masterdir"] = options.output_dir
    Constants["pixel_size"] = options.pixel_size
    Constants["orgstack"] = options.stack
    Constants["radius"] = options.radius
    Constants["xrange"] = options.xr
    Constants["FH"] = options.fh
    Constants["low_pass_filter"] = options.fl
    #Constants["maxit"] = options.maxit
    Constants["navg"] = options.navg
    Constants["B_start"] = options.B_start
    Constants["Bfactor"] = options.Bfactor
    if adjust_to_given_pw2: Constants["modelpw"] = options.pw_adjustment
    Tracker["constants"] = Constants
    # -------------------------------------------------------------
    #
    # Create and initialize Tracker dictionary with input options
    # State Variables
    #<<<---------------------->>>imported functions<<<---------------------------------------------
    #x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"])+1)
    #y_range = x_range
    ####-----------------------------------------------------------
    # Create Master directory and associated subdirectories
    line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
    if Tracker["constants"]["masterdir"] == Tracker["constants"]["isac_dir"]:
        masterdir = os.path.join(Tracker["constants"]["isac_dir"], "sharpen")
    else:
        masterdir = Tracker["constants"]["masterdir"]
    if (Blockdata["myid"] == Blockdata["main_node"]):
        msg = "Postprocessing ISAC 2D averages starts"
        print(line, "Postprocessing ISAC 2D averages starts")
        if not masterdir:
            timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
            masterdir = "sharpen_" + Tracker["constants"]["isac_dir"]
            os.mkdir(masterdir)
        else:
            if os.path.exists(masterdir): print("%s already exists" % masterdir)
            else: os.mkdir(masterdir)
        subdir_path = os.path.join(masterdir, "ali2d_local_params_avg")
        if not os.path.exists(subdir_path): os.mkdir(subdir_path)
        subdir_path = os.path.join(masterdir, "params_avg")
        if not os.path.exists(subdir_path): os.mkdir(subdir_path)
        li = len(masterdir)
    else:
        li = 0
    # Broadcast the master directory name (length first, then characters).
    li = mpi_bcast(li, 1, MPI_INT, Blockdata["main_node"], MPI_COMM_WORLD)[0]
    masterdir = mpi_bcast(masterdir, li, MPI_CHAR, Blockdata["main_node"], MPI_COMM_WORLD)
    masterdir = string.join(masterdir, "")
    Tracker["constants"]["masterdir"] = masterdir
    log_main = Logger(BaseLogger_Files())
    log_main.prefix = Tracker["constants"]["masterdir"] + "/"
    # Wait until the shared filesystem shows the master directory on all nodes.
    while not os.path.exists(Tracker["constants"]["masterdir"]):
        print("Node ", Blockdata["myid"], " waiting...", Tracker["constants"]["masterdir"])
        sleep(1)
    mpi_barrier(MPI_COMM_WORLD)
    if (Blockdata["myid"] == Blockdata["main_node"]):
        # Read the initial 2-D alignment parameters produced by ISAC.
        init_dict = {}
        print(Tracker["constants"]["isac_dir"])
        Tracker["directory"] = os.path.join(Tracker["constants"]["isac_dir"], "2dalignment")
        core = read_text_row(os.path.join(Tracker["directory"], "initial2Dparams.txt"))
        for im in range(len(core)): init_dict[im] = core[im]
        del core
    else:
        init_dict = 0
    init_dict = wrap_mpi_bcast(init_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    ###
    do_ctf = True
    if options.noctf: do_ctf = False
    if (Blockdata["myid"] == Blockdata["main_node"]):
        if do_ctf: print("CTF correction is on")
        else: print("CTF correction is off")
        if options.local_alignment: print("local refinement is on")
        else: print("local refinement is off")
        if B_enhance: print("Bfactor is to be applied on averages")
        elif adjust_to_given_pw2: print("PW of averages is adjusted to a given 1D PW curve")
        elif adjust_to_analytic_model: print("PW of averages is adjusted to analytical model")
        else: print("PW of averages is not adjusted")
        #Tracker["constants"]["orgstack"] = "bdb:"+ os.path.join(Tracker["constants"]["isac_dir"],"../","sparx_stack")
        image = get_im(Tracker["constants"]["orgstack"], 0)
        Tracker["constants"]["nnxo"] = image.get_xsize()
        if Tracker["constants"]["pixel_size"] == -1.0:
            print("Pixel size value is not provided by user. extracting it from ctf header entry of the original stack.")
            try:
                ctf_params = image.get_attr("ctf")
                Tracker["constants"]["pixel_size"] = ctf_params.apix
            except:
                ERROR("Pixel size could not be extracted from the original stack.", "sxcompute_isac_avg.py", 1, Blockdata["myid"])  # action=1 - fatal error, exit
        ## Now fill in low-pass filter
        isac_shrink_path = os.path.join(Tracker["constants"]["isac_dir"], "README_shrink_ratio.txt")
        if not os.path.exists(isac_shrink_path):
            ERROR("%s does not exist in the specified ISAC run output directory" % (isac_shrink_path), "sxcompute_isac_avg.py", 1, Blockdata["myid"])  # action=1 - fatal error, exit
        isac_shrink_file = open(isac_shrink_path, "r")
        isac_shrink_lines = isac_shrink_file.readlines()
        isac_shrink_ratio = float(isac_shrink_lines[5])  # 6th line: shrink ratio (= [target particle radius]/[particle radius]) used in the ISAC run
        isac_radius = float(isac_shrink_lines[6])  # 7th line: particle radius at original pixel size used in the ISAC run
        isac_shrink_file.close()
        print("Extracted parameter values")
        print("ISAC shrink ratio : {0}".format(isac_shrink_ratio))
        print("ISAC particle radius : {0}".format(isac_radius))
        Tracker["ini_shrink"] = isac_shrink_ratio
    else:
        Tracker["ini_shrink"] = 0.0
    Tracker = wrap_mpi_bcast(Tracker, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    #print(Tracker["constants"]["pixel_size"], "pixel_size")
    # Local alignment search ranges at the original (unshrunk) scale.
    x_range = max(Tracker["constants"]["xrange"], int(1. / Tracker["ini_shrink"] + 0.99999))
    a_range = y_range = x_range
    if (Blockdata["myid"] == Blockdata["main_node"]):
        parameters = read_text_row(os.path.join(Tracker["constants"]["isac_dir"], "all_parameters.txt"))
    else:
        parameters = 0
    parameters = wrap_mpi_bcast(parameters, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    params_dict = {}
    list_dict = {}
    #parepare params_dict
    #navg = min(Tracker["constants"]["navg"]*Blockdata["nproc"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
    navg = min(Tracker["constants"]["navg"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
    global_dict = {}
    ptl_list = []
    memlist = []
    if (Blockdata["myid"] == Blockdata["main_node"]):
        # Combine the initial 2-D parameters with the ISAC parameters (rescaled
        # back from the shrunken size) to get per-particle alignment at full size.
        print("Number of averages computed in this run is %d" % navg)
        for iavg in range(navg):
            params_of_this_average = []
            image = get_im(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf"), iavg)
            members = sorted(image.get_attr("members"))
            memlist.append(members)
            for im in range(len(members)):
                abs_id = members[im]
                global_dict[abs_id] = [iavg, im]
                P = combine_params2( init_dict[abs_id][0], init_dict[abs_id][1], init_dict[abs_id][2], init_dict[abs_id][3], \
                    parameters[abs_id][0], parameters[abs_id][1]/Tracker["ini_shrink"], parameters[abs_id][2]/Tracker["ini_shrink"], parameters[abs_id][3])
                if parameters[abs_id][3] == -1:
                    print("WARNING: Image #{0} is an unaccounted particle with invalid 2D alignment parameters and should not be the member of any classes. Please check the consitency of input dataset.".format(abs_id))  # How to check what is wrong about mirror = -1 (Toshio 2018/01/11)
                params_of_this_average.append([P[0], P[1], P[2], P[3], 1.0])
                ptl_list.append(abs_id)
            params_dict[iavg] = params_of_this_average
            list_dict[iavg] = members
            write_text_row(params_of_this_average, os.path.join(Tracker["constants"]["masterdir"], "params_avg", "params_avg_%03d.txt" % iavg))
        ptl_list.sort()
        init_params = [None for im in range(len(ptl_list))]
        for im in range(len(ptl_list)):
            init_params[im] = [ptl_list[im]] + params_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
        write_text_row(init_params, os.path.join(Tracker["constants"]["masterdir"], "init_isac_params.txt"))
    else:
        params_dict = 0
        list_dict = 0
        memlist = 0
    params_dict = wrap_mpi_bcast(params_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    list_dict = wrap_mpi_bcast(list_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    memlist = wrap_mpi_bcast(memlist, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
    # Now computing!
    del init_dict
    tag_sharpen_avg = 1000
    ## always apply low pass filter to B_enhanced images to suppress noise in high frequencies
    enforced_to_H1 = False
    if B_enhance:
        if Tracker["constants"]["low_pass_filter"] == -1.0: enforced_to_H1 = True
    if navg < Blockdata["nproc"]:  # Each CPU do one average
        ERROR("number of nproc is larger than number of averages", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
    else:
        # Distribute averages across ranks; cpu_dict maps average index -> owner rank.
        FH_list = [[0, 0.0, 0.0] for im in range(navg)]
        image_start, image_end = MPI_start_end(navg, Blockdata["nproc"], Blockdata["myid"])
        if Blockdata["myid"] == Blockdata["main_node"]:
            cpu_dict = {}
            for iproc in range(Blockdata["nproc"]):
                local_image_start, local_image_end = MPI_start_end(navg, Blockdata["nproc"], iproc)
                for im in range(local_image_start, local_image_end):
                    cpu_dict[im] = iproc
        else:
            cpu_dict = 0
        cpu_dict = wrap_mpi_bcast(cpu_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
        slist = [None for im in range(navg)]
        ini_list = [None for im in range(navg)]
        avg1_list = [None for im in range(navg)]
        avg2_list = [None for im in range(navg)]
        plist_dict = {}
        data_list = [None for im in range(navg)]
        if Blockdata["myid"] == Blockdata["main_node"]:
            if B_enhance: print("Avg ID B-factor FH1(Res before ali) FH2(Res after ali)")
            else: print("Avg ID FH1(Res before ali) FH2(Res after ali)")
        for iavg in range(image_start, image_end):
            # Load this average's member particles and apply the combined parameters.
            mlist = EMData.read_images(Tracker["constants"]["orgstack"], list_dict[iavg])
            for im in range(len(mlist)):
                #mlist[im]= get_im(Tracker["constants"]["orgstack"], list_dict[iavg][im])
                set_params2D(mlist[im], params_dict[iavg][im], xform="xform.align2d")
            if options.local_alignment:
                """
                new_average1 = within_group_refinement([mlist[kik] for kik in range(0,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
                    ou=Tracker["constants"]["radius"], rs=1.0, xrng=[x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
                    dst=0.0, maxit=Tracker["constants"]["maxit"], FH=max(Tracker["constants"]["FH"], FH1), FF=0.02, method="")
                new_average2 = within_group_refinement([mlist[kik] for kik in range(1,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
                    ou= Tracker["constants"]["radius"], rs=1.0, xrng=[ x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
                    dst=0.0, maxit=Tracker["constants"]["maxit"], FH = max(Tracker["constants"]["FH"], FH1), FF=0.02, method="")
                new_avg, frc, plist = compute_average(mlist, Tracker["constants"]["radius"], do_ctf)
                """
                new_avg, plist, FH2 = refinement_2d_local(mlist, Tracker["constants"]["radius"], a_range, x_range, y_range, CTF=do_ctf, SNR=1.0e10)
                plist_dict[iavg] = plist
                FH1 = -1.0
            else:
                new_avg, frc, plist = compute_average(mlist, Tracker["constants"]["radius"], do_ctf)
                FH1 = get_optimistic_res(frc)
                FH2 = -1.0
                #write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d.txt"%iavg))
            FH_list[iavg] = [iavg, FH1, FH2]
            # Power-spectrum adjustment of the new average, per selected mode.
            if B_enhance:
                new_avg, gb = apply_enhancement(new_avg, Tracker["constants"]["B_start"], Tracker["constants"]["pixel_size"], Tracker["constants"]["Bfactor"])
                print(" %6d %6.3f %4.3f %4.3f" % (iavg, gb, FH1, FH2))
            elif adjust_to_given_pw2:
                roo = read_text_file(Tracker["constants"]["modelpw"], -1)
                roo = roo[0]  # always on the first column
                new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], roo)
                print(" %6d %4.3f %4.3f " % (iavg, FH1, FH2))
            elif adjust_to_analytic_model:
                new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], None)
                print(" %6d %4.3f %4.3f " % (iavg, FH1, FH2))
            elif no_adjustment:
                pass
            if Tracker["constants"]["low_pass_filter"] != -1.0:
                # fl = 0.0 -> use FH1; fl = 1.0 -> use FH2 (FH1 without local ali);
                # otherwise the user value, capped at 0.45 absolute frequency.
                if Tracker["constants"]["low_pass_filter"] == 0.0: low_pass_filter = FH1
                elif Tracker["constants"]["low_pass_filter"] == 1.0:
                    low_pass_filter = FH2
                    if not options.local_alignment: low_pass_filter = FH1
                else:
                    low_pass_filter = Tracker["constants"]["low_pass_filter"]
                    if low_pass_filter >= 0.45: low_pass_filter = 0.45
                new_avg = filt_tanl(new_avg, low_pass_filter, 0.02)
            else:  # No low pass filter but if enforced
                if enforced_to_H1: new_avg = filt_tanl(new_avg, FH1, 0.02)
            if B_enhance: new_avg = fft(new_avg)
            new_avg.set_attr("members", list_dict[iavg])
            new_avg.set_attr("n_objects", len(list_dict[iavg]))
            slist[iavg] = new_avg
            print(strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>", "Refined average %7d" % iavg)
        ## send to main node to write
        mpi_barrier(MPI_COMM_WORLD)
        for im in range(navg):
            # avg
            if cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] != Blockdata["main_node"]:
                send_EMData(slist[im], Blockdata["main_node"], tag_sharpen_avg)
            elif cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
                slist[im].set_attr("members", memlist[im])
                slist[im].set_attr("n_objects", len(memlist[im]))
                slist[im].write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
            elif cpu_dict[im] != Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
                new_avg_other_cpu = recv_EMData(cpu_dict[im], tag_sharpen_avg)
                new_avg_other_cpu.set_attr("members", memlist[im])
                new_avg_other_cpu.set_attr("n_objects", len(memlist[im]))
                new_avg_other_cpu.write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
            if options.local_alignment:
                if cpu_dict[im] == Blockdata["myid"]:
                    write_text_row(plist_dict[im], os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params_avg", "ali2d_local_params_avg_%03d.txt" % im))
                if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im] != Blockdata["main_node"]:
                    wrap_mpi_send(plist_dict[im], Blockdata["main_node"], MPI_COMM_WORLD)
                    wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
                elif cpu_dict[im] != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
                    dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
                    plist_dict[im] = dummy
                    dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
                    FH_list[im] = dummy[im]
            else:
                if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im] != Blockdata["main_node"]:
                    wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
                elif cpu_dict[im] != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
                    dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
                    FH_list[im] = dummy[im]
            mpi_barrier(MPI_COMM_WORLD)
        mpi_barrier(MPI_COMM_WORLD)
    # Collate per-particle local-alignment parameters and resolution lists.
    if options.local_alignment:
        if Blockdata["myid"] == Blockdata["main_node"]:
            ali3d_local_params = [None for im in range(len(ptl_list))]
            for im in range(len(ptl_list)):
                ali3d_local_params[im] = [ptl_list[im]] + plist_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
            write_text_row(ali3d_local_params, os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params.txt"))
            write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
    else:
        if Blockdata["myid"] == Blockdata["main_node"]:
            write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
    mpi_barrier(MPI_COMM_WORLD)
    # Order the final class averages with sxchains.py, then remove the junk file.
    target_xr = 3
    target_yr = 3
    if (Blockdata["myid"] == 0):
        cmd = "{} {} {} {} {} {} {} {} {} {}".format("sxchains.py", os.path.join(Tracker["constants"]["masterdir"],"class_averages.hdf"),\
            os.path.join(Tracker["constants"]["masterdir"],"junk.hdf"),os.path.join(Tracker["constants"]["masterdir"],"ordered_class_averages.hdf"),\
            "--circular","--radius=%d"%Tracker["constants"]["radius"] , "--xr=%d"%(target_xr+1),"--yr=%d"%(target_yr+1),"--align", ">/dev/null")
        junk = cmdexecute(cmd)
        cmd = "{} {}".format("rm -rf", os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"))
        junk = cmdexecute(cmd)
    from mpi import mpi_finalize
    mpi_finalize()
    exit()
def dovolume(ref_data):
    """Prepare the reference volume for 3-D alignment (mirrors what do_volume does).

    ref_data : list
        0 - mask
        1 - center flag (1 -> center the volume on its phase center of gravity)
        2 - raw average (volume)
        3 - fsc result (unused here)

    Returns (vol, cs): the filtered, thresholded, masked (and, if requested,
    centered) reference volume, and the applied center-of-gravity shift.

    BUGFIX: the centering branch previously read/assigned an undefined name
    `volf` (NameError when ref_data[1] == 1) and, even if it had worked, the
    shifted volume was never returned because the function returned `vol`.
    Both uses now operate on `vol`.
    """
    from utilities import print_msg, read_text_row
    from filter import fit_tanh, filt_tanl
    from fundamentals import fshift
    from morphology import threshold
    # apply filtration (FSC) to reference image:
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    # "dot" self-comparison under the mask is used only as a progress metric.
    fl = ref_data[2].cmp("dot", ref_data[2], {"negative": 0, "mask": ref_data[0]})
    print_msg("do_volume user function Step = %5d GOAL = %10.3e\n" % (ref_ali2d_counter, fl))
    # Normalize under the mask and clip negative densities.
    stat = Util.infomask(ref_data[2], ref_data[0], False)
    vol = ref_data[2] - stat[0]
    Util.mul_scalar(vol, 1.0 / stat[1])
    vol = threshold(vol)
    #Util.mul_img(vol, ref_data[0])
    # Optional override of the tangent filter from "flaa.txt"; fall back to
    # defaults when the file is missing or malformed (deliberate best-effort).
    try:
        aa = read_text_row("flaa.txt")[0]
        fl = aa[0]
        aa = aa[1]
    except:
        fl = 0.4
        aa = 0.2
    msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (fl, aa)
    print_msg(msg)
    from utilities import read_text_file
    from fundamentals import rops_table, fftip, fft
    from filter import filt_table, filt_btwl
    fftip(vol)
    # If a reference power spectrum is available, adjust the volume's rotational
    # power spectrum toward it; otherwise only apply the tangent low-pass.
    try:
        rt = read_text_file("pwreference.txt")
        ro = rops_table(vol)
        # Here unless I am mistaken it is enough to take the beginning of the reference pw.
        for i in xrange(1, len(ro)):
            ro[i] = (rt[i] / ro[i])**0.5
        vol = fft(filt_table(filt_tanl(vol, fl, aa), ro))
        msg = "Power spectrum adjusted\n"
        print_msg(msg)
    except:
        vol = fft(filt_tanl(vol, fl, aa))
    # Final normalization, threshold, Butterworth low-pass, and masking.
    stat = Util.infomask(vol, ref_data[0], False)
    vol -= stat[0]
    Util.mul_scalar(vol, 1.0 / stat[1])
    vol = threshold(vol)
    vol = filt_btwl(vol, 0.38, 0.5)
    Util.mul_img(vol, ref_data[0])
    if ref_data[1] == 1:
        # center on the phase center of gravity (was: undefined `volf`)
        cs = vol.phase_cog()
        msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n" % (cs[0], cs[1], cs[2])
        print_msg(msg)
        vol = fshift(vol, -cs[0], -cs[1], -cs[2])
    else:
        cs = [0.0] * 3
    return vol, cs
def main():
	"""Post-process ISAC 2-D class averages (sharpen / adjust power spectra).

	MPI-parallel driver: every rank parses the same command line, the main
	node creates the master directory and reads bookkeeping files, then the
	averages are recomputed (optionally with local alignment), their power
	spectra adjusted by one of four mutually exclusive modes, and the results
	gathered on the main node and written to class_averages.hdf.

	Relies on module-level state: Tracker, Blockdata, and many sibling
	helpers (compute_average_ctf, within_group_refinement, ...).
	"""
	from optparse import OptionParser
	from global_def import SPARXVERSION
	from EMAN2 import EMData
	from logger import Logger, BaseLogger_Files
	import sys, os, time
	global Tracker, Blockdata
	from global_def import ERROR
	progname = os.path.basename(sys.argv[0])
	usage = progname + " --output_dir=output_dir --isac_dir=output_dir_of_isac "
	parser = OptionParser(usage, version=SPARXVERSION)
	parser.add_option("--adjust_to_analytic_model", action="store_true", default=False, help="adjust power spectrum of 2-D averages to an analytic model ")
	parser.add_option("--adjust_to_given_pw2", action="store_true", default=False, help="adjust power spectrum to 2-D averages to given 1D power spectrum")
	parser.add_option("--B_enhance", action="store_true", default=False, help="using B-factor to enhance 2-D averages")
	parser.add_option("--no_adjustment", action="store_true", default=False, help="No power spectrum adjustment")
	# The four modes are scanned directly from sys.argv (before parse_args)
	# because mode-dependent options must be registered before parsing.
	options_list = []
	adjust_to_analytic_model = False
	for q in sys.argv[1:]:
		if (q[:26] == "--adjust_to_analytic_model"):
			adjust_to_analytic_model = True
			options_list.append(q)
			break
	adjust_to_given_pw2 = False
	for q in sys.argv[1:]:
		if (q[:21] == "--adjust_to_given_pw2"):
			adjust_to_given_pw2 = True
			options_list.append(q)
			break
	B_enhance = False
	for q in sys.argv[1:]:
		if (q[:11] == "--B_enhance"):
			B_enhance = True
			options_list.append(q)
			break
	no_adjustment = False
	for q in sys.argv[1:]:
		if (q[:15] == "--no_adjustment"):
			no_adjustment = True
			options_list.append(q)
			break
	if len(options_list) == 0:
		if (Blockdata["myid"] == Blockdata["main_node"]):
			print("specify one of the following options to start: 1. adjust_to_analytic_model; 2. adjust_to_given_pw2; 3. B_enhance; 4. no_adjustment")
	if len(options_list) > 1:
		ERROR("The specified options are exclusive. Use only one of them to start", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
	# options in common
	parser.add_option("--isac_dir", type="string", default='', help="ISAC run output directory, input directory for this command")
	parser.add_option("--output_dir", type="string", default='', help="output directory where computed averages are saved")
	parser.add_option("--pixel_size", type="float", default=-1.0, help="pixel_size of raw images")
	parser.add_option("--fl", type="float", default=-1.0, help="low pass filter, =-1, not applied; =1, using FH1 (initial resolution), =2 using FH2 (resolution after local alignment), or user provided value")
	parser.add_option("--stack", type="string", default="", help="data stack used in ISAC")
	parser.add_option("--radius", type="int", default=-1, help="radius")
	parser.add_option("--xr", type="float", default=-1.0, help="local alignment search range")
	parser.add_option("--ts", type="float", default=1.0, help="local alignment search step")
	parser.add_option("--fh", type="float", default=-1., help="local alignment high frequencies limit")
	parser.add_option("--maxit", type="int", default=5, help="local alignment iterations")
	parser.add_option("--navg", type="int", default=-1, help="number of aveages")
	parser.add_option("--skip_local_alignment", action="store_true", default=False, help="skip local alignment")
	parser.add_option("--noctf", action="store_true", default=False, help="no ctf correction, useful for negative stained data. always ctf for cryo data")
	if B_enhance:
		parser.add_option("--B_start", type="float", default=10.0, help="start frequency (1./Angstrom) of power spectrum for B_factor estimation")
		parser.add_option("--Bfactor", type="float", default=-1.0, help="User defined bactors (e.g. 45.0[A^2]). By default, the program automatically estimates B-factor. ")
	if adjust_to_given_pw2:
		parser.add_option("--modelpw", type="string", default='', help="1-D reference power spectrum")
	(options, args) = parser.parse_args(sys.argv[1:])
	if adjust_to_given_pw2:
		# BUGFIX: this existence check originally ran BEFORE parse_args, so
		# `options` was undefined and the program crashed with a NameError
		# whenever --adjust_to_given_pw2 was given. It must run after parsing.
		checking_flag = 0
		if (Blockdata["myid"] == Blockdata["main_node"]):
			if not os.path.exists(options.modelpw):
				checking_flag = 1
		checking_flag = bcast_number_to_all(checking_flag, Blockdata["main_node"], MPI_COMM_WORLD)
		if checking_flag == 1:
			ERROR("User provided power spectrum does not exist", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
	# Collect all run constants in Tracker["constants"].
	Tracker = {}
	Constants = {}
	Constants["isac_dir"] = options.isac_dir
	Constants["masterdir"] = options.output_dir
	Constants["pixel_size"] = options.pixel_size
	Constants["orgstack"] = options.stack
	Constants["radius"] = options.radius
	Constants["xrange"] = options.xr
	Constants["xstep"] = options.ts
	Constants["FH"] = options.fh
	Constants["maxit"] = options.maxit
	Constants["navg"] = options.navg
	Constants["low_pass_filter"] = options.fl
	if B_enhance:
		Constants["B_start"] = options.B_start
		Constants["Bfactor"] = options.Bfactor
	if adjust_to_given_pw2:
		Constants["modelpw"] = options.modelpw
	Tracker["constants"] = Constants
	# -------------------------------------------------------------
	#
	# Create and initialize Tracker dictionary with input options # State Variables
	#<<<---------------------->>>imported functions<<<---------------------------------------------
	from utilities import get_im, bcast_number_to_all, write_text_file, read_text_file, wrap_mpi_bcast, write_text_row
	from utilities import cmdexecute
	from filter import filt_tanl
	from time import sleep
	from logger import Logger, BaseLogger_Files
	import user_functions
	import string
	from string import split, atoi, atof
	import json
	#x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"])+1)
	#y_range = x_range
	####-----------------------------------------------------------
	# Create Master directory
	# NOTE(review): strftime/localtime are assumed to be imported at module
	# level (not visible in this chunk) - confirm.
	line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
	if Tracker["constants"]["masterdir"] == Tracker["constants"]["isac_dir"]:
		masterdir = os.path.join(Tracker["constants"]["isac_dir"], "sharpen")
	else:
		masterdir = Tracker["constants"]["masterdir"]
	if (Blockdata["myid"] == Blockdata["main_node"]):
		msg = "Postprocessing ISAC 2D averages starts"
		print(line, "Postprocessing ISAC 2D averages starts")
		if not masterdir:
			timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
			masterdir = "sharpen_" + Tracker["constants"]["isac_dir"]
			os.mkdir(masterdir)
		else:
			if os.path.exists(masterdir):
				print("%s already exists" % masterdir)
			else:
				os.mkdir(masterdir)
		li = len(masterdir)
	else:
		li = 0
	# Broadcast the master directory name (as a char array) to all ranks.
	li = mpi_bcast(li, 1, MPI_INT, Blockdata["main_node"], MPI_COMM_WORLD)[0]
	masterdir = mpi_bcast(masterdir, li, MPI_CHAR, Blockdata["main_node"], MPI_COMM_WORLD)
	masterdir = string.join(masterdir, "")
	Tracker["constants"]["masterdir"] = masterdir
	log_main = Logger(BaseLogger_Files())
	log_main.prefix = Tracker["constants"]["masterdir"] + "/"
	# Wait until the shared filesystem shows the directory on every node.
	while not os.path.exists(Tracker["constants"]["masterdir"]):
		print("Node ", Blockdata["myid"], " waiting...", Tracker["constants"]["masterdir"])
		sleep(1)
	mpi_barrier(MPI_COMM_WORLD)
	# Read the initial 2-D alignment parameters on the main node, broadcast.
	if (Blockdata["myid"] == Blockdata["main_node"]):
		init_dict = {}
		print(Tracker["constants"]["isac_dir"])
		Tracker["directory"] = os.path.join(Tracker["constants"]["isac_dir"], "2dalignment")
		core = read_text_row(os.path.join(Tracker["directory"], "initial2Dparams.txt"))
		for im in xrange(len(core)):
			init_dict[im] = core[im]
		del core
	else:
		init_dict = 0
	init_dict = wrap_mpi_bcast(init_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	###
	if (Blockdata["myid"] == Blockdata["main_node"]):
		#Tracker["constants"]["orgstack"] = "bdb:"+ os.path.join(Tracker["constants"]["isac_dir"],"../","sparx_stack")
		image = get_im(Tracker["constants"]["orgstack"], 0)
		Tracker["constants"]["nnxo"] = image.get_xsize()
		try:
			ctf_params = image.get_attr("ctf")
			if Tracker["constants"]["pixel_size"] == -1.:
				Tracker["constants"]["pixel_size"] = ctf_params.apix
		except:
			print("pixel size value is not given.")
		# ISAC shrink factor = size of ISAC averages / original image size.
		Tracker["ini_shrink"] = float(get_im(os.path.join(Tracker["directory"], "aqfinal.hdf"), 0).get_xsize()) / Tracker["constants"]["nnxo"]
	else:
		Tracker["ini_shrink"] = 0
	Tracker = wrap_mpi_bcast(Tracker, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	#print(Tracker["constants"]["pixel_size"], "pixel_size")
	x_range = max(Tracker["constants"]["xrange"], int(1. / Tracker["ini_shrink"]) + 1)
	y_range = x_range
	if (Blockdata["myid"] == Blockdata["main_node"]):
		parameters = read_text_row(os.path.join(Tracker["constants"]["isac_dir"], "all_parameters.txt"))
	else:
		parameters = 0
	parameters = wrap_mpi_bcast(parameters, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	params_dict = {}
	list_dict = {}
	#parepare params_dict
	if Tracker["constants"]["navg"] < 0:
		navg = EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf"))
	else:
		navg = min(Tracker["constants"]["navg"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
	global_dict = {}
	ptl_list = []
	memlist = []
	if (Blockdata["myid"] == Blockdata["main_node"]):
		# Combine initial 2-D params with (shrink-corrected) ISAC params per member.
		for iavg in xrange(navg):
			params_of_this_average = []
			image = get_im(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf"), iavg)
			members = image.get_attr("members")
			memlist.append(members)
			for im in xrange(len(members)):
				abs_id = members[im]
				global_dict[abs_id] = [iavg, im]
				P = combine_params2( init_dict[abs_id][0], init_dict[abs_id][1], init_dict[abs_id][2], init_dict[abs_id][3], \
				parameters[abs_id][0], parameters[abs_id][1]/Tracker["ini_shrink"], parameters[abs_id][2]/Tracker["ini_shrink"], parameters[abs_id][3])
				if parameters[abs_id][3] == -1:
					print("wrong one")
				params_of_this_average.append([P[0], P[1], P[2], P[3], 1.0])
				ptl_list.append(abs_id)
			params_dict[iavg] = params_of_this_average
			list_dict[iavg] = members
			write_text_row(params_of_this_average, os.path.join(Tracker["constants"]["masterdir"], "params_avg_%03d.txt" % iavg))
		ptl_list.sort()
		init_params = [None for im in xrange(len(ptl_list))]
		for im in xrange(len(ptl_list)):
			init_params[im] = [ptl_list[im]] + params_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
		write_text_row(init_params, os.path.join(Tracker["constants"]["masterdir"], "init_isac_params.txt"))
	else:
		params_dict = 0
		list_dict = 0
		memlist = 0
	params_dict = wrap_mpi_bcast(params_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	list_dict = wrap_mpi_bcast(list_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	memlist = wrap_mpi_bcast(memlist, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
	# Now computing!
	del init_dict
	tag_sharpen_avg = 1000
	## always apply low pass filter to B_enhanced images to suppress noise in high frequencies
	enforced_to_H1 = False
	if options.B_enhance:
		if Tracker["constants"]["low_pass_filter"] == -1:
			print("User does not provide low pass filter")
			enforced_to_H1 = True
	if navg < Blockdata["nproc"]:  # Each CPU do one average
		FH_list = [None for im in xrange(navg)]
		for iavg in xrange(navg):
			if Blockdata["myid"] == iavg:
				mlist = [None for i in xrange(len(list_dict[iavg]))]
				for im in xrange(len(mlist)):
					mlist[im] = get_im(Tracker["constants"]["orgstack"], list_dict[iavg][im])
					set_params2D(mlist[im], params_dict[iavg][im], xform="xform.align2d")
				if options.noctf:
					new_avg, frc, plist = compute_average_noctf(mlist, Tracker["constants"]["radius"])
				else:
					new_avg, frc, plist = compute_average_ctf(mlist, Tracker["constants"]["radius"])
				FH1 = get_optimistic_res(frc)
				#write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d_before_ali.txt"%iavg))
				if not options.skip_local_alignment:
					# Refine even/odd halves independently, then recompute the average.
					new_average1 = within_group_refinement([mlist[kik] for kik in xrange(0,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
					 ou=Tracker["constants"]["radius"], rs=1.0, xrng=[x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
					 dst=0.0, maxit=Tracker["constants"]["maxit"], FH = max(Tracker["constants"]["FH"], FH1), FF=0.1)
					new_average2 = within_group_refinement([mlist[kik] for kik in xrange(1,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
					 ou=Tracker["constants"]["radius"], rs=1.0, xrng=[x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
					 dst=0.0, maxit=Tracker["constants"]["maxit"], FH = max(Tracker["constants"]["FH"], FH1), FF=0.1)
					if options.noctf:
						new_avg, frc, plist = compute_average_noctf(mlist, Tracker["constants"]["radius"])
					else:
						new_avg, frc, plist = compute_average_ctf(mlist, Tracker["constants"]["radius"])
					FH2 = get_optimistic_res(frc)
					#write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d.txt"%iavg))
					#if Tracker["constants"]["nopwadj"]: # pw adjustment, 1. analytic model 2. PDB model 3. B-facttor enhancement
				else:
					FH2 = 0.0
				FH_list[iavg] = [FH1, FH2]
				# Power-spectrum adjustment - exactly one mode applies.
				if options.B_enhance:
					new_avg, gb = apply_enhancement(new_avg, Tracker["constants"]["B_start"], Tracker["constants"]["pixel_size"], Tracker["constants"]["Bfactor"])
					print("Process avg %d %f %f %f" % (iavg, gb, FH1, FH2))
				elif options.adjust_to_given_pw2:
					roo = read_text_file(Tracker["constants"]["modelpw"], -1)
					roo = roo[0]  # always put pw in the first column
					new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], roo)
				elif options.adjust_to_analytic_model:
					new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], None)
				elif options.no_adjustment:
					pass
				print("Process avg %d %f %f" % (iavg, FH1, FH2))
				if Tracker["constants"]["low_pass_filter"] != -1.:
					if Tracker["constants"]["low_pass_filter"] == 1.:
						low_pass_filter = FH1
					elif Tracker["constants"]["low_pass_filter"] == 2.:
						low_pass_filter = FH2
						if options.skip_local_alignment:
							low_pass_filter = FH1
					else:
						low_pass_filter = Tracker["constants"]["low_pass_filter"]
						if low_pass_filter >= 0.45:
							low_pass_filter = 0.45
					new_avg = filt_tanl(new_avg, low_pass_filter, 0.1)
				new_avg.set_attr("members", list_dict[iavg])
				new_avg.set_attr("n_objects", len(list_dict[iavg]))
		mpi_barrier(MPI_COMM_WORLD)
		# Gather averages (and, if computed, local-alignment params) on main node.
		for im in xrange(navg):
			# avg
			if im == Blockdata["myid"] and Blockdata["myid"] != Blockdata["main_node"]:
				send_EMData(new_avg, Blockdata["main_node"], tag_sharpen_avg)
			elif Blockdata["myid"] == Blockdata["main_node"]:
				if im != Blockdata["main_node"]:
					new_avg_other_cpu = recv_EMData(im, tag_sharpen_avg)
					new_avg_other_cpu.set_attr("members", memlist[im])
					new_avg_other_cpu.write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
				else:
					new_avg.write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
			if not options.skip_local_alignment:
				if im == Blockdata["myid"]:
					write_text_row(plist, os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params_avg_%03d.txt" % im))
				if Blockdata["myid"] == im and Blockdata["myid"] != Blockdata["main_node"]:
					wrap_mpi_send(plist_dict[im], Blockdata["main_node"], MPI_COMM_WORLD)
				elif im != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(im, MPI_COMM_WORLD)
					plist_dict[im] = dummy
				if im == Blockdata["myid"] and im != Blockdata["main_node"]:
					wrap_mpi_send(FH_list[im], Blockdata["main_node"], MPI_COMM_WORLD)
				elif im != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(im, MPI_COMM_WORLD)
					FH_list[im] = dummy
			else:
				if im == Blockdata["myid"] and im != Blockdata["main_node"]:
					wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
				elif im != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(im, MPI_COMM_WORLD)
					FH_list[im] = dummy[im]
			mpi_barrier(MPI_COMM_WORLD)
	else:
		# More averages than CPUs: block-distribute averages across ranks.
		FH_list = [[0, 0.0, 0.0] for im in xrange(navg)]
		image_start, image_end = MPI_start_end(navg, Blockdata["nproc"], Blockdata["myid"])
		if Blockdata["myid"] == Blockdata["main_node"]:
			cpu_dict = {}
			for iproc in xrange(Blockdata["nproc"]):
				local_image_start, local_image_end = MPI_start_end(navg, Blockdata["nproc"], iproc)
				for im in xrange(local_image_start, local_image_end):
					cpu_dict[im] = iproc
		else:
			cpu_dict = 0
		cpu_dict = wrap_mpi_bcast(cpu_dict, Blockdata["main_node"], communicator=MPI_COMM_WORLD)
		slist = [None for im in xrange(navg)]
		ini_list = [None for im in xrange(navg)]
		avg1_list = [None for im in xrange(navg)]
		avg2_list = [None for im in xrange(navg)]
		plist_dict = {}
		data_list = [None for im in xrange(navg)]
		if Blockdata["myid"] == Blockdata["main_node"]:
			print("read data")
		for iavg in xrange(image_start, image_end):
			mlist = [None for i in xrange(len(list_dict[iavg]))]
			for im in xrange(len(mlist)):
				mlist[im] = get_im(Tracker["constants"]["orgstack"], list_dict[iavg][im])
				set_params2D(mlist[im], params_dict[iavg][im], xform="xform.align2d")
			data_list[iavg] = mlist
		print("read data done %d" % Blockdata["myid"])
		#if Blockdata["myid"] == Blockdata["main_node"]: print("start to compute averages")
		for iavg in xrange(image_start, image_end):
			mlist = data_list[iavg]
			if options.noctf:
				new_avg, frc, plist = compute_average_noctf(mlist, Tracker["constants"]["radius"])
			else:
				new_avg, frc, plist = compute_average_ctf(mlist, Tracker["constants"]["radius"])
			FH1 = get_optimistic_res(frc)
			#write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d_before_ali.txt"%iavg))
			if not options.skip_local_alignment:
				new_average1 = within_group_refinement([mlist[kik] for kik in xrange(0,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
				 ou=Tracker["constants"]["radius"], rs=1.0, xrng=[x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
				 dst=0.0, maxit=Tracker["constants"]["maxit"], FH=max(Tracker["constants"]["FH"], FH1), FF=0.1)
				new_average2 = within_group_refinement([mlist[kik] for kik in xrange(1,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
				 ou= Tracker["constants"]["radius"], rs=1.0, xrng=[ x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
				 dst=0.0, maxit=Tracker["constants"]["maxit"], FH = max(Tracker["constants"]["FH"], FH1), FF=0.1)
				if options.noctf:
					new_avg, frc, plist = compute_average_noctf(mlist, Tracker["constants"]["radius"])
				else:
					new_avg, frc, plist = compute_average_ctf(mlist, Tracker["constants"]["radius"])
				plist_dict[iavg] = plist
				FH2 = get_optimistic_res(frc)
			else:
				FH2 = 0.0
			#write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d.txt"%iavg))
			FH_list[iavg] = [iavg, FH1, FH2]
			if options.B_enhance:
				new_avg, gb = apply_enhancement(new_avg, Tracker["constants"]["B_start"], Tracker["constants"]["pixel_size"], Tracker["constants"]["Bfactor"])
				print("Process avg %d %f %f %f" % (iavg, gb, FH1, FH2))
			elif options.adjust_to_given_pw2:
				roo = read_text_file(Tracker["constants"]["modelpw"], -1)
				roo = roo[0]  # always on the first column
				new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], roo)
				print("Process avg %d %f %f" % (iavg, FH1, FH2))
			elif adjust_to_analytic_model:
				new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], None)
				print("Process avg %d %f %f" % (iavg, FH1, FH2))
			elif options.no_adjustment:
				pass
			if Tracker["constants"]["low_pass_filter"] != -1.:
				new_avg = filt_tanl(new_avg, Tracker["constants"]["low_pass_filter"], 0.1)
			if Tracker["constants"]["low_pass_filter"] != -1.:
				if Tracker["constants"]["low_pass_filter"] == 1.:
					low_pass_filter = FH1
				elif Tracker["constants"]["low_pass_filter"] == 2.:
					low_pass_filter = FH2
					if options.skip_local_alignment:
						low_pass_filter = FH1
				else:
					low_pass_filter = Tracker["constants"]["low_pass_filter"]
					if low_pass_filter >= 0.45:
						low_pass_filter = 0.45
				new_avg = filt_tanl(new_avg, low_pass_filter, 0.1)
			else:
				if enforced_to_H1:
					new_avg = filt_tanl(new_avg, FH1, 0.1)
			if options.B_enhance:
				new_avg = fft(new_avg)
			new_avg.set_attr("members", list_dict[iavg])
			new_avg.set_attr("n_objects", len(list_dict[iavg]))
			slist[iavg] = new_avg
		## send to main node to write
		mpi_barrier(MPI_COMM_WORLD)
		for im in xrange(navg):
			# avg
			if cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] != Blockdata["main_node"]:
				send_EMData(slist[im], Blockdata["main_node"], tag_sharpen_avg)
			elif cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
				slist[im].set_attr("members", memlist[im])
				slist[im].write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
			elif cpu_dict[im] != Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
				new_avg_other_cpu = recv_EMData(cpu_dict[im], tag_sharpen_avg)
				new_avg_other_cpu.set_attr("members", memlist[im])
				new_avg_other_cpu.write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
			if not options.skip_local_alignment:
				if cpu_dict[im] == Blockdata["myid"]:
					write_text_row(plist_dict[im], os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params_avg_%03d.txt" % im))
				if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im] != Blockdata["main_node"]:
					wrap_mpi_send(plist_dict[im], Blockdata["main_node"], MPI_COMM_WORLD)
					wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
				elif cpu_dict[im] != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					plist_dict[im] = dummy
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					FH_list[im] = dummy[im]
			else:
				if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im] != Blockdata["main_node"]:
					wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
				elif cpu_dict[im] != Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					FH_list[im] = dummy[im]
			mpi_barrier(MPI_COMM_WORLD)
	mpi_barrier(MPI_COMM_WORLD)
	# Write per-particle local-alignment params and the FH (resolution) table.
	if not options.skip_local_alignment:
		if Blockdata["myid"] == Blockdata["main_node"]:
			ali3d_local_params = [None for im in xrange(len(ptl_list))]
			for im in xrange(len(ptl_list)):
				ali3d_local_params[im] = [ptl_list[im]] + plist_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
			write_text_row(ali3d_local_params, os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params.txt"))
			write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
	else:
		if Blockdata["myid"] == Blockdata["main_node"]:
			write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
	mpi_barrier(MPI_COMM_WORLD)
	# Order the final averages with sxchains.py, then clean up.
	target_xr = 3
	target_yr = 3
	if (Blockdata["myid"] == 0):
		cmd = "{} {} {} {} {} {} {} {} {} {}".format("sxchains.py", os.path.join(Tracker["constants"]["masterdir"],"class_averages.hdf"),\
		os.path.join(Tracker["constants"]["masterdir"],"junk.hdf"),os.path.join(Tracker["constants"]["masterdir"],"ordered_class_averages.hdf"),\
		"--circular","--radius=%d"%Tracker["constants"]["radius"] , "--xr=%d"%(target_xr+1),"--yr=%d"%(target_yr+1),"--align", ">/dev/null")
		junk = cmdexecute(cmd)
		cmd = "{} {}".format("rm -rf", os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"))
		junk = cmdexecute(cmd)
	from mpi import mpi_finalize
	mpi_finalize()
	exit()
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): if mirror: m = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0) return alpha, sx, sy, m progname = os.path.basename(sys.argv[0]) usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl=0.2 --aa=0.1 --sym=symmetry --CTF" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--ave2D", type="string" , default=False, help="write to the disk a stack of 2D averages") parser.add_option("--var2D", type="string" , default=False, help="write to the disk a stack of 2D variances") parser.add_option("--ave3D", type="string" , default=False, help="write to the disk reconstructed 3D average") parser.add_option("--var3D", type="string" , default=False, help="compute 3D variability (time consuming!)") parser.add_option("--img_per_grp", type="int" , default=10 , help="number of neighbouring projections") parser.add_option("--no_norm", action="store_true", default=False, help="do not use normalization") parser.add_option("--radiusvar", type="int" , default=-1 , help="radius for 3D var" ) parser.add_option("--npad", type="int" , default=2 , help="number of time to pad the original images") parser.add_option("--sym" , type="string" , default="c1" , help="symmetry") parser.add_option("--fl", type="float" , default=0.0 , help="stop-band frequency (Default - no filtration)") parser.add_option("--aa", type="float" , default=0.0 , help="fall off of the filter (Default - no filtration)") parser.add_option("--CTF", action="store_true", default=False, help="use CFT correction") parser.add_option("--VERBOSE", action="store_true", default=False, help="Long output for debugging") #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) 
#parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option("--VAR" , action="store_true", default=False, help="stack on input consists of 2D variances (Default False)") parser.add_option("--decimate", type="float", default=1.0, help="image decimate rate, a number large than 1. default is 1") parser.add_option("--window", type="int", default=0, help="reduce images to a small image size without changing pixel_size. Default value is zero.") #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") parser.add_option("--nvec", type="int" , default=0 , help="number of eigenvectors, default = 0 meaning no PCA calculated") parser.add_option("--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") (options,args) = parser.parse_args() ##### from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD, MPI_TAG_UB from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX from applications import MPI_start_end from reconstruction import recons3d_em, recons3d_em_MPI from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from utilities import print_begin_msg, print_end_msg, print_msg from utilities import read_text_row, get_image, get_im from utilities import bcast_EMData_to_all, bcast_number_to_all from utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. 
PAP 01/27/2015 from EMAN2db import db_open_dict if options.symmetrize : try: sys.argv = mpi_init(len(sys.argv), sys.argv) try: number_of_proc = mpi_comm_size(MPI_COMM_WORLD) if( number_of_proc > 1 ): ERROR("Cannot use more than one CPU for symmetry prepration","sx3dvariability",1) except: pass except: pass # Input #instack = "Clean_NORM_CTF_start_wparams.hdf" #instack = "bdb:data" instack = args[0] sym = options.sym if( sym == "c1" ): ERROR("Thre is no need to symmetrize stack for C1 symmetry","sx3dvariability",1) if(instack[:4] !="bdb:"): stack = "bdb:data" delete_bdb(stack) cmdexecute("sxcpy.py "+instack+" "+stack) else: stack = instack qt = EMUtil.get_all_attributes(stack,'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None]*na for k in xrange(ks): delete_bdb("bdb:Q%1d"%k) cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) DB = db_open_dict("bdb:Q%1d"%k) for i in xrange(na): ut = qt[i]*ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() delete_bdb("bdb:sdata") cmdexecute("e2bdb.py . 
--makevstack=bdb:sdata --filt=Q") #cmdexecute("ls EMAN2DB/sdata*") a = get_im("bdb:sdata") a.set_attr("variabilitysymmetry",sym) a.write_image("bdb:sdata") else: sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 if len(args) == 1: stack = args[0] else: print( "usage: " + usage) print( "Please run '" + progname + " -h' for detailed options") return 1 t0 = time() # obsolete flags options.MPI = True options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid) if options.VAR and options.SND: ERROR("Only one of var and SND can be set!", "sx3dvariability", myid) exit() if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid) exit() #if options.SND and (options.ave2D or options.ave3D): # ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid) # exit() if options.nvec > 0 : ERROR("PCA option not implemented", "sx3dvariability", 1, myid) exit() if options.nvec > 0 and options.ave3D == None: ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid) exit() import string options.sym = options.sym.lower() if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() global_def.BATCH = True if myid == main_node: print_begin_msg("sx3dvariability") print_msg("%-70s: %s\n"%("Input stack", stack)) img_per_grp = options.img_per_grp nvec = options.nvec radiuspca = options.radiuspca symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() if options.sym != "c1" : imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry") if(i != 
options.sym): ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid) except: ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid) from utilities import get_symt i = len(get_symt(options.sym)) if((nima/i)*i != nima): ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid) symbaselen = nima/i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) Tracker ={} Tracker["nx"] =nx Tracker["ny"] =ny Tracker["total_stack"]=nima if options.decimate==1.: if options.window !=0: nx = options.window ny = options.window else: if options.window ==0: nx = int(nx/options.decimate) ny = int(ny/options.decimate) else: nx = int(options.window/options.decimate) ny = nx symbaselen = bcast_number_to_all(symbaselen) if radiuspca == -1: radiuspca = nx/2-2 if myid == main_node: print_msg("%-70s: %d\n"%("Number of projection", nima)) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) """ if options.SND: from projection import prep_vol, prgs from statistics import im_diff from utilities import get_im, model_circle, get_params_proj, set_params_proj from utilities import get_ctf, generate_ctf from filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = 
filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: #varList = EMData.read_images(stack, range(img_begin, img_end)) varList = [] this_image = EMData() for index_of_particle in xrange(img_begin,img_end): this_image.read_image(stack,index_of_particle) varList.append(image_decimate_window_xform_ctf(img,options.decimate,options.window,options.CTF)) else: from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from utilities import model_blank, nearest_proj, model_circle from applications import pca from statistics import avgvar, avgvar_ctf, ccc from filter import filt_tanl from morphology import threshold, square_root from projection import project, prep_vol, prgs from sets import Set if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in xrange(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x*10000+psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() print_msg("%-70s: %d\n"%("Number of neighboring projections", img_per_grp)) print_msg("...... 
Finding neighboring projections\n") if options.VERBOSE: print "Number of images per group: ", img_per_grp print "Now grouping projections" proj_angles.sort() proj_angles_list = [0.0]*(nima*4) if myid == main_node: for i in xrange(nima): proj_angles_list[i*4] = proj_angles[i][1] proj_angles_list[i*4+1] = proj_angles[i][2] proj_angles_list[i*4+2] = proj_angles[i][3] proj_angles_list[i*4+3] = proj_angles[i][4] proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node) proj_angles = [] for i in xrange(nima): proj_angles.append([proj_angles_list[i*4], proj_angles_list[i*4+1], proj_angles_list[i*4+2], int(proj_angles_list[i*4+3])]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) if options.VERBOSE: print "On node %2d, number of images needed to be read = %5d"%(myid, len(all_proj)) index = {} for i in xrange(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: print_msg("%-70s: %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2)) print_msg("%-70s: %d\n"%("Number of groups processed on the main node", len(proj_list))) if options.VERBOSE: print "Grouping projections took: ", (time()-t2)/60 , "[min]" print "Number of groups on main node: ", len(proj_list) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: print_msg("...... 
calculating the stack of 2D variances \n") if options.VERBOSE: print "Now calculating the stack of 2D variances" proj_params = [0.0]*(nima*5) aveList = [] varList = [] if nvec > 0: eigList = [[] for i in xrange(nvec)] if options.VERBOSE: print "Begin to read images on processor %d"%(myid) ttt = time() #imgdata = EMData.read_images(stack, all_proj) img = EMData() imgdata = [] for index_of_proj in xrange(len(all_proj)): img.read_image(stack, all_proj[index_of_proj]) dmg = image_decimate_window_xform_ctf(img,options.decimate,options.window,options.CTF) #print dmg.get_xsize(), "init" imgdata.append(dmg) if options.VERBOSE: print "Reading images on processor %d done, time = %.2f"%(myid, time()-ttt) print "On processor %d, we got %d images"%(myid, len(imgdata)) mpi_barrier(MPI_COMM_WORLD) ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from applications import prepare_2d_forPCA from utilities import model_blank for i in xrange(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi]) grp_imgdata = [] for j in xrange(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj]) alpha, sx, sy, mirror = params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror_list[i][j]) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM-phi, 
0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM-phi), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM-phi), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM-phi)), 0.0, 0.0, 1.0) set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0]) grp_imgdata.append(imgdata[mj]) #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize() if not options.no_norm: #print grp_imgdata[j].get_xsize() mask = model_circle(nx/2-2, nx, nx) for k in xrange(img_per_grp): ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std del mask if options.fl > 0.0: from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2*nx ny2 = 2*ny if options.CTF: from utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d(fft( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa) ),nx,ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: for k in xrange(img_per_grp): grp_imgdata[k] = filt_tanl( grp_imgdata[k], options.fl, options.aa) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: from utilities import pad, read_text_file from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2*nx ny2 = 2*ny if options.CTF: from utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d( fft( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 
1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1) ) , nx,ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) ''' if i < 10 and myid == main_node: for k in xrange(10): grp_imgdata[k].write_image("grp%03d.hdf"%i, k) ''' """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("pp.hdf", pp) """ ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata) """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("qq.hdf", pp) """ var = model_blank(nx,ny) for q in grp_imgdata: Util.add_img2( var, q ) Util.mul_scalar( var, 1.0/(len(grp_imgdata)-1)) # Switch to std dev var = square_root(threshold(var)) #if options.CTF: ave, var = avgvar_ctf(grp_imgdata, mode="a") #else: ave, var = avgvar(grp_imgdata, mode="a") """ if myid == main_node: ave.write_image("avgv.hdf",i) var.write_image("varv.hdf",i) """ set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) if options.VERBOSE: print "%5.2f%% done on processor %d"%(i*100.0/len(proj_list), myid) if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in xrange(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ del imgdata # To this point, all averages, variances, and eigenvectors are computed if options.ave2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node : for im in xrange(len(aveList)): aveList[im].write_image(options.ave2D, km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, 
MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im+i+70000) """ nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol=fpol(ave, Tracker["nx"],Tracker["nx"],Tracker["nx"]) tmpvol.write_image(options.ave2D, km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) for im in xrange(len(aveList)): send_EMData(aveList[im], main_node,im+myid+70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) """ if options.ave3D: from fundamentals import fpol if options.VERBOSE: print "Reconstructing 3D average volume" ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: ave3D=fpol(ave3D,Tracker["nx"],Tracker["nx"],Tracker["nx"]) ave3D.write_image(options.ave3D) print_msg("%-70s: %s\n"%("Writing to the disk volume reconstructed from averages as", options.ave3D)) del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList if nvec > 0: for k in xrange(nvec): if options.VERBOSE: print "Reconstruction eigenvolumes", k cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while 
cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image("eig3d_%03d.hdf"%k, ITER) Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) ) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in xrange(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: u = int(u[0]) print " Eigenvector: ",k," number changed ",int(icont[0]) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d if options.ave3D: del ave3D if options.var2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node : for im in xrange(len(varList)): tmpvol=fpol(varList[im], Tracker["nx"], Tracker["nx"],1) tmpvol.write_image(options.var2D, km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im+i+70000) tmpvol=fpol(ave, Tracker["nx"], Tracker["nx"],1) tmpvol.write_image(options.var2D, km) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD) for im in xrange(len(varList)): send_EMData(varList[im], main_node, im+myid+70000)# What with the attributes?? 
mpi_barrier(MPI_COMM_WORLD) if options.var3D: if myid == main_node and options.VERBOSE: print "Reconstructing 3D variability volume" t6 = time() radiusvar = options.radiusvar if( radiusvar < 0 ): radiusvar = nx//2 -3 res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad) #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ) if myid == main_node: from fundamentals import fpol res =fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"]) res.write_image(options.var3D) if myid == main_node: print_msg("%-70s: %.2f\n"%("Reconstructing 3D variability took [s]", time()-t6)) if options.VERBOSE: print "Reconstruction took: %.2f [min]"%((time()-t6)/60) if myid == main_node: print_msg("%-70s: %.2f\n"%("Total time for these computations [s]", time()-t0)) if options.VERBOSE: print "Total time for these computations: %.2f [min]"%((time()-t0)/60) print_end_msg("sx3dvariability") global_def.BATCH = False from mpi import mpi_finalize mpi_finalize()
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): if mirror: m = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0) return alpha, sx, sy, m progname = os.path.basename(sys.argv[0]) usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl=15. --aa=0.01 --sym=symmetry --CTF" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--output_dir", type="string", default="./", help="output directory") parser.add_option("--ave2D", type="string", default=False, help="write to the disk a stack of 2D averages") parser.add_option("--var2D", type="string", default=False, help="write to the disk a stack of 2D variances") parser.add_option("--ave3D", type="string", default=False, help="write to the disk reconstructed 3D average") parser.add_option("--var3D", type="string", default=False, help="compute 3D variability (time consuming!)") parser.add_option("--img_per_grp", type="int", default=10, help="number of neighbouring projections") parser.add_option("--no_norm", action="store_true", default=False, help="do not use normalization") #parser.add_option("--radius", type="int" , default=-1 , help="radius for 3D variability" ) parser.add_option("--npad", type="int", default=2, help="number of time to pad the original images") parser.add_option("--sym", type="string", default="c1", help="symmetry") parser.add_option( "--fl", type="float", default=0.0, help= "cutoff freqency in absolute frequency (0.0-0.5). (Default - no filtration)" ) parser.add_option( "--aa", type="float", default=0.0, help= "fall off of the filter. 
Put 0.01 if user has no clue about falloff (Default - no filtration)" ) parser.add_option("--CTF", action="store_true", default=False, help="use CFT correction") parser.add_option("--VERBOSE", action="store_true", default=False, help="Long output for debugging") #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) #parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option( "--VAR", action="store_true", default=False, help="stack on input consists of 2D variances (Default False)") parser.add_option( "--decimate", type="float", default=1.0, help= "image decimate rate, a number larger (expand image) or less (shrink image) than 1. default is 1" ) parser.add_option( "--window", type="int", default=0, help= "reduce images to a small image size without changing pixel_size. Default value is zero." 
) #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") parser.add_option( "--nvec", type="int", default=0, help="number of eigenvectors, default = 0 meaning no PCA calculated") parser.add_option( "--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") (options, args) = parser.parse_args() ##### from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX from applications import MPI_start_end from reconstruction import recons3d_em, recons3d_em_MPI from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from utilities import print_begin_msg, print_end_msg, print_msg from utilities import read_text_row, get_image, get_im from utilities import bcast_EMData_to_all, bcast_number_to_all from utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. 
PAP 01/27/2015 from EMAN2db import db_open_dict # Set up global variables related to bdb cache if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() # Set up global variables related to ERROR function global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ if RUNNING_UNDER_MPI: global_def.MPI = True if options.symmetrize: if RUNNING_UNDER_MPI: try: sys.argv = mpi_init(len(sys.argv), sys.argv) try: number_of_proc = mpi_comm_size(MPI_COMM_WORLD) if (number_of_proc > 1): ERROR( "Cannot use more than one CPU for symmetry prepration", "sx3dvariability", 1) except: pass except: pass if options.output_dir != "./" and not os.path.exists( options.output_dir): os.mkdir(options.output_dir) # Input #instack = "Clean_NORM_CTF_start_wparams.hdf" #instack = "bdb:data" from logger import Logger, BaseLogger_Files if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main = Logger(BaseLogger_Files()) log_main.prefix = os.path.join(options.output_dir, "./") instack = args[0] sym = options.sym.lower() if (sym == "c1"): ERROR("There is no need to symmetrize stack for C1 symmetry", "sx3dvariability", 1) line = "" for a in sys.argv: line += " " + a log_main.add(line) if (instack[:4] != "bdb:"): if output_dir == "./": stack = "bdb:data" else: stack = "bdb:" + options.output_dir + "/data" delete_bdb(stack) junk = cmdexecute("sxcpy.py " + instack + " " + stack) else: stack = instack qt = EMUtil.get_all_attributes(stack, 'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None] * na for k in xrange(ks): #Qfile = "Q%1d"%k if options.output_dir != "./": Qfile = os.path.join(options.output_dir, "Q%1d" % k) else: Qfile = os.path.join(options.output_dir, "Q%1d" % k) #delete_bdb("bdb:Q%1d"%k) delete_bdb("bdb:" + Qfile) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) junk = 
cmdexecute("e2bdb.py " + stack + " --makevstack=bdb:" + Qfile) #DB = db_open_dict("bdb:Q%1d"%k) DB = db_open_dict("bdb:" + Qfile) for i in xrange(na): ut = qt[i] * ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() if options.output_dir == "./": delete_bdb("bdb:sdata") else: delete_bdb("bdb:" + options.output_dir + "/" + "sdata") #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q") sdata = "bdb:" + options.output_dir + "/" + "sdata" print(sdata) junk = cmdexecute("e2bdb.py " + options.output_dir + " --makevstack=" + sdata + " --filt=Q") #junk = cmdexecute("ls EMAN2DB/sdata*") #a = get_im("bdb:sdata") a = get_im(sdata) a.set_attr("variabilitysymmetry", sym) #a.write_image("bdb:sdata") a.write_image(sdata) else: sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 if len(args) == 1: stack = args[0] else: print(("usage: " + usage)) print(("Please run '" + progname + " -h' for detailed options")) return 1 t0 = time() # obsolete flags options.MPI = True options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid) if options.VAR and options.SND: ERROR("Only one of var and SND can be set!", "sx3dvariability", myid) exit() if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR( "When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid) exit() #if options.SND and (options.ave2D or options.ave3D): # ERROR("When SND is set, the 
program cannot output ave2D or ave3D", "sx3dvariability", 1, myid) # exit() if options.nvec > 0: ERROR("PCA option not implemented", "sx3dvariability", 1, myid) exit() if options.nvec > 0 and options.ave3D == None: ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid) exit() import string options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: if options.output_dir != "./" and not os.path.exists( options.output_dir): os.mkdir(options.output_dir) img_per_grp = options.img_per_grp nvec = options.nvec radiuspca = options.radiuspca from logger import Logger, BaseLogger_Files #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main = Logger(BaseLogger_Files()) log_main.prefix = os.path.join(options.output_dir, "./") if myid == main_node: line = "" for a in sys.argv: line += " " + a log_main.add(line) log_main.add("-------->>>Settings given by all options<<<-------") log_main.add("instack :" + stack) log_main.add("output_dir :" + options.output_dir) log_main.add("var3d :" + options.var3D) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" #print_begin_msg("sx3dvariability") msg = "sx3dvariability" log_main.add(msg) print(line, msg) msg = ("%-70s: %s\n" % ("Input stack", stack)) log_main.add(msg) print(line, msg) symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() if options.sym != "c1": imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry").lower() if (i != options.sym): ERROR( "The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid) except: ERROR( "Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid) from utilities import 
get_symt i = len(get_symt(options.sym)) if ((nima / i) * i != nima): ERROR( "The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid) symbaselen = nima / i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) Tracker = {} Tracker["total_stack"] = nima if options.decimate == 1.: if options.window != 0: nx = options.window ny = options.window else: if options.window == 0: nx = int(nx * options.decimate) ny = int(ny * options.decimate) else: nx = int(options.window * options.decimate) ny = nx Tracker["nx"] = nx Tracker["ny"] = ny Tracker["nz"] = nx symbaselen = bcast_number_to_all(symbaselen) if radiuspca == -1: radiuspca = nx / 2 - 2 if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = "%-70s: %d\n" % ("Number of projection", nima) log_main.add(msg) print(line, msg) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) """ if options.SND: from projection import prep_vol, prgs from statistics import im_diff from utilities import get_im, model_circle, get_params_proj, set_params_proj from utilities import get_ctf, generate_ctf from filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff 
set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: #varList = EMData.read_images(stack, range(img_begin, img_end)) varList = [] this_image = EMData() for index_of_particle in xrange(img_begin, img_end): this_image.read_image(stack, index_of_particle) varList.append( image_decimate_window_xform_ctf(this_image, options.decimate, options.window, options.CTF)) else: from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from utilities import model_blank, nearest_proj, model_circle from applications import pca from statistics import avgvar, avgvar_ctf, ccc from filter import filt_tanl from morphology import threshold, square_root from projection import project, prep_vol, prgs from sets import Set if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in xrange(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x * 10000 + psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = "%-70s: %d\n" % ("Number of neighboring projections", img_per_grp) log_main.add(msg) print(line, msg) msg = "...... 
Finding neighboring projections\n" log_main.add(msg) print(line, msg) if options.VERBOSE: msg = "Number of images per group: %d" % img_per_grp log_main.add(msg) print(line, msg) msg = "Now grouping projections" log_main.add(msg) print(line, msg) proj_angles.sort() proj_angles_list = [0.0] * (nima * 4) if myid == main_node: for i in xrange(nima): proj_angles_list[i * 4] = proj_angles[i][1] proj_angles_list[i * 4 + 1] = proj_angles[i][2] proj_angles_list[i * 4 + 2] = proj_angles[i][3] proj_angles_list[i * 4 + 3] = proj_angles[i][4] proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node) proj_angles = [] for i in xrange(nima): proj_angles.append([ proj_angles_list[i * 4], proj_angles_list[i * 4 + 1], proj_angles_list[i * 4 + 2], int(proj_angles_list[i * 4 + 3]) ]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) if options.VERBOSE: print("On node %2d, number of images needed to be read = %5d" % (myid, len(all_proj))) index = {} for i in xrange(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("%-70s: %.2f\n" % ("Finding neighboring projections lasted [s]", time() - t2)) log_main.add(msg) print(msg) msg = ("%-70s: %d\n" % ("Number of groups processed on the main node", len(proj_list))) log_main.add(msg) print(line, msg) if options.VERBOSE: print("Grouping projections took: ", (time() - t2) / 60, "[min]") print("Number of groups on main node: ", len(proj_list)) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("...... 
calculating the stack of 2D variances \n") log_main.add(msg) print(line, msg) if options.VERBOSE: print("Now calculating the stack of 2D variances") proj_params = [0.0] * (nima * 5) aveList = [] varList = [] if nvec > 0: eigList = [[] for i in xrange(nvec)] if options.VERBOSE: print("Begin to read images on processor %d" % (myid)) ttt = time() #imgdata = EMData.read_images(stack, all_proj) imgdata = [] for index_of_proj in xrange(len(all_proj)): #img = EMData() #img.read_image(stack, all_proj[index_of_proj]) dmg = image_decimate_window_xform_ctf( get_im(stack, all_proj[index_of_proj]), options.decimate, options.window, options.CTF) #print dmg.get_xsize(), "init" imgdata.append(dmg) if options.VERBOSE: print("Reading images on processor %d done, time = %.2f" % (myid, time() - ttt)) print("On processor %d, we got %d images" % (myid, len(imgdata))) mpi_barrier(MPI_COMM_WORLD) ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from applications import prepare_2d_forPCA from utilities import model_blank for i in xrange(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi]) grp_imgdata = [] for j in xrange(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj]) alpha, sx, sy, mirror = params_3D_2D_NEW( phi, theta, psi, s2x, s2y, mirror_list[i][j]) if thetaM 
<= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, phiM - phi, 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, 180 - (phiM - phi), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(phiM - phi), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(180 - (phiM - phi)), 0.0, 0.0, 1.0) set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0]) grp_imgdata.append(imgdata[mj]) #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize() if not options.no_norm: #print grp_imgdata[j].get_xsize() mask = model_circle(nx / 2 - 2, nx, nx) for k in xrange(img_per_grp): ave, std, minn, maxx = Util.infomask( grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std del mask if options.fl > 0.0: from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2 * nx ny2 = 2 * ny if options.CTF: from utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d( fft( filt_tanl( filt_ctf( fft( pad(grp_imgdata[k], nx2, ny2, 1, 0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa)), nx, ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: for k in xrange(img_per_grp): grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: from utilities import pad, read_text_file from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2 * nx ny2 = 2 * ny if options.CTF: from 
utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d( fft( filt_ctf(fft( pad(grp_imgdata[k], nx2, ny2, 1, 0.0)), grp_imgdata[k].get_attr("ctf"), binary=1)), nx, ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) ''' if i < 10 and myid == main_node: for k in xrange(10): grp_imgdata[k].write_image("grp%03d.hdf"%i, k) ''' """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("pp.hdf", pp) """ ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata) """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("qq.hdf", pp) """ var = model_blank(nx, ny) for q in grp_imgdata: Util.add_img2(var, q) Util.mul_scalar(var, 1.0 / (len(grp_imgdata) - 1)) # Switch to std dev var = square_root(threshold(var)) #if options.CTF: ave, var = avgvar_ctf(grp_imgdata, mode="a") #else: ave, var = avgvar(grp_imgdata, mode="a") """ if myid == main_node: ave.write_image("avgv.hdf",i) var.write_image("varv.hdf",i) """ set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) if options.VERBOSE: print("%5.2f%% done on processor %d" % (i * 100.0 / len(proj_list), myid)) if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in xrange(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ del imgdata # To this point, all averages, variances, and eigenvectors are computed if options.ave2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node: for 
im in xrange(len(aveList)): aveList[im].write_image( os.path.join(options.output_dir, options.ave2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im + i + 70000) """ nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol = fpol(ave, Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image( os.path.join(options.output_dir, options.ave2D), km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in xrange(len(aveList)): send_EMData(aveList[im], main_node, im + myid + 70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) """ if options.ave3D: from fundamentals import fpol if options.VERBOSE: print("Reconstructing 3D average volume") ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" ave3D = fpol(ave3D, Tracker["nx"], 
Tracker["nx"], Tracker["nx"]) ave3D.write_image( os.path.join(options.output_dir, options.ave3D)) msg = ("%-70s: %s\n" % ( "Writing to the disk volume reconstructed from averages as", options.ave3D)) log_main.add(msg) print(line, msg) del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList if nvec > 0: for k in xrange(nvec): if options.VERBOSE: print("Reconstruction eigenvolumes", k) cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image( os.path.join(options.outpout_dir, "eig3d_%03d.hdf" % (k, ITER))) Util.mul_img(eig3D, model_circle(radiuspca, nx, nx, nx)) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in xrange(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj( eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" u = int(u[0]) msg = (" Eigenvector: ", k, " number changed ", int(icont[0])) log_main.add(msg) print(line, msg) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d if options.ave3D: del ave3D if options.var2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node: for im in xrange(len(varList)): tmpvol = fpol(varList[im], Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image( os.path.join(options.output_dir, 
options.var2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im + i + 70000) tmpvol = fpol(ave, Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image( os.path.join(options.output_dir, options.var2D, km)) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in xrange(len(varList)): send_EMData(varList[im], main_node, im + myid + 70000) # What with the attributes?? mpi_barrier(MPI_COMM_WORLD) if options.var3D: if myid == main_node and options.VERBOSE: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("Reconstructing 3D variability volume") log_main.add(msg) print(line, msg) t6 = time() # radiusvar = options.radius # if( radiusvar < 0 ): radiusvar = nx//2 -3 res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad) #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ) if myid == main_node: from fundamentals import fpol res = fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"]) res.write_image(os.path.join(options.output_dir, options.var3D)) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("%-70s: %.2f\n" % ("Reconstructing 3D variability took [s]", time() - t6)) log_main.add(msg) print(line, msg) if options.VERBOSE: print("Reconstruction took: %.2f [min]" % ((time() - t6) / 60)) if myid == main_node: line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("%-70s: %.2f\n" % ("Total time for these computations [s]", time() - t0)) print(line, msg) log_main.add(msg) if options.VERBOSE: print("Total time for these computations: %.2f [min]" % ((time() - t0) / 60)) line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" msg = ("sx3dvariability") print(line, msg) log_main.add(msg) from mpi import mpi_finalize mpi_finalize() if RUNNING_UNDER_MPI: global_def.MPI = False 
global_def.BATCH = False
def do_volume_mrk03(ref_data):
	"""
	Reconstruct, mask, normalize and filter a 3-D reference volume (MPI).

	data - projections (scattered between cpus) or the volume.  If volume, just do the volume processing
	options - the same for all cpus
	return - volume the same for all cpus

	ref_data is a 4-element list:
	  0 - data: list of projections distributed over the MPI ranks, or an
	      EMData volume (reconstruction is then skipped)
	  1 - Tracker: dict of program state; reads "constants" (CTF, snr, sym,
	      npad, radius, nnxo, mask3D), "bckgnoise", "smearstep", "lowpass",
	      "falloff" and optionally "local_filter"
	  2 - iter: iteration number (only used by commented-out debug output)
	  3 - mpi_comm: MPI communicator, or None for MPI_COMM_WORLD
	"""
	from EMAN2          import Util
	from mpi            import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
	from filter         import filt_table
	from reconstruction import recons3d_4nn_MPI, recons3d_4nnw_MPI  # recons3d_4nn_ctf_MPI
	from utilities      import bcast_EMData_to_all, bcast_number_to_all, model_blank
	from fundamentals   import rops_table, fftip, fft
	import types
	# Retrieve the function specific input arguments from ref_data
	data     = ref_data[0]
	Tracker  = ref_data[1]
	iter     = ref_data[2]
	mpi_comm = ref_data[3]
	if(mpi_comm == None):  mpi_comm = MPI_COMM_WORLD
	myid  = mpi_comm_rank(mpi_comm)
	nproc = mpi_comm_size(mpi_comm)
	# "local_filter" is optional in Tracker; absence means no local filtering
	try:     local_filter = Tracker["local_filter"]
	except:  local_filter = False
	#=========================================================================
	# volume reconstruction
	if( type(data) == types.ListType ):
		if Tracker["constants"]["CTF"]:
			# weighted CTF reconstruction including the background noise model
			#vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
			#		symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
			vol = recons3d_4nnw_MPI(myid, data, Tracker["bckgnoise"], Tracker["constants"]["snr"], \
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
		else:
			vol = recons3d_4nn_MPI    (myid, data,\
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
	else:
		vol = data
	if myid == 0:
		from morphology import threshold
		from filter     import filt_tanl, filt_btwl
		from utilities  import model_circle, get_im
		import types
		nx = vol.get_xsize()
		# Build the 3-D mask: None -> spherical mask with the radius rescaled
		# from the original image size; "auto" -> adaptive mask; otherwise a
		# file name or an EMData object supplied by the caller.
		if(Tracker["constants"]["mask3D"] == None):
			mask3D = model_circle(int(Tracker["constants"]["radius"]*float(nx)/float(Tracker["constants"]["nnxo"])+0.5), nx, nx, nx)
		elif(Tracker["constants"]["mask3D"] == "auto"):
			from utilities import adaptive_mask
			mask3D = adaptive_mask(vol)
		else:
			if( type(Tracker["constants"]["mask3D"]) == types.StringType ):
				mask3D = get_im(Tracker["constants"]["mask3D"])
			else:
				mask3D = (Tracker["constants"]["mask3D"]).copy()
			nxm = mask3D.get_xsize()
			if( nx != nxm ):
				# rescale the user-supplied mask to the current volume size
				from fundamentals import rot_shift3D
				mask3D = Util.window(rot_shift3D(mask3D, scale=float(nx)/float(nxm)), nx, nx, nx)
				nxm = mask3D.get_xsize()
				assert(nx == nxm)
		# normalize under the mask, clip negative densities, apply the mask
		stat = Util.infomask(vol, mask3D, False)
		vol -= stat[0]
		Util.mul_scalar(vol, 1.0/stat[1])
		vol = threshold(vol)
		Util.mul_img(vol, mask3D)
		if not local_filter:
			# global low-pass: table filter or tangent low-pass filter
			if( type(Tracker["lowpass"]) == types.ListType ):
				vol = filt_table(vol, Tracker["lowpass"])
			else:
				vol = filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"])
	if local_filter:
		from morphology import binarize
		# distribute the mask size from rank 0 to everybody
		if(myid == 0):  nx = mask3D.get_xsize()
		else:           nx = 0
		nx = bcast_number_to_all(nx, source_node = 0)
		#  only main processor needs the two input volumes
		if(myid == 0):
			mask = binarize(mask3D, 0.5)
			locres = get_im(Tracker["local_filter"])
			lx = locres.get_xsize()
			if(lx != nx):
				if(lx < nx):
					# decimate the volume/mask to the local-resolution map size
					from fundamentals import fdecimate, rot_shift3D
					mask = Util.window(rot_shift3D(mask, scale=float(lx)/float(nx)), lx, lx, lx)
					vol = fdecimate(vol, lx, lx, lx)
				else:  ERROR("local filter cannot be larger than input volume", "user function", 1)
			stat = Util.infomask(vol, mask, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
		else:
			# non-root ranks hold placeholders; real data arrives by broadcast
			lx = 0
			locres = model_blank(1, 1, 1)
			vol = model_blank(1, 1, 1)
		lx = bcast_number_to_all(lx, source_node = 0)
		if( myid != 0 ):  mask = model_blank(lx, lx, lx)
		bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
		from filter import filterlocal
		# distributed filtering by the local-resolution map
		vol = filterlocal( locres, vol, mask, Tracker["falloff"], myid, 0, nproc)
		if myid == 0:
			if(lx < nx):
				# interpolate back to the full volume size
				from fundamentals import fpol
				vol = fpol(vol, nx, nx, nx)
			vol = threshold(vol)
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
		else:
			vol = model_blank(nx, nx, nx)
	# NOTE(review): dead code kept as a string literal by the original author.
	"""
	else:
		if myid == 0:
			#from utilities import write_text_file
			#write_text_file(rops_table(vol,1),"goo.txt")
			stat = Util.infomask(vol, mask3D, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
			vol = threshold(vol)
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
	"""
	# broadcast volume so every rank returns the identical result
	bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
	#=========================================================================
	return vol
def dovolume( ref_data ):
	"""Filter, normalize, mask and optionally center a raw 3-D average.

	Corresponds to what do_volume does, used as the reference-preparation
	user function in 3D alignment.

	Input: list ref_data
	  0 - mask
	  1 - center flag (1 = center the volume on its phase center of gravity)
	  2 - raw average
	  3 - fsc result (present in the interface but not used here; the filter
	      parameters come from "flaa.txt" or fall back to defaults)
	Output: (vol, cs) - filtered, centered, and masked reference image plus
	  the applied center shift (``[0.0, 0.0, 0.0]`` when no centering).

	BUG FIX: the centering branch referenced an undefined name ``volf``
	(``cs = volf.phase_cog()`` / ``volf = fshift(volf, ...)``), raising
	NameError whenever centering was requested, and the computed shift was
	never applied to the returned volume.  Both now operate on ``vol``.
	"""
	from utilities    import print_msg, read_text_row
	from filter       import fit_tanh, filt_tanl
	from fundamentals import fshift
	from morphology   import threshold
	global ref_ali2d_counter
	ref_ali2d_counter += 1
	# progress diagnostic: self-similarity of the raw average under the mask
	fl = ref_data[2].cmp("dot", ref_data[2], {"negative":0, "mask":ref_data[0]} )
	print_msg("do_volume user function Step = %5d GOAL = %10.3e\n"%(ref_ali2d_counter,fl))
	# normalize under the mask and clip negative densities
	stat = Util.infomask(ref_data[2], ref_data[0], False)
	vol = ref_data[2] - stat[0]
	Util.mul_scalar(vol, 1.0/stat[1])
	vol = threshold(vol)
	#Util.mul_img(vol, ref_data[0])
	# filter parameters: first row of "flaa.txt" if present, else defaults
	try:
		aa = read_text_row("flaa.txt")[0]
		fl = aa[0]
		aa = aa[1]
	except:
		fl = 0.4
		aa = 0.2
	msg = "Tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n"%(fl, aa)
	print_msg(msg)
	from utilities    import read_text_file
	from fundamentals import rops_table, fftip, fft
	from filter       import filt_table, filt_tanl, filt_btwl
	fftip(vol)
	try:
		# adjust the rotational power spectrum to the reference one, if available
		rt = read_text_file( "pwreference.txt" )
		ro = rops_table(vol)
		#  Here unless I am mistaken it is enough to take the beginning of the reference pw.
		for i in xrange(1, len(ro)):  ro[i] = (rt[i]/ro[i])**0.5
		vol = fft( filt_table( filt_tanl(vol, fl, aa), ro) )
		msg = "Power spectrum adjusted\n"
		print_msg(msg)
	except:
		# no reference power spectrum - apply the tangent low-pass only
		vol = fft( filt_tanl(vol, fl, aa) )
	# renormalize, clip, apply a fixed Butterworth low-pass, and mask
	stat = Util.infomask(vol, ref_data[0], False)
	vol -= stat[0]
	Util.mul_scalar(vol, 1.0/stat[1])
	vol = threshold(vol)
	vol = filt_btwl(vol, 0.38, 0.5)
	Util.mul_img(vol, ref_data[0])
	if ref_data[1] == 1:
		# center the volume on its phase center of gravity
		cs = vol.phase_cog()
		msg = "Center x = %10.3f Center y = %10.3f Center z = %10.3f\n"%(cs[0], cs[1], cs[2])
		print_msg(msg)
		vol = fshift(vol, -cs[0], -cs[1], -cs[2])
	else:
		cs = [0.0]*3
	return vol, cs
def spruce_up_var_m(refdata):
	"""Filter, normalize, mask and optionally align a set of reference volumes.

	refdata is a 7-element list:
	  0 - numref: number of reference volumes
	  1 - outdir: directory holding vol%04d.hdf; receives volf%04d.hdf
	  2 - fscc: FSC curves used to derive the filter, or None for defaults
	  3 - total_iter: iteration number used in the file names
	  4 - varf: variance image applied with filter_by_image, or None
	  5 - mask: 3-D mask applied to every volume
	  6 - ali50S: if true, align volumes 1..numref-1 to volume 0 within the
	      50S mask read from "mask-50S.spi"

	Writes the processed volumes to outdir/volf%04d.hdf; returns None.

	BUG FIX: the log message referenced an undefined name ``fflmin``,
	raising NameError on every call; it must be ``flmin``.  The no-op
	``aamin = aamin`` assignment was removed as well.
	"""
	from utilities  import print_msg
	from utilities  import model_circle, get_im
	from filter     import filt_tanl, filt_gaussl
	from morphology import threshold
	import os
	numref     = refdata[0]
	outdir     = refdata[1]
	fscc       = refdata[2]
	total_iter = refdata[3]
	varf       = refdata[4]
	mask       = refdata[5]
	ali50S     = refdata[6]
	if ali50S:
		mask_50S = get_im("mask-50S.spi")
	if fscc is None:
		# no FSC available yet - conservative default filter
		flmin = 0.4
		aamin = 0.1
	else:
		# most restrictive filter over all FSC curves
		flmin, aamin, idmin = minfilt(fscc)
	msg = "Minimum tangent filter: cut-off frequency = %10.3f fall-off = %10.3f\n" % (flmin, aamin)
	print_msg(msg)
	for i in xrange(numref):
		volf = get_im(os.path.join(outdir, "vol%04d.hdf" % total_iter), i)
		if not (varf is None):
			volf = volf.filter_by_image(varf)
		volf = filt_tanl(volf, flmin, aamin)
		# normalize within the mask
		stat = Util.infomask(volf, mask, True)
		volf -= stat[0]
		Util.mul_scalar(volf, 1.0 / stat[1])
		nx = volf.get_xsize()
		# subtract the background level estimated in a thin outer shell
		stat = Util.infomask(
			volf,
			model_circle(nx // 2 - 2, nx, nx, nx) - model_circle(nx // 2 - 6, nx, nx, nx),
			True)
		volf -= stat[0]
		Util.mul_img(volf, mask)
		volf = threshold(volf)
		volf = filt_gaussl(volf, 0.4)
		if ali50S:
			if i == 0:
				# the first volume becomes the alignment reference
				v50S_0 = volf.copy()
				v50S_0 *= mask_50S
			else:
				from applications import ali_vol_3
				from fundamentals import rot_shift3D
				v50S_i = volf.copy()
				v50S_i *= mask_50S
				# 3-D alignment of the 50S region against the reference
				params = ali_vol_3(v50S_i, v50S_0, 10.0, 0.5, mask=mask_50S)
				volf = rot_shift3D(volf, params[0], params[1], params[2],
						params[3], params[4], params[5], 1.0)
		volf.write_image(os.path.join(outdir, "volf%04d.hdf" % total_iter), i)
def do_volume_mrk02(ref_data):
	"""
	Reconstruct, mask, normalize and filter a 3-D reference volume (MPI).

	data - projections (scattered between cpus) or the volume.  If volume, just do the volume processing
	options - the same for all cpus
	return - volume the same for all cpus

	ref_data is a 4-element list:
	  0 - data: list of projections distributed over the MPI ranks, or an
	      EMData volume (reconstruction is then skipped)
	  1 - Tracker: dict of program state; reads "constants" (CTF, snr, sym,
	      npad, radius, nnxo, mask3D, sausage, pixel_size), "smearstep",
	      "PWadjustment", "upscale", "lowpass", "falloff" and optionally
	      "local_filter"
	  2 - iter: iteration number (only used by commented-out debug output)
	  3 - mpi_comm: MPI communicator, or None for MPI_COMM_WORLD
	"""
	from EMAN2          import Util
	from mpi            import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
	from filter         import filt_table
	from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities      import bcast_EMData_to_all, bcast_number_to_all, model_blank
	from fundamentals   import rops_table, fftip, fft
	import types
	# Retrieve the function specific input arguments from ref_data
	data     = ref_data[0]
	Tracker  = ref_data[1]
	iter     = ref_data[2]
	mpi_comm = ref_data[3]
	if (mpi_comm == None):  mpi_comm = MPI_COMM_WORLD
	myid  = mpi_comm_rank(mpi_comm)
	nproc = mpi_comm_size(mpi_comm)
	# "local_filter" is optional in Tracker; absence means no local filtering
	try:     local_filter = Tracker["local_filter"]
	except:  local_filter = False
	#=========================================================================
	# volume reconstruction
	if (type(data) == types.ListType):
		if Tracker["constants"]["CTF"]:
			vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
		else:
			vol = recons3d_4nn_MPI    (myid, data,\
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
	else:
		vol = data
	if myid == 0:
		from morphology import threshold
		from filter     import filt_tanl, filt_btwl
		from utilities  import model_circle, get_im
		import types
		nx = vol.get_xsize()
		# Build the 3-D mask: None -> spherical mask with the radius rescaled
		# from the original image size; "auto" -> adaptive mask; otherwise a
		# file name or an EMData object supplied by the caller.
		if (Tracker["constants"]["mask3D"] == None):
			mask3D = model_circle(
				int(Tracker["constants"]["radius"] * float(nx) / float(Tracker["constants"]["nnxo"]) + 0.5), nx, nx, nx)
		elif (Tracker["constants"]["mask3D"] == "auto"):
			from utilities import adaptive_mask
			mask3D = adaptive_mask(vol)
		else:
			if (type(Tracker["constants"]["mask3D"]) == types.StringType):
				mask3D = get_im(Tracker["constants"]["mask3D"])
			else:
				mask3D = (Tracker["constants"]["mask3D"]).copy()
			nxm = mask3D.get_xsize()
			if (nx != nxm):
				# rescale the user-supplied mask to the current volume size
				from fundamentals import rot_shift3D
				mask3D = Util.window(
					rot_shift3D(mask3D, scale=float(nx) / float(nxm)), nx, nx, nx)
				nxm = mask3D.get_xsize()
				assert (nx == nxm)
		# normalize under the mask, clip negative densities, apply the mask
		stat = Util.infomask(vol, mask3D, False)
		vol -= stat[0]
		Util.mul_scalar(vol, 1.0 / stat[1])
		vol = threshold(vol)
		Util.mul_img(vol, mask3D)
		if (Tracker["PWadjustment"]):
			# adjust the rotational power spectrum to a reference curve read
			# from the file named by Tracker["PWadjustment"]
			from utilities import read_text_file, write_text_file
			rt = read_text_file(Tracker["PWadjustment"])
			fftip(vol)
			ro = rops_table(vol)
			#  Here unless I am mistaken it is enough to take the beginning of the reference pw.
			for i in xrange(1, len(ro)):
				ro[i] = (rt[i] / ro[i])**Tracker["upscale"]
			#write_text_file(rops_table(filt_table( vol, ro),1),"foo.txt")
			if Tracker["constants"]["sausage"]:
				# boost two frequency bands (helical "sausage" correction);
				# band centers 0.10 and 0.215 are in 1/Angstrom
				ny = vol.get_ysize()
				y = float(ny)
				from math import exp
				for i in xrange(len(ro)):  ro[i] *= \
					(1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))
			if local_filter:
				# skip low-pass filtration
				vol = fft(filt_table(vol, ro))
			else:
				if (type(Tracker["lowpass"]) == types.ListType):
					vol = fft(filt_table(filt_table(vol, Tracker["lowpass"]), ro))
				else:
					vol = fft(filt_table(filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"]), ro))
			del ro
		else:
			if Tracker["constants"]["sausage"]:
				# same two-band boost, without power-spectrum adjustment;
				# NOTE(review): filt_table's return value is discarded here -
				# presumably it operates in place on the Fourier volume; confirm.
				ny = vol.get_ysize()
				y = float(ny)
				ro = [0.0] * (ny // 2 + 2)
				from math import exp
				for i in xrange(len(ro)):  ro[i] = \
					(1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))
				fftip(vol)
				filt_table(vol, ro)
				del ro
			if not local_filter:
				# global low-pass: table filter or tangent low-pass filter
				if (type(Tracker["lowpass"]) == types.ListType):
					vol = filt_table(vol, Tracker["lowpass"])
				else:
					vol = filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"])
			if Tracker["constants"]["sausage"]:
				# return from Fourier to real space
				vol = fft(vol)
	if local_filter:
		from morphology import binarize
		# distribute the mask size from rank 0 to everybody
		if (myid == 0):  nx = mask3D.get_xsize()
		else:            nx = 0
		nx = bcast_number_to_all(nx, source_node=0)
		#  only main processor needs the two input volumes
		if (myid == 0):
			mask = binarize(mask3D, 0.5)
			locres = get_im(Tracker["local_filter"])
			lx = locres.get_xsize()
			if (lx != nx):
				if (lx < nx):
					# decimate the volume/mask to the local-resolution map size
					from fundamentals import fdecimate, rot_shift3D
					mask = Util.window(
						rot_shift3D(mask, scale=float(lx) / float(nx)), lx, lx, lx)
					vol = fdecimate(vol, lx, lx, lx)
				else:
					ERROR("local filter cannot be larger than input volume", "user function", 1)
			stat = Util.infomask(vol, mask, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0 / stat[1])
		else:
			# non-root ranks hold placeholders; real data arrives by broadcast
			lx = 0
			locres = model_blank(1, 1, 1)
			vol = model_blank(1, 1, 1)
		lx = bcast_number_to_all(lx, source_node=0)
		if (myid != 0):  mask = model_blank(lx, lx, lx)
		bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
		from filter import filterlocal
		# distributed filtering by the local-resolution map
		vol = filterlocal(locres, vol, mask, Tracker["falloff"], myid, 0, nproc)
		if myid == 0:
			if (lx < nx):
				# interpolate back to the full volume size
				from fundamentals import fpol
				vol = fpol(vol, nx, nx, nx)
			vol = threshold(vol)
			vol = filt_btwl(vol, 0.38, 0.5)  # This will have to be corrected.
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
		else:
			vol = model_blank(nx, nx, nx)
	else:
		if myid == 0:
			# final normalization, clipping, fixed Butterworth low-pass, mask
			#from utilities import write_text_file
			#write_text_file(rops_table(vol,1),"goo.txt")
			stat = Util.infomask(vol, mask3D, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0 / stat[1])
			vol = threshold(vol)
			vol = filt_btwl(vol, 0.38, 0.5)  # This will have to be corrected.
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
	# broadcast volume so every rank returns the identical result
	bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
	#=========================================================================
	return vol
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): # the final ali2d parameters already combine shifts operation first and rotation operation second for parameters converted from 3D if mirror: m = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0) return alpha, sx, sy, m progname = os.path.basename(sys.argv[0]) usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl= --aa= --sym=symmetry --CTF" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--output_dir", type="string" , default="./", help="Output directory") parser.add_option("--ave2D", type="string" , default=False, help="Write to the disk a stack of 2D averages") parser.add_option("--var2D", type="string" , default=False, help="Write to the disk a stack of 2D variances") parser.add_option("--ave3D", type="string" , default=False, help="Write to the disk reconstructed 3D average") parser.add_option("--var3D", type="string" , default=False, help="Compute 3D variability (time consuming!)") parser.add_option("--img_per_grp", type="int" , default=100, help="Number of neighbouring projections.(Default is 100)") parser.add_option("--no_norm", action="store_true", default=False, help="Do not use normalization.(Default is to apply normalization)") #parser.add_option("--radius", type="int" , default=-1 , help="radius for 3D variability" ) parser.add_option("--npad", type="int" , default=2 , help="Number of time to pad the original images.(Default is 2 times padding)") parser.add_option("--sym" , type="string" , default="c1", help="Symmetry. (Default is no symmetry)") parser.add_option("--fl", type="float" , default=0.0, help="Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. 
(Default - no filtration)") parser.add_option("--aa", type="float" , default=0.02 , help="Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)") parser.add_option("--CTF", action="store_true", default=False, help="Use CFT correction.(Default is no CTF correction)") #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) #parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option("--VAR" , action="store_true", default=False, help="Stack of input consists of 2D variances (Default False)") parser.add_option("--decimate", type ="float", default=0.25, help="Image decimate rate, a number less than 1. (Default is 0.25)") parser.add_option("--window", type ="int", default=0, help="Target image size relative to original image size. 
(Default value is zero.)") #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") #parser.add_option("--nvec", type="int" , default=0 , help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)") parser.add_option("--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") parser.add_option("--overhead", type ="float", default=0.5, help="python overhead per CPU.") (options,args) = parser.parse_args() ##### from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX #from mpi import * from applications import MPI_start_end from reconstruction import recons3d_em, recons3d_em_MPI from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from utilities import print_begin_msg, print_end_msg, print_msg from utilities import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv from utilities import bcast_EMData_to_all, bcast_number_to_all from utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. 
PAP 01/27/2015 from EMAN2db import db_open_dict # Set up global variables related to bdb cache if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() # Set up global variables related to ERROR function global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ if RUNNING_UNDER_MPI: global_def.MPI = True if options.output_dir =="./": current_output_dir = os.path.abspath(options.output_dir) else: current_output_dir = options.output_dir if options.symmetrize : if RUNNING_UNDER_MPI: try: sys.argv = mpi_init(len(sys.argv), sys.argv) try: number_of_proc = mpi_comm_size(MPI_COMM_WORLD) if( number_of_proc > 1 ): ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1) except: pass except: pass if not os.path.exists(current_output_dir): os.mkdir(current_output_dir) # Input #instack = "Clean_NORM_CTF_start_wparams.hdf" #instack = "bdb:data" from logger import Logger,BaseLogger_Files if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt")) log_main=Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") instack = args[0] sym = options.sym.lower() if( sym == "c1" ): ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1) line ="" for a in sys.argv: line +=" "+a log_main.add(line) if(instack[:4] !="bdb:"): #if output_dir =="./": stack = "bdb:data" stack = "bdb:"+current_output_dir+"/data" delete_bdb(stack) junk = cmdexecute("sxcpy.py "+instack+" "+stack) else: stack = instack qt = EMUtil.get_all_attributes(stack,'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None]*na for k in range(ks): #Qfile = "Q%1d"%k #if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k) Qfile = os.path.join(current_output_dir, "Q%1d"%k) #delete_bdb("bdb:Q%1d"%k) delete_bdb("bdb:"+Qfile) #junk = cmdexecute("e2bdb.py 
"+stack+" --makevstack=bdb:Q%1d"%k) junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:"+Qfile) #DB = db_open_dict("bdb:Q%1d"%k) DB = db_open_dict("bdb:"+Qfile) for i in range(na): ut = qt[i]*ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() #if options.output_dir =="./": delete_bdb("bdb:sdata") delete_bdb("bdb:" + current_output_dir + "/"+"sdata") #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q") sdata = "bdb:"+current_output_dir+"/"+"sdata" print(sdata) junk = cmdexecute("e2bdb.py " + current_output_dir +" --makevstack="+sdata +" --filt=Q") #junk = cmdexecute("ls EMAN2DB/sdata*") #a = get_im("bdb:sdata") a = get_im(sdata) a.set_attr("variabilitysymmetry",sym) #a.write_image("bdb:sdata") a.write_image(sdata) else: from fundamentals import window2d sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 shared_comm = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL) myid_on_node = mpi_comm_rank(shared_comm) no_of_processes_per_group = mpi_comm_size(shared_comm) masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node) color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \ shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm) overhead_loading = options.overhead*number_of_proc #memory_per_node = options.memory_per_node #if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group keepgoing = 1 current_window = options.window current_decimate = 
options.decimate if len(args) == 1: stack = args[0] else: print(( "usage: " + usage)) print(( "Please run '" + progname + " -h' for detailed options")) return 1 t0 = time() # obsolete flags options.MPI = True #options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid) #if options.VAR and options.SND: # ERROR("Only one of var and SND can be set!", "sx3dvariability", myid) if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid) #if options.SND and (options.ave2D or options.ave3D): # ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid) #if options.nvec > 0 : # ERROR("PCA option not implemented", "sx3dvariability", 1, myid) #if options.nvec > 0 and options.ave3D == None: # ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", 1, myid) if current_decimate>1.0 or current_decimate<0.0: ERROR("Decimate rate should be a value between 0.0 and 1.0", "sx3dvariability", 1, myid) if current_window < 0.0: ERROR("Target window size should be always larger than zero", "sx3dvariability", 1, myid) if myid == main_node: img = get_image(stack, 0) nx = img.get_xsize() ny = img.get_ysize() if(min(nx, ny) < current_window): keepgoing = 0 keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD) if keepgoing == 0: ERROR("The target window size cannot be larger than the size of decimated image", "sx3dvariability", 1, myid) import string options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)# Never delete output_dir in the program! 
img_per_grp = options.img_per_grp #nvec = options.nvec radiuspca = options.radiuspca from logger import Logger,BaseLogger_Files #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main=Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") if myid == main_node: line = "" for a in sys.argv: line +=" "+a log_main.add(line) log_main.add("-------->>>Settings given by all options<<<-------") log_main.add("Symmetry : %s"%options.sym) log_main.add("Input stack : %s"%stack) log_main.add("Output_dir : %s"%current_output_dir) if options.ave3D: log_main.add("Ave3d : %s"%options.ave3D) if options.var3D: log_main.add("Var3d : %s"%options.var3D) if options.ave2D: log_main.add("Ave2D : %s"%options.ave2D) if options.var2D: log_main.add("Var2D : %s"%options.var2D) if options.VAR: log_main.add("VAR : True") else: log_main.add("VAR : False") if options.CTF: log_main.add("CTF correction : True ") else: log_main.add("CTF correction : False ") log_main.add("Image per group : %5d"%options.img_per_grp) log_main.add("Image decimate rate : %4.3f"%current_decimate) log_main.add("Low pass filter : %4.3f"%options.fl) current_fl = options.fl if current_fl == 0.0: current_fl = 0.5 log_main.add("Current low pass filter is equivalent to cutoff frequency %4.3f for original image size"%round((current_fl*current_decimate),3)) log_main.add("Window size : %5d "%current_window) log_main.add("sx3dvariability begins") symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() nnxo = nx nnyo = ny if options.sym != "c1" : imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry").lower() if(i != options.sym): ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", 1, myid) except: ERROR("Input stack is not prepared for symmetry, please follow instructions", 
"sx3dvariability", 1, myid) from utilities import get_symt i = len(get_symt(options.sym)) if((nima/i)*i != nima): ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", 1, myid) symbaselen = nima/i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nnxo = 0 nnyo = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) nnxo = bcast_number_to_all(nnxo) nnyo = bcast_number_to_all(nnyo) if current_window > max(nx, ny): ERROR("Window size is larger than the original image size", "sx3dvariability", 1) if current_decimate == 1.: if current_window !=0: nx = current_window ny = current_window else: if current_window == 0: nx = int(nx*current_decimate+0.5) ny = int(ny*current_decimate+0.5) else: nx = int(current_window*current_decimate+0.5) ny = nx symbaselen = bcast_number_to_all(symbaselen) # check FFT prime number from fundamentals import smallprime is_fft_friendly = (nx == smallprime(nx)) if not is_fft_friendly: if myid == main_node: log_main.add("The target image size is not a product of small prime numbers") log_main.add("Program adjusts the input settings!") ### two cases if current_decimate == 1.: nx = smallprime(nx) ny = nx current_window = nx # update if myid == main_node: log_main.add("The window size is updated to %d."%current_window) else: if current_window == 0: nx = smallprime(int(nx*current_decimate+0.5)) current_decimate = float(nx)/nnxo ny = nx if (myid == main_node): log_main.add("The decimate rate is updated to %f."%current_decimate) else: nx = smallprime(int(current_window*current_decimate+0.5)) ny = nx current_window = int(nx/current_decimate+0.5) if (myid == main_node): log_main.add("The window size is updated to %d."%current_window) if myid == main_node: log_main.add("The target image size is %d"%nx) if radiuspca == -1: radiuspca = nx/2-2 if myid == main_node: log_main.add("%-70s: %d\n"%("Number of projection", nima)) img_begin, img_end = MPI_start_end(nima, 
number_of_proc, myid) """ if options.SND: from projection import prep_vol, prgs from statistics import im_diff from utilities import get_im, model_circle, get_params_proj, set_params_proj from utilities import get_ctf, generate_ctf from filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: # 2D variance images have no shifts #varList = EMData.read_images(stack, range(img_begin, img_end)) from EMAN2 import Region for index_of_particle in range(img_begin,img_end): image = get_im(stack, index_of_proj) if current_window > 0: varList.append(fdecimate(window2d(image,current_window,current_window), nx,ny)) else: varList.append(fdecimate(image, nx,ny)) else: from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from utilities import model_blank, nearest_proj, model_circle, write_text_row, wrap_mpi_gatherv from applications import pca from statistics import avgvar, avgvar_ctf, ccc from filter import filt_tanl from morphology import threshold, square_root from projection import project, prep_vol, prgs 
from sets import Set from utilities import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send import numpy as np if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in range(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x*10000+psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() log_main.add( "%-70s: %d\n"%("Number of neighboring projections", img_per_grp)) log_main.add("...... Finding neighboring projections\n") log_main.add( "Number of images per group: %d"%img_per_grp) log_main.add( "Now grouping projections") proj_angles.sort() proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32) for i in range(nima): proj_angles_list[i][0] = proj_angles[i][1] proj_angles_list[i][1] = proj_angles[i][2] proj_angles_list[i][2] = proj_angles[i][3] proj_angles_list[i][3] = proj_angles[i][4] else: proj_angles_list = 0 proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD) proj_angles = [] for i in range(nima): proj_angles.append([proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3])]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) index = {} for i in range(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("%-70s: %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2)) log_main.add("%-70s: %d\n"%("Number of groups processed on the main node", len(proj_list))) log_main.add("Grouping projections took: %12.1f [m]"%((time()-t2)/60.)) log_main.add("Number of groups on main node: ", len(proj_list)) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("...... 
Calculating the stack of 2D variances \n") # Memory estimation. There are two memory consumption peaks # peak 1. Compute ave, var; # peak 2. Var volume reconstruction; # proj_params = [0.0]*(nima*5) aveList = [] varList = [] #if nvec > 0: eigList = [[] for i in range(nvec)] dnumber = len(all_proj)# all neighborhood set for assigned to myid pnumber = len(proj_list)*2. + img_per_grp # aveList and varList tnumber = dnumber+pnumber vol_size2 = nx**3*4.*8/1.e9 vol_size1 = 2.*nnxo**3*4.*8/1.e9 proj_size = nnxo*nnyo*len(proj_list)*4.*2./1.e9 # both aveList and varList orig_data_size = nnxo*nnyo*4.*tnumber/1.e9 reduced_data_size = nx*nx*4.*tnumber/1.e9 full_data = np.full((number_of_proc, 2), -1., dtype=np.float16) full_data[myid] = orig_data_size, reduced_data_size if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD) if myid == main_node: for iproc in range(number_of_proc): if iproc != main_node: dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD) full_data[np.where(dummy>-1)] = dummy[np.where(dummy>-1)] del dummy mpi_barrier(MPI_COMM_WORLD) full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD) # find the CPU with heaviest load minindx = np.argsort(full_data, 0) heavy_load_myid = minindx[-1][1] total_mem = sum(full_data) if myid == main_node: if current_window == 0: log_main.add("Nx: current image size = %d. Decimated by %f from %d"%(nx, current_decimate, nnxo)) else: log_main.add("Nx: current image size = %d. 
Windowed to %d, and decimated by %f from %d"%(nx, current_window, current_decimate, nnxo)) log_main.add("Nproj: number of particle images.") log_main.add("Navg: number of 2D average images.") log_main.add("Nvar: number of 2D variance images.") log_main.add("Img_per_grp: user defined image per group for averaging = %d"%img_per_grp) log_main.add("Overhead: total python overhead memory consumption = %f"%overhead_loading) log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\ (total_mem[1] + overhead_loading)) del full_data mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("Begin reading and preprocessing images on processor. Wait... ") ttt = time() #imgdata = EMData.read_images(stack, all_proj) imgdata = [ None for im in range(len(all_proj))] for index_of_proj in range(len(all_proj)): #image = get_im(stack, all_proj[index_of_proj]) if( current_window > 0): imgdata[index_of_proj] = fdecimate(window2d(get_im(stack, all_proj[index_of_proj]),current_window,current_window), nx, ny) else: imgdata[index_of_proj] = fdecimate(get_im(stack, all_proj[index_of_proj]), nx, ny) if (current_decimate> 0.0 and options.CTF): ctf = imgdata[index_of_proj].get_attr("ctf") ctf.apix = ctf.apix/current_decimate imgdata[index_of_proj].set_attr("ctf", ctf) if myid == heavy_load_myid and index_of_proj%100 == 0: log_main.add(" ...... 
%6.2f%% "%(index_of_proj/float(len(all_proj))*100.)) mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("All_proj preprocessing cost %7.2f m"%((time()-ttt)/60.)) log_main.add("Wait untill reading on all CPUs done...") ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from applications import prepare_2d_forPCA from utilities import model_blank from EMAN2 import Transform if not options.no_norm: mask = model_circle(nx/2-2, nx, nx) if options.CTF: from utilities import pad from filter import filt_ctf from filter import filt_tanl if myid == heavy_load_myid: log_main.add("Start computing 2D aveList and varList. 
Wait...") ttt = time() inner=nx//2-4 outer=inner+2 xform_proj_for_2D = [ None for i in range(len(proj_list))] for i in range(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider") phiM, thetaM, psiM, s2xM, s2yM = dpar["phi"],dpar["theta"],dpar["psi"],-dpar["tx"]*current_decimate,-dpar["ty"]*current_decimate grp_imgdata = [] for j in range(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider") alpha, sx, sy, mirror = params_3D_2D_NEW(cpar["phi"], cpar["theta"],cpar["psi"], -cpar["tx"]*current_decimate, -cpar["ty"]*current_decimate, mirror_list[i][j]) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM - cpar["phi"]), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM- cpar["phi"]), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM - cpar["phi"])), 0.0, 0.0, 1.0) imgdata[mj].set_attr("xform.align2d", Transform({"type":"2D","alpha":alpha,"tx":sx,"ty":sy,"mirror":mirror,"scale":1.0})) grp_imgdata.append(imgdata[mj]) if not options.no_norm: for k in range(img_per_grp): ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std if options.fl > 0.0: for k in range(img_per_grp): grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) # Because of background issues, only linear option works. if options.CTF: ave, var = aves_wiener(grp_imgdata, SNR = 1.0e5, interpolation_method = "linear") else: ave, var = ave_var(grp_imgdata) # Switch to std dev # threshold is not really needed,it is just in case due to numerical accuracy something turns out negative. 
var = square_root(threshold(var)) set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0] ''' if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in range(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ ''' if (myid == heavy_load_myid) and (i%100 == 0): log_main.add(" ......%6.2f%% "%(i/float(len(proj_list))*100.)) del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index if not options.no_norm: del mask if myid == main_node: del tab # At this point, all averages and variances are computed mpi_barrier(MPI_COMM_WORLD) if (myid == heavy_load_myid): log_main.add("Computing aveList and varList took %12.1f [m]"%((time()-ttt)/60.)) xform_proj_for_2D = wrap_mpi_gatherv(xform_proj_for_2D, main_node, MPI_COMM_WORLD) if (myid == main_node): write_text_row(xform_proj_for_2D, os.path.join(current_output_dir, "params.txt")) del xform_proj_for_2D mpi_barrier(MPI_COMM_WORLD) if options.ave2D: from fundamentals import fpol from applications import header if myid == main_node: log_main.add("Compute ave2D ... 
") km = 0 for i in range(number_of_proc): if i == main_node : for im in range(len(aveList)): aveList[im].write_image(os.path.join(current_output_dir, options.ave2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im+i+70000) """ nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol=fpol(ave, nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.ave2D), km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(aveList)): send_EMData(aveList[im], main_node,im+myid+70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) """ if myid == main_node: header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport = os.path.join(current_output_dir, "params.txt")) mpi_barrier(MPI_COMM_WORLD) if options.ave3D: from fundamentals import fpol t5 = time() if myid == main_node: log_main.add("Reconstruct ave3D ... 
") ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: if current_decimate != 1.0: ave3D = resample(ave3D, 1./current_decimate) ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always to the orignal image size set_pixel_size(ave3D, 1.0) ave3D.write_image(os.path.join(current_output_dir, options.ave3D)) log_main.add("Ave3D reconstruction took %12.1f [m]"%((time()-t5)/60.0)) log_main.add("%-70s: %s\n"%("The reconstructed ave3D is saved as ", options.ave3D)) mpi_barrier(MPI_COMM_WORLD) del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList ''' if nvec > 0: for k in range(nvec): if myid == main_node:log_main.add("Reconstruction eigenvolumes", k) cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image(os.path.join(options.outpout_dir, "eig3d_%03d.hdf"%(k, ITER))) Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) ) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in range(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: u = int(u[0]) log_main.add(" Eigenvector: ",k," number changed ",int(icont[0])) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d ''' if options.ave3D: del ave3D if options.var2D: from fundamentals import fpol from 
applications import header if myid == main_node: log_main.add("Compute var2D...") km = 0 for i in range(number_of_proc): if i == main_node : for im in range(len(varList)): tmpvol=fpol(varList[im], nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im+i+70000) tmpvol=fpol(ave, nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(varList)): send_EMData(varList[im], main_node, im+myid+70000)# What with the attributes?? mpi_barrier(MPI_COMM_WORLD) if myid == main_node: from applications import header header(os.path.join(current_output_dir, options.var2D), params = 'xform.projection',fimport = os.path.join(current_output_dir, "params.txt")) mpi_barrier(MPI_COMM_WORLD) if options.var3D: if myid == main_node: log_main.add("Reconstruct var3D ...") t6 = time() # radiusvar = options.radius # if( radiusvar < 0 ): radiusvar = nx//2 -3 res = recons3d_4nn_MPI(myid, varList, symmetry = options.sym, npad=options.npad) #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ) if myid == main_node: from fundamentals import fpol if current_decimate != 1.0: res = resample(res, 1./current_decimate) res = fpol(res, nnxo, nnxo, nnxo) set_pixel_size(res, 1.0) res.write_image(os.path.join(current_output_dir, options.var3D)) log_main.add("%-70s: %s\n"%("The reconstructed var3D is saved as ", options.var3D)) log_main.add("Var3D reconstruction took %f12.1 [m]"%((time()-t6)/60.0)) log_main.add("Total computation time %f12.1 [m]"%((time()-t0)/60.0)) log_main.add("sx3dvariability finishes") from mpi import mpi_finalize mpi_finalize() if RUNNING_UNDER_MPI: global_def.MPI = False global_def.BATCH = False
def main():
    """Entry point of the adaptive-mask tool.

    Reads the command line, imports the input volume, optionally low-pass
    filters it, builds a primary adaptive mask, optionally builds a second
    mask (from a file or from a geometric shape), and writes the resulting
    mask(s) to the output directory.

    Arguments:
    None

    Returns:
    None
    """
    command_args = parse_command_line()

    # Import volume
    print('Import volume.')
    input_vol = utilities.get_im(command_args.input_volume)

    # Sanity checks (validates option combinations; presumably guarantees that
    # exactly one of the threshold-selection options below is set)
    sanity_checks(command_args, input_vol)

    # Never delete an existing output directory; just reuse it.
    try:
        os.makedirs(command_args.output_dir)
    except OSError:
        print('Output directory already exists. No need to create it.')
    else:
        print('Created output directory.')
    output_prefix = os.path.join(command_args.output_dir, command_args.prefix)

    # Filter volume if specified.  The cutoff is given in absolute frequency
    # units: pixel_size / resolution.
    if command_args.low_pass_filter_resolution is not None:
        print('Filter volume to {0}A.'.format(
            command_args.low_pass_filter_resolution))
        input_vol = sparx_filter.filt_tanl(
            input_vol,
            command_args.pixel_size / command_args.low_pass_filter_resolution,
            command_args.low_pass_filter_falloff)
        input_vol.write_image(output_prefix + '_filtered_volume.hdf')
    else:
        print('Skip filter volume.')

    # Create a mask based on the (possibly filtered) volume.
    # -9999.0 acts as the "threshold unused" sentinel for adaptive_mask_scipy.
    print('Create mask')
    density_threshold = -9999.0
    nsigma = 1.0
    if command_args.mol_mass:
        density_threshold = input_vol.find_3d_threshold(
            command_args.mol_mass, command_args.pixel_size)
    elif command_args.threshold:
        density_threshold = command_args.threshold
    elif command_args.nsigma:
        nsigma = command_args.nsigma
    else:
        # Unreachable if sanity_checks enforces one selection option;
        # NOTE(review): assert is stripped under -O — confirm sanity_checks covers this.
        assert False

    if command_args.edge_type == 'cosine':
        mode = 'C'
    elif command_args.edge_type == 'gaussian':
        mode = 'G'
    else:
        assert False

    mask_first = morphology.adaptive_mask_scipy(
        input_vol,
        nsigma=nsigma,
        threshold=density_threshold,
        ndilation=command_args.ndilation,
        nerosion=command_args.nerosion,
        edge_width=command_args.edge_width,
        allow_disconnected=command_args.allow_disconnected,
        mode=mode,
        do_approx=command_args.do_old,
    )

    # Create a second mask, either from a user-supplied volume or from a
    # geometric shape padded to the size of the first mask.
    s_mask = None
    s_density_threshold = 1
    s_nsigma = 1.0
    if command_args.second_mask is not None:
        s_mask = utilities.get_im(command_args.second_mask)
        # BUGFIX: the original reset density_threshold/nsigma (the FIRST
        # mask's, already consumed) here instead of the s_ variants, so the
        # --s_nsigma path passed the stale threshold=1 to adaptive_mask_scipy.
        s_density_threshold = -9999.0
        s_nsigma = 1.0
        if command_args.s_mol_mass:
            s_density_threshold = input_vol.find_3d_threshold(
                command_args.s_mol_mass, command_args.s_pixel_size)
        elif command_args.s_threshold:
            s_density_threshold = command_args.s_threshold
        elif command_args.s_nsigma:
            s_nsigma = command_args.s_nsigma
        else:
            assert False
    elif command_args.second_mask_shape is not None:
        nx = mask_first.get_xsize()
        ny = mask_first.get_ysize()
        nz = mask_first.get_zsize()
        if command_args.second_mask_shape == 'cube':
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_blank(s_nx, s_ny, s_nz, 1)
        elif command_args.second_mask_shape == 'cylinder':
            s_radius = command_args.s_radius
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_cylinder(s_radius, s_nx, s_ny, s_nz)
        elif command_args.second_mask_shape == 'sphere':
            s_radius = command_args.s_radius
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_circle(s_radius, s_nx, s_ny, s_nz)
        else:
            assert False
        # Geometric shapes keep s_density_threshold == 1 (binary volume)
        # and are padded with zeroes to the first mask's dimensions.
        s_mask = utilities.pad(s_mask, nx, ny, nz, 0)

    if s_mask is not None:
        print('Create second mask')
        if command_args.s_edge_type == 'cosine':
            mode = 'C'
        elif command_args.s_edge_type == 'gaussian':
            mode = 'G'
        else:
            assert False
        s_mask = morphology.adaptive_mask_scipy(
            s_mask,
            nsigma=s_nsigma,
            threshold=s_density_threshold,
            ndilation=command_args.s_ndilation,
            nerosion=command_args.s_nerosion,
            edge_width=command_args.s_edge_width,
            allow_disconnected=command_args.s_allow_disconnected,
            mode=mode,
            do_approx=command_args.s_do_old)
        if command_args.s_invert:
            s_mask = 1 - s_mask
        # Write both component masks and their product as the combined mask.
        mask_first.write_image(output_prefix + '_mask_first.hdf')
        s_mask.write_image(output_prefix + '_mask_second.hdf')
        masked_combined = mask_first * s_mask
        masked_combined.write_image(output_prefix + '_mask.hdf')
    else:
        mask_first.write_image(output_prefix + '_mask.hdf')
def main(): import sys import os import math import random import pyemtbx.options import time from random import random, seed, randint from optparse import OptionParser progname = os.path.basename(sys.argv[0]) usage = progname + """ [options] <inputfile> <outputfile> Generic 2-D image processing programs. Functionality: 1. Phase flip a stack of images and write output to new file: sxprocess.py input_stack.hdf output_stack.hdf --phase_flip 2. Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size. The window size will change accordingly. sxprocess input.hdf output.hdf --changesize --ratio=0.5 3. Compute average power spectrum of a stack of 2D images with optional padding (option wn) with zeroes. sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024] 4. Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs: sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64 5. Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3): sxprocess.py bdb:test3 class_averages_generation_3.hdf list3_12.txt --isacgroup=12 --params=originalid 6. Retrieve original image numbers of images listed in ISAC output stack of averages: sxprocess.py select1.hdf ohk.txt 7. Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file. Optionally use a tangent low-pass filter. Also works for a stack of images, in which case the output is also a stack. sxprocess.py vol.hdf ref.hdf avol.hdf < 0.25 0.2> --adjpw sxprocess.py vol.hdf pw.txt avol.hdf < 0.25 0.2> --adjpw 8. Generate a 1D rotationally averaged power spectrum of an image. 
sxprocess.py vol.hdf --rotwp=rotpw.txt # Output will contain three columns: (1) rotationally averaged power spectrum (2) logarithm of the rotationally averaged power spectrum (3) integer line number (from zero to approximately to half the image size) 9. Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data. sxprocess.py --transfromparams=phi,theta,psi,tx,ty,tz input.txt output.txt The output file is then imported and 3D transformed volume computed: sxheader.py bdb:p --params=xform.projection --import=output.txt mpirun -np 2 sxrecons3d_n.py bdb:p tvol.hdf --MPI The reconstructed volume is in the position of the volume computed using the input.txt parameters and then transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz) 10. Import ctf parameters from the output of sxcter into windowed particle headers. There are three possible input files formats: (1) all particles are in one stack, (2 aor 3) particles are in stacks, each stack corresponds to a single micrograph. In each case the particles should contain a name of the micrograph of origin stores using attribute name 'ptcl_source_image'. Normally this is done by e2boxer.py during windowing. Particles whose defocus or astigmatism error exceed set thresholds will be skipped, otherwise, virtual stacks with the original way preceded by G will be created. sxprocess.py --input=bdb:data --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 # Output will be a vritual stack bdb:Gdata sxprocess.py --input="bdb:directory/stacks*" --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 To concatenate output files: cd directory e2bdb.py . --makevstack=bdb:allparticles --filt=G IMPORTANT: Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them. 11. Scale 3D shifts. 
The shifts in the input five columns text file with 3D orientation parameters will be DIVIDED by the scale factor sxprocess.py orientationparams.txt scaledparams.txt scale=0.5 12. Generate adaptive mask from a given 3-D volume. """ parser = OptionParser(usage, version=SPARXVERSION) parser.add_option( "--order", action="store_true", help= "Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.", default=False) parser.add_option("--order_lookup", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_metropolis", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_pca", action="store_true", help="Test/Debug.", default=False) parser.add_option( "--initial", type="int", default=-1, help= "Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)" ) parser.add_option( "--circular", action="store_true", help= "Select circular ordering (fisr image has to be similar to the last", default=False) parser.add_option( "--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering") parser.add_option( "--changesize", action="store_true", help= "resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.", default=False) parser.add_option( "--ratio", type="float", default=1.0, help= "The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round" ) parser.add_option( "--pw", action="store_true", help= "compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes", default=False) parser.add_option( "--wn", type="int", default=-1, help= "Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))" ) parser.add_option("--phase_flip", 
action="store_true", help="Phase flip the input stack", default=False)
    # NOTE(review): this chunk arrived with its original line breaks mangled
    # (many statements fused onto single physical lines; one help string was
    # split mid-literal).  It has been re-wrapped here WITHOUT changing any
    # code token.  The enclosing function's "def" line is outside this view;
    # a 4-space body indent is assumed -- confirm against the full file.

    # ---------------- remaining command-line option definitions ----------------
    parser.add_option(
        "--makedb",
        metavar="param1=value1:param2=value2",
        type="string",
        action="append",
        help="One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0"
    )
    parser.add_option(
        "--generate_projections",
        metavar="param1=value1:param2=value2",
        type="string",
        action="append",
        help="Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64."
    )
    parser.add_option(
        "--isacgroup",
        type="int",
        help="Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.",
        default=-1)
    # NOTE(review): the help string below was split across a mangled line
    # break in the source; rejoined here as a single literal -- confirm.
    parser.add_option(
        "--isacselect",
        action="store_true",
        help="Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.",
        default=False)
    parser.add_option(
        "--params",
        type="string",
        default=None,
        help="Name of header of parameter, which one depends on specific option")
    parser.add_option(
        "--adjpw",
        action="store_true",
        help="Adjust rotationally averaged power spectrum of an image",
        default=False)
    parser.add_option(
        "--rotpw",
        type="string",
        default=None,
        help="Name of the text file to contain rotationally averaged power spectrum of the input image.")
    parser.add_option(
        "--transformparams",
        type="string",
        default=None,
        help="Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz). Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters.")
    # import ctf estimates done using cter
    parser.add_option("--input", type="string", default=None, help="Input particles.")
    parser.add_option(
        "--importctf",
        type="string",
        default=None,
        help="Name of the file containing CTF parameters produced by sxcter.")
    parser.add_option(
        "--defocuserror",
        type="float",
        default=1000000.0,
        help="Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent. The error is computed as (std dev defocus)/defocus*100%")
    parser.add_option(
        "--astigmatismerror",
        type="float",
        default=360.0,
        help="Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees.")
    # import ctf estimates done using cter
    parser.add_option(
        "--scale",
        type="float",
        default=-1.0,
        help="Divide shifts in the input 3D orientation parameters text file by the scale factor."
    )
    # generate adaptive mask from an given 3-Db volue
    parser.add_option("--adaptive_mask",
                      action="store_true",
                      help="create adavptive 3-D mask from a given volume",
                      default=False)
    parser.add_option(
        "--nsigma",
        type="float",
        default=1.,
        help="number of times of sigma of the input volume to obtain the the large density cluster")
    parser.add_option(
        "--ndilation",
        type="int",
        default=3,
        help="number of times of dilation applied to the largest cluster of density")
    parser.add_option(
        "--kernel_size",
        type="int",
        default=11,
        help="convolution kernel for smoothing the edge of the mask")
    parser.add_option(
        "--gauss_standard_dev",
        type="int",
        default=9,
        help="stanadard deviation value to generate Gaussian edge")

    (options, args) = parser.parse_args()
    global_def.BATCH = True

    # ---------------- option dispatch: exactly one branch runs ----------------

    # --phase_flip: apply a binary ("phase flip") CTF filter to every image of
    # the input stack, using the 'ctf' attribute stored in each image header.
    if options.phase_flip:
        nargs = len(args)
        if nargs != 2:
            print "must provide name of input and output file!"
            return
        from EMAN2 import Processor
        instack = args[0]
        outstack = args[1]
        nima = EMUtil.get_image_count(instack)
        from filter import filt_ctf
        for i in xrange(nima):
            img = EMData()
            img.read_image(instack, i)
            # NOTE(review): bare except -- swallows any error, not just a
            # missing 'ctf' attribute.
            try:
                ctf = img.get_attr('ctf')
            except:
                print "no ctf information in input stack! Exiting..."
                return
            dopad = True
            sign = 1
            binary = 1  # phase flip
            assert img.get_ysize() > 1  # reject 1-D input
            # shadows the builtin 'dict'; kept as-is in this review pass
            dict = ctf.to_dict()
            dz = dict["defocus"]
            cs = dict["cs"]
            voltage = dict["voltage"]
            pixel_size = dict["apix"]
            b_factor = dict["bfactor"]
            ampcont = dict["ampcont"]
            dza = dict["dfdiff"]
            azz = dict["dfang"]
            # pad only real-space images before filtering
            if dopad and not img.is_complex():
                ip = 1
            else:
                ip = 0
            params = {
                "filter_type": Processor.fourier_filter_types.CTF_,
                "defocus": dz,
                "Cs": cs,
                "voltage": voltage,
                "Pixel_size": pixel_size,
                "B_factor": b_factor,
                "amp_contrast": ampcont,
                "dopad": ip,
                "binary": binary,
                "sign": sign,
                "dza": dza,
                "azz": azz
            }
            tmp = Processor.EMFourierFilter(img, params)
            # keep the original CTF in the output header
            tmp.set_attr_dict({"ctf": ctf})
            tmp.write_image(outstack, i)

    # --changesize: resample every image of the stack by factor options.ratio.
    elif options.changesize:
        nargs = len(args)
        if nargs != 2:
            ERROR("must provide name of input and output file!", "change size", 1)
            return
        from utilities import get_im
        instack = args[0]
        outstack = args[1]
        sub_rate = float(options.ratio)
        nima = EMUtil.get_image_count(instack)
        from fundamentals import resample
        for i in xrange(nima):
            resample(get_im(instack, i), sub_rate).write_image(outstack, i)

    # --isacgroup: read the "members" list of ISAC group average
    # #isacgroup in args[1], look up header attribute options.params of each
    # member image in args[0], and write the values to text file args[2].
    elif options.isacgroup > -1:
        nargs = len(args)
        if nargs != 3:
            ERROR("Three files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        instack = args[0]
        m = get_im(args[1], int(options.isacgroup)).get_attr("members")
        l = []
        for k in m:
            l.append(int(get_im(args[0], k).get_attr(options.params)))
        from utilities import write_text_file
        write_text_file(l, args[2])

    # --isacselect: concatenate the "members" lists of all averages in
    # args[0], sort, and write the image numbers to text file args[1].
    elif options.isacselect:
        nargs = len(args)
        if nargs != 2:
            ERROR("Two files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        nima = EMUtil.get_image_count(args[0])
        m = []
        for k in xrange(nima):
            m += get_im(args[0], k).get_attr("members")
        m.sort()
        from utilities import write_text_file
        write_text_file(m, args[1])

    # --pw: average 2-D periodogram (power spectrum) of all images in stack
    # args[0] (optional multiplicative mask args[2]), padded to window
    # options.wn; result written to args[1].
    elif options.pw:
        nargs = len(args)
        if nargs < 2:
            ERROR("must provide name of input and output file!", "pw", 1)
            return
        from utilities import get_im
        d = get_im(args[0])
        nx = d.get_xsize()
        ny = d.get_ysize()
        if nargs == 3:
            mask = get_im(args[2])
        wn = int(options.wn)
        if wn == -1:
            # default window: the larger image dimension
            wn = max(nx, ny)
        else:
            if ((wn < nx) or (wn < ny)):
                ERROR("window size cannot be smaller than the image size", "pw", 1)
        n = EMUtil.get_image_count(args[0])
        from utilities import model_blank, model_circle, pad
        from EMAN2 import periodogram
        p = model_blank(wn, wn)
        for i in xrange(n):
            d = get_im(args[0], i)
            if nargs == 3:
                d *= mask
            # subtract the mean before computing the periodogram
            st = Util.infomask(d, None, True)
            d -= st[0]
            p += periodogram(pad(d, wn, wn, 1, 0.))
        p /= n
        p.write_image(args[1])

    # --adjpw: rescale the rotational power spectrum of each image to a
    # target spectrum (text file or reference image args[1]); optional
    # hyperbolic-tangent low-pass (fl, aa) applied first.
    elif options.adjpw:
        if len(args) < 3:
            ERROR(
                "filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)",
                "adjpw", 1)
            return
        img_stack = args[0]
        from math import sqrt
        from fundamentals import rops_table, fft
        from utilities import read_text_file, get_im
        from filter import filt_tanl, filt_table
        # target spectrum: text file if args[1] ends in 'txt', else an image
        if (args[1][-3:] == 'txt'):
            rops_dst = read_text_file(args[1])
        else:
            rops_dst = rops_table(get_im(args[1]))
        out_stack = args[2]
        if (len(args) > 4):
            fl = float(args[3])
            aa = float(args[4])
        else:
            fl = -1.0  # <= 0.0 disables the low-pass filter below
            aa = 0.0
        nimage = EMUtil.get_image_count(img_stack)
        for i in xrange(nimage):
            img = fft(get_im(img_stack, i))
            rops_src = rops_table(img)
            assert len(rops_dst) == len(rops_src)
            # per-ring amplitude correction: sqrt of power ratio
            table = [0.0] * len(rops_dst)
            for j in xrange(len(rops_dst)):
                table[j] = sqrt(rops_dst[j] / rops_src[j])
            if (fl > 0.0):
                img = filt_tanl(img, fl, aa)
            img = fft(filt_table(img, table))
            img.write_image(out_stack, i)

    # --rotpw: write rotational power spectrum of image args[0], its log10,
    # and the ring index as three columns of text file options.rotpw.
    elif options.rotpw != None:
        if len(args) != 1:
            ERROR("Only one input permitted", "rotpw", 1)
            return
        from utilities import write_text_file, get_im
        from fundamentals import rops_table
        from math import log10
        t = rops_table(get_im(args[0]))
        x = range(len(t))
        r = [0.0] * len(x)
        for i in x:
            r[i] = log10(t[i])
        write_text_file([t, r, x], options.rotpw)

    # --transformparams: apply a six-parameter 3-D transform
    # (phi,theta,psi,sx,sy,sz) to a text file of projection orientation
    # parameters; missing trailing values default to 0.0.
    elif options.transformparams != None:
        if len(args) != 2:
            ERROR(
                "Please provide names of input and output files with orientation parameters",
                "transformparams", 1)
            return
        from utilities import read_text_row, write_text_row
        transf = [0.0] * 6
        spl = options.transformparams.split(',')
        for i in xrange(len(spl)):
            transf[i] = float(spl[i])
        write_text_row(rotate_shift_params(read_text_row(args[0]), transf), args[1])

    # --makedb: parse param1=value1:... pairs and store them as a dict under
    # key args[0] in the e2boxer gauss_box JSON cache.
    elif options.makedb != None:
        nargs = len(args)
        if nargs != 1:
            print "must provide exactly one argument denoting database key under which the input params will be stored"
            return
        dbkey = args[0]
        print "database key under which params will be stored: ", dbkey
        gbdb = js_open_dict("e2boxercache/gauss_box_DB.json")
        # reuse the processor-option parser by prefixing a dummy name
        parmstr = 'dummy:' + options.makedb[0]
        (processorname, param_dict) = parsemodopt(parmstr)
        dbdict = {}
        for pkey in param_dict:
            # these two flags are stored as real booleans, everything else
            # is stored as the parsed string/number
            if (pkey == 'invert_contrast') or (pkey == 'use_variance'):
                if param_dict[pkey] == 'True':
                    dbdict[pkey] = True
                else:
                    dbdict[pkey] = False
            else:
                dbdict[pkey] = param_dict[pkey]
        gbdb[dbkey] = dbdict

    # --generate_projections: generate noisy projections of the input
    # model(s) over an even angular grid (with optional CTF) and paste them
    # into five synthetic 4096x4096 micrographs; orientation parameters are
    # saved to params.txt.
    elif options.generate_projections:
        nargs = len(args)
        if nargs != 3:
            ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\
            "sxprocess - generate projections",1)
            return
        inpstr = args[0]
        outstk = args[1]
        micpref = args[2]
        parmstr = 'dummy:' + options.generate_projections[0]
        (processorname, param_dict) = parsemodopt(parmstr)
        parm_CTF = False
        parm_format = 'bdb'
        parm_apix = 2.5
        if 'CTF' in param_dict:
            if param_dict['CTF'] == 'True':
                parm_CTF = True
        if 'format' in param_dict:
            parm_format = param_dict['format']
        if 'apix' in param_dict:
            parm_apix = float(param_dict['apix'])
        boxsize = 64
        if 'boxsize' in param_dict:
            boxsize = int(param_dict['boxsize'])
        print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize
        # simulation defaults, each overridable via param_dict
        scale_mult = 2500
        sigma_add = 1.5
        sigma_proj = 30.0
        sigma2_proj = 17.5
        sigma_gauss = 0.3
        sigma_mic = 30.0
        sigma2_mic = 17.5
        sigma_gauss_mic = 0.3
        if 'scale_mult' in param_dict:
            scale_mult = float(param_dict['scale_mult'])
        if 'sigma_add' in param_dict:
            sigma_add = float(param_dict['sigma_add'])
        if 'sigma_proj' in param_dict:
            sigma_proj = float(param_dict['sigma_proj'])
        if 'sigma2_proj' in param_dict:
            sigma2_proj = float(param_dict['sigma2_proj'])
        if 'sigma_gauss' in param_dict:
            sigma_gauss = float(param_dict['sigma_gauss'])
        if 'sigma_mic' in param_dict:
            sigma_mic = float(param_dict['sigma_mic'])
        if 'sigma2_mic' in param_dict:
            sigma2_mic = float(param_dict['sigma2_mic'])
        if 'sigma_gauss_mic' in param_dict:
            sigma_gauss_mic = float(param_dict['sigma_gauss_mic'])
        from filter import filt_gaussl, filt_ctf
        from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank, pad, model_gauss_noise, set_params2D, set_params_proj
        from projection import prep_vol, prgs
        seed(14567)  # fixed seed => reproducible simulated data
        delta = 29
        angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S")
        nangle = len(angles)
        modelvol = []
        nvlms = EMUtil.get_image_count(inpstr)
        from utilities import get_im
        for k in xrange(nvlms):
            modelvol.append(get_im(inpstr, k))
        nx = modelvol[0].get_xsize()
        if nx != boxsize:
            ERROR("Requested box dimension does not match dimension of the input model.", \
            "sxprocess - generate projections",1)
        # nvol randomly perturbed copies of each model (Gaussian blob added
        # at a fixed off-center position with random width/amplitude)
        nvol = 10
        volfts = [[] for k in xrange(nvlms)]
        for k in xrange(nvlms):
            for i in xrange(nvol):
                sigma = sigma_add + random()  # 1.5-2.5
                addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma, sigma, 38, 38, 40)
                scale = scale_mult * (0.5 + random())
                vf, kb = prep_vol(modelvol[k] + scale * addon)
                volfts[k].append(vf)
        del vf, modelvol
        if parm_format == "bdb":
            stack_data = "bdb:" + outstk
            delete_bdb(stack_data)
        else:
            stack_data = outstk + ".hdf"
        Cs = 2.0
        pixel = parm_apix
        voltage = 120.0
        ampcont = 10.0
        ibd = 4096 / 2 - boxsize
        iprj = 0
        width = 240
        xstart = 8 + boxsize / 2
        ystart = 8 + boxsize / 2
        rowlen = 17
        from random import randint
        params = []
        # one micrograph per defocus value 1.5 ... 3.5 um
        for idef in xrange(3, 8):
            irow = 0
            icol = 0
            mic = model_blank(4096, 4096)
            defocus = idef * 0.5  #0.2
            if parm_CTF:
                astampl = defocus * 0.15
                astangl = 50.0
                ctf = generate_ctf([
                    defocus, Cs, voltage, pixel, ampcont, 0.0, astampl, astangl
                ])
            for i in xrange(nangle):
                for k in xrange(12):
                    # jitter each grid orientation and shift slightly
                    dphi = 8.0 * (random() - 0.5)
                    dtht = 8.0 * (random() - 0.5)
                    psi = 360.0 * random()
                    phi = angles[i][0] + dphi
                    tht = angles[i][1] + dtht
                    s2x = 4.0 * (random() - 0.5)
                    s2y = 4.0 * (random() - 0.5)
                    params.append([phi, tht, psi, s2x, s2y])
                    ivol = iprj % nvol
                    #imgsrc = randint(0,nvlms-1)
                    imgsrc = iprj % nvlms
                    proj = prgs(volfts[imgsrc][ivol], kb, [phi, tht, psi, -s2x, -s2y])
                    # paste the clean projection into the micrograph grid
                    x = xstart + irow * width
                    y = ystart + icol * width
                    mic += pad(proj, 4096, 4096, 1, 0.0, x - 2048, y - 2048, 0)
                    proj = proj + model_gauss_noise(sigma_proj, nx, nx)
                    if parm_CTF:
                        proj = filt_ctf(proj, ctf)
                        proj.set_attr_dict({"ctf": ctf, "ctf_applied": 0})
                    # colored noise on top of the white noise
                    proj = proj + filt_gaussl(
                        model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss)
                    proj.set_attr("origimgsrc", imgsrc)
                    proj.set_attr("test_id", iprj)
                    # flags describing the status of the image (1 = true, 0 = false)
                    set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0])
                    set_params_proj(proj, [phi, tht, psi, s2x, s2y])
                    proj.write_image(stack_data, iprj)
                    icol += 1
                    if icol == rowlen:
                        icol = 0
                        irow += 1
                    iprj += 1
            mic += model_gauss_noise(sigma_mic, 4096, 4096)
            if parm_CTF:
                #apply CTF
                mic = filt_ctf(mic, ctf)
            mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096), sigma_gauss_mic)
            mic.write_image(micpref + "%1d.hdf" % (idef - 3), 0)
        drop_spider_doc("params.txt", params)

    # --importctf: match sxcter CTF estimates to bdb particle stacks by
    # source-micrograph name, reject entries with large defocus error, zero
    # the astigmatism of entries with large angular error, then build virtual
    # stacks (e2bdb.py) and import the CTF values (sxheader.py) via subprocess.
    elif options.importctf != None:
        print ' IMPORTCTF '
        from utilities import read_text_row, write_text_row
        from random import randint
        import subprocess
        # temporary scratch files with a random suffix; removed at the end
        grpfile = 'groupid%04d' % randint(1000, 9999)
        ctfpfile = 'ctfpfile%04d' % randint(1000, 9999)
        cterr = [options.defocuserror / 100.0, options.astigmatismerror]
        ctfs = read_text_row(options.importctf)
        for kk in xrange(len(ctfs)):
            # keep only the micrograph base name (strip path and extension)
            root, name = os.path.split(ctfs[kk][-1])
            ctfs[kk][-1] = name[:-4]
        if (options.input[:4] != 'bdb:'):
            ERROR('Sorry, only bdb files implemented', 'importctf', 1)
        d = options.input[4:]
        #try: str = d.index('*')
        #except: str = -1
        from string import split
        import glob
        uu = os.path.split(d)
        uu = os.path.join(uu[0], 'EMAN2DB', uu[1] + '.bdb')
        flist = glob.glob(uu)
        for i in xrange(len(flist)):
            root, name = os.path.split(flist[i])
            root = root[:-7]  # strip the 'EMAN2DB' directory component
            name = name[:-4]  # strip the '.bdb' extension
            fil = 'bdb:' + os.path.join(root, name)
            sourcemic = EMUtil.get_all_attributes(fil, 'ptcl_source_image')
            nn = len(sourcemic)
            gctfp = []
            groupid = []
            for kk in xrange(nn):
                junk, name2 = os.path.split(sourcemic[kk])
                name2 = name2[:-4]
                ctfp = [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                # NOTE(review): nesting of the accept/zero-astigmatism logic
                # below was reconstructed from mangled source -- verify.
                for ll in xrange(len(ctfs)):
                    if (name2 == ctfs[ll][-1]):
                        # found correct
                        if (ctfs[ll][8] / ctfs[ll][0] <= cterr[0]):
                            # acceptable defocus error
                            ctfp = ctfs[ll][:8]
                            if (ctfs[ll][10] > cterr[1]):
                                # error of astigmatism exceed the threshold, set astigmatism to zero.
                                ctfp[6] = 0.0
                                ctfp[7] = 0.0
                            gctfp.append(ctfp)
                            groupid.append(kk)
                        break
            if (len(groupid) > 0):
                write_text_row(groupid, grpfile)
                write_text_row(gctfp, ctfpfile)
                # NOTE(review): shell=True with string-built commands; paths
                # containing spaces or shell metacharacters would break this.
                cmd = "{} {} {} {}".format(
                    'e2bdb.py', fil, '--makevstack=bdb:' + root + 'G' + name,
                    '--list=' + grpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
                cmd = "{} {} {} {}".format('sxheader.py',
                                           'bdb:' + root + 'G' + name,
                                           '--params=ctf',
                                           '--import=' + ctfpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
            else:
                print ' >>> Group ', name, ' skipped.'
        cmd = "{} {} {}".format("rm -f", grpfile, ctfpfile)
        subprocess.call(cmd, shell=True)

    # --scale: divide the shift columns (indices 3 and 4) of an orientation
    # parameters text file by the scale factor.
    elif options.scale > 0.0:
        from utilities import read_text_row, write_text_row
        scale = options.scale
        nargs = len(args)
        if nargs != 2:
            print "Please provide names of input and output file!"
            return
        p = read_text_row(args[0])
        for i in xrange(len(p)):
            p[i][3] /= scale
            p[i][4] /= scale
        write_text_row(p, args[1])

    # --adaptive_mask: build a soft-edged 3-D mask from volume args[0];
    # output name is args[1] if given, otherwise derived from the input name.
    elif options.adaptive_mask:
        from utilities import get_im
        from morphology import adaptive_mask
        nsigma = options.nsigma
        ndilation = options.ndilation
        kernel_size = options.kernel_size
        gauss_standard_dev = options.gauss_standard_dev
        nargs = len(args)
        if nargs > 2:
            print "Too many inputs are given, try again!"
            return
        else:
            inputvol = get_im(args[0])
            input_path, input_file_name = os.path.split(args[0])
            input_file_name_root, ext = os.path.splitext(input_file_name)
            if nargs == 2:
                mask_file_name = args[1]
            else:
                mask_file_name = "adaptive_mask_for" + input_file_name_root + ".hdf"  # Only hdf file is output.
            mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size, gauss_standard_dev)
            mask3d.write_image(mask_file_name)

    else:
        ERROR("Please provide option name", "sxprocess.py", 1)