Exemplo n.º 1
0
def main():
	"""Entry point for e2spt_binarytree.

	Builds an initial subtomogram average by pairwise binary-tree averaging of
	the largest power-of-2 subset of --input, then hands off to binaryTreeRef().
	Relies on EMAN2 names (EMArgumentParser, EMANVERSION, EMData, EMUtil,
	E2init/E2end, floor, log) presumably provided by a wildcard import at the
	top of the file — not visible in this chunk.
	"""
	progname = os.path.basename(sys.argv[0])
	usage = """prog <output> [options]
	Program to build an initial subtomogram average by averaging pairs from the largest subset
	in --input that is a power of 2. For example, if you supply an input stack with 100 subtomograms,
	this program will build an initial reference using 64, since 64 is the largest power of 2 contained in 100.
	In the first iteration, particle 1 will be averaged with 2, 3 with 4, 5 with 6... etc.
	32 new averages (each an average of 2 subtomograms) will be used for the second iteration.
	Again, 1 will be averaged with 2, 3 with 4, etc... yielding 16 new averages.
	The algorithm continues until the entire subset (64) has been merged into 1 average.
	
	This program imports 'preprocfunc' from e2spt_preproc.py and 'alignment' from e2spt_classaverage.py
	
	--mask=mask.sharp:outer_radius=<safe radius>
	--preprocess=filter.lowpass.gauss:cutoff_freq=<1/resolution in A>
	"""
			
	parser = EMArgumentParser(usage=usage,version=EMANVERSION)
	
	parser.add_header(name="sptbtheader", help="""Options below this label are specific to 
		sptbinarytree""", title="### sptbinarytree options ###", row=6, col=0, rowspan=1, colspan=3,mode="align")
	
	parser.add_header(name="caheader", help="""Options below this label are specific to sptclassaverage""", title="### sptclassaverage options ###", row=3, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--path",type=str,default='spt',help="""Default=spt. Directory to store results in. The default is a numbered series of directories containing the prefix 'spt'; for example, spt_02 will be the directory by default if 'spt_01' already exists.""")
	
	parser.add_argument("--input", type=str, default='',help="""Default=None. The name of the input volume stack. MUST be HDF since volume stack support is required.""", guitype='filebox', browser='EMSubTomosTable(withmodal=True,multiselect=False)', row=0, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--npeakstorefine", type=int, help="""Default=1. The number of best coarse alignments to refine in search of the best final alignment. Default=1.""", default=4, guitype='intbox', row=9, col=0, rowspan=1, colspan=1, nosharedb=True, mode='alignment,breaksym[1]')

	parser.add_argument("--parallel",default="thread:1",help="""default=thread:1. Parallelism. See http://blake.bcm.edu/emanwiki/EMAN2/Parallel""", guitype='strbox', row=19, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--ppid", type=int, help="""Default=-1. Set the PID of the parent process, used for cross platform PPID""",default=-1)
	
	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="""Default=0. Verbose level [0-9], higner number means higher level of verboseness""")
		
	#parser.add_argument("--resume",type=str,default='',help="""(Not working currently). tomo_fxorms.json file that contains alignment information for the particles in the set. If the information is incomplete (i.e., there are less elements in the file than particles in the stack), on the first iteration the program will complete the file by working ONLY on particle indexes that are missing. For subsequent iterations, all the particles will be used.""")
															
	parser.add_argument("--plots", action='store_true', default=False,help="""Default=False. Turn this option on to generatea plot of the ccc scores during each iteration. Running on a cluster or via ssh remotely might not support plotting.""")

	parser.add_argument("--subset",type=int,default=0,help="""Default=0 (not used). Refine only this substet of particles from the stack provided through --input""")
	
	parser.add_argument("--preavgproc1",type=str,default='',help="""Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highphass filter if you have phaseplate data.)""")
	
	parser.add_argument("--preavgproc2",type=str,default='',help="""Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highphass filter if you have phaseplate data.)""")

	parser.add_argument("--weighbytiltaxis",type=str,default='',help="""Default=None. A,B, where A is an integer number and B a decimal. A represents the location of the tilt axis in the tomogram in pixels (eg.g, for a 4096x4096xZ tomogram, this value should be 2048), and B is the weight of the particles furthest from the tomogram. For example, --weighbytiltaxis=2048,0.5 means that praticles at the tilt axis (with an x coordinate of 2048) will have a weight of 1.0 during averaging, while the distance in the x coordinates of particles not-on the tilt axis will be used to weigh their contribution to the average, with particles at the edge(0+radius or 4096-radius) weighing 0.5, as specified by the value provided for B.""")
	
	parser.add_argument("--weighbyscore",action='store_true',default=False,help="""Default=False. This option will weigh the contribution of each subtomogram to the average by score/bestscore.""")
	
	parser.add_argument("--align",type=str,default="rotate_translate_3d:search=8:delta=12:dphi=12",help="""This is the aligner used to align particles to the previous class average. Default is rotate_translate_3d:search=8:delta=12:dphi=12, specify 'None' (with capital N) to disable.""", returnNone=True,guitype='comboparambox', choicelist='re_filter_list(dump_aligners_list(),\'3d\')', row=12, col=0, rowspan=1, colspan=3, nosharedb=True, mode="alignment,breaksym['rotate_symmetry_3d']")
	
	parser.add_argument("--aligncmp",type=str,default="ccc.tomo.thresh",help="""Default=ccc.tomo.thresh. The comparator used for the --align aligner. Do not specify unless you need to use anotherspecific aligner.""",guitype='comboparambox',choicelist='re_filter_list(dump_cmps_list(),\'tomo\')', row=13, col=0, rowspan=1, colspan=3,mode="alignment,breaksym")
	
	
	#parser.add_argument("--output", type=str, default='avg.hdf', help="""Default=avg.hdf. The name of the output class-average stack. MUST be HDF since volume stack support is required.""", guitype='strbox', row=2, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	#parser.add_argument("--classmx", type=str, default='', help="""Default=None. The name of the classification matrix specifying how particles in 'input' should be grouped. If omitted, all particles will be averaged.""")
	
	#parser.add_argument("--ref", type=str, default='', help="""Default=None. Reference image(s). Used as an initial alignment reference and for final orientation adjustment if present. This is typically the projections that were used for classification.""", guitype='filebox', browser='EMBrowserWidget(withmodal=True,multiselect=True)', filecheck=False, row=1, col=0, rowspan=1, colspan=3, mode='alignment')
	
	#parser.add_argument("--refpreprocess",action="store_true",default=False,help="""Default=False. This will preprocess the reference identically to the particles. It is off by default, but it is internally turned on when no reference is supplied.""")
	
	#parser.add_argument("--resultmx",type=str,default=None,help="""Default=Npone. Specify an output image to store the result matrix. This is in the same format as the classification matrix. http://blake.bcm.edu/emanwiki/EMAN2/ClassmxFiles""")
	
	#parser.add_argument("--refinemultireftag", type=str, default='', help="""Default=''. DO NOT USE THIS PARAMETER. It is passed on from e2spt_refinemulti.py if needed.""")

	'''
	ADVANCED parameters
	
	'''
	parser.add_argument("--averager",type=str,default="mean.tomo",help="""Default=mean.tomo. The type of averager used to produce the class average. Default=mean.tomo.""")

	'''
	PRE-FFT processing parameters
	'''
	
	#parser.add_argument("--nopreprocprefft",action="store_true",default=False,help="""Turns off all preprocessing that happens only once before alignment (--normproc, --mask, --maskfile, --clipali, --threshold; i.e., all preprocessing excepting filters --highpass, --lowpass, --preprocess, and --shrink.""")

	parser.add_argument("--shrink", type=int,default=1,help="""Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for coarse alignment.""", guitype='shrinkbox', row=5, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')
	
	parser.add_argument("--shrinkfine", type=int,default=1,help="""Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for refine alignment.""", guitype='intbox', row=5, col=2, rowspan=1, colspan=1, mode='alignment')
	
	parser.add_argument("--threshold",type=str,default='',help="""Default=None. A threshold applied to the subvolumes after normalization. For example, --threshold=threshold.belowtozero:minval=0 makes all negative pixels equal 0, so that they do not contribute to the correlation score.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=10, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--mask",type=str,default='', help="""Default=None. Masking processor applied to particles before alignment. IF using --clipali, make sure to express outer mask radii as negative pixels from the edge.""", returnNone=True, guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'mask\')', row=11, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--maskfile",type=str,default='',help="""Default=None. Mask file (3D IMAGE) applied to particles before alignment. Must be in HDF format. Default is None.""")
	
	parser.add_argument("--normproc",type=str, default='',help="""Default=None (see 'e2help.py processors -v 10' at the command line). Normalization processor applied to particles before alignment. If normalize.mask is used, results of the mask option will be passed in automatically. If you want to turn this option off specify \'None\'""")
	
	parser.add_argument("--clipali",type=int,default=0,help="""Default=0 (which means it's not used). Boxsize to clip particles as part of preprocessing to speed up alignment. For example, the boxsize of the particles might be 100 pixels, but the particles are only 50 pixels in diameter. Aliasing effects are not always as deleterious for all specimens, and sometimes 2x padding isn't necessary; still, there are some benefits from 'oversampling' the data during averaging; so you might still want an average of size 2x, but perhaps particles in a box of 1.5x are sufficiently good for alignment. In this case, you would supply --clipali=75""")

	
	'''
	POST-FFT filtering parameters
	'''
	parser.add_argument("--preprocess",type=str,default='',help="""Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=10, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--preprocessfine",type=str,default='',help="""Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")
	
	parser.add_argument("--lowpass",type=str,default='',help="""Default=None. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=17, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--lowpassfine",type=str,default='',help="""Default=None. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")

	parser.add_argument("--highpass",type=str,default='',help="""Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=18, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--highpassfine",type=str,default='',help="""Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")

	parser.add_argument("--matchimgs",action='store_true',default=False,help="""Default=False. Applies filter.matchto to one image so that it matches the other's spectral profile during preprocessing for pair-wise alignment purposes.""")
	
	parser.add_argument("--filterbyfsc",action='store_true',default=False,help="""Default=False. If on, this parameter will use dynamic FSC filtering. --lowpass will be used to build initial references if no --ref supplied, then, the FSC between the even and odd initial references will be used to filter the data during preprocessing. If --ref is supplied, --lowpass will be used during the first iteration to align the particles against the reference. Thereafter, the FSC between the most current particle average and the original reference (--ref) will be used in the next iteration.""")

	
	'''
	OTHER ADVANCED parameters
	'''
	parser.add_argument("--radius", type=float, default=0, help="""Default=0 (which means it's not used by default). Hydrodynamic radius of the particle in Angstroms. This will be used to automatically calculate the angular steps to use in search of the best alignment. Make sure the apix is correct on the particles' headers, sine the radius will be converted from Angstroms to pixels. Then, the fine angular step is equal to 360/(2*pi*radius), and the coarse angular step 4 times that.""")
	
	parser.add_argument("--precision",type=float,default=1.0,help="""Default=1.0. Precision in pixels to use when figuring out alignment parameters automatically using --radius. Precision would be the number of pixels that the the edge of the specimen is moved (rotationally) during the finest sampling, --falign. If precision is 1, then the precision of alignment will be that of the sampling (apix of your images) times the --shrinkfine factor specified.""")
	
	parser.add_argument("--search", type=int,default=8,help=""""Default=8. During COARSE alignment translational search in X, Y and Z, in pixels. Default=8. This WILL overwrite any search: provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
	
	parser.add_argument("--searchfine", type=int,default=2,help=""""Default=2. During FINE alignment translational search in X, Y and Z, in pixels. Default=2. This WILL overwrite any search: provided through --falign, EXCEPT if you provide --searchfine=2, which is the default. In general, just avoid providing search twice (through here and through the fine aligner --falign). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
	
	#parser.add_argument("--donotaverage",action="store_true", help="""If e2spt_refinemulti.py is calling e2spt_classaverage.py, the latter need not average any particles, but rather only yield the alignment results.""", default=False)
	
	parser.add_argument("--iterstop", type=int, default=0, help="""Default=0. (Not used). The program is called to convergence by default (all particles merge into one final average). To stop at an intermediate iteration, provide this parameter. For example, --iterstop=1, will only allow the algorithm to complete 1 iteration; --iterstop=2 will allow it to go through 2, etc.""")
	
	parser.add_argument("--savesteps",action="store_true", default=False, help="""Default=False. If set, will save the average after each iteration to class_#.hdf. Each class in a separate file. Appends to existing files.""", guitype='boolbox', row=4, col=0, rowspan=1, colspan=1, mode='alignment,breaksym')
	
	parser.add_argument("--saveali",action="store_true", default=False, help="""Default=False. If set, will save the aligned particle volumes in class_ptcl.hdf. Overwrites existing file.""", guitype='boolbox', row=4, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')
	
	parser.add_argument("--saveallalign",action="store_true", default=False, help="""Default=False. If set, will save the alignment parameters after each iteration""", guitype='boolbox', row=4, col=2, rowspan=1, colspan=1, mode='alignment,breaksym')
	
	parser.add_argument("--sym", dest = "sym", default='', help = """Default=None (equivalent to c1). Symmetry to impose -choices are: c<n>, d<n>, h<n>, tet, oct, icos""", guitype='symbox', row=9, col=1, rowspan=1, colspan=2, mode='alignment,breaksym')
	
	parser.add_argument("--postprocess",type=str,default='',help="""A processor to be applied to the FINAL volume after averaging the raw volumes in their FINAL orientations, after all iterations are done.""",guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=16, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
	
	parser.add_argument("--procfinelikecoarse",action='store_true',default=False,help="""If you supply this parameters, particles for fine alignment will be preprocessed identically to particles for coarse alignment by default. If you supply this, but want specific parameters for preprocessing particles for also supply: fine alignment, nd supply fine alignment parameters, such as --lowpassfine, --highpassfine, etc; to preprocess the particles for FINE alignment differently than for COARSE alignment.""")
	
	parser.add_argument("--falign",type=str,default='',help="""Default=None. This is the second stage aligner used to fine-tune the first alignment.""", returnNone=True, guitype='comboparambox', choicelist='re_filter_list(dump_aligners_list(),\'refine.*3d\')', row=14, col=0, rowspan=1, colspan=3, nosharedb=True, mode='alignment,breaksym[None]')
		
	parser.add_argument("--faligncmp",type=str,default="ccc.tomo.thresh",help="""Default=ccc.tomo.thresh. The comparator used by the second stage aligner.""", guitype='comboparambox', choicelist='re_filter_list(dump_cmps_list(),\'tomo\')', row=15, col=0, rowspan=1, colspan=3,mode="alignment,breaksym")		
	
	#parser.add_argument("--nopreprocprefft",action="store_true",default=False,help="""Turns off all preprocessing that happens only once before alignment (--normproc, --mask, --maskfile, --clipali, --threshold; i.e., all preprocessing excepting filters --highpass, --lowpass, --preprocess, and --shrink.""")
	
	#parser.add_argument("--keep",type=float,default=1.0,help="""Default=1.0 (all particles kept). The fraction of particles to keep in each class.""", guitype='floatbox', row=6, col=0, rowspan=1, colspan=1, mode='alignment,breaksym')
	
	#parser.add_argument("--keepsig", action="store_true", default=False,help="""Default=False. Causes the keep argument to be interpreted in standard deviations.""", guitype='boolbox', row=6, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')

	#parser.add_argument("--inixforms",type=str,default="",help="""Default=None. .json file containing a dict of transforms to apply to 'pre-align' the particles.""", guitype='dirbox', dirbasename='spt_|sptsym_', row=7, col=0,rowspan=1, colspan=2, nosharedb=True, mode='breaksym')
	
	parser.add_argument("--breaksym",action="store_true", default=False,help="""Default=False. Break symmetry. Do not apply symmetrization after averaging, even if searching the asymmetric unit provided through --sym only for alignment. Default=False""", guitype='boolbox', row=7, col=2, rowspan=1, colspan=1, nosharedb=True, mode=',breaksym[True]')
	
	#parser.add_argument("--groups",type=int,default=0,help="""Default=0 (not used; data not split). This parameter will split the data into a user defined number of groups. For purposes of gold-standard FSC computation later, select --group=2.""")
		
	parser.add_argument("--randomizewedge",action="store_true",  default=False,help="""Default=False. This parameter is EXPERIMENTAL. It randomizes the position of the particles BEFORE alignment, to minimize missing wedge bias and artifacts during symmetric alignment where only a fraction of space is scanned""")
	
	#parser.add_argument("--savepreproc",action="store_true",  default=False,help="""Default=False. Will save stacks of preprocessed particles (one for coarse alignment and one for fine alignment if preprocessing options are different).""")
	
	parser.add_argument("--autocenter",type=str, default='',help="""Default=None. Autocenters each averaged pair during initial average generation with --btref and --hacref. Will also autocenter the average of all particles after each iteration of iterative refinement. Options are --autocenter=xform.centerofmass (self descriptive), or --autocenter=xform.centeracf, which applies auto-convolution on the average.""")
	
	parser.add_argument("--autocentermask",type=str, default='',help="""Default=None. Masking processor to apply before autocentering. See 'e2help.py processors -v 10' at the command line.""")
	
	parser.add_argument("--autocenterpreprocess",action='store_true', default=False,help="""Default=False. This will apply a highpass filter at a frequency of half the box size times the apix, shrink by 2, and apply a low pass filter at half nyquist frequency to any computed average for autocentering purposes if --autocenter is provided. Default=False.""")
	
	parser.add_argument("--tweak",action='store_true',default=False,help="""WARNING: BUGGY. This will perform a final alignment with no downsampling [without using --shrink or --shrinkfine] if --shrinkfine > 1.""")

	'''
	BT SPECIFIC PARAMETERS
	'''
	
	parser.add_argument("--nseedlimit",type=int,default=0,help="""Maximum number of particles
		to use. For example, if you supply a stack with 150 subtomograms, the program will
		automatically select 128 as the limit to use because it's the largest power of 2 that is
		smaller than 150. But if you provide, say --nseedlimit=100, then the number of particles
		used will be 64, because it's the largest power of 2 that is still smaller than 100.""")
	
	(options, args) = parser.parse_args()
	
	# Pre-FFT preprocessing is always on for this program.
	options.nopreprocprefft = False
	
	# A finer shrink factor than the coarse one is contradictory; clamp coarse to fine.
	if options.shrink < options.shrinkfine:
		options.shrink = options.shrinkfine
		print("\n(e2spt_binarytree)(main) it makes no sense for shrinkfine to be larger than shrink; therefore, shrink will be made to match shrinkfine")
	
	from e2spt_classaverage import checksaneimagesize	
	checksaneimagesize( options, options.input )
	
	'''
	Make the directory where to create the database where the results will be stored
	'''
	from e2spt_classaverage import sptmakepath
	options = sptmakepath(options,'spt_bt')
	
	# Make options.path absolute so later 'mv'/output paths resolve regardless of cwd.
	rootpath = os.getcwd()
	if rootpath not in options.path:
		options.path = rootpath + '/' + options.path
	
	if not options.input:
		parser.print_help()
		exit(0)
	elif options.subset:
		# Extract the first --subset particles into a new stack and use that as input.
		subsetStack = options.path + '/subset' + str( options.subset ).zfill( len( str( options.subset))) + '.hdf' 
		print("\nSubset to be written to %s" % subsetStack)
		
		subsetcmd = 'e2proc3d.py ' + options.input + ' ' + subsetStack + ' --first=0 --last=' + str(options.subset-1) 
		print("Subset cmd is %s" % subsetcmd)
		
		p=subprocess.Popen( subsetcmd, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE )
		text=p.communicate()	# communicate() waits for completion and drains the pipes
		p.stdout.close()
		
		options.input = subsetStack
		
	from e2spt_classaverage import sptParseAligner
	options = sptParseAligner( options )

	'''
	If --radius of the particle is provided, we calculate the optimal alignment steps for 
	coarse and fine alignment rounds using --shrink and --shrinkfine options and apix info
	'''
	
	if options.radius:
		from e2spt_classaverage import calcAliStep
		options = calcAliStep(options)
	
	'''
	Parse parameters such that "None" or "none" are adequately interpreted to turn of an option
	'''
	
	from e2spt_classaverage import sptOptionsParser
	options = sptOptionsParser( options )
	
	from e2spt_classaverage import writeParameters
	writeParameters(options,'e2spt_binarytree.py', 'bt')
	
	# Read only the header of the first particle to validate geometry.
	hdr = EMData(options.input,0,True)
	nx = hdr["nx"]
	ny = hdr["ny"]
	nz = hdr["nz"]
	if nx!=ny or ny!=nz :
		print("ERROR, input volumes are not cubes")
		sys.exit(1)
		
	logger = E2init(sys.argv, options.ppid)
	
	'''
	Initialize parallelism if being used
	'''
	
	if options.parallel :
	
		if options.parallel == 'none' or options.parallel == 'None' or options.parallel == 'NONE':
			options.parallel = ''
			etc = ''
		
		else:
			print("\n\n(e2spt_classaverage.py) INITIALIZING PARALLELISM!")
			print("\n\n")

			from EMAN2PAR import EMTaskCustomer
			etc=EMTaskCustomer(options.parallel)

			pclist=[options.input]

			etc.precache(pclist)
		
	else:
		etc=''
	
	# Keep a handle on the unprocessed stack; downstream code may preprocess copies.
	options.raw = options.input
	
	"""
	if 'tree' in options.align:
		options.falign = None
		options.mask = None
		options.lowpass = None
		options.highpass = None
		options.normproc = None
		options.lowpassfine = None
		options.highpassfine = None
		options.preprocess = None
		options.preprocessfine = None

	else:
		from e2spt_classaverage import cmdpreproc
		cmpreproc( options.input, options, False )
	"""
	
	nptcl=EMUtil.get_image_count(options.input)
	# BUGFIX: pairwise averaging needs at least 2 particles; the old check
	# was 'nptcl < 1', contradicting the error message below.
	if nptcl < 2: 
		print("ERROR : at least 2 particles required in input stack")
		sys.exit(1)
	
	ptclnums=range(nptcl)
	nptclForRef = len(ptclnums)
	
	nseed=2**int(floor(log(len(ptclnums),2)))	# we stick with powers of 2 for this to make the tree easier to collapse
	
	if options.nseedlimit:
		nseed=2**int(floor(log( options.nseedlimit , 2)))
		
	#binaryTreeRef(options,nptclForRef,nseed,-1,etc)

	binaryTreeRef(options,nptclForRef,nseed,etc)
		
	print("Will end logger")
	E2end(logger)
	
	print("logger ended")
	sys.stdout.flush()
	
	return
Exemplo n.º 2
0
def main():
    """Entry point for e2spt_test.

    Generates a tiny synthetic dataset, then runs a battery of spt programs
    (simulation, binary tree, HAC, symmetry search, class averaging in several
    reference/gold-standard configurations) in quick testing mode so crashes
    can be identified easily. Relies on EMAN2 names (EMArgumentParser,
    E2init/E2end, test_image_3d, Transform, runcmd) presumably provided by
    imports at the top of the file — not visible in this chunk.
    """
    progname = os.path.basename(sys.argv[0])
    usage = """prog [options]

	This program runs different spt programs quickly, in testing mode, such that crashes
	can be identified more easily.
	"""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)

    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        default=0,
        help=
        "verbose level [0-9], higher number means higher level of verboseness")

    parser.add_argument(
        "--testn",
        type=int,
        default=6,
        help=
        """default=6. size of dataset to run tests with; cannot be < 6, since initial model generation with HAC for gold-standard refinement requires at least 3 particles for the even set and 3 for the odd set."""
    )

    parser.add_argument(
        "--path",
        type=str,
        default='spttests',
        help=
        """Default=spttests. Directory to store results in. The default is a numbered series of directories containing the prefix 'spttests'; for example, spttests_02 will be the directory by default if 'spttests_01' already exists."""
    )

    parser.add_argument(
        "--parallel",
        type=str,
        default='',
        help=
        """the program will detect the number of cores available and use threaded parallelism by default. To use only one core, supply --parallel=thread:1. For MPI on clusters, see parallelism at http://blake.bcm.edu/emanwiki/EMAN2/Parallel"""
    )

    #parser.add_argument("--testsim",action='store_true',default=False,help="""default=False. If supplied, this option will test e2spt_simulation.py as well and use the generated simulated particles for subsequent tests, opposed to random volumes that do not have a missing wedge, noise or CTF.""")

    parser.add_argument(
        "--ppid",
        type=int,
        help="Set the PID of the parent process, used for cross platform PPID",
        default=-1)

    (options, args) = parser.parse_args()

    logger = E2init(sys.argv, options.ppid)

    # Default to one thread per available core when --parallel is not supplied.
    if not options.parallel:
        import multiprocessing
        nparallel = multiprocessing.cpu_count()
        options.parallel = 'thread:' + str(nparallel)
        print("\nfound %d cores" % (nparallel))
        print("setting --parallel to", options.parallel)

    if options.testn < 6:
        print("\nERROR: --testn must be > 5.")
        sys.exit()
    '''
	Make the directory where to create the database where the results will be stored
	'''
    from e2spt_classaverage import sptmakepath
    options = sptmakepath(options, 'spt_bt')

    from e2spt_classaverage import writeParameters
    writeParameters(options, 'e2spt_test.py', 'spttests')

    # Build a small synthetic stack: the first image is the unrotated reference,
    # the rest are randomly rotated/translated copies with the transform recorded
    # in the 'spt_randT' header field.
    for i in range(options.testn):
        a = test_image_3d()
        t = Transform()
        if i > 0:
            az = random.randint(0, 360)
            alt = random.randint(0, 180)
            phi = random.randint(0, 360)
            tx = random.randint(-5, 5)
            ty = random.randint(-5, 5)
            tz = random.randint(-5, 5)
            t = Transform({
                'type': 'eman',
                'tx': tx,
                'ty': ty,
                'tz': tz,
                'alt': alt,
                'az': az,
                'phi': phi
            })
            a.transform(t)

        a.process_inplace('math.meanshrink', {'n': 4})
        a['spt_randT'] = t
        a.write_image(options.path + '/testimgs.hdf', -1)

        if i == 0:
            a.write_image(options.path + '/testimg_ref.hdf', 0)

    cmds = []

    rootpath = os.getcwd()

    os.system('touch ' + options.path + '/output.txt')

    # Simulate subtomograms from the reference; subsequent tests consume them.
    simcmd = 'e2spt_simulation.py --input=' + options.path + '/testimg_ref.hdf --nptcls ' + str(
        options.testn
    ) + ' --tiltrange 60 --nslices 25 --saveprjs --applyctf --snr 2' + ' --parallel=' + options.parallel + ' --path testsim'
    if options.verbose:
        simcmd += ' --verbose ' + str(options.verbose)

    cmds.append(simcmd)
    cmds.append('mv testsim ' + options.path)

    # NOTE: renamed from 'input' to avoid shadowing the builtin.
    inputstack = rootpath + '/' + options.path + '/testsim/simptcls.hdf'

    # Alignment options shared by every alignment-based test command.
    aliopts = ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3'
    destdir = rootpath + '/' + options.path + '/'

    cmds.append('e2spt_binarytree.py --input ' + inputstack + aliopts +
                ' --parallel=' + options.parallel + ' --path testbt')
    cmds.append('mv testbt ' + destdir)

    cmds.append('e2spt_hac.py  --input ' + inputstack + aliopts +
                ' --parallel=' + options.parallel + ' --path testhac')
    cmds.append('mv testhac ' + destdir)

    cmds.append('e2symsearch3d.py  --input ' + inputstack +
                ' --sym icos --steps 2 --parallel=' + options.parallel +
                ' --path testssa')
    cmds.append('mv testssa ' + destdir)

    # e2spt_classaverage.py variants: (output directory, extra flags).
    # Covers gold-standard on/off crossed with default/bt/ssa/hac references.
    sptvariants = [
        ('testsptdefaultgoldoff', ' --goldstandardoff'),
        ('testsptrefbtgoldoff', ' --goldstandardoff --btref 2'),
        ('testsptrefssagoldoff', ' --goldstandardoff --ssaref 2'),
        ('testsptrefhacgoldoff', ' --goldstandardoff --hacref 3'),
        ('testsptdefaultgoldon', ''),
        ('testsptrefbtgoldon', ' --btref 4'),
        ('testsptrefssagoldon', ' --ssaref 4'),
        ('testsptrefhacgoldon', ' --hacref 6'),
    ]
    for dirname, extra in sptvariants:
        cmds.append('e2spt_classaverage.py --input ' + inputstack + aliopts +
                    extra + ' --parallel=' + options.parallel + ' --path ' +
                    dirname)
        cmds.append('mv ' + dirname + ' ' + destdir)

    for cmd in cmds:
        runcmd(options, cmd)

    E2end(logger)
    sys.stdout.flush()

    return
Exemplo n.º 3
0
def main():
    """Plot mean-intensity histograms for one or more stacks of sub-volumes.

    For each stack given through --input, per-particle mean intensities (and
    related statistics) are computed via calcintensities() and plotted via
    plotintensities(). When two or more stacks are supplied, a z-score
    comparing the first two populations is written to <path>/MIboth_INFO.txt
    and a combined plot is saved as <path>/MIbothPlot.png.
    """
    #import pylab
    #import matplotlib.mlab as mlab
    import matplotlib.pyplot as plt

    progname = os.path.basename(sys.argv[0])
    usage = """Produces mean intensity histograms of stack of sub-volumes"""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)

    parser.add_argument(
        "--input",
        type=str,
        default='',
        help=
        """Default=None. Comma-separated stacks of images whose mean intensity distribution you want to plot."""
    )

    parser.add_argument(
        "--subset",
        type=int,
        default=0,
        help=
        """Default=0 (not used). N > 2 number of particles to from each stack provided through --input to consider."""
    )

    parser.add_argument(
        "--path",
        type=str,
        default='',
        help=
        "Directory to store results in. The default is a numbered series of directories containing the prefix 'sptsim'; for example, sptsim_02 will be the directory by default if 'sptsim_01' already exists."
    )

    #parser.add_argument("--output",type=str,default='',help="""Name of output plot if comparing two populations or more.""")

    parser.add_argument(
        "--shrink",
        type=int,
        default=1,
        help=
        "Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount n > 1."
    )

    parser.add_argument(
        "--bins",
        type=int,
        default=0,
        help=
        """Default=0 (not used). Number of bins for histogram. If not provided, the optimal bin number will be automatically calculated based on bin-width, computed using Scott's normal reference rule, width = (3.5*std)/cuberoot(n), where 'std' is the standard deviation of the mean intensity distribution of population and n is the number of mean intensity values considered (this is affected by --removesigma). Then, bins will be nbins = (max(intensities) - min(intensities)) / width."""
    )

    #parser.add_argument("--sym", type=str, default='c1', help = "Symmetry to enforce before computing mean intensity in the box. Note that this should only be used if the particles are properly aligned to the symmetry axis.")

    parser.add_argument(
        "--mask",
        type=str,
        default="mask.sharp:outer_radius=-2",
        help=
        "Default=mask.sharp:outer_radius=-2. Mask processor applied to the particles before alignment. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."
    )

    parser.add_argument(
        "--maskfile",
        type=str,
        default='',
        help=
        """Default=None. An image file containing an additional mask to apply besides --mask."""
    )

    parser.add_argument(
        "--clip",
        type=int,
        default=0,
        help=
        """Default=0 (not used). Boxsize to clip particles to before computing mean and standard deviation values for each image. (This can act as a mask, as you'd want to clip the boxes to a smaller size than their current, original size, excluding neighboring particles and background pixels/voxels)."""
    )

    parser.add_argument(
        "--preprocess",
        type=str,
        default='',
        help=
        """Any processor to be applied to each image before computing mean and standard deviation values. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."""
    )

    parser.add_argument(
        "--lowpass",
        type=str,
        default='',
        help=
        """Default=None. A lowpass filtering processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."""
    )

    parser.add_argument(
        "--highpass",
        type=str,
        default='',
        help=
        """Default=None. A highpass filtering processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."""
    )

    parser.add_argument(
        "--threshold",
        type=str,
        default='',
        help=
        """A thresholding processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."""
    )

    parser.add_argument(
        "--normproc",
        type=str,
        default="normalize.edgemean",
        help=
        """Default=normalize.edgemean. Normalization processor applied to particles before computing mean and standard deviation values for each iamge. If normalize.mask is used, --mask will be passed in automatically. If you want to turn normalization off specify \'None\'. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py)."""
    )

    parser.add_argument(
        "--savepreprocessed",
        action="store_true",
        default=False,
        help=
        """Default=False. If provided, this option will save the image stacks in --input after all preprocessing options (lowpass, highpass, preprocess, masking, etc.) have been applied."""
    )

    parser.add_argument(
        "--normalizeplot",
        action="store_true",
        default=False,
        help=
        """Default=False. This will normalize the intensity values of the distribution to be between 0 and 1"""
    )

    parser.add_argument(
        "--removesigma",
        type=int,
        default=0,
        help=
        """Default=0. Provide a value for the number of standard deviations away from the mean to consider values to exclude. For example, if --removesigma=3, values further than 3 standard deviations away from the mean will be excluded."""
    )

    parser.add_argument(
        "--ppid",
        type=int,
        help=
        "Default=1. Set the PID of the parent process, used for cross platform PPID",
        default=-1)

    parser.add_argument(
        "--verbose",
        "-v",
        type=int,
        default=0,
        help=
        "Default 0. Verbose level [0-9], higner number means higher level of verboseness",
        dest="verbose",
        action="store",
        metavar="n")

    (options, args) = parser.parse_args()

    logger = E2init(sys.argv, options.ppid)

    # Processor-option strings (--mask, --lowpass, etc.) are parsed into
    # (name, params) tuples by sptOptionsParser; the old per-option
    # parsemodopt calls that used to live here were superseded by it.
    from e2spt_classaverage import sptOptionsParser
    options = sptOptionsParser(options)

    datafiles = options.input.split(',')

    from e2spt_classaverage import sptmakepath
    options = sptmakepath(options, 'meanintensityplots')

    # Per-stack results, stored as [filename, values] pairs.
    intensitiesSeveral = []
    iwzSeveral = []
    iminsSeveral = []
    imaxsSeveral = []
    istdsSeveral = []

    means = []
    stds = []

    # Record the command/parameters used, for reproducibility.
    from e2spt_classaverage import writeParameters
    cmdwp = writeParameters(options, 'e2spt_meanintensityplot.py',
                            'sptmeanintensity')

    # Validate every stack up front (fail fast before any heavy computation):
    # each stack, or the requested --subset of it, must hold >= 3 particles.
    for datafile in datafiles:
        n = EMUtil.get_image_count(datafile)

        if options.subset:
            if options.subset < 3:
                print "ERROR:Subset must be > 2."
                sys.exit(1)

            n = options.subset

        if n < 3:
            print "ERROR: All stacks must have at least 3 particles in them. This one doesn't:", datafile
            sys.exit(1)

    for datafile in datafiles:
        # calcintensities returns (means, weighted-zero?, mins, maxs, stds)
        # per particle; exact semantics defined in calcintensities().
        ret = calcintensities(options, datafile)

        intensitiesSingle = ret[0]
        iwz = ret[1]
        imins = ret[2]
        imaxs = ret[3]
        istds = ret[4]

        intensitiesSeveral.append([datafile, list(intensitiesSingle)])

        iwzSeveral.append([datafile, list(iwz)])
        iminsSeveral.append([datafile, list(imins)])
        imaxsSeveral.append([datafile, list(imaxs)])
        istdsSeveral.append([datafile, list(istds)])

        intensitiesSingleNorm = intensitiesSingle

        if options.normalizeplot:
            intensitiesSingleNorm = normintensities(intensitiesSingle, 0, 0)

        # Individual histogram for this stack; keep its mean/std for the
        # z-score computed below when comparing two stacks.
        ret = plotintensities(intensitiesSingleNorm, options, datafile)
        mean = ret[0]
        std = ret[1]
        means.append(mean)
        stds.append(std)

        plotintensities(iwz, options, datafile, 'wz')
        plotintensities(imins, options, datafile, 'mins')
        plotintensities(imaxs, options, datafile, 'maxs')
        plotintensities(istds, options, datafile, 'stds')

    if len(intensitiesSeveral) > 1:

        datafile1 = intensitiesSeveral[0][0]
        datafile2 = intensitiesSeveral[1][0]

        intensities1 = intensitiesSeveral[0][1]
        intensities2 = intensitiesSeveral[1][1]
        n1 = len(intensities1)
        n2 = len(intensities2)

        # Two-sample z-score for the difference of means of the first two
        # populations (only the first two stacks are compared).
        zscore = (means[0] - means[1]) / np.sqrt((stds[0] * stds[0]) / n1 +
                                                 (stds[1] * stds[1]) / n2)

        g = open(options.path + '/MIboth_INFO.txt', 'w')
        zscoreline = 'zscore=' + str(
            zscore) + ' for ' + datafile1 + ' vs ' + datafile2 + ' \n'
        lines = [zscoreline]
        g.writelines(lines)
        g.close()

        print "\nzzzzzzz\n%s" % (zscoreline)

        # When normalizing the combined plot, rescale all stacks with a
        # common (global) min and range so they remain comparable.
        absmax = absmin = 0
        if options.normalizeplot:

            minses = []
            maxes = []
            for intenS in intensitiesSeveral:
                minS = float(min(intenS[1]))
                maxS = float(max(intenS[1]))

                minses.append(minS)
                maxes.append(maxS)

            absmin = min(minses)
            absmax = max(maxes) - absmin

        for intensities in intensitiesSeveral:
            print "Type and len of intensities is", type(intensities[1]), len(
                intensities[1])

            intensitiesNorm = intensities[1]
            if options.normalizeplot:
                print "Normalizeplot on"
                intensitiesNorm = normintensities(intensities[1], absmin,
                                                  absmax)

            # BUGFIX: this call previously passed the stale 'datafile'
            # variable left over from the loop above (always the LAST stack),
            # mislabeling every curve of the combined plot. Use the filename
            # stored with this stack's intensities instead.
            plotintensities(intensitiesNorm, options, intensities[0], 'no')

        plt.savefig(options.path + '/MIbothPlot.png')
        plt.clf()

    E2end(logger)
# Exemplo n.º 4
# 0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = """prog <output> [options]
	Program to build an initial subtomogram average by averaging pairs from the largest subset
	in --input that is a power of 2. For example, if you supply an input stack with 100 subtomograms,
	this program will build an initial reference using 64, since 64 is the largest power of 2 contained in 100.
	In the first iteration, particle 1 will be averaged with 2, 3 with 4, 5 with 6... etc.
	32 new averages (each an average of 2 subtomograms) will be used for the second iteration.
	Again, 1 will be averaged with 2, 3 with 4, etc... yielding 16 new averages.
	The algorithm continues until the entire subset (64) has been merged into 1 average.
	
	This program depends on e2spt_classaverage.py because it imports the preprocessing 
	and alignment functions from it.
	
	--mask=mask.sharp:outer_radius=<safe radius>
	--preprocess=filter.lowpass.gauss:cutoff_freq=<1/resolution in A>
	"""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)

    parser.add_header(name="sptbtheader",
                      help="""Options below this label are specific to 
		sptbinarytree""",
                      title="### sptbinarytree options ###",
                      row=6,
                      col=0,
                      rowspan=1,
                      colspan=3,
                      mode="align")

    parser.add_header(
        name="caheader",
        help="""Options below this label are specific to sptclassaverage""",
        title="### sptclassaverage options ###",
        row=3,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--path",
        type=str,
        default='spt',
        help=
        """Default=spt. Directory to store results in. The default is a numbered series of directories containing the prefix 'spt'; for example, spt_02 will be the directory by default if 'spt_01' already exists."""
    )

    parser.add_argument(
        "--input",
        type=str,
        default='',
        help=
        """Default=None. The name of the input volume stack. MUST be HDF since volume stack support is required.""",
        guitype='filebox',
        browser='EMSubTomosTable(withmodal=True,multiselect=False)',
        row=0,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--output",
        type=str,
        default='avg.hdf',
        help=
        """Default=avg.hdf. The name of the output class-average stack. MUST be HDF since volume stack support is required.""",
        guitype='strbox',
        row=2,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    #parser.add_argument("--classmx", type=str, default='', help="""Default=None. The name of the classification matrix specifying how particles in 'input' should be grouped. If omitted, all particles will be averaged.""")

    #parser.add_argument("--ref", type=str, default='', help="""Default=None. Reference image(s). Used as an initial alignment reference and for final orientation adjustment if present. This is typically the projections that were used for classification.""", guitype='filebox', browser='EMBrowserWidget(withmodal=True,multiselect=True)', filecheck=False, row=1, col=0, rowspan=1, colspan=3, mode='alignment')

    #parser.add_argument("--refpreprocess",action="store_true",default=False,help="""Default=False. This will preprocess the reference identically to the particles. It is off by default, but it is internally turned on when no reference is supplied.""")

    #parser.add_argument("--resultmx",type=str,default=None,help="""Default=Npone. Specify an output image to store the result matrix. This is in the same format as the classification matrix. http://blake.bcm.edu/emanwiki/EMAN2/ClassmxFiles""")

    #parser.add_argument("--refinemultireftag", type=str, default='', help="""Default=''. DO NOT USE THIS PARAMETER. It is passed on from e2spt_refinemulti.py if needed.""")

    parser.add_argument(
        "--radius",
        type=float,
        default=0,
        help=
        """Default=0 (which means it's not used by default). Hydrodynamic radius of the particle in Angstroms. This will be used to automatically calculate the angular steps to use in search of the best alignment. Make sure the apix is correct on the particles' headers, sine the radius will be converted from Angstroms to pixels. Then, the fine angular step is equal to 360/(2*pi*radius), and the coarse angular step 4 times that."""
    )

    parser.add_argument(
        "--precision",
        type=float,
        default=1.0,
        help=
        """Default=1.0. Precision in pixels to use when figuring out alignment parameters automatically using --radius. Precision would be the number of pixels that the the edge of the specimen is moved (rotationally) during the finest sampling, --falign. If precision is 1, then the precision of alignment will be that of the sampling (apix of your images) times the --shrinkfine factor specified."""
    )

    parser.add_argument(
        "--search",
        type=int,
        default=8,
        help=
        """"Default=8. During COARSE alignment translational search in X, Y and Z, in pixels. Default=8. This WILL overwrite any search: provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error."""
    )

    parser.add_argument(
        "--searchfine",
        type=int,
        default=2,
        help=
        """"Default=2. During FINE alignment translational search in X, Y and Z, in pixels. Default=2. This WILL overwrite any search: provided through --falign, EXCEPT if you provide --searchfine=2, which is the default. In general, just avoid providing search twice (through here and through the fine aligner --falign). If you do, just be careful to make them consistent to minimize misinterpretation and error."""
    )

    #parser.add_argument("--donotaverage",action="store_true", help="""If e2spt_refinemulti.py is calling e2spt_classaverage.py, the latter need not average any particles, but rather only yield the alignment results.""", default=False)

    parser.add_argument(
        "--iterstop",
        type=int,
        default=0,
        help=
        """Default=0. (Not used). The program is called to convergence by default (all particles merge into one final average). To stop at an intermediate iteration, provide this parameter. For example, --iterstop=1, will only allow the algorithm to complete 1 iteration; --iterstop=2 will allow it to go through 2, etc."""
    )

    parser.add_argument(
        "--savesteps",
        action="store_true",
        default=False,
        help=
        """Default=False. If set, will save the average after each iteration to class_#.hdf. Each class in a separate file. Appends to existing files.""",
        guitype='boolbox',
        row=4,
        col=0,
        rowspan=1,
        colspan=1,
        mode='alignment,breaksym')

    parser.add_argument(
        "--saveali",
        action="store_true",
        default=False,
        help=
        """Default=False. If set, will save the aligned particle volumes in class_ptcl.hdf. Overwrites existing file.""",
        guitype='boolbox',
        row=4,
        col=1,
        rowspan=1,
        colspan=1,
        mode='alignment,breaksym')

    parser.add_argument(
        "--saveallalign",
        action="store_true",
        default=False,
        help=
        """Default=False. If set, will save the alignment parameters after each iteration""",
        guitype='boolbox',
        row=4,
        col=2,
        rowspan=1,
        colspan=1,
        mode='alignment,breaksym')

    parser.add_argument(
        "--sym",
        dest="sym",
        default='',
        help=
        """Default=None (equivalent to c1). Symmetry to impose -choices are: c<n>, d<n>, h<n>, tet, oct, icos""",
        guitype='symbox',
        row=9,
        col=1,
        rowspan=1,
        colspan=2,
        mode='alignment,breaksym')

    parser.add_argument(
        "--mask",
        type=str,
        default="mask.sharp:outer_radius=-2",
        help=
        """Default is mask.sharp:outer_radius=-2. Masking processor applied to particles before alignment. IF using --clipali, make sure to express outer mask radii as negative pixels from the edge.""",
        returnNone=True,
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'mask\')',
        row=11,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--maskfile",
        type=str,
        default='',
        help=
        """Default=None. Mask file (3D IMAGE) applied to particles before alignment. Must be in HDF format. Default is None."""
    )

    parser.add_argument(
        "--normproc",
        type=str,
        default='normalize.edgemean',
        help=
        """Default is 'normalize.edgemean' (see 'e2help.py processors -v 10' at the command line). Normalization processor applied to particles before alignment. If normalize.mask is used, results of the mask option will be passed in automatically. If you want to turn this option off specify \'None\'"""
    )

    parser.add_argument(
        "--threshold",
        type=str,
        default='',
        help=
        """Default=None. A threshold applied to the subvolumes after normalization. For example, --threshold=threshold.belowtozero:minval=0 makes all negative pixels equal 0, so that they do not contribute to the correlation score.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'filter\')',
        row=10,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--preprocess",
        type=str,
        default='',
        help=
        """Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'filter\')',
        row=10,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--preprocessfine",
        type=str,
        default='',
        help=
        """Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging."""
    )

    parser.add_argument(
        "--lowpass",
        type=str,
        default='',
        help=
        """Default=None. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'filter\')',
        row=17,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--lowpassfine",
        type=str,
        default='',
        help=
        """Default=None. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging."""
    )

    parser.add_argument(
        "--highpass",
        type=str,
        default='',
        help=
        """Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'filter\')',
        row=18,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--highpassfine",
        type=str,
        default='',
        help=
        """Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging."""
    )

    parser.add_argument(
        "--shrink",
        type=int,
        default=1,
        help=
        """Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for coarse alignment.""",
        guitype='shrinkbox',
        row=5,
        col=1,
        rowspan=1,
        colspan=1,
        mode='alignment,breaksym')

    parser.add_argument(
        "--shrinkfine",
        type=int,
        default=1,
        help=
        """Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for refine alignment.""",
        guitype='intbox',
        row=5,
        col=2,
        rowspan=1,
        colspan=1,
        mode='alignment')

    parser.add_argument(
        "--clipali",
        type=int,
        default=0,
        help=
        """Default=0 (which means it's not used). Boxsize to clip particles as part of preprocessing to speed up alignment. For example, the boxsize of the particles might be 100 pixels, but the particles are only 50 pixels in diameter. Aliasing effects are not always as deleterious for all specimens, and sometimes 2x padding isn't necessary; still, there are some benefits from 'oversampling' the data during averaging; so you might still want an average of size 2x, but perhaps particles in a box of 1.5x are sufficiently good for alignment. In this case, you would supply --clipali=75"""
    )

    parser.add_argument(
        "--postprocess",
        type=str,
        default='',
        help=
        """A processor to be applied to the FINAL volume after averaging the raw volumes in their FINAL orientations, after all iterations are done.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_processors_list(),\'filter\')',
        row=16,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--procfinelikecoarse",
        action='store_true',
        default=False,
        help=
        """If you supply this parameters, particles for fine alignment will be preprocessed identically to particles for coarse alignment by default. If you supply this, but want specific parameters for preprocessing particles for also supply: fine alignment, nd supply fine alignment parameters, such as --lowpassfine, --highpassfine, etc; to preprocess the particles for FINE alignment differently than for COARSE alignment."""
    )

    parser.add_argument(
        "--npeakstorefine",
        type=int,
        help=
        """Default=1. The number of best coarse alignments to refine in search of the best final alignment. Default=1.""",
        default=4,
        guitype='intbox',
        row=9,
        col=0,
        rowspan=1,
        colspan=1,
        nosharedb=True,
        mode='alignment,breaksym[1]')

    parser.add_argument(
        "--align",
        type=str,
        default="rotate_translate_3d:search=8:delta=12:dphi=12",
        help=
        """This is the aligner used to align particles to the previous class average. Default is rotate_translate_3d:search=8:delta=12:dphi=12, specify 'None' (with capital N) to disable.""",
        returnNone=True,
        guitype='comboparambox',
        choicelist='re_filter_list(dump_aligners_list(),\'3d\')',
        row=12,
        col=0,
        rowspan=1,
        colspan=3,
        nosharedb=True,
        mode="alignment,breaksym['rotate_symmetry_3d']")

    parser.add_argument(
        "--aligncmp",
        type=str,
        default="ccc.tomo",
        help=
        """Default=ccc.tomo. The comparator used for the --align aligner. Do not specify unless you need to use anotherspecific aligner.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_cmps_list(),\'tomo\')',
        row=13,
        col=0,
        rowspan=1,
        colspan=3,
        mode="alignment,breaksym")

    parser.add_argument(
        "--falign",
        type=str,
        default="refine_3d_grid:delta=3:range=15:search=2",
        help=
        """Default="refine_3d_grid:delta=3:range=15:search=2". This is the second stage aligner used to fine-tune the first alignment. Specify 'None' to disable.""",
        returnNone=True,
        guitype='comboparambox',
        choicelist='re_filter_list(dump_aligners_list(),\'refine.*3d\')',
        row=14,
        col=0,
        rowspan=1,
        colspan=3,
        nosharedb=True,
        mode='alignment,breaksym[None]')

    parser.add_argument(
        "--faligncmp",
        type=str,
        default="ccc.tomo",
        help=
        """Default=ccc.tomo. The comparator used by the second stage aligner.""",
        guitype='comboparambox',
        choicelist='re_filter_list(dump_cmps_list(),\'tomo\')',
        row=15,
        col=0,
        rowspan=1,
        colspan=3,
        mode="alignment,breaksym")

    parser.add_argument(
        "--averager",
        type=str,
        default="mean.tomo",
        help=
        """Default=mean.tomo. The type of averager used to produce the class average. Default=mean.tomo."""
    )

    #parser.add_argument("--keep",type=float,default=1.0,help="""Default=1.0 (all particles kept). The fraction of particles to keep in each class.""", guitype='floatbox', row=6, col=0, rowspan=1, colspan=1, mode='alignment,breaksym')

    #parser.add_argument("--keepsig", action="store_true", default=False,help="""Default=False. Causes the keep argument to be interpreted in standard deviations.""", guitype='boolbox', row=6, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')

    #parser.add_argument("--inixforms",type=str,default="",help="""Default=None. .json file containing a dict of transforms to apply to 'pre-align' the particles.""", guitype='dirbox', dirbasename='spt_|sptsym_', row=7, col=0,rowspan=1, colspan=2, nosharedb=True, mode='breaksym')

    parser.add_argument(
        "--breaksym",
        action="store_true",
        default=False,
        help=
        """Default=False. Break symmetry. Do not apply symmetrization after averaging, even if searching the asymmetric unit provided through --sym only for alignment. Default=False""",
        guitype='boolbox',
        row=7,
        col=2,
        rowspan=1,
        colspan=1,
        nosharedb=True,
        mode=',breaksym[True]')

    #parser.add_argument("--groups",type=int,default=0,help="""Default=0 (not used; data not split). This parameter will split the data into a user defined number of groups. For purposes of gold-standard FSC computation later, select --group=2.""")

    parser.add_argument(
        "--randomizewedge",
        action="store_true",
        default=False,
        help=
        """Default=False. This parameter is EXPERIMENTAL. It randomizes the position of the particles BEFORE alignment, to minimize missing wedge bias and artifacts during symmetric alignment where only a fraction of space is scanned"""
    )

    parser.add_argument(
        "--savepreprocessed",
        action="store_true",
        default=False,
        help=
        """Default=False. Will save stacks of preprocessed particles (one for coarse alignment and one for fine alignment if preprocessing options are different)."""
    )

    parser.add_argument(
        "--autocenter",
        type=str,
        default='',
        help=
        """Default=None. Autocenters each averaged pair during initial average generation with --btref and --hacref. Will also autocenter the average of all particles after each iteration of iterative refinement. Options are --autocenter=xform.centerofmass (self descriptive), or --autocenter=xform.centeracf, which applies auto-convolution on the average."""
    )

    parser.add_argument(
        "--autocentermask",
        type=str,
        default='',
        help=
        """Default=None. Masking processor to apply before autocentering. See 'e2help.py processors -v 10' at the command line."""
    )

    parser.add_argument(
        "--autocenterpreprocess",
        action='store_true',
        default=False,
        help=
        """Default=False. This will apply a highpass filter at a frequency of half the box size times the apix, shrink by 2, and apply a low pass filter at half nyquist frequency to any computed average for autocentering purposes if --autocenter is provided. Default=False."""
    )

    parser.add_argument(
        "--parallel",
        default="thread:1",
        help=
        """default=thread:1. Parallelism. See http://blake.bcm.edu/emanwiki/EMAN2/Parallel""",
        guitype='strbox',
        row=19,
        col=0,
        rowspan=1,
        colspan=3,
        mode='alignment,breaksym')

    parser.add_argument(
        "--ppid",
        type=int,
        help=
        """Default=-1. Set the PID of the parent process, used for cross platform PPID""",
        default=-1)

    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        default=0,
        help=
        """Default=0. Verbose level [0-9], higner number means higher level of verboseness"""
    )

    #parser.add_argument("--resume",type=str,default='',help="""(Not working currently). tomo_fxorms.json file that contains alignment information for the particles in the set. If the information is incomplete (i.e., there are less elements in the file than particles in the stack), on the first iteration the program will complete the file by working ONLY on particle indexes that are missing. For subsequent iterations, all the particles will be used.""")

    parser.add_argument(
        "--plots",
        action='store_true',
        default=False,
        help=
        """Default=False. Turn this option on to generatea plot of the ccc scores during each iteration. Running on a cluster or via ssh remotely might not support plotting."""
    )

    parser.add_argument(
        "--subset",
        type=int,
        default=0,
        help=
        """Default=0 (not used). Refine only this substet of particles from the stack provided through --input"""
    )

    parser.add_argument(
        "--notmatchimgs",
        action='store_true',
        default=False,
        help=
        """Default=True. This option prevents applying filter.match.to to one image so that it matches the other's spectral profile during preprocessing for alignment purposes."""
    )

    parser.add_argument(
        "--preavgproc1",
        type=str,
        default='',
        help=
        """Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highphass filter if you have phaseplate data.)"""
    )

    parser.add_argument(
        "--preavgproc2",
        type=str,
        default='',
        help=
        """Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highphass filter if you have phaseplate data.)"""
    )

    parser.add_argument(
        "--weighbytiltaxis",
        type=str,
        default='',
        help=
        """Default=None. A,B, where A is an integer number and B a decimal. A represents the location of the tilt axis in the tomogram in pixels (eg.g, for a 4096x4096xZ tomogram, this value should be 2048), and B is the weight of the particles furthest from the tomogram. For example, --weighbytiltaxis=2048,0.5 means that praticles at the tilt axis (with an x coordinate of 2048) will have a weight of 1.0 during averaging, while the distance in the x coordinates of particles not-on the tilt axis will be used to weigh their contribution to the average, with particles at the edge(0+radius or 4096-radius) weighing 0.5, as specified by the value provided for B."""
    )

    parser.add_argument(
        "--weighbyscore",
        action='store_true',
        default=False,
        help=
        """Default=False. This option will weigh the contribution of each subtomogram to the average by score/bestscore."""
    )

    parser.add_argument(
        "--tweak",
        action='store_true',
        default=False,
        help=
        """WARNING: BUGGY. This will perform a final alignment with no downsampling [without using --shrink or --shrinkfine] if --shrinkfine > 1."""
    )
    '''
	BT SPECIFIC PARAMETERS
	'''

    parser.add_argument("--nseedlimit",
                        type=int,
                        default=0,
                        help="""Maximum number of particles
		to use. For example, if you supply a stack with 150 subtomograms, the program will
		automatically select 128 as the limit to use because it's the largest power of 2 that is
		smaller than 150. But if you provide, say --nseedlimit=100, then the number of particles
		used will be 64, because it's the largest power of 2 that is still smaller than 100."""
                        )

    (options, args) = parser.parse_args()
    '''
	Make the directory where to create the database where the results will be stored
	'''
    from e2spt_classaverage import sptmakepath
    options = sptmakepath(options, 'spt_bt')

    rootpath = os.getcwd()
    if rootpath not in options.path:
        options.path = rootpath + '/' + options.path

    if not options.input:
        parser.print_help()
        exit(0)
    elif options.subset:
        subsetStack = options.path + '/subset' + str(options.subset).zfill(
            len(str(options.subset))) + '.hdf'
        print "\nSubset to be written to", subsetStack

        subsetcmd = 'e2proc3d.py ' + options.input + ' ' + subsetStack + ' --first=0 --last=' + str(
            options.subset - 1)
        print "Subset cmd is", subsetcmd

        p = subprocess.Popen(subsetcmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        text = p.communicate()
        p.stdout.close()

        options.input = subsetStack

    from e2spt_classaverage import sptParseAligner
    options = sptParseAligner(options)
    '''
	If --radius of the particle is provided, we calculate the optimal alignment steps for 
	coarse and fine alignment rounds using --shrink and --shrinkfine options and apix info
	'''

    if options.shrink < options.shrinkfine:
        options.shrink = options.shrinkfine
        print "It makes no sense for shrinkfine to be larger than shrink; therefore, shrink will be made to match shrinkfine"

    if options.radius:
        from e2spt_classaverage import calcAliStep
        options = calcAliStep(options)
    '''
	Parse parameters such that "None" or "none" are adequately interpreted to turn of an option
	'''

    from e2spt_classaverage import sptOptionsParser
    options = sptOptionsParser(options)

    from e2spt_classaverage import writeParameters
    writeParameters(options, 'e2spt_binarytree.py', 'bt')

    hdr = EMData(options.input, 0, True)
    nx = hdr["nx"]
    ny = hdr["ny"]
    nz = hdr["nz"]
    if nx != ny or ny != nz:
        print "ERROR, input volumes are not cubes"
        sys.exit(1)

    logger = E2init(sys.argv, options.ppid)
    '''
	Initialize parallelism if being used
	'''

    if options.parallel:

        if options.parallel == 'none' or options.parallel == 'None' or options.parallel == 'NONE':
            options.parallel = ''
            etc = ''

        else:
            print "\n\n(e2spt_classaverage.py) INITIALIZING PARALLELISM!"
            print "\n\n"

            from EMAN2PAR import EMTaskCustomer
            etc = EMTaskCustomer(options.parallel)

            pclist = [options.input]

            etc.precache(pclist)

    else:
        etc = ''

    nptcl = EMUtil.get_image_count(options.input)
    if nptcl < 1:
        print "ERROR : at least 2 particles required in input stack"
        sys.exit(1)

    ptclnums = range(nptcl)
    nptclForRef = len(ptclnums)

    nseed = 2**int(
        floor(log(len(ptclnums), 2))
    )  # we stick with powers of 2 for this to make the tree easier to collapse

    if options.nseedlimit:
        nseed = 2**int(floor(log(options.nseedlimit, 2)))

    binaryTreeRef(options, nptclForRef, nseed, -1, etc)

    print "Will end logger"
    E2end(logger)

    print "logger ended"
    sys.stdout.flush()

    return
Exemplo n.º 5
0
def main():
	"""Plot mean-intensity histograms for one or more stacks of sub-volumes.

	Parses command-line options, validates that every input stack has at
	least 3 particles, computes per-particle intensity statistics for each
	stack (via calcintensities), plots a histogram per stack, and, when two
	or more stacks are supplied, computes a z-score comparing the first two
	distributions and writes a combined plot (MIbothPlot.png) plus an info
	file (MIboth_INFO.txt) into the output directory.
	"""
	import matplotlib.pyplot as plt

	progname = os.path.basename(sys.argv[0])
	usage = """Produces mean intensity histograms of stack of sub-volumes"""
			
	parser = EMArgumentParser(usage=usage,version=EMANVERSION)
	
	parser.add_argument("--input",type=str,default='',help="""Default=None. Comma-separated stacks of images whose mean intensity distribution you want to plot.""")

	parser.add_argument("--subset",type=int,default=0,help="""Default=0 (not used). N > 2 number of particles to from each stack provided through --input to consider.""")

	parser.add_argument("--path",type=str,default='',help="Directory to store results in. The default is a numbered series of directories containing the prefix 'sptsim'; for example, sptsim_02 will be the directory by default if 'sptsim_01' already exists.")

	parser.add_argument("--shrink", type=int,default=1,help="Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount n > 1.")
	
	parser.add_argument("--bins", type=int,default=0,help="""Default=0 (not used). Number of bins for histogram. If not provided, the optimal bin number will be automatically calculated based on bin-width, computed using Scott's normal reference rule, width = (3.5*std)/cuberoot(n), where 'std' is the standard deviation of the mean intensity distribution of population and n is the number of mean intensity values considered (this is affected by --removesigma). Then, bins will be nbins = (max(intensities) - min(intensities)) / width.""")
	
	parser.add_argument("--mask",type=str,default="mask.sharp:outer_radius=-2",help="Default=mask.sharp:outer_radius=-2. Mask processor applied to the particles before alignment. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).")
	
	parser.add_argument("--maskfile",type=str,default='',help="""Default=None. An image file containing an additional mask to apply besides --mask.""")
		
	parser.add_argument("--clip",type=int,default=0,help="""Default=0 (not used). Boxsize to clip particles to before computing mean and standard deviation values for each image. (This can act as a mask, as you'd want to clip the boxes to a smaller size than their current, original size, excluding neighboring particles and background pixels/voxels).""")	
	
	parser.add_argument("--preprocess",type=str,default='',help="""Any processor to be applied to each image before computing mean and standard deviation values. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).""")
	
	parser.add_argument("--lowpass",type=str,default='',help="""Default=None. A lowpass filtering processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).""")
	
	parser.add_argument("--highpass",type=str,default='',help="""Default=None. A highpass filtering processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).""")

	parser.add_argument("--threshold",type=str,default='',help="""A thresholding processor to be applied before computing mean and standard deviation values for each image. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).""")
		
	parser.add_argument("--normproc",type=str,default="normalize.edgemean",help="""Default=normalize.edgemean. Normalization processor applied to particles before computing mean and standard deviation values for each iamge. If normalize.mask is used, --mask will be passed in automatically. If you want to turn normalization off specify \'None\'. (See 'e2help.py processors' at the command line for a list of processors that can be applied through e2proc3d.py).""")

	parser.add_argument("--savepreprocessed",action="store_true",default=False,help="""Default=False. If provided, this option will save the image stacks in --input after all preprocessing options (lowpass, highpass, preprocess, masking, etc.) have been applied.""")
		
	parser.add_argument("--normalizeplot",action="store_true",default=False,help="""Default=False. This will normalize the intensity values of the distribution to be between 0 and 1""")

	parser.add_argument("--removesigma",type=int,default=0,help="""Default=0. Provide a value for the number of standard deviations away from the mean to consider values to exclude. For example, if --removesigma=3, values further than 3 standard deviations away from the mean will be excluded.""")
	
	parser.add_argument("--ppid", type=int, help="Default=1. Set the PID of the parent process, used for cross platform PPID",default=-1)

	parser.add_argument("--verbose", "-v", type=int, default=0, help="Default 0. Verbose level [0-9], higner number means higher level of verboseness",dest="verbose", action="store", metavar="n")

	(options, args) = parser.parse_args()
	
	logger = E2init(sys.argv, options.ppid)
	
	# Interpret processor-type option strings ('None'/'none' turn options off,
	# everything else is parsed into (processorname, paramdict) form).
	from e2spt_classaverage import sptOptionsParser
	options = sptOptionsParser( options )
	
	datafiles = options.input.split(',')
	
	# Create a fresh numbered output directory (meanintensityplots_NN).
	from e2spt_classaverage import sptmakepath
	options = sptmakepath( options, 'meanintensityplots')
	
	# Per-stack accumulators: each entry is [datafile, list-of-values].
	intensitiesSeveral = []
	iwzSeveral = []
	iminsSeveral = []
	imaxsSeveral = []
	istdsSeveral = []
	
	means = []
	stds = []
	
	# Record the parameters used for this run (provenance).
	from e2spt_classaverage import writeParameters
	writeParameters(options,'e2spt_meanintensityplot.py', 'sptmeanintensity')
	
	# Validate particle counts up front, before any heavy computation.
	for datafile in datafiles:
		n = EMUtil.get_image_count(datafile)
		
		if options.subset:
			if options.subset < 3:
				print "ERROR:Subset must be > 2."
				sys.exit(1)
			
			n = options.subset
			
		if n < 3:
			print "ERROR: All stacks must have at least 3 particles in them. This one doesn't:", datafile
			sys.exit(1)
	
	for datafile in datafiles:
		# calcintensities returns (intensities, weighted-z, mins, maxs, stds).
		ret = calcintensities( options, datafile )
		
		intensitiesSingle = ret[0]
		iwz = ret[1]
		imins = ret[2]
		imaxs = ret[3]
		istds = ret[4]
		
		intensitiesSeveral.append( [ datafile, list( intensitiesSingle ) ] )
		
		iwzSeveral.append( [ datafile, list( iwz ) ] )
		iminsSeveral.append( [ datafile, list( imins ) ] ) 
		imaxsSeveral.append( [ datafile, list( imaxs ) ] ) 
		istdsSeveral.append( [ datafile, list( istds ) ] ) 
				
		
		intensitiesSingleNorm = intensitiesSingle
		
		if options.normalizeplot:
			intensitiesSingleNorm = normintensities( intensitiesSingle, 0, 0 )
		
		# Per-stack histogram; also keep mean/std for the z-score below.
		ret = plotintensities( intensitiesSingleNorm, options, datafile )
		mean = ret[0]
		std = ret[1]
		means.append(mean)
		stds.append(std)
		
		# Auxiliary histograms of the other per-particle statistics.
		ret = plotintensities( iwz, options, datafile,'wz' )
		ret = plotintensities( imins, options, datafile,'mins' )
		ret = plotintensities( imaxs, options, datafile,'maxs' )
		ret = plotintensities( istds, options, datafile,'stds' )
		
	if len( intensitiesSeveral ) > 1:
		
		# Two-sample z-score between the first two stacks' mean intensities.
		datafile1 = intensitiesSeveral[0][0]
		datafile2 = intensitiesSeveral[1][0]
		
		intensities1 = intensitiesSeveral[0][1]
		intensities2 = intensitiesSeveral[1][1]
		n1 = len( intensities1 )
		n2 = len( intensities2 )
		
		zscore = ( means[0]-means[1] )/ np.sqrt( (stds[0]*stds[0])/n1 + (stds[1]*stds[1])/n2 )
		
		g = open(options.path + '/MIboth_INFO.txt','w')
		zscoreline = 'zscore=' + str(zscore)+' for ' + datafile1 + ' vs ' + datafile2 + ' \n'
		lines=[ zscoreline ]
		g.writelines(lines)
		g.close()
		
		print "\nzzzzzzz\n%s" %( zscoreline )
		
		# When normalizing the combined plot, scale every population with the
		# same global min/range so the histograms remain comparable.
		absmax = absmin = 0
		if options.normalizeplot:
		
			minses = []
			maxes = []
			for intenS in intensitiesSeveral:
				minS = float(min( intenS[1] ))
				maxS = float(max( intenS[1] ))
				
				minses.append( minS )
				maxes.append( maxS )
	
			absmin = min( minses )
			absmax = max( maxes ) - absmin
		
		for intensities in intensitiesSeveral:	
			print "Type and len of intensities is", type(intensities[1]), len(intensities[1])
			
			intensitiesNorm = intensities[1]			
			if options.normalizeplot:
				print "Normalizeplot on"	
				intensitiesNorm = normintensities( intensities[1], absmin, absmax )
			
			# bugfix: use each entry's own filename (intensities[0]) rather than
			# the stale 'datafile' loop variable left over from the loop above,
			# which always pointed at the LAST stack.
			plotintensities( intensitiesNorm, options, intensities[0], 'no' )
	
		plt.savefig(options.path + '/MIbothPlot.png')
		plt.clf()
		
	E2end(logger)
Exemplo n.º 6
0
def main():
	progname = os.path.basename(sys.argv[0])
	usage = """prog [options]

	This program runs different spt programs quickly, in testing mode, such that crashes
	can be identified more easily.
	"""
	
	parser = EMArgumentParser(usage=usage,version=EMANVERSION)

	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="verbose level [0-9], higner number means higher level of verboseness")
	
	parser.add_argument("--testn",type=int,default=6,help="""default=6. size of dataset to run tests with; cannot be < 6, since initial model generation with HAC for gold-standard refinement requires at least 3 particles for the even set and 3 for the odd set.""")
	
	parser.add_argument("--path",type=str,default='spttests',help="""Default=spttests. Directory to store results in. The default is a numbered series of directories containing the prefix 'spttests'; for example, spttests_02 will be the directory by default if 'spttests_01' already exists.""")
	
	parser.add_argument("--parallel",type=str,default='',help="""the program will detect the number of cores available and use threaded parallelism by default. To use only one core, supply --parallel=thread:1. For MPI on clusters, see parallelism at http://blake.bcm.edu/emanwiki/EMAN2/Parallel""")
	
	#parser.add_argument("--testsim",action='store_true',default=False,help="""default=False. If supplied, this option will test e2spt_simulation.py as well and use the generated simulated particles for subsequent tests, opposed to random volumes that do not have a missing wedge, noise or CTF.""")
	
	parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
		
	(options, args) = parser.parse_args()

	logger = E2init(sys.argv, options.ppid)
	
	if not options.parallel:
		import multiprocessing
		nparallel = multiprocessing.cpu_count()
		options.parallel = 'thread:' + str(nparallel)
		print "\nfound %d cores" %(nparallel)
		print "setting --parallel to", options.parallel
	
	if options.testn < 6:
		print "\nERROR: --testn must be > 5."
		sys.exit()
	
	
	'''
	Make the directory where to create the database where the results will be stored
	'''
	from e2spt_classaverage import sptmakepath
	options = sptmakepath(options,'spt_bt')
	
	from e2spt_classaverage import writeParameters
	writeParameters(options,'e2spt_test.py', 'spttests')
	
	for i in range( options.testn ):
		a = test_image_3d()
		t=Transform()
		if i > 0:
			az = random.randint(0,360)
			alt = random.randint(0,180)
			phi = random.randint(0,360)
			tx = random.randint(-5,5)
			ty = random.randint(-5,5)
			tz = random.randint(-5,5)
			t = Transform({'type':'eman','tx':tx,'ty':ty,'tz':tz,'alt':alt,'az':az,'phi':phi})
			a.transform(t)
		
		a.process_inplace('math.meanshrink',{'n':4})
		a['spt_randT'] = t
		a.write_image(options.path + '/testimgs.hdf',-1)
		
		if i==0:
			a.write_image(options.path + '/testimg_ref.hdf',0)
		
	cmds = []
	
	rootpath = os.getcwd()
	
	
	os.system('touch ' + options.path + '/output.txt')
	
	#input = rootpath + '/' + options.path + '/testimgs.hdf'
	#if options.testsim:
	
	simcmd = 'e2spt_simulation.py --input=' + options.path + '/testimg_ref.hdf --nptcls ' + str(options.testn) + ' --tiltrange 60 --nslices 25 --saveprjs --applyctf --snr 2' + ' --parallel=' +options.parallel + ' --path testsim'
	if options.verbose:
		simcmd += ' --verbose ' + str(options.verbose)
	
	cmds.append( simcmd )
	simcmd2 = 'mv testsim ' + options.path
	cmds.append( simcmd2 )

	input = rootpath + '/' + options.path +'/testsim/simptcls.hdf'
	
	
	btcmd = 'e2spt_binarytree.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --parallel=' +options.parallel + ' --path testbt'
	cmds.append( btcmd )
	btcmd2 = 'mv testbt ' + rootpath + '/' + options.path + '/'
	cmds.append( btcmd2 )
	
	haccmd = 'e2spt_hac.py  --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --parallel=' +options.parallel + ' --path testhac'
	cmds.append( haccmd )
	haccmd2 = 'mv testhac ' + rootpath + '/' + options.path + '/'
	cmds.append( haccmd2 )
	
	ssacmd = 'e2symsearch3d.py  --input ' + input + ' --sym icos --steps 2 --parallel=' +options.parallel + ' --path testssa'
	cmds.append( ssacmd )
	ssacmd2 = 'mv testssa ' + rootpath + '/' + options.path + '/'
	cmds.append( ssacmd2 )
	
	
	sptdefaultcmdgoldoff = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --goldstandardoff --parallel=' +options.parallel + ' --path testsptdefaultgoldoff'
	cmds.append( sptdefaultcmdgoldoff )
	sptdefaultcmdgoldoff2 = 'mv testsptdefaultgoldoff ' + rootpath + '/' + options.path + '/'
	cmds.append( sptdefaultcmdgoldoff2 )
	
	sptrefbtcmdgoldoff = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --goldstandardoff --btref 2 --parallel=' +options.parallel + ' --path testsptrefbtgoldoff'
	cmds.append( sptrefbtcmdgoldoff )
	sptrefbtcmdgoldoff2 = 'mv testsptrefbtgoldoff ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefbtcmdgoldoff2 )
	
	sptrefssacmdgoldoff = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --goldstandardoff --ssaref 2 --parallel=' +options.parallel + ' --path testsptrefssagoldoff'
	cmds.append( sptrefssacmdgoldoff )
	sptrefssacmdgoldoff2 = 'mv testsptrefssagoldoff ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefssacmdgoldoff2 )
	
	sptrefhaccmdgoldoff = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --goldstandardoff --hacref 3 --parallel=' +options.parallel + ' --path testsptrefhacgoldoff'
	cmds.append( sptrefhaccmdgoldoff )
	sptrefhaccmdgoldoff2 = 'mv testsptrefhacgoldoff ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefhaccmdgoldoff2 )
	
	
	sptdefaultcmdgoldon = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --parallel=' +options.parallel + ' --path testsptdefaultgoldon'
	cmds.append( sptdefaultcmdgoldon )
	sptdefaultcmdgoldon2 = 'mv testsptdefaultgoldon ' + rootpath + '/' + options.path + '/'
	cmds.append( sptdefaultcmdgoldon2 )
	
	sptrefbtcmdgoldon = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --btref 4 --parallel=' +options.parallel + ' --path testsptrefbtgoldon'
	cmds.append( sptrefbtcmdgoldon )
	sptrefbtcmdgoldon2 = 'mv testsptrefbtgoldon ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefbtcmdgoldon2 )
	
	sptrefssacmdgoldon = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --ssaref 4 --parallel=' +options.parallel + ' --path testsptrefssagoldon'
	cmds.append( sptrefssacmdgoldon )
	sptrefssacmdgoldon2 = 'mv testsptrefssagoldon ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefssacmdgoldon2 )
	
	sptrefhaccmdgoldon = 'e2spt_classaverage.py --input ' + input + ' --align rotate_symmetry_3d:sym=c1 --falign refine_3d_grid:range=3:delta=3 --hacref 6 --parallel=' +options.parallel + ' --path testsptrefhacgoldon'
	cmds.append( sptrefhaccmdgoldon )
	sptrefhaccmdgoldon2 = 'mv testsptrefhacgoldon ' + rootpath + '/' + options.path + '/'
	cmds.append( sptrefhaccmdgoldon2 )
	
	for cmd in cmds:
		runcmd( options, cmd )
	

	E2end(logger)
	sys.stdout.flush()
	
	
	
	return