Example #1
def reorient(data,refvol,verbose=0):

	output=None		# deletes the results from the previous iteration if any

	if verbose>0:print "Checking Slices (%d)"%len(data)

	ptcl=0
	for i,elem in enumerate(data):
		# get the image to insert
		img=EMData(elem["filename"],elem["filenum"])
		if img["sigma"]==0 : continue

		rd=elem["xform"].get_rotation("eman")
		if verbose>0 : print " %d/%d\r"%(i,len(data)),
		sys.stdout.flush()

		simp=Simplex(qual,[rd["az"],rd["alt"],rd["phi"]],[5,5,5],data=[refvol,img])
		final=simp.minimize(maxiters=500,epsilon=.0001,monitor=0)[0]

		newort=Transform({"type":"eman","az":final[0],"alt":final[1],"phi":final[2]})
		
		img=EMData(elem["filename"],elem["filenum"])
		img["xform.projection.old"]=img["xform.projection"]
		img["xform.projection"]=newort
		
		if verbose>1 : print "\n{:1.2f} {:1.2f} {:1.2f}\t{:1.2f} {:1.2f} {:1.2f} ".format(rd["az"],rd["alt"],rd["phi"],final[0],final[1],final[2])
		img.write_image(elem["filename"],elem["filenum"])

	return
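The call above passes qual as the objective; the objective signatures elsewhere on this page (e.g. get_score(olst, data) in Example #11) show that this Simplex invokes it as f(parameters, data). The real qual is defined elsewhere, so the following is only a hypothetical stand-in built from calls that appear in these snippets:

def qual(params, data):
	# hypothetical sketch: params is the trial [az, alt, phi]; data is [refvol, img] as passed above
	refvol, img = data
	t = Transform({"type":"eman", "az":params[0], "alt":params[1], "phi":params[2]})
	proj = refvol.project("standard", t)	# project the reference volume at the trial orientation
	return img.cmp("ccc", proj)		# smaller score is better for the minimizer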
Example #2
	def optimize(self, options):
		"""
		Method to perform optimization of movie alignment.

		@param namespace options :	"argparse" options from e2ddd_movie.py
		"""
		if self._optimized:
			print("Optimal alignment already determined.")
			return
		if options.minimizer == 'annealing':
			cs = Annealer(self, tmax=options.tmax, tmin=options.tmin, steps=options.steps, updates=options.steps, maxshift=options.maxshift, tolerance=options.tolerance)
			if options.presearch:
				if options.verbose: print("Determining the best annealing parameters")
				schedule = cs.auto(options.premins,options.presteps)
				cs.set_schedule(schedule)
			if options.verbose: print("Running simulated annealing\n")
			annealed = cs.anneal()
		if options.minimizer == 'simplex':
			if options.verbose: print("Initializing simplex minimization")
			#state = [t for tform in self.optimal_transforms for t in tform.get_trans_2d()]
			state = [np.random.randint(-4,4) for tform in self.optimal_transforms for t in tform.get_trans_2d()]
			sm = Simplex(self._compares,state,[1]*len(state),kC=options.kC,kE=options.kE,kR=options.kR,data=self)
			#print state
			#if options.verbose: print("Performing simplex minimization\n")
			result, error, iters = sm.minimize(options.epsilon,options.maxiters,monitor=1)
		if options.verbose:
			print("\n\nOptimal Frame Translations:\n")
			ts = [t for tform in self.optimal_transforms for t in tform.get_trans_2d()]
			for s in xrange(len(ts)):
				if s % 2 == 0: print("Frame {} \t( {:.2}, {:.2} )".format((s/2)+1,ts[s],ts[s+1]))
			print("")
		self._optimized = True
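A note on the kC, kE and kR keywords above: in the common pure-Python Nelder-Mead Simplex they are the contraction, expansion and reflection coefficients (defaults kC=0.5, kE=2, kR=-1), so passing them simply tunes the standard moves. Assuming that implementation, spelling the defaults out explicitly would read:

	sm = Simplex(self._compares, state, [1]*len(state), kR=-1, kE=2, kC=0.5, data=self)	# reflection, expansion, contraction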
Example #3
	def optimize(self, options):
		"""
		Method to perform optimization of movie alignment.

		@param namespace options :	"argparse" options from e2ddd_movie.py
		"""
		if self._optimized:
			print("Optimal alignment already determined.")
			return
		#if options.minimizer == 'annealing':
			#cs = Annealer(self, tmax=options.tmax, tmin=options.tmin, steps=options.steps, updates=options.steps, maxshift=options.maxshift, tolerance=options.tolerance)
			#if options.presearch:
				#if options.verbose: print("Determining the best annealing parameters")
				#schedule = cs.auto(options.premins,options.presteps)
				#cs.set_schedule(schedule)
			#if options.verbose: print("Running simulated annealing\n")
			#annealed = cs.anneal()
		#if options.minimizer == 'simplex':
		if options.verbose: print("Initializing simplex minimizer")
		#state = [t for tform in self.optimal_transforms for t in tform.get_trans_2d()]
		state = [np.random.randint(-4,4) for tform in self.optimal_transforms for t in tform.get_trans_2d()]
		sm = Simplex(self._compares,state,[1]*len(state),kC=options.kC,kE=options.kE,kR=options.kR,data=self)
		#print state
		if options.verbose: print("Performing simplex minimization\n")
		result, error, iters = sm.minimize(options.epsilon,options.maxiters,monitor=1)
		if options.verbose:
			print("\n\nOptimal Frame Translations:\n")
			ts = [t for tform in self.optimal_transforms for t in tform.get_trans_2d()]
			for s in xrange(len(ts)):
				if s % 2 == 0: print("Frame {} \t( {:.2}, {:.2} )".format((s/2)+1,ts[s],ts[s+1]))
			print("")
		self._optimized = True
Example #4
    def computeTrendTime_Simplex(self, serv_id, source_id, target):
        h_data = self.ndb.monitor_history.fetchRows(
            [('serv_id', serv_id),
             ('source_id', source_id)])

        now = time.time()
        # vals = map(lambda x: ((x.pstart - (now-86400))*200,x.value),h_data)
        vals = map(lambda x: (x.pstart, x.value), h_data)
        vals = vals + map(lambda x: (x.pend, x.value), h_data)

        def lin_f(x, par):
            m, b = par
            return (m * x) + b  # y = mx + b

        def simplex_f(args):
            err = 0.0
            for x, y in vals:
                res = abs(lin_f(x, args) - y) + 1
                err = err + (res * res)
            return err

        # m = (y-b) / x
        i_b = 0
        i_x, i_y = vals[0]
        i_m = (i_y - i_b) / i_x
        initial = [i_m, i_b]

        initial = [random.random(), random.random()]

        # use the linear regression to compute initial coefficients
        b, m = self._linCoeff(h_data)
        initial = [m, b]

        s = Simplex(simplex_f, initial,
                    [random.random(), random.random()])
        values, err, iter = s.minimize()
        log('args = ' + repr(values))
        log('error = ' + repr(err))
        log('iterations = ' + repr(iter))

        def lin_fsolve(y, par):
            m, b = par
            return (y - b) / m  # x = (y-b)/m

        at = lin_fsolve(float(target), values)
        time_to_target = at - time.time()
        log("time_to_target: " + str(time_to_target))
        return time_to_target
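Of the three successive assignments to initial above, only the last one (the linear-regression seed from self._linCoeff) takes effect; the slope-from-first-point and random guesses are dead code left over from earlier experiments. Note also that the per-parameter step sizes passed to Simplex are random, so every run starts with a different search scale.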
Example #5
	def optimize(self, epsilon = 0.001, maxiters = 250, verbose = 1):
		"""
		Method to perform optimization of movie alignment.
		
		@param float epsilon	: the convergence tolerance for the simplex optimizer
		@param int maxiters		: the maximum number of iterations to be computed by the simplex optimizer
		@param int verbose		: 1 or 0 specifying whether (or not) simplex optimization steps will be printed
		"""
		if verbose != 0: verbose = 1
		if self._optimized: 
			print("Optimal alignment already determined.")
			return
		nm=2*self.hdr['nimg']
		guess=[np.random.randint(self._min,self._max) for i in range(nm)]
		sm=Simplex(self._compares,guess,[5]*nm,data=self)
		mn=sm.minimize(epsilon = epsilon, maxiters = maxiters, monitor = verbose) 
		print("\n\nBest Parameters: {}\nError: {}\nIterations: {}\n".format(mn[0],mn[1],mn[2]))
		self._optimized = True
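All of these snippets share one small API: Simplex(func, starting_guess, per_parameter_increments) plus minimize(), which returns a (best_parameters, error, iterations) triple. A minimal self-contained sketch, assuming the common pure-Python implementation (the module name here is an assumption and may differ in your tree):

from simplex import Simplex	# assumed module name

def sq_err(params):
	# sum of squared residuals of y = m*x + b over a few fixed sample points
	m, b = params
	pts = [(0, 1.0), (1, 3.1), (2, 4.9), (3, 7.2)]
	return sum((m * x + b - y) ** 2 for x, y in pts)

s = Simplex(sq_err, [0.0, 0.0], [1.0, 1.0])
values, err, iters = s.minimize(epsilon=0.0001, maxiters=250, monitor=0)
print("m, b =", values)		# should approach roughly m=2, b=1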
Example #6
def try_simplex(h_data):
    now = time.time()
    # vals = map(lambda x: ((x.pstart - (now-86400))*200,x.value),h_data)
    vals = map(lambda x: (x.pstart, x.value), h_data)

    def lin_f(x, par):
        m, b = par
        return (m * x) + b  # y = mx + b

    def simplex_f(args):
        err = 0.0
        for x, y in vals:
            res = abs(lin_f(x, args) - y)
            err = err + res
        return err

        # m = (y-b) / x

    i_b = 0
    i_x, i_y = vals[0]
    i_m = (i_y - i_b) / i_x
    initial = [i_m, i_b]

    initial = [whrandom.random(), whrandom.random()]

    s = Simplex(simplex_f, initial, [whrandom.random(), whrandom.random()])
    values, err, iter = s.minimize()
    print "args = ", values
    print "error = ", err
    print "iterations = ", iter

    def lin_fsolve(y, par):
        m, b = par
        return (y - b) / m  # x = (y-b)/m

        # print 'today: ' , lin_f((time.time() - (now-86399))*200,values)

    print "today: ", lin_f(time.time(), values)
    print "next week: ", lin_f(time.time() + (86400 * 7), values)
    print "next year: ", lin_f(time.time() + (86400 * 365), values)

    print "hit 90 at: ", time.asctime(time.localtime(lin_fsolve(90, values)))
    print "hit 100 at: ", time.asctime(time.localtime(lin_fsolve(100, values)))
Example #7
    def Solve(self):
        print(" The intit BFS:")
        findBFS = self.BFS()
        if (findBFS != True):
            print("No have BFS!")
            return False
        print("Find the LP maximum solution: ")
        print("First Step: ")
        print_table(self.tbl)
        print('Base = ', self.base)
        simplex = Simplex(self.tbl, self.base)
        if (simplex.Solve() != True):
            print("Can't solve phase 2")
            return False
        self.tbl = simplex.tbl
        self.base = simplex.base

        print("LP optimum solution!")
        print_table(self.tbl)
        print('Base = ', self.base)

        return True
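Note that Simplex(self.tbl, self.base) here is a tableau-based linear-programming solver (two-phase simplex), unlike the Nelder-Mead Simplex in the other snippets. A quick sanity check for such a solver is to compare it with scipy on a small LP; the problem below is an illustrative assumption:

import numpy as np
from scipy.optimize import linprog

# maximize 3x + 5y subject to x <= 4, 2y <= 12, 3x + 2y <= 18, x,y >= 0
res = linprog(c=[-3.0, -5.0],  # negate the costs because linprog minimizes
              A_ub=np.array([[1.0, 0.0], [0.0, 2.0], [3.0, 2.0]]),
              b_ub=np.array([4.0, 12.0, 18.0]))
print(res.x, -res.fun)  # expect x=2, y=6, objective 36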
Example #8
def reorient(data, refvol, verbose=0):

    output = None  # deletes the results from the previous iteration if any

    if verbose > 0: print("Checking Slices (%d)" % len(data))

    ptcl = 0
    for i, elem in enumerate(data):
        # get the image to insert
        img = EMData(elem["filename"], elem["filenum"])
        if img["sigma"] == 0: continue

        rd = elem["xform"].get_rotation("eman")
        if verbose > 0: print(" %d/%d    \r" % (i, len(data)), end=' ')
        sys.stdout.flush()

        simp = Simplex(qual, [rd["az"], rd["alt"], rd["phi"]], [5, 5, 5],
                       data=[refvol, img])
        final = simp.minimize(maxiters=500, epsilon=.0001, monitor=0)[0]

        newort = Transform({
            "type": "eman",
            "az": final[0],
            "alt": final[1],
            "phi": final[2]
        })

        img = EMData(elem["filename"], elem["filenum"])
        img["xform.projection.old"] = img["xform.projection"]
        img["xform.projection"] = newort

        if verbose > 1:
            print("\n{:1.2f} {:1.2f} {:1.2f}\t{:1.2f} {:1.2f} {:1.2f} ".format(
                rd["az"], rd["alt"], rd["phi"], final[0], final[1], final[2]))
        img.write_image(elem["filename"], elem["filenum"])

    return
Example #9
    def __init__(self, n, m, a, b, constraints, objective_function, nature=False):
        super(SolvingWidget, self).__init__()
        self.solution = Simplex(n, m, a, b, constraints, [objective_function, 0], not nature)
        self.setWindowTitle("Simplex Solution")

        self.widget = QtWidgets.QWidget()  # main widget that contains content
        main_layout = QtWidgets.QVBoxLayout(self.widget)
        main_layout.addWidget(QtWidgets.QLabel("Solution"))

        phase1: List = self.solution.phase1_steps if self.solution.phase1_steps else False
        iteration: int = 1
        if phase1:
            for iteration_table in phase1:
                main_layout.addWidget(QtWidgets.QLabel(str(iteration)))
                iteration += 1
                main_layout.addWidget(self._construct_table(iteration_table))
        else:
            main_layout.addWidget(QtWidgets.QLabel("Phase One not needed !!!"))


        main_layout.addWidget(QtWidgets.QLabel("Phase Two"))
        iteration = 1
        phase2 = self.solution.phase2_steps if self.solution.phase2_steps else False
        if phase2:
            for iteration_table in phase2:
                main_layout.addWidget(QtWidgets.QLabel(str(iteration)))
                iteration += 1
                main_layout.addWidget(self._construct_table(iteration_table))
        else:
            main_layout.addWidget(QtWidgets.QLabel("No need for phase, you already on the optimal solution"))

        if len(self.solution.error_message):
            main_layout.addWidget(QtWidgets.QLabel(self.solution.error_message))
            print(self.solution.error_message)


        self.setWidget(self.widget)
        self.setWidgetResizable(True)
        self.showMaximized()
Example #10
#print csum2.keys()

print("{:1.1f} s\nAlignment optimization".format(time() - t0))
t0 = time()

# we start with a heavy filter, optimize, then repeat for successively less filtration
for scale in [0.02, 0.04, 0.07, 0.1, 0.5]:
    csum3 = {
        k: csum2[k].process("filter.lowpass.gauss", {"cutoff_abs": scale})
        for k in csum2.keys()
    }

    incr = [16] * len(locs)
    incr[-1] = incr[-2] = 4  # if step is zero for last 2, it gets stuck as an outlier, so we just make the starting step smaller
    simp = Simplex(qual, locs, incr, data=csum3)
    locs = simp.minimize(maxiters=int(100 / scale), epsilon=.01)[0]
    locs = [int(floor(i * 10 + .5)) / 10.0 for i in locs]
    print(locs)
    if VERBOSE:
        out = open("path_{:02d}.txt".format(int(1.0 / scale)), "w")
        for i in xrange(0, len(locs), 2):
            out.write("%f\t%f\n" % (locs[i], locs[i + 1]))

# compute the quality of each frame
quals = [0] * n  # quality of each frame based on its correlation peak summed over all images
cen = csum2[(0, 1)]["nx"] / 2
for i in xrange(n - 1):
    for j in xrange(i + 1, n):
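        # the fragment is truncated here; in the full source (see Example #17) the loop
        # body accumulates per-frame quality from the interpolated CCF peaks:
        val = csum2[(i, j)].sget_value_at_interp(
            int(cen + locs[j * 2] - locs[i * 2]),
            int(cen + locs[j * 2 + 1] - locs[i * 2 + 1])) * sqrt(
                float(n - fabs(i - j)) / n)
        quals[i] += val
        quals[j] += val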
Example #11
def refine_align(job, ret):
    clskeep = .7
    #print job
    curidx = job["curidx"]
    ptcl_ori = job["ptcl_ori"]
    rings = job["rings"]
    ptclfile = job["ptclfile"]
    sli = job["sli"]
    mapft = job["mapft"]
    e3d = job["3dmap"]
    sz = mapft.shape[0]

    if job["usesimmx"]:
        idx = job["idx"]
        proj_oris = job["proj_oris"]
        oo = get_transform_simx(curidx[idx], job["amin"], job["simmx"],
                                proj_oris)
        oi = oo * ptcl_ori[curidx[idx]].inverse()

    else:
        tr0 = Transform(job["trans0"])
        oi = tr0 * ptcl_ori[curidx[0]].inverse()

    oi = oi.get_params("eman")
    oilst = [oi["tx"], oi["ty"], oi["tz"], oi["alt"], oi["az"], oi["phi"]]

    imgs = []
    for i in curidx:
        e = EMData(ptclfile, i)
        e.process_inplace("filter.lowpass.gauss",
                          {"cutoff_freq": old_div(1.0, 30.)})
        e.process_inplace("normalize")
        imgs.append(e)
    # score=0

    def get_score(olst, data):
        score = []
        oi = Transform({
            "alt": olst[3],
            "az": olst[4],
            "phi": olst[5],
            "type": "eman"
        })
        vv = [olst[0], olst[1], olst[2]]

        for i in range(len(curidx)):

            tr = oi * ptcl_ori[curidx[i]]

            #pp= e3d.project("standard", tr)

            surf = np.tensordot(np.asarray(tr.get_matrix_4x4()).reshape(
                4, 4)[:3, :3],
                                sli,
                                axes=(0, 0))
            ind = (surf + old_div(sz, 2)).astype(int)
            ind = np.clip(ind, 0, sz - 1)
            imft = mapft[ind[2], ind[1], ind[0]]
            v = tr.transform(vv)
            img = get_img(imft).T
            pp = from_numpy(img)
            pp.translate(v[0], v[1], 0)
            #score.append(imgs[i].cmp("frc", pp,{'maxres':30}))
            score.append(imgs[i].cmp("ccc", pp))

            #e=imgs[i].copy()
            #e.translate(-v[0],-v[1],0)

            #tt=get_fft(e.numpy())
            #xreal=imft.real
            #ximag=imft.imag
            #treal=tt.real
            #timag=tt.imag

            #it1=xreal**2+ximag**2
            #it2=treal**2+timag**2
            #it1ring=np.sqrt(np.tensordot(it1,rings))
            #it2ring=np.sqrt(np.tensordot(it2,rings))
            #nrm=it1ring*it2ring

            #loss= - (np.tensordot((xreal*treal) + (ximag*timag),rings)/nrm)
            #score.append(np.mean(loss[:10]))

        #score/=float(len(curidx))
        score = np.sort(score)[:int(len(curidx) * clskeep)]
        score = np.mean(score)
        #print score

        return score

    incr = [20, 20, 20, 90, 180, 180]

    #for k in range(10000):
    #if k%100==0: print k/100
    #get_score(oilst,0)
    simp = Simplex(get_score, np.array(oilst), incr)

    locs = simp.minimize(maxiters=100, epsilon=.001, monitor=0)

    olst = locs[0]
    #gc.collect()
    print(job["modelid"], locs[1], locs[2])
    ret.put(olst)
    return olst
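For reference, simp.minimize() above returns a (best_parameters, best_score, iterations) triple, which is why locs[1] and locs[2] are printed alongside the model id; a short unpacking sketch:

    best, score, niter = simp.minimize(maxiters=100, epsilon=.001, monitor=0)
    tx, ty, tz, alt, az, phi = best  # the refined vector that is fed back via ret.put()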
Example #12
def main():
	global tdim,pdim,tdim2,pdim2,sfac
	global cmp_probe,cmp_target
	progname = os.path.basename(sys.argv[0])
	usage = """prog [options] target.mrc probe.mrc
	
Locates the best 'docking' locations for a small probe in a large target map. Note that the probe
should be in a box barely large enough for it. The target may be arbitrarily padded. For best speed
both box sizes should be multiples of 8."""

	parser = EMArgumentParser(usage=usage,version=EMANVERSION)

	parser.add_argument("--shrink", "-S", type=int, help="shrink factor for initial search, default=auto", default=0)
	parser.add_argument("--num", "-N", type=int, help="Number of initial alternative positions, default=5", default=5)
	parser.add_argument("--epsilon","-E", type=float,help="final target accuracy, default=.01",default=.01)
	parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="verbose level [0-9], higner number means higher level of verboseness")

	print "WARNING: This program is currently considered experimental. Contact [email protected] before using it for any serious project"
	
	(options, args) = parser.parse_args()
	if len(args)<2 : parser.error("Input and output files required")
	try: chains=options.chains
	except: chains=None
	logid=E2init(sys.argv,options.ppid)
	
	try : infile=open(args[0],"r")
	except : parser.error("Cannot open input file")
	

	
	# read the target and probe
	target=EMData()
	target.read_image(args[0])
	
	
	apix=target["apix_x"]
	probe=EMData()
	probefilename=args[1]
	# support pdb format
	if args[1].endswith(".pdb"):
		print "e2pdb2mrc.py {s} probe.mrc -R 10 --het --apix={a}>tmp.txt".format(s=args[1],a=apix)
		os.system("e2pdb2mrc.py {s} probe.mrc -R 10 --het --apix={a}>tmp.txt".format(s=args[1],a=apix))
		tmp=open('tmp.txt')
		lines=tmp.readlines()
		tmp.close()
		cent=[]
		for l in range(len(lines)):
			if lines[l].startswith("Bounding box"):
				break
		for q in range(3):
			#print lines[q+l][17:-1].split('-')
			t=[float(i) for i in (lines[q+l][17:-1].split(' - '))]
			#print t
			cent.append((t[0]+t[1])/2)
		
		probefilename="probe.mrc"
	else:
		probefilename=args[1]
	probe.read_image(probefilename)
	
		
	tdim=(target.get_xsize(),target.get_ysize(),target.get_zsize())
	pdim=(probe.get_xsize(),probe.get_ysize(),probe.get_zsize())
	
	if (pdim[0]>tdim[0] or pdim[1]>tdim[1] or pdim[2]>tdim[2]):
		print "Probe must fit within target"
		exit(1)
	target.process_inplace("normalize.unitsum")
	target.mult(10000)
	
	# shrink both by some factor which keeps the smallest axis of the probe at least 10 pixels
	# we'll have to reread the files if we want to recover the unscaled images
#	sfac=int(floor(min(pdim)/10.0))
	if options.shrink>0 : sfac=options.shrink
	else : sfac=int(floor(min(pdim)/12.0))
	print "Shrink by %d"%sfac
	target.process_inplace("math.meanshrink",{"n":sfac})
	probe.process_inplace("math.meanshrink",{"n":sfac})
	tdim2=(target.get_xsize(),target.get_ysize(),target.get_zsize())
	pdim2=(probe.get_xsize(),probe.get_ysize(),probe.get_zsize())
#	print (pdim2[0]-tdim2[0])/2,(pdim2[1]-tdim2[1])/2,(pdim2[2]-tdim2[2])/2,tdim2[0],tdim2[1],tdim2[2]
	probe.process_inplace("normalize.edgemean")

	probeclip=probe.get_clip(Region((pdim2[0]-tdim2[0])/2,(pdim2[1]-tdim2[1])/2,(pdim2[2]-tdim2[2])/2,tdim2[0],tdim2[1],tdim2[2]))
	#roughang=[(0,0)]

	#roughang=[(0,0),(45,0),(45,90),(45,180),(45,270),(90,0),(90,60),(90,120),(90,180),(90,240),(90,300),(135,0),(135,90),(135,180),(135,270),(180,0)]
	roughang=[(0,0),(30,0),(30,90),(30,180),(30,270),(60,0),(60,45),(60,90),(60,135),(60,180),(60,225),(60,270),(60,315),
	(90,0),(90,30),(90,60),(90,90),(90,120),(90,150),(90,180),(90,210),(90,240),(90,270),(90,300),(90,330),
	(180,0),(150,0),(150,90),(150,180),(150,270),(120,0),(120,45),(120,90),(120,135),(120,180),(120,225),(120,270),(120,315)]

#	Log.logger().set_level(Log.LogLevel.DEBUG_LOG)
	
	print "Searching for candidate locations in reduced map"
	edge=max(pdim2)/2		# technically this should be max(pdim), but generally there is some padding in the probe model, and this is relatively harmless
	print "edge ",edge
	best=[]
	sum=probeclip.copy_head()
	sum.to_zero()
	for a1,a2 in roughang:
		for a3 in range(0,360,30):
			prr=probeclip.copy()
			prr.rotate(a1,a2,float(a3))
			#prr.write_image('prr.%0d%0d%0d.mrc'%(a1,a2,a3))
			
			ccf=target.calc_ccf(prr,fp_flag.CIRCULANT,1)
			mean=float(ccf.get_attr("mean"))
			sig=float(ccf.get_attr("sigma"))
			ccf.process_inplace("mask.zeroedge3d",{"x0":edge,"x1":edge,"y0":edge,"y1":edge,"z0":edge,"z1":edge})
			sum+=ccf
			ccf.process_inplace("mask.onlypeaks",{"npeaks":0})		# only look at peak values in the CCF map
			#ccf.write_image('ccf.%0d%0d%0d.mrc'%(a1,a2,a3))
			vec=ccf.calc_highest_locations(mean+sig)
			
			for v in vec: best.append([v.value,a1,a2,a3,v.x-tdim2[0]/2,v.y-tdim2[1]/2,v.z-tdim2[2]/2,0])

#			print a1,a2,a3,mean+sig,float(ccf.get_attr("max")),len(vec)
	
	best.sort()		# this is a list of all reasonable candidate locations

	best.reverse()
	
	if len(best)<1:
		cm=target.calc_center_of_mass(0)
		best.append([0,0,0,0,cm[0]-tdim2[0]/2,cm[1]-tdim2[1]/2,cm[2]-tdim2[2]/2,0])
	print len(best)," possible candidates"

	# this is designed to eliminate angular redundancies in peak location
	print best[0]
	print best[-1]
	if len(best)>10000:
		best=best[0:10000]
	#print best
	for ii in range(len(best)):
		for jj in range(ii+1,len(best)):
			i=best[ii]
			j=best[jj]
			if (i[4]-j[4])**2+(i[5]-j[5])**2+(i[6]-j[6])**2>8.8 : continue
			if j[0]==i[0] : i[7]=1
	for i in best:
		for j in best:
			if (i[4]-j[4])**2+(i[5]-j[5])**2+(i[6]-j[6])**2>8.8 : continue
			if j[0]>i[0] : i[7]=1
			
	
	best2=[]
	for i in best:
		if not i[7]: best2.append([i[0],i[1],i[2],i[3],i[4]*sfac,i[5]*sfac,i[6]*sfac,i[7]])

	# now we find peaks in the sum of all CCF calculations, and find the best angle associated with each peak
	#sum.process_inplace("mask.onlypeaks",{"npeaks":0})
	#sum.write_image("sum.mrc")
	#vec=sum.calc_highest_locations(mean+sig+.0000001)
	#best2=[]
	#for v in vec:
		#print "%5.1f  %5.1f  %5.1f"%(v.x*sfac-tdim[0]/2,v.y*sfac-tdim[1]/2,v.z*sfac-tdim[2]/2)
		#for i in best:
			#if i[4]+tdim2[0]/2==v.x and i[5]+tdim2[1]/2==v.y and i[6]+tdim2[2]/2==v.z :
				#best2.append([i[0],i[1],i[2],i[3],i[4]*sfac,i[5]*sfac,i[6]*sfac,i[7]])
				#break

	best2.sort()
	best2.reverse()
	best2=best2[0:options.num]
	print len(best2), " final candidates"
	print "Qual     \talt\taz\tphi\tdx\tdy\tdz\t"
	for i in best2: 
		print "%1.5f  \t%1.3f\t%1.3f\t%1.3f\t%1.1f\t%1.1f\t%1.1f"%(-i[0],i[1],i[2],i[3],i[4],i[5],i[6])
	#exit()
	# try to improve the angles for each position
	print "\nOptimize each candidate in the reduced map with multiple angle trials"
	print "Qual     \talt\taz\tphi\tdx\tdy\tdz\t"
	cmp_target=target
	cmp_probe=probe
	for j in range(len(best2)):
		print j," --------"
		tries=[[0,0],[0,0],[0,0],[0,0]]
		testang=((0,0),(180.0,0),(0,180.0),(180.0,180.0))	# modify the 'best' angle a few times to try to find a better minimum
		for k in range(4):
			guess=best2[j][1:7]
			guess[0]+=testang[k][0]
			guess[1]+=testang[k][1]
			sm=Simplex(compares,guess,[15,15,15,5,5,5])
			m=sm.minimize(monitor=0,epsilon=.01)
			tries[k][0]=m[1]
			tries[k][1]=m[0]
			print "%1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f"%(-tries[k][0],tries[k][1][0],tries[k][1][1],tries[k][1][2],
				tries[k][1][3],tries[k][1][4],tries[k][1][5])
		best2[j][1:7]=min(tries)[1]		# best of the 4 angles we started with
	
	# reread the original images
	target.read_image(args[0])
	probe.read_image(probefilename)
	probe.process_inplace("normalize.unitsum")
	probe.mult(10000)
	
	cmp_target=target
	cmp_probe=probe
	
#	for i in best2:
#		c=probe.get_clip(Region((pdim[0]-tdim[0])/2,(pdim[1]-tdim[1])/2,(pdim[2]-tdim[2])/2,tdim[0],tdim[1],tdim[2]))
#		c.rotate_translate(*i[1:7])
#		c.write_image("z.%02d.mrc"%best2.index(i))
	
	print "Final optimization of each candidate"
	final=[]
	for j in range(len(best2)):
		sm=Simplex(compare,best2[j][1:7],[.5,.5,.5,2.,2.,2.])
		bt=sm.minimize(epsilon=options.epsilon)
		b=bt[0]
		print "\n%1.2f\t(%5.2f  %5.2f  %5.2f    %5.1f  %5.1f  %5.1f)"%(-bt[1],b[0],b[1],b[2],b[3],b[4],b[5])
		final.append((bt[1],b))
	
	print "\n\nFinal Results"
	print "Qual     \talt\taz\tphi\tdx\tdy\tdz\t"
	out=open("foldfitter.out","w")
	final.sort()
	for i,j in enumerate(final):
		b=j[1]
		print "%d. %1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f"%(i,-j[0],b[0],b[1],b[2],b[3],b[4],b[5])
		out.write("%d. %1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f\n"%(i,-j[0],b[0],b[1],b[2],b[3],b[4],b[5]))
		
		t=Transform()
		#t.set_pre_trans((b[3]+tdim[0]/2,b[4]+tdim[1]/2,b[5]+tdim[2]/2),b[0],b[1],b[2],(0,0,0))
		t.set_pre_trans((b[3]+tdim[0]/2,b[4]+tdim[1]/2,b[5]+tdim[2]/2))
		t.set_rotation({'type':'eman', 'az':b[0], 'alt':b[1], 'phi':b[2]})
		#t.set_trans((0,0,0))
		#t.set_trans((b[3]+tdim[0]/2,b[4]+tdim[1]/2,b[5]+tdim[2]/2))
		
		s=Transform()
		t=Transform()
		s.set_rotation({'type':'eman', 'az':b[0], 'alt':b[1], 'phi':b[2]})
		s.set_trans((b[3],b[4],b[5]))
		print (pdim[0]-tdim[0])/2,(pdim[1]-tdim[1])/2,(pdim[2]-tdim[2])/2
		pc=probe.get_clip(Region((pdim[0]-tdim[0])/2,(pdim[1]-tdim[1])/2,(pdim[2]-tdim[2])/2,tdim[0],tdim[1],tdim[2]))
		pc.transform(s)
		if target["MRC.nxstart"]==0 and target["MRC.nystart"]==0:
			shx=target['origin_x']
			shy=target['origin_y']
			shz=target['origin_z']
		else:
			shx=target["MRC.nxstart"]*target["apix_x"]
			shy=target["MRC.nystart"]*target["apix_x"]
			shz=target["MRC.nzstart"]*target["apix_x"]
		b[3]=b[3]*apix-pc['origin_x']+shx
		b[4]=b[4]*apix-pc['origin_y']+shy
		b[5]=b[5]*apix-pc["origin_z"]+shz
		#pc['origin_x']=target["MRC.nxstart"]*target["apix_x"]
		#pc['origin_y']=target["MRC.nystart"]*target["apix_x"]
		#pc["origin_z"]=target["MRC.nzstart"]*target["apix_x"]
		#pc.write_image("tst.mrc")
		t.set_rotation({'type':'eman', 'az':b[0], 'alt':b[1], 'phi':b[2]})
		t.set_trans((b[3],b[4],b[5]))
		if args[1].endswith(".pdb"):
			pdb_transform(args[1],'final.%02d.pdb'%i,t,cent)	# cent is only defined for pdb input


	print ncmp," total comparisons"
	out.close()
Example #13
]

model = cp.Maximize(cp.sum(3 * x + 5 * y))

prob = cp.Problem(model, constraints)

result = prob.solve()

x = x.value
y = y.value

cvxpy_result = np.array([x, y])

##Simplex

problema = Simplex(c, A, b, problem='Max')
method_result, opt, status = problema.solve()

print('Test against the scipy package')
print(method_result == approx(scipy_result, abs=1e-8, rel=1e-8))

print('Test against the pulp package')
print(method_result == approx(pulp_result, abs=1e-8, rel=1e-8))

print('Test against the cvxpy package')
print(method_result == approx(cvxpy_result, abs=1e-6, rel=1e-6))
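The fragment above compares a hand-written Simplex against scipy, pulp and cvxpy on one LP. A minimal self-contained version of the cvxpy leg, with illustrative data standing in for the truncated c, A, b definitions:

import cvxpy as cp
import numpy as np

x = cp.Variable(nonneg=True)
y = cp.Variable(nonneg=True)
constraints = [x <= 4, 2 * y <= 12, 3 * x + 2 * y <= 18]
prob = cp.Problem(cp.Maximize(3 * x + 5 * y), constraints)
prob.solve()
cvxpy_result = np.array([x.value, y.value])  # expect approximately [2, 6]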

#Minimization case

##Scipy
Example #14
def main():
    global tdim, pdim, tdim2, pdim2, sfac
    global cmp_probe, cmp_target
    progname = os.path.basename(sys.argv[0])
    usage = """prog [options] target.mrc probe.mrc
	
Locates the best 'docking' locations for a small probe in a large target map. Note that the probe
should be in a box barely large enough for it. The target may be arbitrarily padded. For best speed
both box sizes should be multiples of 8."""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)

    parser.add_argument("--shrink",
                        "-S",
                        type=int,
                        help="shrink factor for initial search, default=auto",
                        default=0)
    parser.add_argument(
        "--num",
        "-N",
        type=int,
        help="Number of initial alternative positions, default=5",
        default=5)
    parser.add_argument("--epsilon",
                        "-E",
                        type=float,
                        help="final target accuracy, default=.01",
                        default=.01)
    parser.add_argument(
        "--ppid",
        type=int,
        help="Set the PID of the parent process, used for cross platform PPID",
        default=-1)
    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        default=0,
        help=
        "verbose level [0-9], higner number means higher level of verboseness")

    print(
        "WARNING: This program is currently considered experimental. Contact [email protected] before using it for any serious project"
    )

    (options, args) = parser.parse_args()
    if len(args) < 2: parser.error("Input and output files required")
    try:
        chains = options.chains
    except:
        chains = None
    logid = E2init(sys.argv, options.ppid)

    try:
        infile = open(args[0], "r")
    except:
        parser.error("Cannot open input file")

    # read the target and probe
    target = EMData()
    target.read_image(args[0])

    apix = target["apix_x"]
    probe = EMData()
    probefilename = args[1]
    # support pdb format
    if args[1].endswith(".pdb"):
        print(
            "e2pdb2mrc.py {s} probe.mrc -R 10 --het --apix={a}>tmp.txt".format(
                s=args[1], a=apix))
        os.system(
            "e2pdb2mrc.py {s} probe.mrc -R 10 --het --apix={a}>tmp.txt".format(
                s=args[1], a=apix))
        tmp = open('tmp.txt')
        lines = tmp.readlines()
        tmp.close()
        cent = []
        for l in range(len(lines)):
            if lines[l].startswith("Bounding box"):
                break
        for q in range(3):
            #print lines[q+l][17:-1].split('-')
            t = [float(i) for i in (lines[q + l][17:-1].split(' - '))]
            #print t
            cent.append(old_div((t[0] + t[1]), 2))

        probefilename = "probe.mrc"
    else:
        probefilename = args[1]
    probe.read_image(probefilename)

    tdim = (target.get_xsize(), target.get_ysize(), target.get_zsize())
    pdim = (probe.get_xsize(), probe.get_ysize(), probe.get_zsize())

    if (pdim[0] > tdim[0] or pdim[1] > tdim[1] or pdim[2] > tdim[2]):
        print("Probe must fit within target")
        exit(1)
    target.process_inplace("normalize.unitsum")
    target.mult(10000)

    # shrink both by some factor which keeps the smallest axis of the probe at least 10 pixels
    # we'll have to reread the files if we want to recover the unscaled images
    #	sfac=int(floor(min(pdim)/10.0))
    if options.shrink > 0: sfac = options.shrink
    else: sfac = int(floor(old_div(min(pdim), 12.0)))
    print("Shrink by %d" % sfac)
    target.process_inplace("math.meanshrink", {"n": sfac})
    probe.process_inplace("math.meanshrink", {"n": sfac})
    tdim2 = (target.get_xsize(), target.get_ysize(), target.get_zsize())
    pdim2 = (probe.get_xsize(), probe.get_ysize(), probe.get_zsize())
    #	print (pdim2[0]-tdim2[0])/2,(pdim2[1]-tdim2[1])/2,(pdim2[2]-tdim2[2])/2,tdim2[0],tdim2[1],tdim2[2]
    probe.process_inplace("normalize.edgemean")

    probeclip = probe.get_clip(
        Region(old_div((pdim2[0] - tdim2[0]), 2),
               old_div((pdim2[1] - tdim2[1]), 2),
               old_div((pdim2[2] - tdim2[2]), 2), tdim2[0], tdim2[1],
               tdim2[2]))
    #roughang=[(0,0)]

    #roughang=[(0,0),(45,0),(45,90),(45,180),(45,270),(90,0),(90,60),(90,120),(90,180),(90,240),(90,300),(135,0),(135,90),(135,180),(135,270),(180,0)]
    roughang = [(0, 0), (30, 0), (30, 90), (30, 180), (30, 270), (60, 0),
                (60, 45), (60, 90), (60, 135), (60, 180), (60, 225), (60, 270),
                (60, 315), (90, 0), (90, 30), (90, 60), (90, 90), (90, 120),
                (90, 150), (90, 180), (90, 210), (90, 240),
                (90, 270), (90, 300), (90, 330), (180, 0), (150, 0), (150, 90),
                (150, 180), (150, 270), (120, 0), (120, 45), (120, 90),
                (120, 135), (120, 180), (120, 225), (120, 270), (120, 315)]

    #	Log.logger().set_level(Log.LogLevel.DEBUG_LOG)

    print("Searching for candidate locations in reduced map")
    edge = old_div(
        max(pdim2), 2
    )  # technically this should be max(pdim), but generally there is some padding in the probe model, and this is relatively harmless
    print("edge ", edge)
    best = []
    sum = probeclip.copy_head()
    sum.to_zero()
    for a1, a2 in roughang:
        for a3 in range(0, 360, 30):
            prr = probeclip.copy()
            prr.rotate(a1, a2, float(a3))
            #prr.write_image('prr.%0d%0d%0d.mrc'%(a1,a2,a3))

            ccf = target.calc_ccf(prr, fp_flag.CIRCULANT, 1)
            mean = float(ccf.get_attr("mean"))
            sig = float(ccf.get_attr("sigma"))
            ccf.process_inplace(
                "mask.zeroedge3d", {
                    "x0": edge,
                    "x1": edge,
                    "y0": edge,
                    "y1": edge,
                    "z0": edge,
                    "z1": edge
                })
            sum += ccf
            ccf.process_inplace(
                "mask.onlypeaks",
                {"npeaks": 0})  # only look at peak values in the CCF map
            #ccf.write_image('ccf.%0d%0d%0d.mrc'%(a1,a2,a3))
            vec = ccf.calc_highest_locations(mean + sig)

            for v in vec:
                best.append([
                    v.value, a1, a2, a3, v.x - old_div(tdim2[0], 2),
                    v.y - old_div(tdim2[1], 2), v.z - old_div(tdim2[2], 2), 0
                ])

#			print a1,a2,a3,mean+sig,float(ccf.get_attr("max")),len(vec)

    best.sort()  # this is a list of all reasonable candidate locations

    best.reverse()

    if len(best) < 1:
        cm = target.calc_center_of_mass(0)
        best.append([
            0, 0, 0, 0, cm[0] - old_div(tdim2[0], 2),
            cm[1] - old_div(tdim2[1], 2), cm[2] - old_div(tdim2[2], 2), 0
        ])
    print(len(best), " possible candidates")

    # this is designed to eliminate angular redundancies in peak location
    print(best[0])
    print(best[-1])
    if len(best) > 10000:
        best = best[0:10000]
    #print best
    for ii in range(len(best)):
        for jj in range(ii + 1, len(best)):
            i = best[ii]
            j = best[jj]
            if (i[4] - j[4])**2 + (i[5] - j[5])**2 + (i[6] - j[6])**2 > 8.8:
                continue
            if j[0] == i[0]: i[7] = 1
    for i in best:
        for j in best:
            if (i[4] - j[4])**2 + (i[5] - j[5])**2 + (i[6] - j[6])**2 > 8.8:
                continue
            if j[0] > i[0]: i[7] = 1

    best2 = []
    for i in best:
        if not i[7]:
            best2.append([
                i[0], i[1], i[2], i[3], i[4] * sfac, i[5] * sfac, i[6] * sfac,
                i[7]
            ])

    # now we find peaks in the sum of all CCF calculations, and find the best angle associated with each peak
    #sum.process_inplace("mask.onlypeaks",{"npeaks":0})
    #sum.write_image("sum.mrc")
    #vec=sum.calc_highest_locations(mean+sig+.0000001)
    #best2=[]
    #for v in vec:
    #print "%5.1f  %5.1f  %5.1f"%(v.x*sfac-tdim[0]/2,v.y*sfac-tdim[1]/2,v.z*sfac-tdim[2]/2)
    #for i in best:
    #if i[4]+tdim2[0]/2==v.x and i[5]+tdim2[1]/2==v.y and i[6]+tdim2[2]/2==v.z :
    #best2.append([i[0],i[1],i[2],i[3],i[4]*sfac,i[5]*sfac,i[6]*sfac,i[7]])
    #break

    best2.sort()
    best2.reverse()
    best2 = best2[0:options.num]
    print(len(best2), " final candidates")
    print("Qual     \talt\taz\tphi\tdx\tdy\tdz\t")
    for i in best2:
        print("%1.5f  \t%1.3f\t%1.3f\t%1.3f\t%1.1f\t%1.1f\t%1.1f" %
              (-i[0], i[1], i[2], i[3], i[4], i[5], i[6]))
    #exit()
    # try to improve the angles for each position
    print(
        "\nOptimize each candidate in the reduced map with multiple angle trials"
    )
    print("Qual     \talt\taz\tphi\tdx\tdy\tdz\t")
    cmp_target = target
    cmp_probe = probe
    for j in range(len(best2)):
        print(j, " --------")
        tries = [[0, 0], [0, 0], [0, 0], [0, 0]]
        testang = (
            (0, 0), (180.0, 0), (0, 180.0), (180.0, 180.0)
        )  # modify the 'best' angle a few times to try to find a better minimum
        for k in range(4):
            guess = best2[j][1:7]
            guess[0] += testang[k][0]
            guess[1] += testang[k][1]
            sm = Simplex(compares, guess, [15, 15, 15, 5, 5, 5])
            m = sm.minimize(monitor=0, epsilon=.01)
            tries[k][0] = m[1]
            tries[k][1] = m[0]
            print(
                "%1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f" %
                (-tries[k][0], tries[k][1][0], tries[k][1][1], tries[k][1][2],
                 tries[k][1][3], tries[k][1][4], tries[k][1][5]))
        best2[j][1:7] = min(tries)[1]  # best of the 4 angles we started with

    # reread the original images
    target.read_image(args[0])
    probe.read_image(probefilename)
    probe.process_inplace("normalize.unitsum")
    probe.mult(10000)

    cmp_target = target
    cmp_probe = probe

    #	for i in best2:
    #		c=probe.get_clip(Region((pdim[0]-tdim[0])/2,(pdim[1]-tdim[1])/2,(pdim[2]-tdim[2])/2,tdim[0],tdim[1],tdim[2]))
    #		c.rotate_translate(*i[1:7])
    #		c.write_image("z.%02d.mrc"%best2.index(i))

    print("Final optimization of each candidate")
    final = []
    for j in range(len(best2)):
        sm = Simplex(compare, best2[j][1:7], [.5, .5, .5, 2., 2., 2.])
        bt = sm.minimize(epsilon=options.epsilon)
        b = bt[0]
        print("\n%1.2f\t(%5.2f  %5.2f  %5.2f    %5.1f  %5.1f  %5.1f)" %
              (-bt[1], b[0], b[1], b[2], b[3], b[4], b[5]))
        final.append((bt[1], b))

    print("\n\nFinal Results")
    print("Qual     \talt\taz\tphi\tdx\tdy\tdz\t")
    out = open("foldfitter.out", "w")
    final.sort()
    for i, j in enumerate(final):
        b = j[1]
        print("%d. %1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f" %
              (i, -j[0], b[0], b[1], b[2], b[3], b[4], b[5]))
        out.write("%d. %1.3f  \t%1.2f\t%1.2f\t%1.2f\t%1.1f\t%1.1f\t%1.1f\n" %
                  (i, -j[0], b[0], b[1], b[2], b[3], b[4], b[5]))

        t = Transform()
        #t.set_pre_trans((b[3]+tdim[0]/2,b[4]+tdim[1]/2,b[5]+tdim[2]/2),b[0],b[1],b[2],(0,0,0))
        t.set_pre_trans(
            (b[3] + old_div(tdim[0], 2), b[4] + old_div(tdim[1], 2),
             b[5] + old_div(tdim[2], 2)))
        t.set_rotation({'type': 'eman', 'az': b[0], 'alt': b[1], 'phi': b[2]})
        #t.set_trans((0,0,0))
        #t.set_trans((b[3]+tdim[0]/2,b[4]+tdim[1]/2,b[5]+tdim[2]/2))

        s = Transform()
        t = Transform()
        s.set_rotation({'type': 'eman', 'az': b[0], 'alt': b[1], 'phi': b[2]})
        s.set_trans((b[3], b[4], b[5]))
        print(old_div((pdim[0] - tdim[0]), 2), old_div((pdim[1] - tdim[1]), 2),
              old_div((pdim[2] - tdim[2]), 2))
        pc = probe.get_clip(
            Region(old_div((pdim[0] - tdim[0]), 2),
                   old_div((pdim[1] - tdim[1]), 2),
                   old_div((pdim[2] - tdim[2]), 2), tdim[0], tdim[1], tdim[2]))
        pc.transform(s)
        shx = shy = shz = 0
        try:
            if target["MRC.nxstart"] == 0 and target["MRC.nystart"] == 0:
                shx = target['origin_x']
                shy = target['origin_y']
                shz = target['origin_z']
            else:
                shx = target["MRC.nxstart"] * target["apix_x"]
                shy = target["MRC.nystart"] * target["apix_x"]
                shz = target["MRC.nzstart"] * target["apix_x"]
        except:
            pass

        ox = oy = oz = 0
        try:
            ox = pc['origin_x']
            oy = pc['origin_y']
            oz = pc['origin_z']
        except:
            pass

        b[3] = b[3] * apix - ox + shx
        b[4] = b[4] * apix - oy + shy
        b[5] = b[5] * apix - oz + shz
        #pc['origin_x']=target["MRC.nxstart"]*target["apix_x"]
        #pc['origin_y']=target["MRC.nystart"]*target["apix_x"]
        #pc["origin_z"]=target["MRC.nzstart"]*target["apix_x"]
        #pc.write_image("tst.mrc")
        t.set_rotation({'type': 'eman', 'az': b[0], 'alt': b[1], 'phi': b[2]})
        t.set_trans((b[3], b[4], b[5]))
        if args[1].endswith(".pdb"):
            pdb_transform(args[1], 'final.%02d.pdb' % i, t, cent)

    print(ncmp, " total comparisons")
    out.close()
Example #15
def main():
	global debug,logid
	progname = os.path.basename(sys.argv[0])
	usage = """%prog [options] <input stack/image> ...
	
Various CTF-related operations on images, including automatic fitting. Note that automatic fitting is limited to 5 microns
underfocus at most. Input particles should be unmasked and unfiltered. A minimum of ~20% padding around the
particles is required for background extraction, even if this brings the edge of another particle into the box in some cases.
Particles should be reasonably well centered. Can also optionally phase flip and Wiener filter particles. Wiener filtration comes
after phase-flipping, so if phase flipping is performed Wiener filtered particles will also be phase-flipped. Note that both
operations are performed on oversampled images if specified (though final real-space images are clipped back to their original
size). Increasing padding during the particle picking process will improve the accuracy of phase-flipping, particularly for
images far from focus."""

	parser = OptionParser(usage=usage,version=EMANVERSION)

	parser.add_option("--gui",action="store_true",help="Start the GUI for interactive fitting",default=False)
	parser.add_option("--auto_fit",action="store_true",help="Runs automated CTF fitting on the input images",default=False)
	parser.add_option("--bgmask",type="int",help="Compute the background power spectrum from the edge of the image, specify a mask radius in pixels which would largely mask out the particles. Default is boxsize/2.",default=0)
	parser.add_option("--apix",type="float",help="Angstroms per pixel for all images",default=0)
	parser.add_option("--voltage",type="float",help="Microscope voltage in KV",default=0)
	parser.add_option("--cs",type="float",help="Microscope Cs (spherical aberation)",default=0)
	parser.add_option("--ac",type="float",help="Amplitude contrast (percentage, default=10)",default=10)
	parser.add_option("--autohp",action="store_true",help="Automatic high pass filter of the SNR only to remove initial sharp peak, phase-flipped data is not directly affected (default false)",default=False)
	#parser.add_option("--invert",action="store_true",help="Invert the contrast of the particles in output files (default false)",default=False)
	parser.add_option("--nonorm",action="store_true",help="Suppress per image real-space normalization",default=False)
	parser.add_option("--nosmooth",action="store_true",help="Disable smoothing of the background (running-average of the log with adjustment at the zeroes of the CTF)",default=False)
	#parser.add_option("--phaseflip",action="store_true",help="Perform phase flipping after CTF determination and writes to specified file.",default=False)
	#parser.add_option("--wiener",action="store_true",help="Wiener filter (optionally phaseflipped) particles.",default=False)
	parser.add_option("--oversamp",type="int",help="Oversampling factor",default=1)
	parser.add_option("--sf",type="string",help="The name of a file containing a structure factor curve. Can improve B-factor determination.",default=None)
	parser.add_option("--debug",action="store_true",default=False)
	
	(options, args) = parser.parse_args()

	if len(args)<1 : parser.error("Input image required")
	
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()

	if options.auto_fit:
		if options.voltage==0 : parser.error("Please specify voltage")
		if options.cs==0 : parser.error("Please specify Cs")
	if options.apix==0 : print "Using A/pix from header"
		
	debug=options.debug

	global sfcurve
	if options.sf :
		sfcurve=XYData()
		sfcurve.read_file(options.sf)

	logid=E2init(sys.argv)

#	if options.oversamp>1 : options.apix/=float(options.oversamp)

	db_project=db_open_dict("bdb:project")
	db_parms=db_open_dict("bdb:e2ctf.parms")
	db_misc=db_open_dict("bdb:e2ctf.misc")

	options.filenames = args
	### Power spectrum and CTF fitting
	if options.auto_fit:
		img_sets=pspec_and_ctf_fit(options,debug) # converted to a function so as to work with the workflow
		
		### This computes the intensity of the background subtracted power spectrum at each CTF maximum for all sets
		global envelopes # needs to be a global for the Simplex minimizer
		# envelopes is essentially a cache of information that could be useful at later stages of refinement
		# according to Steven Ludtke
		for i in img_sets:
			envelopes.append(ctf_env_points(i[2],i[3],i[1]))

		# we use a simplex minimizer to try to rescale the individual sets to match as best they can
		scales=[1.0]*len(img_sets)
		if (len(img_sets)>3) :
			incr=[0.2]*len(img_sets)
			simp=Simplex(env_cmp,scales,incr)
			scales=simp.minimize(maxiters=1000)[0]
	#		print scales
			print " "

		# apply the final rescaling
		envelope=[]
		for i in range(len(scales)):
			cur=envelopes[i]
			for j in range(len(cur)):
				envelope.append((cur[j][0],cur[j][1]*scales[i]))

		envelope.sort()
		envelope=[i for i in envelope if i[1]>0]	# filter out all negative peak values

		db_misc=db_open_dict("bdb:e2ctf.misc")
		db_misc["envelope"]=envelope
		#db_close_dict("bdb:e2ctf.misc")

		#out=file("envelope.txt","w")
		#for i in envelope: out.write("%f\t%f\n"%(i[0],i[1]))
		#out.close()

	### GUI - user can update CTF parameters interactively
	if options.gui :
		img_sets = get_gui_arg_img_sets(options.filenames)
		if len(img_sets) == 0:
			E2end(logid)
			sys.exit(1)
		app=EMApp()
		gui=GUIctf(app,img_sets)
		gui.show_guis()
		app.exec_()

		print "done execution"

	### Process input files
	#if debug : print "Phase flipping / Wiener filtration"
	# write wiener filtered and/or phase flipped particle data to the local database
	#if options.phaseflip or options.wiener: # only put this if statement here to make the program flow obvious
	#	write_e2ctf_output(options) # converted to a function so to work with the workflow

	E2end(logid)
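The env_cmp objective above is defined elsewhere in e2ctf and reads the global envelopes list; conceptually it measures how well the per-image-set envelope points agree once each set is rescaled. A hypothetical stand-in, not the actual EMAN2 implementation:

def env_cmp(scales, data=None):
	# hypothetical: merge all envelope points after scaling, then penalize jumps
	pts = []
	for s, env in zip(scales, envelopes):
		pts += [(x, y * s) for x, y in env]
	pts.sort()
	return sum((y1 - y0) ** 2 for (_, y0), (_, y1) in zip(pts, pts[1:]))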
Example #16
def refine_align(job, ret):
    clskeep = 0.7
    # print job
    curidx = job["curidx"]
    ptcl_ori = job["ptcl_ori"]
    rings = job["rings"]
    ptclfile = job["ptclfile"]
    sli = job["sli"]
    mapft = job["mapft"]
    e3d = job["3dmap"]
    sz = mapft.shape[0]

    if job["usesimmx"]:
        idx = job["idx"]
        proj_oris = job["proj_oris"]
        oo = get_transform_simx(curidx[idx], job["amin"], job["simmx"], proj_oris)
        oi = oo * ptcl_ori[curidx[idx]].inverse()

    else:
        tr0 = Transform(job["trans0"])
        oi = tr0 * ptcl_ori[curidx[0]].inverse()

    oi = oi.get_params("eman")
    oilst = [oi["tx"], oi["ty"], oi["tz"], oi["alt"], oi["az"], oi["phi"]]

    imgs = []
    for i in curidx:
        e = EMData(ptclfile, i)
        e.process_inplace("filter.lowpass.gauss", {"cutoff_freq": 1.0 / 30.0})
        e.process_inplace("normalize")
        imgs.append(e)
        # score=0

    def get_score(olst, data):
        score = []
        oi = Transform({"alt": olst[3], "az": olst[4], "phi": olst[5], "type": "eman"})
        vv = [olst[0], olst[1], olst[2]]

        for i in range(len(curidx)):

            tr = oi * ptcl_ori[curidx[i]]

            # pp= e3d.project("standard", tr)

            surf = np.tensordot(np.asarray(tr.get_matrix_4x4()).reshape(4, 4)[:3, :3], sli, axes=(0, 0))
            ind = (surf + sz / 2).astype(int)
            ind = np.clip(ind, 0, sz - 1)
            imft = mapft[ind[2], ind[1], ind[0]]
            v = tr.transform(vv)
            img = get_img(imft).T
            pp = from_numpy(img)
            pp.translate(v[0], v[1], 0)
            # score.append(imgs[i].cmp("frc", pp,{'maxres':30}))
            score.append(imgs[i].cmp("ccc", pp))

            # e=imgs[i].copy()
            # e.translate(-v[0],-v[1],0)

            # tt=get_fft(e.numpy())
            # xreal=imft.real
            # ximag=imft.imag
            # treal=tt.real
            # timag=tt.imag

            # it1=xreal**2+ximag**2
            # it2=treal**2+timag**2
            # it1ring=np.sqrt(np.tensordot(it1,rings))
            # it2ring=np.sqrt(np.tensordot(it2,rings))
            # nrm=it1ring*it2ring

            # loss= - (np.tensordot((xreal*treal) + (ximag*timag),rings)/nrm)
            # score.append(np.mean(loss[:10]))

            # score/=float(len(curidx))
        score = np.sort(score)[: int(len(curidx) * clskeep)]
        score = np.mean(score)
        # print score

        return score

    incr = [20, 20, 20, 90, 180, 180]

    # for k in range(10000):
    # if k%100==0: print k/100
    # get_score(oilst,0)
    simp = Simplex(get_score, np.array(oilst), incr)

    locs = simp.minimize(maxiters=100, epsilon=0.001, monitor=0)

    olst = locs[0]
    # gc.collect()
    print job["modelid"], locs[1], locs[2]
    ret.put(olst)
    return olst
Example #17
def process_movie(fsp,dark,gain,first,flast,step,options):
		outname=fsp.rsplit(".",1)[0]+"_proc.hdf"		# always output to an HDF file. Output contents vary with options
		alioutname="micrographs/"+base_name(fsp)

		#if fsp[-4:].lower() in (".mrc","mrcs") :
		if fsp[-4:].lower() == ".mrc" :
			hdr=EMData(fsp,0,True)			# read header
			nx,ny=hdr["nx"],hdr["ny"]

		# bgsub and gain correct the stack

		outim=[]
		for ii in xrange(first,flast,step):
			if options.verbose:
				print " {}/{}   \r".format(ii-first+1,flast-first+1),
				sys.stdout.flush()

			#if fsp[-4:].lower() in (".mrc","mrcs") :
			if fsp[-4:].lower() == ".mrc" :
				im=EMData(fsp,0,False,Region(0,0,ii,nx,ny,1))
			else: im=EMData(fsp,ii)

			if dark!=None : im.sub(dark)
			if gain!=None : im.mult(gain)
			im.process_inplace("threshold.clampminmax",{"minval":0,"maxval":im["mean"]+im["sigma"]*3.5,"tozero":1})
			if options.fixbadpixels : im.process_inplace("threshold.outlier.localmean",{"sigma":3.5,"fix_zero":1})		# fixes clear outliers as well as values which were exactly zero

			#im.process_inplace("threshold.clampminmax.nsigma",{"nsigma":3.0})
#			im.mult(-1.0)
			if options.normalize : im.process_inplace("normalize.edgemean")

			if options.frames : im.write_image(outname[:-4]+"_corr.hdf",ii-first)
			outim.append(im)
			#im.write_image(outname,ii-first)

		nx=outim[0]["nx"]
		ny=outim[0]["ny"]

		# show a little movie of 5 averaged frames

		if options.movie>0 :
			mov=[]
			for i in xrange(options.movie+1,len(outim)):
				im=sum(outim[i-options.movie-1:i])
				#im.write_image("movie%d.hdf"%(i/5-1),0)
				#im.process_inplace("filter.lowpass.gauss",{"cutoff_freq":.02})
				mov.append(im)

			display(mov)

			#mov2=[]
			#for i in xrange(0,len(outim)-10,2):
				#im=sum(outim[i+5:i+10])-sum(outim[i:i+5])
				#mov2.append(im)

			#display(mov2)

			#mov=[i.get_clip(Region(1000,500,2048,2048)) for i in mov]
			#s=sum(mov)
#			fsc=[i.calc_fourier_shell_correlation(s)[1025:2050] for i in mov]
#			plot(fsc)

		# A simple average

		if options.simpleavg :
			if options.verbose : print "Simple average"
			avgr=Averagers.get("mean")
			for i in xrange(len(outim)):						# only use the first second for the unweighted average
				if options.verbose:
					print " {}/{}   \r".format(i+1,len(outim)),
					sys.stdout.flush()
				avgr.add_image(outim[i])
			print ""

			av=avgr.finish()
			if first!=1 or flast!=-1 : av.write_image(outname[:-4]+"_{}-{}_mean.hdf".format(first,flast),0)
			else: av.write_image(outname[:-4]+"_mean.hdf",0)

		# Generates different possibilites for resolution-weighted, but unaligned, averages

		xy=XYData()
		xy.set_size(2)
		xy.set_x(0,0)
		xy.set_y(0,1.0)
		xy.set_x(1,0.707)
		xy.set_y(1,0.0)
		if options.avgs :
			if options.verbose : print "Weighted average"
			normim=EMData(nx/2+1,ny)
			avgr=Averagers.get("weightedfourier",{"normimage":normim})
			for i in xrange(min(len(outim),25)):						# only use the first second for the unweighted average
				if options.verbose:
					print " {}/{}   \r".format(i+1,len(outim)),
					sys.stdout.flush()
				xy.set_y(1,1.0)					# no weighting
				outim[i]["avg_weight"]=xy
				avgr.add_image(outim[i])
			print ""

			av=avgr.finish()
			av.write_image(outname[:-4]+"_a.hdf",0)
#			display(normim)

			# linear weighting with shifting 0 cutoff

			xy.set_y(1,0.0)
			for i in xrange(len(outim)):
				if options.verbose:
					print " {}/{}   \r".format(i+1,len(outim)),
					sys.stdout.flush()
				xy.set_x(1,0.025+0.8*(len(outim)-i)/len(outim))
				outim[i]["avg_weight"]=xy
				avgr.add_image(outim[i])
			print ""

			av=avgr.finish()
			av.write_image(outname[:-4]+"_b.hdf",0)

			# exponential falloff with shifting width

			xy.set_size(64)
			for j in xrange(64): xy.set_x(j,0.8*j/64.0)
			for i in xrange(len(outim)):
				if options.verbose:
					print " {}/{}   \r".format(i+1,len(outim)),
					sys.stdout.flush()
				for j in xrange(64) : xy.set_y(j,exp(-j/(3.0+48.0*(len(outim)-i)/float(len(outim)))))
#				plot(xy)
				outim[i]["avg_weight"]=xy
				avgr.add_image(outim[i])
			print ""

			av=avgr.finish()
			av.write_image(outname[:-4]+"_c.hdf",0)

		if options.align_frames :
			n=len(outim)
			print(n)
			nx=outim[0]["nx"]
			ny=outim[0]["ny"]
			print("{} frames read {} x {}".format(n,nx,ny))

			ccfs=Queue.Queue(0)

			# prepare image data (outim) by clipping and FFT'ing all tiles
			# this is threaded as well
			immx=[0]*n
			thds = []
			for i in range(n):
				thd = threading.Thread(target=split_fft,args=(outim[i],i,options.optbox,options.optstep,ccfs))
				thds.append(thd)
			print("Precompute FFTs: {} threads".format(len(thds)))
			t0=time()

			thrtolaunch=0
			while thrtolaunch<len(thds) or threading.active_count()>1:
				if thrtolaunch<len(thds) :
					while (threading.active_count()==options.threads ) : sleep(.1)
			#		if options.verbose : print "Starting thread {}/{}".format(thrtolaunch,len(thds))
					thds[thrtolaunch].start()
					thrtolaunch+=1
				else: sleep(1)

				while not ccfs.empty():
					i,d=ccfs.get()
					immx[i]=d

			for th in thds: th.join()

			# create threads
			thds=[]
			i=0
			for ima in range(n-1):
				for imb in range(ima+1,n):
					thds.append(threading.Thread(target=calc_ccf,args=((ima,imb),options.optbox,options.optstep,immx[ima],immx[imb],ccfs)))
					i+=1

			print "{:1.1f} s\nCompute ccfs: {} threads".format(time()-t0,len(thds))
			t0=time()

			# here we run the threads and save the results, no actual alignment done here
			csum2={}
			thrtolaunch=0
			while thrtolaunch<len(thds) or threading.active_count()>1:
				# If we haven't launched all threads yet, then we wait for an empty slot, and launch another
				# note that it's ok that we wait here forever, since there can't be new results if an existing
				# thread hasn't finished.
				if thrtolaunch<len(thds) :
					while (threading.active_count()==options.threads ) : sleep(.1)
					#if options.verbose : print "Starting thread {}/{}".format(thrtolaunch,len(thds))
					thds[thrtolaunch].start()
					thrtolaunch+=1
				else:
					sleep(1)

				while not ccfs.empty():
					i,d=ccfs.get()
					csum2[i]=d

				if options.verbose:
					print "  {}/{} {}\r".format(thrtolaunch,len(thds),threading.active_count()),
					sys.stdout.flush()


			for th in thds: th.join()

			avgr=Averagers.get("minmax",{"max":0})
			avgr.add_image_list(csum2.values())
			csum=avgr.finish()
			#csum=sum(csum2.values())
			#csum.mult(1.0/len(csum2))
			#csum.process_inplace("normalize.edgemean")
			#display(csum)
			#csum.write_image("a.hdf",0)
			for i,k in enumerate(sorted(csum2.keys())):
				im=csum2[k]
			#	norm=im[BOX/2,BOX/2]/csum[BOX/2,BOX/2]
			#	norm=im.get_clip(Region(BOX/2-5,BOX/2-5,11,11))["mean"]/csum.get_clip(Region(BOX/2-5,BOX/2-5,11,11))["mean"]
			#	im.write_image("aa.hdf",i)

			# This has been disabled since it eliminates the peak for zero shift. Instead we try the zero/zero elimination hack
				norm=1.0
				im.sub(csum*norm)

				# set the 0,0 peak to the average of neighboring pixels to reduce fixed pattern noise issues (this worked poorly)
				# im[BOX/2,BOX/2]=(im[BOX/2-1,BOX/2]+im[BOX/2+1,BOX/2]+im[BOX/2,BOX/2+1]+im[BOX/2,BOX/2-1])

			#	s=im.process("math.sub.optimal",{"ref":csum,"ctfweight":0})

			#	im.write_image("a.hdf",i+1)
				# This is critical. Without this, after filtering we get too many false peaks
				thr=im["mean"]+im["sigma"]*1.5
				im.process_inplace("threshold.belowtozero",{"minval":thr})

			#####
			# Alignment code
			#####

			# array of x,y locations of each frame, all relative to the last frame in the series, which will always have 0,0 shift
			# we store the value for the last frame as well, as a convenience
			locs=[0]*(n*2)

			#print csum2.keys()

			print("{:1.1f} s\nAlignment optimization".format(time()-t0))
			t0=time()

			# we start with a heavy filter, optimize, then repeat for successively less filtration
			for scale in [0.02,0.04,0.07,0.1,0.5]:
				csum3={k:csum2[k].process("filter.lowpass.gauss",{"cutoff_abs":scale}) for k in csum2.keys()}

				incr=[16]*len(locs)
				incr[-1]=incr[-2]=4	# if step is zero for last 2, it gets stuck as an outlier, so we just make the starting step smaller
				simp=Simplex(qual,locs,incr,data=csum3)
				locs=simp.minimize(maxiters=int(100/scale),epsilon=.01)[0]
				locs=[int(floor(i*10+.5))/10.0 for i in locs]
				print locs
				if options.verbose > 7:
					out=file("{}_path_{:02d}.txt".format(outname[:-4],int(1.0/scale)),"w")
					for i in xrange(0,len(locs),2): out.write("%f\t%f\n"%(locs[i],locs[i+1]))

			# compute the quality of each frame
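			# each pair (i,j) contributes its CCF value at the optimized relative shift,
			# weighted by sqrt((n-|i-j|)/n) so widely separated (noisier) pairs count for less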
			quals=[0]*n			# quality of each frame based on its correlation peak summed over all images
			cen=csum2[(0,1)]["nx"]/2
			for i in xrange(n-1):
				for j in xrange(i+1,n):
					val=csum2[(i,j)].sget_value_at_interp(int(cen+locs[j*2]-locs[i*2]),int(cen+locs[j*2+1]-locs[i*2+1]))*sqrt(float(n-fabs(i-j))/n)
					quals[i]+=val
					quals[j]+=val

			# round for integer only shifting
			#locs=[int(floor(i+.5)) for i in locs]

			if options.noali:
				print "{:1.1f} s\nWrite unaligned".format(time()-t0)
				t0=time()


				#write out the unaligned average movie
				out=qsum(outim)
				out.write_image("{}__noali.hdf".format(outname[:-4]),0)

			print("Shift images ({})".format(time()-t0))
			t0=time()
			#write individual aligned frames
			for i,im in enumerate(outim):
				im.translate(int(floor(locs[i*2]+.5)),int(floor(locs[i*2+1]+.5)),0)
			#	im.write_image("a_all_ali.hdf",i)

			if options.allali:
				out=qsum(outim)
				out.write_image("{}__allali.hdf".format(alioutname),0)

			#out=sum(outim[5:15])	# FSC with the earlier frames instead of whole average
			# compute fsc between each aligned frame and the average
			# we tile this for better curves, since we don't need the detail
			fscq=[0]*n
			if options.optfsc:
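				# (assumes "out" was set above by one of the averaging options, e.g. --allali;
				# otherwise the tiling loop below has no average to compare against)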
				for i in range(n):
					rgnc=0
					fs=None			# reset the accumulated tile FSC for each frame
					for x in range(64,out["nx"]-192,64):
						for y in range(64,out["ny"]-192,64):
							rgnc+=1.0
							cmpto=out.get_clip(Region(x,y,64,64))
							cscen=outim[i].get_clip(Region(x,y,64,64))
							s,f=calcfsc(cmpto,cscen)
							f=array(f)
							try: fs+=f
							except: fs=f
					fs/=rgnc
					fs=list(fs)
					fscq[i]=qsum(fs[2:24])		# indexed assignment; append() left the preallocated zeros in use below

					Util.save_data(s[1],s[1]-s[0],fs[1:-1],"{}_fsc_{:02d}.txt".format(outname[:-4],i))

			print("{:1.1f} s\nSubsets".format(time()-t0))
			t0=time()
			# write translations and qualities
			db=js_open_dict(info_name(fsp))
			db["movieali_trans"]=locs
			db["movieali_qual"]=quals
			db["movie_name"]=fsp
			if gain:
				db["gain_name"]=gain["filename"]
				db["gain_id"]=gain["fileid"]
			if dark:
				db["dark_name"]=dark["filename"]
				db["dark_id"]=dark["fileid"]
			db.close()

			out=open("{}_info.txt".format(outname[:-4]),"w")
			out.write("#i,dx,dy,dr,rel dr,qual,(opt)fscqual\n")
			for i in range(n):
				out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(i,locs[i*2],locs[i*2+1],hypot(locs[i*2],locs[i*2+1]),hypot(locs[i*2]-locs[i*2-2],locs[i*2+1]-locs[i*2-1]),quals[i],fscq[i]))

			if options.goodali:
				thr=max(quals)*0.6	# keep frames with at least 60% of the best correlation quality
				best=[im for i,im in enumerate(outim) if quals[i]>thr]
				out=qsum(best)
				print "Keeping {}/{} frames".format(len(best),len(outim))
				out.write_image("{}__goodali.hdf".format(alioutname),0)

			if options.bestali:
				thr=max(quals)*0.75	# keep frames with at least 75% of the best correlation quality
				best=[im for i,im in enumerate(outim) if quals[i]>thr]
				out=qsum(best)
				print "Keeping {}/{} frames".format(len(best),len(outim))
				out.write_image("{}__bestali.hdf".format(alioutname),0)

			if options.ali4to14:
				# skip the first 4 frames then keep 10
				out=qsum(outim[4:14])
				out.write_image("{}__4-14.hdf".format(alioutname),0)

			# Write out the translated correlation maps for debugging
			#cen=csum2[(0,1)]["nx"]/2
			#n=len(locs)/2
			#for ii,k in enumerate(sorted(csum2.keys())):
			#	i,j=k
			#	csum2[k].translate(-int(locs[j*2]-locs[i*2]),-int(locs[j*2+1]-locs[i*2+1]),0)
			#	csum2[k].write_image("aa.hdf",ii)

			print "{:1.1f} s\nDone".format(time()-t0)
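
The launch-throttled worker pattern used above (start a new thread only when a slot frees up, draining the result queue while waiting) is generic. A minimal self-contained sketch of the same idea, with hypothetical worker and queue names standing in for split_fft/calc_ccf:

try: import Queue as queue			# Python 2
except ImportError: import queue	# Python 3
import threading
from time import sleep

def worker(i, results):
	results.put((i, i * i))			# stand-in for the real per-pair computation

results = queue.Queue(0)
thds = [threading.Thread(target=worker, args=(i, results)) for i in range(100)]
nthreads = 4						# plays the role of options.threads
out = {}

thrtolaunch = 0
while thrtolaunch < len(thds) or threading.active_count() > 1:
	if thrtolaunch < len(thds):
		while threading.active_count() > nthreads: sleep(.1)	# wait for a free slot
		thds[thrtolaunch].start()
		thrtolaunch += 1
	else: sleep(.1)
	while not results.empty():		# drain results as they arrive
		i, d = results.get()
		out[i] = d
for th in thds: th.join()
while not results.empty():			# catch anything queued after the last poll
	i, d = results.get()
	out[i] = d
print(len(out))						# expect 100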
Example #18
0
def process_movie(fsp, dark, gain, first, flast, step, options):
    outname = fsp.rsplit(
        ".", 1
    )[0] + "_proc.hdf"  # always output to an HDF file. Output contents vary with options
    alioutname = "micrographs/" + base_name(fsp)

    #if fsp[-4:].lower() in (".mrc","mrcs") :
    if fsp[-4:].lower() == ".mrc":  # (".mrc") without a comma is a plain string, not a tuple
        hdr = EMData(fsp, 0, True)  # read header
        nx, ny = hdr["nx"], hdr["ny"]

    # bgsub and gain correct the stack

    outim = []
    for ii in xrange(first, flast, step):
        if options.verbose:
            print " {}/{}   \r".format(ii - first + 1, flast - first + 1),
            sys.stdout.flush()

        #if fsp[-4:].lower() in (".mrc","mrcs") :
        if fsp[-4:].lower() == ".mrc":
            im = EMData(fsp, 0, False, Region(0, 0, ii, nx, ny, 1))
        else:
            im = EMData(fsp, ii)

        if dark != None: im.sub(dark)
        if gain != None: im.mult(gain)
        im.process_inplace("threshold.clampminmax", {
            "minval": 0,
            "maxval": im["mean"] + im["sigma"] * 3.5,
            "tozero": 1
        })
        if options.fixbadpixels:
            im.process_inplace(
                "threshold.outlier.localmean", {
                    "sigma": 3.5,
                    "fix_zero": 1
                }
            )  # fixes clear outliers as well as values which were exactly zero

        #im.process_inplace("threshold.clampminmax.nsigma",{"nsigma":3.0})
        #			im.mult(-1.0)
        if options.normalize: im.process_inplace("normalize.edgemean")

        if options.frames:
            im.write_image(outname[:-4] + "_corr.hdf", ii - first)
        outim.append(im)
        #im.write_image(outname,ii-first)

    nx = outim[0]["nx"]
    ny = outim[0]["ny"]

    # show a little movie of 5 averaged frames

    if options.movie > 0:
        mov = []
        for i in xrange(options.movie + 1, len(outim)):
            im = sum(outim[i - options.movie - 1:i])
            #im.write_image("movie%d.hdf"%(i/5-1),0)
            #im.process_inplace("filter.lowpass.gauss",{"cutoff_freq":.02})
            mov.append(im)

        display(mov)

        #mov2=[]
        #for i in xrange(0,len(outim)-10,2):
        #im=sum(outim[i+5:i+10])-sum(outim[i:i+5])
        #mov2.append(im)

        #display(mov2)

        #mov=[i.get_clip(Region(1000,500,2048,2048)) for i in mov]
        #s=sum(mov)
#			fsc=[i.calc_fourier_shell_correlation(s)[1025:2050] for i in mov]
#			plot(fsc)

# A simple average

    if options.simpleavg:
        if options.verbose: print "Simple average"
        avgr = Averagers.get("mean")
        for i in xrange(len(outim)):  # a plain unweighted average over all frames
            if options.verbose:
                print " {}/{}   \r".format(i + 1, len(outim)),
                sys.stdout.flush()
            avgr.add_image(outim[i])
        print ""

        av = avgr.finish()
        if first != 1 or flast != -1:
            av.write_image(
                outname[:-4] + "_{}-{}_mean.hdf".format(first, flast), 0)
        else:
            av.write_image(outname[:-4] + "_mean.hdf", 0)

    # Generates different possibilities for resolution-weighted, but unaligned, averages

    xy = XYData()
    xy.set_size(2)
    xy.set_x(0, 0)
    xy.set_y(0, 1.0)
    xy.set_x(1, 0.707)
    xy.set_y(1, 0.0)
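    # xy serves as a per-frame Fourier weighting curve (attached below as "avg_weight"):
    # x is spatial frequency, from 0 to 0.707 (roughly the corner of the Fourier plane), y the weight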
    if options.avgs:
        if options.verbose: print "Weighted average"
        normim = EMData(nx / 2 + 1, ny)
        avgr = Averagers.get("weightedfourier", {"normimage": normim})
        for i in xrange(min(
                len(outim),
                25)):  # only use the first second for the unweighted average
            if options.verbose:
                print " {}/{}   \r".format(i + 1, len(outim)),
                sys.stdout.flush()
            xy.set_y(1, 1.0)  # no weighting
            outim[i]["avg_weight"] = xy
            avgr.add_image(outim[i])
        print ""

        av = avgr.finish()
        av.write_image(outname[:-4] + "_a.hdf", 0)
        #			display(normim)

        # linear weighting with shifting 0 cutoff

        xy.set_y(1, 0.0)
        for i in xrange(len(outim)):
            if options.verbose:
                print " {}/{}   \r".format(i + 1, len(outim)),
                sys.stdout.flush()
            xy.set_x(1, 0.025 + 0.8 * (len(outim) - i) / len(outim))
            outim[i]["avg_weight"] = xy
            avgr.add_image(outim[i])
        print ""

        av = avgr.finish()
        av.write_image(outname[:-4] + "_b.hdf", 0)

        # exponential falloff with shifting width

        xy.set_size(64)
        for j in xrange(64):
            xy.set_x(j, 0.8 * j / 64.0)
        for i in xrange(len(outim)):
            if options.verbose:
                print " {}/{}   \r".format(i + 1, len(outim)),
                sys.stdout.flush()
            for j in xrange(64):
                xy.set_y(
                    j,
                    exp(-j / (3.0 + 48.0 *
                              (len(outim) - i) / float(len(outim)))))
            #				plot(xy)
            outim[i]["avg_weight"] = xy
            avgr.add_image(outim[i])
        print ""

        av = avgr.finish()
        av.write_image(outname[:-4] + "_c.hdf", 0)

    if options.align_frames:
        n = len(outim)
        print(n)
        nx = outim[0]["nx"]
        ny = outim[0]["ny"]
        print("{} frames read {} x {}".format(n, nx, ny))

        ccfs = Queue.Queue(0)

        # prepare image data (outim) by clipping and FFT'ing all tiles
        # this is threaded as well
        immx = [0] * n
        thds = []
        for i in range(n):
            thd = threading.Thread(target=split_fft,
                                   args=(outim[i], i, options.optbox,
                                         options.optstep, ccfs))
            thds.append(thd)
        print("Precompute FFTs: {} threads".format(len(thds)))
        t0 = time()

        thrtolaunch = 0
        while thrtolaunch < len(thds) or threading.active_count() > 1:
            if thrtolaunch < len(thds):
                while (threading.active_count() == options.threads):
                    sleep(.1)
                #		if options.verbose : print "Starting thread {}/{}".format(thrtolaunch,len(thds))
                thds[thrtolaunch].start()
                thrtolaunch += 1
            else:
                sleep(1)

            while not ccfs.empty():
                i, d = ccfs.get()
                immx[i] = d

        for th in thds:
            th.join()

        # create threads
        thds = []
        i = 0
        for ima in range(n - 1):
            for imb in range(ima + 1, n):
                thds.append(
                    threading.Thread(target=calc_ccf,
                                     args=((ima, imb), options.optbox,
                                           options.optstep, immx[ima],
                                           immx[imb], ccfs)))
                i += 1

        print "{:1.1f} s\nCompute ccfs: {} threads".format(
            time() - t0, len(thds))
        t0 = time()

        # here we run the threads and save the results, no actual alignment done here
        csum2 = {}
        thrtolaunch = 0
        while thrtolaunch < len(thds) or threading.active_count() > 1:
            # If we haven't launched all threads yet, then we wait for an empty slot, and launch another
            # note that it's ok that we wait here forever, since there can't be new results if an existing
            # thread hasn't finished.
            if thrtolaunch < len(thds):
                while (threading.active_count() == options.threads):
                    sleep(.1)
                #if options.verbose : print "Starting thread {}/{}".format(thrtolaunch,len(thds))
                thds[thrtolaunch].start()
                thrtolaunch += 1
            else:
                sleep(1)

            while not ccfs.empty():
                i, d = ccfs.get()
                csum2[i] = d

            if options.verbose:
                print "  {}/{} {}\r".format(thrtolaunch, len(thds),
                                            threading.active_count()),
                sys.stdout.flush()

        for th in thds:
            th.join()

        avgr = Averagers.get("minmax", {"max": 0})
        avgr.add_image_list(csum2.values())
        csum = avgr.finish()
        #csum=sum(csum2.values())
        #csum.mult(1.0/len(csum2))
        #csum.process_inplace("normalize.edgemean")
        #display(csum)
        #csum.write_image("a.hdf",0)
        for i, k in enumerate(sorted(csum2.keys())):
            im = csum2[k]
            #	norm=im[BOX/2,BOX/2]/csum[BOX/2,BOX/2]
            #	norm=im.get_clip(Region(BOX/2-5,BOX/2-5,11,11))["mean"]/csum.get_clip(Region(BOX/2-5,BOX/2-5,11,11))["mean"]
            #	im.write_image("aa.hdf",i)

            # This has been disabled since it eliminates the peak for zero shift. Instead we try the zero/zero elimination hack
            norm = 1.0
            im.sub(csum * norm)

            # set the 0,0 peak to the average of neighboring pixels to reduce fixed pattern noise issues (this worked poorly)
            # im[BOX/2,BOX/2]=(im[BOX/2-1,BOX/2]+im[BOX/2+1,BOX/2]+im[BOX/2,BOX/2+1]+im[BOX/2,BOX/2-1])

            #	s=im.process("math.sub.optimal",{"ref":csum,"ctfweight":0})

            #	im.write_image("a.hdf",i+1)
            # This is critical. Without this, after filtering we get too many false peaks
            thr = im["mean"] + im["sigma"] * 1.5
            im.process_inplace("threshold.belowtozero", {"minval": thr})

        #####
        # Alignment code
        #####

        # array of x,y locations of each frame, all relative to the last frame in the series, which will always have 0,0 shift
        # we store the value for the last frame as well, as a convenience
        locs = [0] * (n * 2)

        #print csum2.keys()

        print("{:1.1f} s\nAlignment optimization".format(time() - t0))
        t0 = time()

        # we start with a heavy filter, optimize, then repeat for successively less filtration
        for scale in [0.02, 0.04, 0.07, 0.1, 0.5]:
            csum3 = {
                k: csum2[k].process("filter.lowpass.gauss",
                                    {"cutoff_abs": scale})
                for k in csum2.keys()
            }

            incr = [16] * len(locs)
            incr[-1] = incr[
                -2] = 4  # if step is zero for last 2, it gets stuck as an outlier, so we just make the starting step smaller
            simp = Simplex(qual, locs, incr, data=csum3)
            locs = simp.minimize(maxiters=int(100 / scale), epsilon=.01)[0]
            locs = [int(floor(i * 10 + .5)) / 10.0 for i in locs]
            print locs
            if options.verbose > 7:
                out = file(
                    "{}_path_{:02d}.txt".format(outname[:-4],
                                                int(1.0 / scale)), "w")
                for i in xrange(0, len(locs), 2):
                    out.write("%f\t%f\n" % (locs[i], locs[i + 1]))

        # compute the quality of each frame
        quals = [
            0
        ] * n  # quality of each frame based on its correlation peak summed over all images
        cen = csum2[(0, 1)]["nx"] / 2
        for i in xrange(n - 1):
            for j in xrange(i + 1, n):
                val = csum2[(i, j)].sget_value_at_interp(
                    int(cen + locs[j * 2] - locs[i * 2]),
                    int(cen + locs[j * 2 + 1] - locs[i * 2 + 1])) * sqrt(
                        float(n - fabs(i - j)) / n)
                quals[i] += val
                quals[j] += val

        # round for integer only shifting
        #locs=[int(floor(i+.5)) for i in locs]

        if options.noali:
            print "{:1.1f} s\nWrite unaligned".format(time() - t0)
            t0 = time()

            #write out the unaligned average movie
            out = qsum(outim)
            out.write_image("{}__noali.hdf".format(outname[:-4]), 0)

        print("Shift images ({})".format(time() - t0))
        t0 = time()
        #write individual aligned frames
        for i, im in enumerate(outim):
            im.translate(int(floor(locs[i * 2] + .5)),
                         int(floor(locs[i * 2 + 1] + .5)), 0)
        #	im.write_image("a_all_ali.hdf",i)

        if options.allali:
            out = qsum(outim)
            out.write_image("{}__allali.hdf".format(alioutname), 0)

        #out=sum(outim[5:15])	# FSC with the earlier frames instead of whole average
        # compute fsc between each aligned frame and the average
        # we tile this for better curves, since we don't need the detail
        fscq = [0] * n
        if options.optfsc:
            for i in range(n):
                rgnc = 0
                fs = None  # reset the accumulated tile FSC for each frame
                for x in range(64, out["nx"] - 192, 64):
                    for y in range(64, out["ny"] - 192, 64):
                        rgnc += 1.0
                        cmpto = out.get_clip(Region(x, y, 64, 64))
                        cscen = outim[i].get_clip(Region(x, y, 64, 64))
                        s, f = calcfsc(cmpto, cscen)
                        f = array(f)
                        try:
                            fs += f
                        except:
                            fs = f
                fs /= rgnc
                fs = list(fs)
                fscq[i] = qsum(fs[2:24])  # indexed assignment; append() left the preallocated zeros in use below

                Util.save_data(s[1], s[1] - s[0], fs[1:-1],
                               "{}_fsc_{:02d}.txt".format(outname[:-4], i))

        print("{:1.1f} s\nSubsets".format(time() - t0))
        t0 = time()
        # write translations and qualities
        db = js_open_dict(info_name(fsp))
        db["movieali_trans"] = locs
        db["movieali_qual"] = quals
        db["movie_name"] = fsp
        if gain:
            db["gain_name"] = gain["filename"]
            db["gain_id"] = gain["fileid"]
        if dark:
            db["dark_name"] = dark["filename"]
            db["dark_id"] = dark["fileid"]
        db.close()

        out = open("{}_info.txt".format(outname[:-4]), "w")
        out.write("#i,dx,dy,dr,rel dr,qual,(opt)fscqual\n")
        for i in range(n):
            out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                i, locs[i * 2], locs[i * 2 + 1],
                hypot(locs[i * 2], locs[i * 2 + 1]),
                hypot(locs[i * 2] - locs[i * 2 - 2],
                      locs[i * 2 + 1] - locs[i * 2 - 1]), quals[i], fscq[i]))

        if options.goodali:
            thr = max(quals) * 0.6  # keep frames with at least 60% of the best correlation quality
            best = [im for i, im in enumerate(outim) if quals[i] > thr]
            out = qsum(best)
            print "Keeping {}/{} frames".format(len(best), len(outim))
            out.write_image("{}__goodali.hdf".format(alioutname), 0)

        if options.bestali:
            thr = max(quals) * 0.75  # keep frames with at least 75% of the best correlation quality
            best = [im for i, im in enumerate(outim) if quals[i] > thr]
            out = qsum(best)
            print "Keeping {}/{} frames".format(len(best), len(outim))
            out.write_image("{}__bestali.hdf".format(alioutname), 0)

        if options.ali4to14:
            # skip the first 4 frames then keep 10
            out = qsum(outim[4:14])
            out.write_image("{}__4-14.hdf".format(alioutname), 0)

        # Write out the translated correlation maps for debugging
        #cen=csum2[(0,1)]["nx"]/2
        #n=len(locs)/2
        #for ii,k in enumerate(sorted(csum2.keys())):
        #	i,j=k
        #	csum2[k].translate(-int(locs[j*2]-locs[i*2]),-int(locs[j*2+1]-locs[i*2+1]),0)
        #	csum2[k].write_image("aa.hdf",ii)

        print "{:1.1f} s\nDone".format(time() - t0)
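
The filtration loop in this routine is a coarse-to-fine search: heavy low-pass filtering first, so the simplex finds the right basin despite noisy correlation peaks, then successively lighter filtering to refine the shifts. A 1-D sketch of the same idea with illustrative names, NumPy only:

import numpy as np

def coarse_to_fine_min(y, widths=(64, 16, 4, 1)):
    """Track the minimum of a noisy curve through successively lighter smoothing."""
    x = None
    for w in widths:
        ys = np.convolve(y, np.ones(w) / w, mode="same")  # boxcar low-pass, heaviest first
        if x is None:
            x = int(np.argmin(ys))                        # global search once, on the smoothest curve
        else:
            lo, hi = max(0, x - 2 * w), min(len(ys), x + 2 * w)
            x = lo + int(np.argmin(ys[lo:hi]))            # then only refine locally
    return x

np.random.seed(0)
t = np.linspace(-4, 4, 1001)
noisy = (t - 1.3) ** 2 + np.random.normal(0, 0.5, t.size)  # true minimum near t = 1.3
print(t[coarse_to_fine_min(noisy)])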
Example #19
0
def main():
    global debug, logid
    progname = os.path.basename(sys.argv[0])
    usage = """%prog [options] <input stack/image> ...
	
Various CTF-related operations on images, including automatic fitting. Note that automatic fitting is limited to 5 microns
underfocus at most. Input particles should be unmasked and unfiltered. A minimum of ~20% padding around the
particles is required for background extraction, even if this brings the edge of another particle into the box in some cases.
Particles should be reasonably well centered. Can also optionally phase flip and Wiener filter particles. Wiener filtration comes
after phase-flipping, so if phase flipping is performed Wiener filtered particles will also be phase-flipped. Note that both
operations are performed on oversampled images if specified (though final real-space images are clipped back to their original
size). Increasing padding during the particle picking process will improve the accuracy of phase-flipping, particularly for
images far from focus."""

    parser = OptionParser(usage=usage, version=EMANVERSION)

    parser.add_option("--gui",
                      action="store_true",
                      help="Start the GUI for interactive fitting",
                      default=False)
    parser.add_option("--auto_fit",
                      action="store_true",
                      help="Runs automated CTF fitting on the input images",
                      default=False)
    parser.add_option(
        "--bgmask",
        type="int",
        help=
        "Compute the background power spectrum from the edge of the image, specify a mask radius in pixels which would largely mask out the particles. Default is boxsize/2.",
        default=0)
    parser.add_option("--apix",
                      type="float",
                      help="Angstroms per pixel for all images",
                      default=0)
    parser.add_option("--voltage",
                      type="float",
                      help="Microscope voltage in kV",
                      default=0)
    parser.add_option("--cs",
                      type="float",
                      help="Microscope Cs (spherical aberration)",
                      default=0)
    parser.add_option("--ac",
                      type="float",
                      help="Amplitude contrast (percentage, default=10)",
                      default=10)
    parser.add_option(
        "--autohp",
        action="store_true",
        help=
        "Automatic high pass filter of the SNR only to remove initial sharp peak, phase-flipped data is not directly affected (default false)",
        default=False)
    #parser.add_option("--invert",action="store_true",help="Invert the contrast of the particles in output files (default false)",default=False)
    parser.add_option("--nonorm",
                      action="store_true",
                      help="Suppress per image real-space normalization",
                      default=False)
    parser.add_option(
        "--nosmooth",
        action="store_true",
        help=
        "Disable smoothing of the background (running-average of the log with adjustment at the zeroes of the CTF)",
        default=False)
    #parser.add_option("--phaseflip",action="store_true",help="Perform phase flipping after CTF determination and writes to specified file.",default=False)
    #parser.add_option("--wiener",action="store_true",help="Wiener filter (optionally phaseflipped) particles.",default=False)
    parser.add_option("--oversamp",
                      type="int",
                      help="Oversampling factor",
                      default=1)
    parser.add_option(
        "--sf",
        type="string",
        help=
        "The name of a file containing a structure factor curve. Can improve B-factor determination.",
        default=None)
    parser.add_option("--debug", action="store_true", default=False)

    (options, args) = parser.parse_args()

    if len(args) < 1: parser.error("Input image required")

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    if options.auto_fit:
        if options.voltage == 0: parser.error("Please specify voltage")
        if options.cs == 0: parser.error("Please specify Cs")
    if options.apix == 0: print "Using A/pix from header"

    debug = options.debug

    global sfcurve
    if options.sf:
        sfcurve = XYData()
        sfcurve.read_file(options.sf)

    logid = E2init(sys.argv)

    #	if options.oversamp>1 : options.apix/=float(options.oversamp)

    db_project = db_open_dict("bdb:project")
    db_parms = db_open_dict("bdb:e2ctf.parms")
    db_misc = db_open_dict("bdb:e2ctf.misc")

    options.filenames = args
    ### Power spectrum and CTF fitting
    if options.auto_fit:
        img_sets = pspec_and_ctf_fit(
            options,
            debug)  # converted to a function so as to work with the workflow

        ### This computes the intensity of the background subtracted power spectrum at each CTF maximum for all sets
        global envelopes  # needs to be a global for the Simplex minimizer
        # envelopes is essentially a cache of information that could be useful at later stages of refinement,
        # according to Steven Ludtke
        for i in img_sets:
            envelopes.append(ctf_env_points(i[2], i[3], i[1]))

        # we use a simplex minimizer to try to rescale the individual sets to match as best they can
        scales = [1.0] * len(img_sets)
        if (len(img_sets) > 3):
            incr = [0.2] * len(img_sets)
            simp = Simplex(env_cmp, scales, incr)
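            # (env_cmp must read the global "envelopes": no data= argument is passed here)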
            scales = simp.minimize(maxiters=1000)[0]
            #		print scales
            print " "

        # apply the final rescaling
        envelope = []
        for i in range(len(scales)):
            cur = envelopes[i]
            for j in range(len(cur)):
                envelope.append((cur[j][0], cur[j][1] * scales[i]))

        envelope.sort()
        envelope = [i for i in envelope
                    if i[1] > 0]  # filter out all negative peak values

        db_misc = db_open_dict("bdb:e2ctf.misc")
        db_misc["envelope"] = envelope
        #db_close_dict("bdb:e2ctf.misc")

        #out=file("envelope.txt","w")
        #for i in envelope: out.write("%f\t%f\n"%(i[0],i[1]))
        #out.close()

    ### GUI - user can update CTF parameters interactively
    if options.gui:
        img_sets = get_gui_arg_img_sets(options.filenames)
        if len(img_sets) == 0:
            E2end(logid)
            sys.exit(1)
        app = EMApp()
        gui = GUIctf(app, img_sets)
        gui.show_guis()
        app.exec_()

        print "done execution"

    ### Process input files
    #if debug : print "Phase flipping / Wiener filtration"
    # write wiener filtered and/or phase flipped particle data to the local database
    #if options.phaseflip or options.wiener: # only put this if statement here to make the program flow obvious
    #	write_e2ctf_output(options) # converted to a function so to work with the workflow

    E2end(logid)
Example #20
0
#	print nrg
	return nrg
			
#print csum2.keys()

print "{:1.1f} s\nAlignment optimization".format(time()-t0)
t0=time()

# we start with a heavy filter, optimize, then repeat for successively less filtration
for scale in [0.02,0.04,0.07,0.1,0.5]:
	csum3={k:csum2[k].process("filter.lowpass.gauss",{"cutoff_abs":scale}) for k in csum2.keys()}

	incr=[16]*len(locs)
	incr[-1]=incr[-2]=4	# if step is zero for last 2, it gets stuck as an outlier, so we just make the starting step smaller
	simp=Simplex(qual,locs,incr,data=csum3)
	locs=simp.minimize(maxiters=int(100/scale),epsilon=.01)[0]
	locs=[int(floor(i*10+.5))/10.0 for i in locs]
	print locs
	if VERBOSE:
		out=file("path_{:02d}.txt".format(int(1.0/scale)),"w")
		for i in xrange(0,len(locs),2): out.write("%f\t%f\n"%(locs[i],locs[i+1]))
	


# compute the quality of each frame
quals=[0]*n			# quality of each frame based on its correlation peak summed over all images
cen=csum2[(0,1)]["nx"]/2
for i in xrange(n-1):
	for j in xrange(i+1,n):
		val=csum2[(i,j)].sget_value_at_interp(int(cen+locs[j*2]-locs[i*2]),int(cen+locs[j*2+1]-locs[i*2+1]))*sqrt(float(n-fabs(i-j))/n)
Example #21
0
#!/usr/bin/env python3

import numpy as np
from Simplex import Simplex


if __name__ == '__main__':
	
	A = [[3, 1, -1, 0],
		 [1, 1, 0, -1]]
	
	b = [3, 2]
	
	c = [3, 2, 0, 0]
	
	index_b_guess = [0, 2]		# initial guess of the basis index
	
	
	smplx = Simplex(A, b, c)
	
	smplx.algorithm(index_b_guess)
    def BFS(self):
        solver: bool
        # solve the problem with the input augmented by the added (slack) variables
        tbl = np.empty((self.M + 1, self.M + self.N + 1), dtype=Fraction)
        b = np.empty(self.M,
                     dtype='int')  # b[i] = k if the k-th variable is basic in row i

        # determine the coefficients of the original (real) variables
        for i in range(self.M):
            for j in range(self.N):
                tbl[i][j] = self.tbl[i][j]

        for i in range(self.M):
            for j in range(self.N, self.N + self.M):
                tbl[i][j] = zero
            tbl[i][self.N + i] = one
            tbl[i][self.N + self.M] = self.tbl[i][self.N]
            b[i] = self.N + i

        for j in range(self.N):
            tbl[self.M][j] = zero

        for j in range(self.N, self.N + self.M):
            tbl[self.M][j] = ngone
        tbl[self.M][self.M + self.N] = zero
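        # the last row is the Phase-I objective: maximize -(sum of the added variables);
        # it can reach 0 exactly when the original system has a basic feasible solution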

        print("BFS with slack vars")
        print_table(tbl)
        print('Base =  ', b)

        sim = Simplex(tbl, b)
        if not sim.Solve():
            solver = False
            return solver

        b = sim.base
        tbl = sim.tbl

        print("Answer after the first Simplex phase")
        print_table(tbl)
        print('Base', b)

        if (tbl[-1][-1] != zero):
            print("No basic feasible solution exists!")
            solver = False
            return solver

        for i in range(self.M):
            for j in range(self.N):
                self.tbl[i][j] = tbl[i][j]
            self.tbl[i][self.N] = tbl[i][self.N + self.M]

        flag = np.full(self.N, False, dtype=bool)
        for i in range(self.M):
            if (b[i] < self.N):
                self.base[i] = b[i]
                flag[b[i]] = True
        for i in range(self.M):
            if (b[i] >= self.N):
                for j in range(self.N):
                    if (flag[j] == False):
                        self.base[i] = j
                        flag[j] = True
                        break
        solver = True
        return solver
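
BFS above is Phase I of the two-phase simplex method: one artificial (slack) variable is added per row, the auxiliary objective over those variables is optimized, and a basic feasible solution exists only if the auxiliary optimum is zero, after which the recovered basis is mapped back onto the original variables. Assuming the data in the __main__ block encodes min c·x subject to Ax = b, x ≥ 0 (this snippet does not document its convention), the result can be cross-checked with SciPy:

from scipy.optimize import linprog

# data from the __main__ block above
A = [[3, 1, -1, 0],
     [1, 1, 0, -1]]
b = [3, 2]
c = [3, 2, 0, 0]

# linprog minimizes c @ x subject to A_eq @ x == b_eq, with x >= 0 by default
res = linprog(c, A_eq=A, b_eq=b)
print(res.status)   # 0 -> a feasible optimum was found, so Phase I would succeed
print(res.x, res.fun)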