def runFFT(images, apix=3.7, do_avg=True):
    """Compute an incoherently accumulated, radially averaged power spectrum.

    Note: each input image is ring-mean masked and normalized IN PLACE
    before its FFT is taken, so callers must not rely on the inputs being
    unmodified afterwards.

    Args:
        images: sequence of EMData images; images[0] sets the dimensions.
        apix: angstroms per pixel.
        do_avg: when True, divide the accumulated spectrum by len(images).

    Rets:
        curve: radially-averaged FFT profile.
        sf_dx: spacing of the corresponding x values.
    """
    # This code by Steven Ludtke
    width = images[0]["nx"] + 2  # +2: presumably the packed-complex FFT width — matches do_fft output
    height = images[0]["ny"]
    accum = EMData(width, height, 1)
    accum.to_zero()
    accum.set_complex(1)
    for img in images:
        img.process_inplace("mask.ringmean")
        img.process_inplace("normalize")
        spectrum = img.do_fft()
        spectrum.mult(spectrum.get_ysize())
        accum.add_incoherent(spectrum)
    if do_avg:
        accum.mult(1.0 / len(images))
    profile = accum.calc_radial_dist(height, 0, 0.5, 1)
    spacing = 1.0 / (apix * 2.0 * height)
    return (profile, spacing)
def addNoiseToMapSolvent(map, varNoise):
    #*********************************
    #****** add Noise To Map *********
    #*********************************
    # Adds zero-mean Gaussian noise of variance varNoise to the solvent
    # region of `map`: a sphere of radius min(dim)/2 - 60 is built with the
    # "testimage.circlesphere" processor, and noise is applied only where
    # that sphere image is <= 0 (presumably outside the sphere — confirm
    # against the processor's output convention).
    data = np.copy(map)
    shape = data.shape
    noise = np.random.randn(shape[0], shape[1], shape[2]) * math.sqrt(varNoise)
    sphere = EMData()
    sphere.set_size(shape[0], shape[1], shape[2])
    sphere.to_zero()
    sphere_radius = (np.min(shape) / 2.0 - 60)
    sphere.process_inplace("testimage.circlesphere", {"radius": sphere_radius})
    solvent = np.copy(EMNumPy.em2numpy(sphere))
    # Invert the sphere into a solvent mask: voxels > 0 become 0, the rest 1.
    solvent = np.where(solvent > 0, 0, 1)
    return data + noise * solvent
def runFFT_err(images, apix=3.7, nresamples=1000, conf_int=95):
    """Run radially averaged FFT with bootstrapped error estimates.

    Note: each input image is ring-mean masked and normalized IN PLACE
    before its FFT is taken.

    Args:
        images: images to operate on
        apix: angstroms per pixel
        nresamples: number of bootstrap resamples
        conf_int: confidence interval (in percent) to calculate

    Rets:
        curve: average y-vals
        conf_lo: low confidence bound
        conf_hi: high confidence bound
        sf_dx: x-vals
    """
    # BUG FIX: numpy.random.randint samples from [0, high) with an EXCLUSIVE
    # upper bound, so the original `len(images) - 1` could never draw the
    # last image into any bootstrap resample.
    bootidx = numpy.random.randint(len(images), size=(nresamples, len(images)))
    nx = images[0]["nx"]
    ny = images[0]["ny"]
    # Compute the FFT of every image exactly once, accumulating the plain
    # sample average at the same time; the per-image spectra are reused by
    # every bootstrap resample below.
    df = []
    sampleavg = EMData(nx + 2, ny, 1)
    sampleavg.to_zero()
    sampleavg.set_complex(1)
    for d in images:
        d.process_inplace("mask.ringmean")
        d.process_inplace("normalize")
        curimg = d.do_fft()
        curimg.mult(d.get_ysize())
        sampleavg.add_incoherent(curimg)
        df.append(curimg)
    sampleavg.mult(1.0 / len(images))
    # Calculate the radially averaged spectrum for each bootstrap resample.
    fftavg = EMData(nx + 2, ny, 1)
    curve = []
    for r in range(nresamples):
        fftavg.to_zero()
        fftavg.set_complex(1)
        for idx in bootidx[r]:
            fftavg.add_incoherent(df[idx])
        fftavg.mult(1.0 / len(bootidx[r]))
        curve.append(fftavg.calc_radial_dist(ny, 0, 0.5, 1))
    cdata = numpy.vstack(curve)
    xdata = numpy.array(range(cdata.shape[-1])) * 1.0 / (apix * 2.0 * ny)
    # Confidence bounds are percentiles of the bootstrap distribution,
    # symmetric around the median (50th percentile).
    return (sampleavg.calc_radial_dist(ny, 0, 0.5, 1),
            numpy.percentile(cdata, 50 - conf_int / 2, axis=0),
            numpy.percentile(cdata, 50 + conf_int / 2, axis=0),
            xdata)
def prepare_mask_and_maps_for_scaling(args):
    """Load the experimental and model maps, build or load a mask, and pad
    all three when the scaling window would bleed past the map edges.

    Rets:
        (emmap, modmap, mask, wn, window_bleed_and_pad)
    """
    emmap = get_image(args.em_map)
    modmap = get_image(args.model_map)

    if args.mask is not None:
        # User-supplied mask: binarize at a 0.5 threshold.
        mask = binarize(get_image(args.mask), 0.5)
    else:
        xsize = emmap.get_xsize()
        ysize = emmap.get_ysize()
        zsize = emmap.get_zsize()
        mask = EMData()
        mask.set_size(xsize, ysize, zsize)
        mask.to_zero()
        if xsize == ysize == zsize:
            # Cubic map: spherical mask of half the box size.
            mask.process_inplace("testimage.circlesphere",
                                 {"radius": xsize // 2})
        else:
            # Non-cubic map: all-ones mask, trimmed by one voxel per axis
            # and zero-padded back to the original size.
            mask += 1
            mask = Util.window(mask, xsize - 1, ysize - 1, zsize - 1)
            mask = Util.pad(mask, xsize, ysize, zsize, 0, 0, 0, '0')

    if args.window_size is not None:
        wn = int(math.ceil(args.window_size / 2.) * 2)  # round up to even
    else:
        # Default window: ~21 * apix voxels, rounded up to an even size.
        wn = int(math.ceil(round((7 * 3 * args.apix)) / 2.) * 2)

    window_bleed_and_pad = check_for_window_bleeding(mask, wn)
    if window_bleed_and_pad:
        pad_int_emmap = compute_padding_average(emmap, mask)
        pad_int_modmap = compute_padding_average(modmap, mask)
        # NOTE(review): the computed pad_int_* averages are never used below;
        # Util.pad receives the literal strings 'pad_int_emmap' /
        # 'pad_int_modmap' rather than the values — confirm this is what
        # Util.pad expects for its background argument.
        map_shape = [(emmap.get_xsize() + wn),
                     (emmap.get_ysize() + wn),
                     (emmap.get_zsize() + wn)]
        emmap = Util.pad(emmap, map_shape[0], map_shape[1], map_shape[2],
                         0, 0, 0, 'pad_int_emmap')
        modmap = Util.pad(modmap, map_shape[0], map_shape[1], map_shape[2],
                          0, 0, 0, 'pad_int_modmap')
        mask = Util.pad(mask, map_shape[0], map_shape[1], map_shape[2],
                        0, 0, 0, '0')
    return emmap, modmap, mask, wn, window_bleed_and_pad
def execute(self):
    '''
    The main function - executes the job of performing all v all boot strapped probe generation.

    Iteratively aligns every file against every other, merges the
    best-matching couples into weighted averages written to disk, then
    repeats on the reduced set until only a single couple of two entries
    remains. Progress is reported through self.logger via E2progress.
    '''
    if self.logger: E2progress(self.logger, 0.0)
    # all_v_all_cmd = self.get_all_v_all_cmd()
    # all_v_all_output = self.get_all_v_all_output()
    #
    # # NOTE: calling the allvall program is probably not strictly necessary, seeing
    # # as there is a generic framework for generating and executing alignment jobs
    # # implemented below that would be easily adaptable to this - however I left it
    # # because doing it this way is absolutely equivalent and has the same cost.
    # all_v_all_cmd += " --output="+all_v_all_output
    # print "executing",all_v_all_cmd
    # if self.logger: E2progress(self.logger,0.01)
    # if ( launch_childprocess(all_v_all_cmd) != 0 ):
    #     print "Failed to execute %s" %all_v_all_cmd
    #     sys.exit(1)
    # if self.logger: E2progress(self.logger,0.02)
    #
    # images = []
    # images.append(EMData(all_v_all_output,0))
    # images.append(EMData(all_v_all_output,1))
    # images.append(EMData(all_v_all_output,2))
    # images.append(EMData(all_v_all_output,3))
    # images.append(EMData(all_v_all_output,4))
    # images.append(EMData(all_v_all_output,5))
    # images.append(EMData(all_v_all_output,6))
    #
    start_n = len(self.files)  # the number of averages produced
    # images[0] holds per-pair results; images[1..6] hold the per-pair
    # alignment parameters tx, ty, tz, az, alt, phi (see the Transform
    # dict built below).
    images = []
    e = EMData(start_n, start_n)
    e.to_zero()
    images.append(e)
    for j in range(6):
        images.append(e.copy())
    # keep tracks of the names of the new files
    # Total number of distinct pairs at the start — used as the progress
    # denominator below.
    big_n = images[0].get_xsize() * (images[0].get_xsize() - 1) / 2.0
    iter = 1
    current_files = self.files
    alignment_jobs = []  # a list of comparisons to be performed
    for i in range(len(current_files)):
        for j in range(i + 1, len(current_files)):
            alignment_jobs.append([i, j])
    self.register_current_images(images)
    self.register_current_files(self.files)
    alignments_manager = EMTomoAlignments(self.options)
    alignments_manager.execute(alignment_jobs, self.files, self)
    self.write_current_images(self.files)
    # Main merge loop: each pass couples up files and reduces the set.
    while True:
        couples = self.get_couples(images[0])
        taken = list(range(images[0].get_xsize()))
        done = False
        # One couple covering exactly two entries -> this is the final merge.
        if len(couples) == 1 and len(taken) == 2: done = True
        #print len(couples),len(taken)
        new_files = []
        # write the averages of the couples to disk, store the new names
        for i, j in couples:
            image_1 = EMData(current_files[j], 0)
            image_2 = EMData(current_files[i], 0)
            # Weight each contribution by its accumulated "total_inc"
            # attribute, defaulting to 1 for inputs that carry none.
            image_1_weight = 1
            if image_1.has_attr("total_inc"): image_1_weight = image_1["total_inc"]
            image_2_weight = 1
            if image_2.has_attr("total_inc"): image_2_weight = image_2["total_inc"]
            total_weight = image_1_weight + image_2_weight
            image_1.mult(old_div(float(image_1_weight), total_weight))
            image_2.mult(old_div(float(image_2_weight), total_weight))
            # Rebuild the stored transform for pair (i, j) and apply it to
            # image_1 before summing.
            d = {}
            d["type"] = "eman"
            d["tx"] = images[1].get(i, j)
            d["ty"] = images[2].get(i, j)
            d["tz"] = images[3].get(i, j)
            d["az"] = images[4].get(i, j)
            d["alt"] = images[5].get(i, j)
            d["phi"] = images[6].get(i, j)
            t = Transform(d)
            image_1.process_inplace("xform", {"transform": t})
            image_2 += image_1
            # Provenance attributes — so we can recollect how it was created.
            image_2.set_attr("src_image", current_files[j])
            image_2.set_attr("added_src_image", current_files[i])
            image_2.set_attr("added_src_transform", t)
            image_2.set_attr("added_src_cmp", images[0](i, j))
            image_2.set_attr("total_inc", total_weight)
            output_name = numbered_bdb("bdb:" + self.options.path + "#tomo_ave_0" + str(iter - 1))
            image_2.write_image(output_name, 0)
            if self.options.dbls: self.save_to_workflow_db(output_name)
            new_files.append(output_name)
            # i and j are now part of a merged average; only unmerged
            # indices remain in `taken`.
            taken.remove(i)
            taken.remove(j)
        if done: break
        num_new = len(new_files)  # the number of averages produced
        new_n = len(new_files) + len(taken)
        # Fresh result/parameter matrices sized for the reduced set.
        new_images = []
        e = EMData(new_n, new_n)
        e.to_zero()
        new_images.append(e)
        for j in range(6):
            new_images.append(e.copy())
        # Carry over the unmerged files and copy their cached pairwise
        # entries, offset past the num_new freshly written averages.
        for i, idxi in enumerate(taken):
            new_files.append(current_files[idxi])
            for j, idxj in enumerate(taken):
                if i == j:
                    continue
                else:
                    new_images[0].set(num_new + i, num_new + j, images[0].get(idxi, idxj))
                    new_images[1].set(num_new + i, num_new + j, images[1].get(idxi, idxj))
                    new_images[2].set(num_new + i, num_new + j, images[2].get(idxi, idxj))
                    new_images[3].set(num_new + i, num_new + j, images[3].get(idxi, idxj))
                    new_images[4].set(num_new + i, num_new + j, images[4].get(idxi, idxj))
                    new_images[5].set(num_new + i, num_new + j, images[5].get(idxi, idxj))
                    new_images[6].set(num_new + i, num_new + j, images[6].get(idxi, idxj))
        # Only the new averages need aligning against everything else;
        # pairs among `taken` were copied above.
        alignment_jobs = []  # a list of comparisons to be performed
        for i in range(num_new):
            for j in range(i + 1, len(new_files)):
                alignment_jobs.append([i, j])
        if self.logger:
            E2progress(self.logger, 1.0 - old_div(len(alignment_jobs), big_n))
        self.register_current_images(new_images)
        self.register_current_files(new_files)
        alignments_manager = EMTomoAlignments(self.options)
        alignments_manager.execute(alignment_jobs, new_files, self)
        self.write_current_images(new_files)
        current_files = new_files
        images = new_images
        iter += 1
        print(couples, taken)
    if self.logger: E2progress(self.logger, 1.0)
def execute(self):
    '''
    The main function - executes the job of performing all v all boot strapped probe generation.

    NOTE(review): this is a Python 2 variant of the same algorithm as the
    execute() above (print statement, list-returning range()); kept
    byte-identical apart from comments. Iteratively aligns every file
    against every other, merges the best-matching couples into weighted
    averages on disk, and repeats until one couple of two entries remains.
    '''
    if self.logger: E2progress(self.logger,0.0)
    # all_v_all_cmd = self.get_all_v_all_cmd()
    # all_v_all_output = self.get_all_v_all_output()
    #
    # # NOTE: calling the allvall program is probably not strictly necessary, seeing
    # # as there is a generic framework for generating and executing alignment jobs
    # # implemented below that would be easily adaptable to this - however I left it
    # # because doing it this way is absolutely equivalent and has the same cost.
    # all_v_all_cmd += " --output="+all_v_all_output
    # print "executing",all_v_all_cmd
    # if self.logger: E2progress(self.logger,0.01)
    # if ( launch_childprocess(all_v_all_cmd) != 0 ):
    #     print "Failed to execute %s" %all_v_all_cmd
    #     sys.exit(1)
    # if self.logger: E2progress(self.logger,0.02)
    #
    # images = []
    # images.append(EMData(all_v_all_output,0))
    # images.append(EMData(all_v_all_output,1))
    # images.append(EMData(all_v_all_output,2))
    # images.append(EMData(all_v_all_output,3))
    # images.append(EMData(all_v_all_output,4))
    # images.append(EMData(all_v_all_output,5))
    # images.append(EMData(all_v_all_output,6))
    #
    start_n = len(self.files)  # the number of averages produced
    # images[0] holds per-pair results; images[1..6] hold the per-pair
    # alignment parameters tx, ty, tz, az, alt, phi (see the Transform
    # dict built below).
    images = []
    e = EMData(start_n,start_n)
    e.to_zero()
    images.append(e)
    for j in range(6):
        images.append(e.copy())
    # keep tracks of the names of the new files
    # Total number of distinct pairs at the start — progress denominator.
    big_n = images[0].get_xsize()*(images[0].get_xsize()-1)/2.0
    iter = 1
    current_files = self.files
    alignment_jobs = []# a list of comparisons to be performed
    for i in range(len(current_files)):
        for j in range(i+1,len(current_files)):
            alignment_jobs.append([i,j])
    self.register_current_images(images)
    self.register_current_files(self.files)
    alignments_manager = EMTomoAlignments(self.options)
    alignments_manager.execute(alignment_jobs, self.files,self)
    self.write_current_images(self.files)
    # Main merge loop: each pass couples up files and reduces the set.
    while True:
        couples = self.get_couples(images[0])
        taken = range(images[0].get_xsize())  # Python 2: range() is a list, mutated via remove() below
        done = False
        # One couple covering exactly two entries -> this is the final merge.
        if len(couples) == 1 and len(taken) == 2: done = True
        #print len(couples),len(taken)
        new_files = []
        # write the averages of the couples to disk, store the new names
        for i,j in couples:
            image_1 = EMData(current_files[j],0)
            image_2 = EMData(current_files[i],0)
            # Weight each contribution by its accumulated "total_inc"
            # attribute, defaulting to 1 for inputs that carry none.
            image_1_weight = 1
            if image_1.has_attr("total_inc"): image_1_weight = image_1["total_inc"]
            image_2_weight = 1
            if image_2.has_attr("total_inc"): image_2_weight = image_2["total_inc"]
            total_weight = image_1_weight+image_2_weight
            image_1.mult(float(image_1_weight)/total_weight)
            image_2.mult(float(image_2_weight)/total_weight)
            # Rebuild the stored transform for pair (i, j) and apply it to
            # image_1 before summing.
            d = {}
            d["type"] = "eman"
            d["tx"] = images[1].get(i,j)
            d["ty"] = images[2].get(i,j)
            d["tz"] = images[3].get(i,j)
            d["az"] = images[4].get(i,j)
            d["alt"] = images[5].get(i,j)
            d["phi"] = images[6].get(i,j)
            t = Transform(d)
            image_1.process_inplace("xform",{"transform":t})
            image_2 += image_1
            # Provenance attributes — so we can recollect how it was created.
            image_2.set_attr("src_image",current_files[j])
            image_2.set_attr("added_src_image",current_files[i])
            image_2.set_attr("added_src_transform",t)
            image_2.set_attr("added_src_cmp",images[0](i,j))
            image_2.set_attr("total_inc",total_weight)
            output_name = numbered_bdb("bdb:"+self.options.path+"#tomo_ave_0"+str(iter-1))
            image_2.write_image(output_name,0)
            if self.options.dbls: self.save_to_workflow_db(output_name)
            new_files.append(output_name)
            # i and j are now part of a merged average; only unmerged
            # indices remain in `taken`.
            taken.remove(i)
            taken.remove(j)
        if done: break
        num_new = len(new_files)  # the number of averages produced
        new_n = len(new_files) + len(taken)
        # Fresh result/parameter matrices sized for the reduced set.
        new_images = []
        e = EMData(new_n,new_n)
        e.to_zero()
        new_images.append(e)
        for j in range(6):
            new_images.append(e.copy())
        # Carry over the unmerged files and copy their cached pairwise
        # entries, offset past the num_new freshly written averages.
        for i,idxi in enumerate(taken):
            new_files.append(current_files[idxi])
            for j,idxj in enumerate(taken):
                if i == j:
                    continue
                else:
                    new_images[0].set(num_new+i,num_new+j,images[0].get(idxi,idxj))
                    new_images[1].set(num_new+i,num_new+j,images[1].get(idxi,idxj))
                    new_images[2].set(num_new+i,num_new+j,images[2].get(idxi,idxj))
                    new_images[3].set(num_new+i,num_new+j,images[3].get(idxi,idxj))
                    new_images[4].set(num_new+i,num_new+j,images[4].get(idxi,idxj))
                    new_images[5].set(num_new+i,num_new+j,images[5].get(idxi,idxj))
                    new_images[6].set(num_new+i,num_new+j,images[6].get(idxi,idxj))
        # Only the new averages need aligning against everything else;
        # pairs among `taken` were copied above.
        alignment_jobs = []# a list of comparisons to be performed
        for i in range(num_new):
            for j in range(i+1,len(new_files)):
                alignment_jobs.append([i,j])
        if self.logger:
            E2progress(self.logger,1.0-len(alignment_jobs)/big_n)
        self.register_current_images(new_images)
        self.register_current_files(new_files)
        alignments_manager = EMTomoAlignments(self.options)
        alignments_manager.execute(alignment_jobs, new_files,self)
        self.write_current_images(new_files)
        current_files = new_files
        images = new_images
        iter += 1
        print couples,taken
    if self.logger: E2progress(self.logger,1.0)