def __establish_tmp_file_name(self):
    '''
    Called internally to establish the temporary file name member variables.
    Keeps generating candidate names until one is found that does not
    collide with an existing database.
    '''
    # do/while shape: always generate at least one candidate name
    while True:
        self.__gen_tmp_file_name()
        if not db_check_dict(self.__tmp_db_name):
            break
def check(options, verbose):
    """
    Sanity-check the parsed command-line options before running.

    options: parsed option object with sep, nofilecheck, outfile, force
             and simmxfile attributes
    verbose: when > 0, print a message for each problem found
    Returns True if any problem was detected, False otherwise.
    """
    problem = False

    # --sep must be a positive integer
    if options.sep < 1:
        if verbose > 0:
            print(
                "Error: the --sep argument must be greater than zero, currently it is %d" % (options.sep))
        problem = True

    # File-level checks can be disabled entirely with --nofilecheck
    if options.nofilecheck == False:
        # Refuse to clobber an existing output file unless --force was given
        if os.path.exists(options.outfile) and not options.force:
            if verbose > 0:
                print("File %s exists, will not write over, exiting" % options.outfile)
            problem = True

        # The similarity matrix may be a plain file or a bdb database
        if not os.path.exists(options.simmxfile) and not db_check_dict(
                options.simmxfile):
            if verbose > 0:
                print(
                    "Error: the similarity matrix file (%s) was not found, cannot run e2classify.py" % (options.simmxfile))
            problem = True
        else:
            # A usable simmx produced with --saveali holds at least 5 images
            num_sim = EMUtil.get_image_count(options.simmxfile)
            if num_sim < 5:
                if verbose > 0:
                    print(
                        "Error, the similarity matrix did not contain 5 images - be sure to use the --saveali argument when running e2simmx.py"
                    )
                problem = True

    return problem
def get_gui_arg_img_sets(filenames):
    '''
    Returns the img_sets list required to initialize the GUI correctly.

    Each entry is [filename, EMAN2Ctf, <remaining fitted parameters...>],
    built from the "bdb:e2ctf.parms" database. Returns an empty list if
    the parameter database does not exist, or if any of the given files
    has no fitted parameters yet (auto fit must be run first).
    '''
    img_sets = []
    # No parameter database at all -> nothing to show in the GUI
    if db_check_dict("bdb:e2ctf.parms"):
        db_parms = db_open_dict("bdb:e2ctf.parms", ro=True)
    else:
        return img_sets
    for file in filenames:
        # Parameters are keyed by the file's tag, not its full path
        name = get_file_tag(file)
        if not db_parms.has_key(name):
            print "error, you must first run auto fit before running the gui - there are no parameters for", name
            return []
        img_set = db_parms[name]
        ctf = EMAN2Ctf()
        ctf.from_string(
            img_set[0])  # convert to ctf object seeing as it's a string
        img_set[0] = ctf
        # Prepend the filename so the GUI knows which image each set belongs to
        actual = [file]
        actual.extend(img_set)
        img_sets.append(actual)
    return img_sets
def check(options, verbose):
    '''
    Sanity-check the parsed command-line options before running.
    Returns True if any problem was detected, False otherwise.
    When verbose > 0, a message is printed for each problem.
    '''
    error = False
    # --sep must be a positive integer
    if (options.sep < 1):
        if verbose > 0:
            print "Error: the --sep argument must be greater than zero, currently it is %d" % (options.sep)
        error = True
    # File-level checks can be disabled entirely with --nofilecheck
    if (options.nofilecheck == False):
        # Refuse to clobber an existing output file unless --force was given
        if os.path.exists(options.outfile):
            if (not options.force):
                if verbose > 0:
                    print "File %s exists, will not write over, exiting" % options.outfile
                error = True
        # The similarity matrix may be a plain file or a bdb database
        if not os.path.exists(options.simmxfile) and not db_check_dict(options.simmxfile):
            if verbose > 0:
                print "Error: the similarity matrix file (%s) was not found, cannot run e2classify.py" % (options.simmxfile)
            error = True
        else:
            # A usable simmx produced with --saveali holds at least 5 images
            num_sim = EMUtil.get_image_count(options.simmxfile)
            if (num_sim < 5):
                if verbose > 0:
                    print "Error, the similarity matrix did not contain 5 images - be sure to use the --saveali argument when running e2simmx.py"
                error = True
    return error
def remove_tmp_file(self):
    '''
    Removes the temporary database file, if it exists.
    Silently does nothing when there is no temporary database.
    '''
    if db_check_dict(self.__tmp_db_name):
        db_remove_dict(self.__tmp_db_name)
def _checkfiles(self, filename):
    '''
    Verify that every space-delimited file in *filename* exists either on
    disk or as a bdb database. On the first missing file, notify the user
    and return False; return True when all files are present.
    '''
    for candidate in filename.split():
        # Accept the entry if it is a regular file or a bdb database
        if os.access(candidate, os.F_OK) or db_check_dict(candidate):
            continue
        self._onBadFile(candidate)
        # Display the rubbish file to the user
        self.filenamebox.setText(filename)
        return False
    return True
def __init__(self, file_name):
    '''
    @param file_name the name of the file that is being overwritten
    @exception RuntimeError raised if file_name is not a valid bdb style database handle
    '''
    # Validate before touching any state so a bad handle leaves nothing half-built
    if not db_check_dict(file_name):
        raise RuntimeError("%s is not a valid database name" % file_name)
    self.__orig_db_name = file_name
    EMTmpFileHandleBase.__init__(self)
    # Filled in by __establish_tmp_file_name below
    self.__tmp_db_name = ""
    self.__establish_tmp_file_name()
def _checkfiles(self, filename):
    '''
    Validate a (possibly multi-file) filename string and update the info
    label with file/image counts and dimensions. Returns False (after
    notifying the user) on the first missing file, True otherwise.
    '''
    # Posional arguments must be space delimted for multiple files, whereas options must be comma delimted
    if self.getPositional():
        files = filename.split()
    else:
        files = filename.split(",")
    # If we have too many files, the user will have to wait a LOOONG time for the image check, so we just skip it
    if len(files) > 20:
        try:
            # Probe only the first file for dimensions
            tst = EMData(files[0], 0)
            nx, ny, nz = tst["nx"], tst["ny"], tst["nz"]
        except:
            # Unreadable first file: fall back to a count-only label
            nx, ny, nz = 0, 0, 0
        if nx > 0:
            self.infolabel.setText("Files: %d %dx%dx%d" % (len(files), nx, ny, nz))
        else:
            self.infolabel.setText("Files: %d" % (len(files)))
        return True
    # Check each file
    numimages = 0
    nx, ny, nz = 0, 0, 0
    for f in files:
        # A file is acceptable if it exists on disk or is a bdb database
        if not os.access(f, os.F_OK) and not db_check_dict(f):
            self._onBadFile(f)
            # Display the rubbish file to the user
            self.filenamebox.setText(filename)
            return False
        try:
            numimages += EMUtil.get_image_count(f)
            # NOTE(review): this re-reads files[0] on every pass — probably
            # intended to be EMData(f, 0); confirm before changing
            tst = EMData(files[0], 0)
            nx, ny, nz = tst["nx"], tst["ny"], tst["nz"]
        except:
            nx, ny, nz = 0, 0, 0
    if nx > 0:
        self.infolabel.setText("Files: %d Images: %d %dx%dx%d" % (len(files), numimages, nx, ny, nz))
    else:
        self.infolabel.setText("Files: %d Images: %d" % (len(files), numimages))
    return True
def _checkfiles(self, filename):
    '''
    Validate a (possibly multi-file) filename string and update the info
    label with file/image counts and dimensions. Returns False (after
    notifying the user) on the first missing file, True otherwise.
    '''
    # Posional arguments must be space delimted for multiple files, whereas options must be comma delimted
    if self.getPositional():
        files = filename.split()
    else:
        files = filename.split(",")
    # If we have too many files, the user will have to wait a LOOONG time for the image check, so we just skip it
    if len(files) > 20:
        try:
            # Probe only the first file for dimensions
            tst = EMData(files[0], 0)
            nx, ny, nz = tst["nx"], tst["ny"], tst["nz"]
        except:
            # Unreadable first file: fall back to a count-only label
            nx, ny, nz = 0, 0, 0
        if nx > 0:
            self.infolabel.setText("Files: %d %dx%dx%d" % (len(files), nx, ny, nz))
        else:
            self.infolabel.setText("Files: %d" % (len(files)))
        return True
    # Check each file
    numimages = 0
    nx, ny, nz = 0, 0, 0
    for f in files:
        # A file is acceptable if it exists on disk or is a bdb database
        if not os.access(f, os.F_OK) and not db_check_dict(f):
            self._onBadFile(f)
            # Display the rubbish file to the user
            self.filenamebox.setText(filename)
            return False
        try:
            numimages += EMUtil.get_image_count(f)
            # NOTE(review): this re-reads files[0] on every pass — probably
            # intended to be EMData(f, 0); confirm before changing
            tst = EMData(files[0], 0)
            nx, ny, nz = tst["nx"], tst["ny"], tst["nz"]
        except:
            nx, ny, nz = 0, 0, 0
    if nx > 0:
        self.infolabel.setText("Files: %d Images: %d %dx%dx%d" % (len(files), numimages, nx, ny, nz))
    else:
        self.infolabel.setText("Files: %d Images: %d" % (len(files), numimages))
    return True
def get_gui_arg_img_sets(filenames):
    '''
    Returns the img_sets list required to initialize the GUI correctly.

    Each entry is [filename, EMAN2Ctf, <remaining fitted parameters...>],
    built from the "bdb:e2ctf.parms" database. Returns an empty list if
    the parameter database does not exist, or if any of the given files
    has no fitted parameters yet (auto fit must be run first).
    '''
    img_sets = []
    # No parameter database at all -> nothing to show in the GUI
    if db_check_dict("bdb:e2ctf.parms"):
        db_parms = db_open_dict("bdb:e2ctf.parms", ro=True)
    else:
        return img_sets
    for file in filenames:
        # Parameters are keyed by the file's tag, not its full path
        name = get_file_tag(file)
        if not db_parms.has_key(name):
            print "error, you must first run auto fit before running the gui - there are no parameters for", name
            return []
        img_set = db_parms[name]
        ctf = EMAN2Ctf()
        ctf.from_string(img_set[0])  # convert to ctf object seeing as it's a string
        img_set[0] = ctf
        # Prepend the filename so the GUI knows which image each set belongs to
        actual = [file]
        actual.extend(img_set)
        img_sets.append(actual)
    return img_sets
def isac_substack(args):
    '''
    Create a virtual BDB substack containing only the particles that ISAC
    validated (the "members" of each class average).

    args must provide: input_bdb_stack_path, input_isac_class_avgs_path,
    output_directory, substack_basename. Exits via ERROR() on any invalid
    input; writes the particle-id list and a virtual bdb stack into the
    (newly created) output directory.
    '''
    from utilities import get_im, write_text_file
    from EMAN2db import db_open_dict, db_check_dict
    from e2bdb import makerelpath
    # To make the execution exit upon fatal error by ERROR in global_def.py
    global_def.BATCH = True
    # Check error conditions
    subcommand_name = "isac_substack"
    if not db_check_dict(args.input_bdb_stack_path, readonly=True):
        ERROR(
            "Input BDB image stack file does not exist. Please check the file path and restart the program.",
            subcommand_name)  # action=1 - fatal error, exit
    if not os.path.exists(args.input_isac_class_avgs_path):
        ERROR(
            "Input ISAC class average stack file does not exist. Please check the file path and restart the program.",
            subcommand_name)  # action=1 - fatal error, exit
    if os.path.exists(args.output_directory):
        ERROR(
            "Output directory exists. Please change the name and restart the program.",
            subcommand_name)  # action=1 - fatal error, exit
    if args.substack_basename.strip() == "":
        ERROR("Substack basename cannot be empty string or only white spaces.",
              subcommand_name)  # action=1 - fatal error, exit
    assert (db_check_dict(args.input_bdb_stack_path, readonly=True))
    assert (os.path.exists(args.input_isac_class_avgs_path))
    assert (not os.path.exists(args.output_directory))
    assert (args.substack_basename.strip() != "")

    # Create output directory
    os.mkdir(args.output_directory)

    # Retrieve original particle IDs of member particles listed in ISAC class average stack
    n_img_processed = EMUtil.get_image_count(args.input_isac_class_avgs_path)
    isac_substack_particle_id_list = []
    for i_img in xrange(n_img_processed):
        isac_substack_particle_id_list += get_im(
            args.input_isac_class_avgs_path, i_img).get_attr("members")
    isac_substack_particle_id_list.sort()

    # Save the substack particle id list
    isac_substack_particle_id_list_file_path = os.path.join(
        args.output_directory,
        "{0}_particle_id_list.txt".format(args.substack_basename))
    write_text_file(isac_substack_particle_id_list,
                    isac_substack_particle_id_list_file_path)

    # Open the output BDB dictionary
    assert (args.output_directory != "")
    output_virtual_bdb_stack_real_path = "bdb:{0}#{1}".format(
        args.output_directory, args.substack_basename)
    output_virtual_bdb_stack = db_open_dict(output_virtual_bdb_stack_real_path)
    # Convert an absolute path to the actual output data to a relative path by eliminating any symbolic links
    output_virtual_bdb_stack_real_path = os.path.realpath(
        output_virtual_bdb_stack.path) + "/"

    # Open the input BDB dictionary
    input_bdb_stack = db_open_dict(args.input_bdb_stack_path, ro=True)  # Read only

    # Copy the header from input to output BDB dictionary
    n_img_detected = len(isac_substack_particle_id_list)
    print(" ")
    print_progress("Detected %d ISAC validated particles in %s" %
                   (n_img_detected, args.input_isac_class_avgs_path))

    # Loop through all ISAC validated particles
    print(" ")
    n_img_processed = 0
    n_img_of_10_percent = n_img_detected // 10
    for i_img_detected, isac_substack_particle_id in enumerate(
            isac_substack_particle_id_list):
        # Print progress every ~10%.
        # BUGFIX: n_img_of_10_percent is 0 when fewer than 10 particles were
        # detected; the original modulo then raised ZeroDivisionError.
        if n_img_of_10_percent > 0 and i_img_detected % n_img_of_10_percent == 0:
            try:
                print_progress(
                    "Progress %5.2f%%: Processing entry %6d (Particle ID %6d)."
                    % (float(i_img_detected) / n_img_detected * 100.0,
                       i_img_detected, isac_substack_particle_id))
                sys.stdout.flush()
            except:
                pass
        # Read a particle image header from input bdb stack
        try:
            img_header = input_bdb_stack.get(
                isac_substack_particle_id,
                nodata=1).get_attr_dict()  # Need only header information
        except:
            ERROR(
                "Failed to read image header of particle #%d from %s. Please make sure input_bdb_stack_path (%s) and input_isac_class_avgs_path (%s) are correct pair and run the command again..."
                % (isac_substack_particle_id, args.input_bdb_stack_path,
                   args.input_bdb_stack_path, args.input_isac_class_avgs_path),
                subcommand_name)  # action=1 - fatal error, exit
        # Convert an absolute path to the actual input data to a relative path by eliminating any symbolic links
        try:
            input_bdb_stack_real_path = os.path.realpath(
                input_bdb_stack.get_data_path(isac_substack_particle_id))
            # Convert the path to OS specific format
            if os.name == "nt":
                output_virtual_bdb_stack_real_path = output_virtual_bdb_stack_real_path.replace(
                    "\\", "/")
                input_bdb_stack_real_path = input_bdb_stack_real_path.replace(
                    "\\", "/")
            # Takes a pair of paths /a/b/c/d and /a/b/e/f/g and returns a relative path to b from a, ../../e/f/g
            common_relative_path = makerelpath(
                output_virtual_bdb_stack_real_path, input_bdb_stack_real_path)
        except:
            ERROR(
                "Failure to find common relative data path for particle image #%d. Skipping this image..."
                % (isac_substack_particle_id),
                subcommand_name,
                action=0)  # action = 0 - non-fatal, print a warning;
            continue
        # Update the image header for output
        img_header["data_path"] = common_relative_path
        img_header["data_n"] = isac_substack_particle_id
        img_header["data_source"] = args.input_bdb_stack_path
        # Register the image header to output virtual bdb stack
        output_virtual_bdb_stack[n_img_processed] = img_header
        # Increment process image counts
        n_img_processed += 1

    # Close input and output bdb stacks
    output_virtual_bdb_stack.close()
    input_bdb_stack.close()

    # Print summary of processing
    print(" ")
    print_progress("Summary of processing...")
    print_progress("Detected : %6d" % (n_img_detected))
    print_progress("Processed : %6d" % (n_img_processed))
    print(" ")
def updateTable(self):
    """
    Update the FSC table: one row per directory matching self.patterns,
    with columns for directory name, refinement iteration count, and
    resolution estimates from e2refine even/odd and e2eotest results.
    """
    dirs = []
    for pattern in self.patterns:
        dirs.extend(glob.glob("%s*" % pattern))
    self.tablewidget.setRowCount(len(dirs))
    for i, directory in enumerate(sorted(dirs)):
        # load each directory
        qwi_dirname = QtGui.QTableWidgetItem(str(directory))
        self.tablewidget.setItem(i, 0, qwi_dirname)
        # load info from DB; skip directories with no convergence results
        db_name = "bdb:" + str(directory) + "#convergence.results"
        if not db_check_dict(db_name):
            continue
        db = db_open_dict(db_name, ro=True)
        keys = db.keys()
        # count iterations, refinement
        rcount = 0
        ccount = 0
        for key in keys:
            if "init_00_fsc" == key:
                rcount += 1  # do I need to increment ccount too?
                continue
            # consecutive-iteration FSC keys advance the refinement count
            if ("%02d_%02d_fsc" % (rcount, rcount + 1)) == key:
                rcount += 1
                continue
            if ("conv_even_odd_%02d" % (ccount + 1)) == key:
                ccount += 1
        # no need for further processing
        if rcount == 0 and ccount == 0:
            continue
        # load refinement results
        if rcount > 0:
            qwi_iterations = QtGui.QTableWidgetItem(str(rcount))
            qwi_iterations.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tablewidget.setItem(i, 1, qwi_iterations)
        # get res estimates, I jacked this from David
        reo = EMAN2fsc.get_e2refine_even_odd_results_list(keys)
        eo = EMAN2fsc.get_e2eotest_results_list(keys)
        if len(reo) > 0:
            # get the latest one, this will be the last as guaranteed by sorted results
            last_res = reo[-1]
            [xaxis, yaxis] = db[last_res]
            resolution = self.find_first_point_5_crossing(xaxis, yaxis, .143)  # with this test, the .143 threshold is reasonable
            qwi_res = QtGui.QTableWidgetItem(str(resolution))
            qwi_res.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tablewidget.setItem(i, 2, qwi_res)
        if len(eo) > 0:
            last_res = eo[-1]
            [xaxis, yaxis] = db[last_res]
            # NOTE(review): no threshold argument here — presumably the
            # default 0.5 crossing is intended for eotest curves; confirm
            resolution = self.find_first_point_5_crossing(xaxis, yaxis)
            qwi_eotest = QtGui.QTableWidgetItem(str(resolution))
            qwi_eotest.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tablewidget.setItem(i, 3, qwi_eotest)