def peaksearch_driver(options, args):
    """
    To be called with options from command line
    """
    ################## debugging still
    for a in args:
        print("arg: " + str(a) + "," + str(type(a)))
    for o in list(options.__dict__.keys()):  # FIXME
        if getattr(options, o) in ["False", "FALSE", "false"]:
            setattr(options, o, False)
        if getattr(options, o) in ["True", "TRUE", "true"]:
            setattr(options, o, True)
        print("option:", str(o), str(getattr(options, o)), ",",
              str(type(getattr(options, o))))
    ###################
    print("This peaksearcher is from", __file__)

    if options.killfile is not None and os.path.exists(options.killfile):
        print("The purpose of the killfile option is to create that file")
        print("only when you want peaksearcher to stop")
        print("If the file already exists when you run peaksearcher it is")
        print("never going to get started")
        raise ValueError("Your killfile " + options.killfile + " already exists")

    if options.thresholds is None:
        raise ValueError("No thresholds supplied [-t 1234]")

    if len(args) == 0 and options.stem is None:
        raise ValueError("No files to process")

    # What to do about spatial
    if options.perfect == "N" and os.path.exists(options.spline):
        print("Using spatial from", options.spline)
        corrfunc = blobcorrector.correctorclass(options.spline)
    else:
        print("Avoiding spatial correction")
        corrfunc = blobcorrector.perfect()

    # This is always the case now
    corrfunc.orientation = "edf"
    scan = None
    if options.format in ['bruker', 'BRUKER', 'Bruker']:
        extn = ""
        if options.perfect != "N":
            print("WARNING: Your spline file is ImageD11 specific")
            print("... from a fabio converted to edf first")
    elif options.format == 'GE':
        extn = ""
        # KE: This seems to be a mistake and keeps PeakSearch from working in
        # some cases. Should be revisited if commenting it out causes problems.
        # options.ndigits = 0
    elif options.format == 'py':
        import importlib
        sys.path.append('.')
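        # Hedged note: with the 'py' format the stem is treated as an
        # importable module that must itself define first_image and
        # file_series_object (the two names read just below). A minimal
        # hypothetical plugin module might look like (file name made up):
        #   # myscan.py
        #   import fabio, fabio.file_series
        #   first_image = fabio.open("scan_0000.edf")
        #   file_series_object = fabio.file_series.new_file_series(
        #       first_image, nimages=10)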
        scan = importlib.import_module(options.stem)
        first_image = scan.first_image
        file_series_object = scan.file_series_object
    else:
        extn = options.format

    if scan is None:
        if options.interlaced:
            f0 = ["%s0_%04d%s" % (options.stem, i, options.format)
                  for i in range(options.first, options.last + 1)]
            f1 = ["%s1_%04d%s" % (options.stem, i, options.format)
                  for i in range(options.first, options.last + 1)]
            if options.iflip:
                f1 = [a for a in f1[::-1]]

            def fso(f0, f1):
                for a, b in zip(f0, f1):
                    try:
                        yield fabio.open(a)
                        yield fabio.open(b)
                    except:
                        print(a, b)
                        raise
            file_series_object = fso(f0, f1)
            first_image = openimage(f0[0])
        else:
            import fabio
            if options.ndigits > 0:
                file_name_object = fabio.filename_object(
                    options.stem,
                    num=options.first,
                    extension=extn,
                    digits=options.ndigits)
            else:
                file_name_object = options.stem

            first_image = openimage(file_name_object)
            import fabio.file_series
            # Use traceback = True for debugging
            file_series_object = fabio.file_series.new_file_series(
                first_image,
                nimages=options.last - options.first + 1,
                traceback=True)

    # Output files:
    if options.outfile[-4:] != ".spt":
        options.outfile = options.outfile + ".spt"
        print("Your output file must end with .spt, changing to", options.outfile)

    # Omega overrides
    # global OMEGA, OMEGASTEP, OMEGAOVERRIDE
    OMEGA = options.OMEGA
    OMEGASTEP = options.OMEGASTEP
    OMEGAOVERRIDE = options.OMEGAOVERRIDE

    # Make a blobimage the same size as the first image to process

    # Convert the threshold args to floats;
    # must be a unique list, so go via a set
    thresholds_list = list(set([float(t) for t in options.thresholds]))
    thresholds_list.sort()

    li_objs = {}  # label image objects, dict keyed by threshold

    s = first_image.data.shape  # data array shape

    # Create label images
    for t in thresholds_list:
        # the last 4 chars are guaranteed to be .spt above
        mergefile = "%s_t%d.flt" % (options.outfile[:-4], t)
        spotfile = "%s_t%d.spt" % (options.outfile[:-4], t)
        li_objs[t] = labelimage(shape=s,
                                fileout=mergefile,
                                spatial=corrfunc,
                                sptfile=spotfile)
        print("make labelimage", mergefile, spotfile)

    # Not sure why that was there (I think if glob was used)
    # files.sort()

    if options.dark is not None:
        print("Using dark (background)", options.dark)
        darkimage = openimage(options.dark).data.astype(numpy.float32)
    else:
        darkimage = None

    if options.darkoffset != 0:
        print("Adding darkoffset", options.darkoffset)
        if darkimage is None:
            darkimage = options.darkoffset
        else:
            darkimage += options.darkoffset

    if options.flood is not None:
        floodimage = openimage(options.flood).data
        # crop each axis by a sixth of its own size to get the middle
        cen0 = int(floodimage.shape[0] / 6)
        cen1 = int(floodimage.shape[1] / 6)
        middle = floodimage[cen0:-cen0, cen1:-cen1]
        nmid = middle.shape[0] * middle.shape[1]
        floodavg = numpy.mean(middle)
        print("Using flood", options.flood, "average value", floodavg)
        if floodavg < 0.7 or floodavg > 1.3:
            print("Your flood image does not seem to be normalised!!!")
    else:
        floodimage = None

    start = time.time()
    print("File being treated in -> out, elapsed time")
    # If we want to do read-ahead threading we fill up a Queue object with
    # data objects.
    # THERE MUST BE ONLY ONE peaksearching thread for 3D merging to work.
    # There could be several read_and_correct threads, but they would have
    # to get the order right; for now only one.
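    # Pipeline note: with oneThread the read / correct / peaksearch steps run
    # serially below; otherwise a thread pipeline is built:
    #   reader -> read_queue -> corrector -> queues[t] -> peaksearch_one[t]
    # A single reader and a single searcher per threshold preserve the frame
    # ordering that the 3D merging in labelimage depends on.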
    if options.oneThread:
        # Wrap in a function to allow profiling (perhaps? what about globals??)
        def go_for_it(file_series_object, darkimage, floodimage,
                      corrfunc, thresholds_list, li_objs,
                      OMEGA, OMEGASTEP, OMEGAOVERRIDE):
            for data_object in file_series_object:
                t = timer()
                if not hasattr(data_object, "data"):
                    # Is usually an IOError
                    if isinstance(data_object[1], IOError):
                        sys.stdout.write(data_object[1].strerror + '\n')
                        # data_object[1].filename
                    else:
                        import traceback
                        traceback.print_exception(data_object[0],
                                                  data_object[1],
                                                  data_object[2])
                        sys.exit()
                    continue
                filein = data_object.filename
                if OMEGAOVERRIDE or "Omega" not in data_object.header:
                    data_object.header["Omega"] = OMEGA
                    OMEGA += OMEGASTEP
                    OMEGAOVERRIDE = True  # once you do it once, continue
                if not OMEGAOVERRIDE and options.omegamotor != "Omega":
                    data_object.header["Omega"] = float(
                        data_object.header[options.omegamotor])
                data_object = correct(data_object, darkimage, floodimage,
                                      do_median=options.median,
                                      monitorval=options.monitorval,
                                      monitorcol=options.monitorcol,
                                      )
                t.tick(filein + " io/cor")
                peaksearch(filein, data_object, corrfunc,
                           thresholds_list, li_objs)
            for t in thresholds_list:
                li_objs[t].finalise()

        go_for_it(file_series_object, darkimage, floodimage,
                  corrfunc, thresholds_list, li_objs,
                  OMEGA, OMEGASTEP, OMEGAOVERRIDE)

    else:
        print("Going to use threaded version!")
        try:
            # TODO move this to a module ?

            class read_only(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue, file_series_obj, myname="read_only",
                             OMEGA=0, OMEGAOVERRIDE=False, OMEGASTEP=1):
                    """ Reads files in file_series_obj, writes to queue """
                    self.queue = queue
                    self.file_series_obj = file_series_obj
                    self.OMEGA = OMEGA
                    self.OMEGAOVERRIDE = OMEGAOVERRIDE
                    self.OMEGASTEP = OMEGASTEP
                    ImageD11_thread.ImageD11_thread.__init__(self,
                                                             myname=myname)
                    print("Reading thread initialised", end=' ')

                def ImageD11_run(self):
                    """ Read images and copy them to self.queue """
                    for data_object in self.file_series_obj:
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                        if not hasattr(data_object, "data"):
                            # Is usually an IOError
                            if isinstance(data_object[1], IOError):
                                sys.stdout.write(
                                    str(data_object[1].strerror) + '\n')
                            else:
                                import traceback
                                traceback.print_exception(data_object[0],
                                                          data_object[1],
                                                          data_object[2])
                                sys.exit()
                            continue
                        ti = timer()
                        filein = data_object.filename + "[%d]" % (
                            data_object.currentframe)
                        try:
                            if self.OMEGAOVERRIDE:
                                # print("Over ride due to option", self.OMEGAOVERRIDE)
                                data_object.header["Omega"] = self.OMEGA
                                self.OMEGA += self.OMEGASTEP
                            else:
                                if options.omegamotor != 'Omega' and \
                                   options.omegamotor in data_object.header:
                                    data_object.header["Omega"] = float(
                                        data_object.header[options.omegamotor])
                                if "Omega" not in data_object.header:
                                    # print("Computing omega as not in header")
                                    data_object.header["Omega"] = self.OMEGA
                                    self.OMEGA += self.OMEGASTEP
                                    self.OMEGAOVERRIDE = True
                            # print("Omega =", data_object.header["Omega"], data_object.filename)
                        except KeyboardInterrupt:
                            raise
                        except:
                            continue
                        ti.tick(filein)
                        self.queue.put((filein, data_object), block=True)
                        ti.tock(" enqueue ")
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                    # Flag the end of the series
                    self.queue.put((None, None), block=True)
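            # Note on the Omega handling above: the first frame lacking an
            # "Omega" header flips the reader into override mode, so every
            # later frame gets a synthesised value (OMEGA + n * OMEGASTEP)
            # rather than a mix of header and synthesised omegas.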
            class correct_one_to_many(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue_read, queues_out, thresholds_list,
                             dark=None, flood=None, myname="correct_one",
                             monitorcol=None, monitorval=None,
                             do_median=False):
                    """ Using a single reading queue retains a global ordering
                    corrects and copies images to output queues doing
                    correction once """
                    self.queue_read = queue_read
                    self.queues_out = queues_out
                    self.dark = dark
                    self.flood = flood
                    self.do_median = do_median
                    self.monitorcol = monitorcol
                    self.monitorval = monitorval
                    self.thresholds_list = thresholds_list
                    ImageD11_thread.ImageD11_thread.__init__(self,
                                                             myname=myname)

                def ImageD11_run(self):
                    while not self.ImageD11_stop_now():
                        ti = timer()
                        filein, data_object = self.queue_read.get(block=True)
                        if filein is None:
                            for t in self.thresholds_list:
                                self.queues_out[t].put((None, None),
                                                       block=True)
                            # exit the while loop
                            break
                        data_object = correct(data_object, self.dark,
                                              self.flood,
                                              do_median=self.do_median,
                                              monitorval=self.monitorval,
                                              monitorcol=self.monitorcol,
                                              )
                        ti.tick(filein + " correct ")
                        for t in self.thresholds_list:
                            # Hope that data object is read only
                            self.queues_out[t].put((filein, data_object),
                                                   block=True)
                        ti.tock(" enqueue ")
                    print("Corrector thread stopping")

            class peaksearch_one(ImageD11_thread.ImageD11_thread):
                def __init__(self, q, corrfunc, threshold, li_obj,
                             myname="peaksearch_one"):
                    """ This will handle a single threshold and labelimage
                    object """
                    self.q = q
                    self.corrfunc = corrfunc
                    self.threshold = threshold
                    self.li_obj = li_obj
                    ImageD11_thread.ImageD11_thread.__init__(
                        self,
                        myname=myname + "_" + str(threshold))

                def run(self):
                    while not self.ImageD11_stop_now():
                        filein, data_object = self.q.get(block=True)
                        if not hasattr(data_object, "data"):
                            break
                        peaksearch(filein, data_object, self.corrfunc,
                                   [self.threshold],
                                   {self.threshold: self.li_obj})
                    self.li_obj.finalise()

            # 8 MB images - max 40 MB in this queue
            read_queue = queue.Queue(5)
            reader = read_only(read_queue, file_series_object,
                               OMEGA=OMEGA,
                               OMEGASTEP=OMEGASTEP,
                               OMEGAOVERRIDE=OMEGAOVERRIDE)
            reader.start()
            queues = {}
            searchers = {}
            for t in thresholds_list:
                print("make queue and peaksearch for threshold", t)
                queues[t] = queue.Queue(3)
                searchers[t] = peaksearch_one(queues[t], corrfunc,
                                              t, li_objs[t])
            corrector = correct_one_to_many(read_queue,
                                            queues,
                                            thresholds_list,
                                            dark=darkimage,
                                            flood=floodimage,
                                            do_median=options.median,
                                            monitorcol=options.monitorcol,
                                            monitorval=options.monitorval)
            corrector.start()
            my_threads = [reader, corrector]
            for t in thresholds_list[::-1]:
                searchers[t].start()
                my_threads.append(searchers[t])
            nalive = len(my_threads)

            def empty_queue(q):
                while 1:
                    try:
                        q.get(block=False, timeout=1)
                    except:
                        break
                q.put((None, None), block=False)

            while nalive > 0:
                try:
                    nalive = 0
                    for thr in my_threads:
                        if thr.is_alive():
                            nalive += 1
                    if options.killfile is not None and \
                       os.path.exists(options.killfile):
                        raise KeyboardInterrupt()
                    time.sleep(1)
                except KeyboardInterrupt:
                    print("Got keyboard interrupt in waiting loop")
                    ImageD11_thread.stop_now = True
                    try:
                        time.sleep(1)
                    except:
                        pass
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    print("finishing from waiting loop")
                except:
                    print("Caught exception in waiting loop")
                    ImageD11_thread.stop_now = True
                    time.sleep(1)
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    raise
        except ImportError:
            print("Probably no threading module present")
            raise
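# Hedged usage sketch, not part of the original script: peaksearch_driver
# expects an optparse-style options object with the attributes read above.
# The attribute names are the ones this driver actually uses; the stem,
# frame range and threshold values below are hypothetical.
def _example_peaksearch_call():
    """ Illustrative only: build a minimal options object, run the driver """
    from types import SimpleNamespace
    opts = SimpleNamespace(
        stem="scan",              # hypothetical file name stem
        format=".edf",            # extension, including the dot
        first=0, last=9,          # inclusive frame number range
        ndigits=4,                # zero-padding of the frame number
        outfile="peaks.spt",      # a .spt suffix is enforced above
        thresholds=[100, 1000],   # one labelimage per unique threshold
        perfect="Y",              # "Y" skips the spline spatial correction
        spline=None,
        dark=None, darkoffset=0, flood=None, median=False,
        monitorval=None, monitorcol=None,
        OMEGA=0.0, OMEGASTEP=1.0, OMEGAOVERRIDE=False,
        omegamotor="Omega",
        killfile=None, interlaced=False, iflip=False,
        oneThread=True,           # use the serial code path
    )
    peaksearch_driver(opts, [])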
def bgmaker(options):
    """ execute the command line script """
    # Generate list of files to process
    if options.format in ['bruker', 'BRUKER', 'Bruker', 'GE']:
        extn = ""
    else:
        extn = options.format

    first_image_name = fabio.filename_object(options.stem,
                                             num=options.first,
                                             extension=extn,
                                             digits=options.ndigits)
    first_image = openimage(first_image_name)
    print(first_image.filename)
    allimagenumbers = list(
        range(options.first, options.last + 1 - options.step, options.step))

    if options.kalman_error <= 0:
        print("Using minimum image algorithm")
        bko = minimum_image(image=first_image.data)
    else:
        print("Using Kalman algorithm with error =", options.kalman_error)
        bko = kbg(first_image.data,
                  options.kalman_error * options.kalman_error)

    print("Taking images in random order")
    random.seed(42)  # reproducible
    random.shuffle(allimagenumbers)
    for current_num in allimagenumbers:
        try:
            im = first_image.getframe(current_num)
            print(im.filename)
            bko.add_image(im.data)
        except KeyboardInterrupt:
            print("Got a keyboard interrupt")
            break
        except:
            import traceback
            traceback.print_exc()
            print("Failed for", current_num)

    # finally write out the answer
    # model header + data
    # write as edf - we should actually have a way to flag
    # which fabioimage formats know how to write themselves
    if options.outfile[-3:] == "edf":
        print("writing", options.outfile, "in edf format")
        im = fabio.edfimage.edfimage(data=bko.bkg)
    else:
        im = first_image
        im.data = bko.bkg
    try:
        im.write(options.outfile, force_type=im.data.dtype)
    except TypeError:
        # this fabio image type does not accept force_type
        im.write(options.outfile)
    except:
        print("problem writing")
        print("trying to write", options.outfile, "in edf format")
        im = fabio.edfimage.edfimage(data=bko.bkg)
        try:
            im.write(options.outfile, force_type=im.data.dtype)
        except TypeError:
            im.write(options.outfile)
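# Hedged usage sketch, not part of the original script: bgmaker also takes an
# optparse-style options object.  Attribute names match the ones read above;
# the values are hypothetical.
def _example_bgmaker_call():
    """ Illustrative only: estimate a background via the minimum-image path """
    from types import SimpleNamespace
    opts = SimpleNamespace(
        stem="scan", format=".edf", ndigits=4,
        first=0, last=99, step=1,     # frame range and sampling step
        kalman_error=0,               # <= 0 selects minimum_image, > 0 kbg
        outfile="background.edf",     # an "edf" suffix forces edf output
    )
    bgmaker(opts)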