def make_powder_mask(parfile, ndeg=1, splinefile=None, dims=(2048, 2048)):
    """ Compute a two theta and azimuth image """
    pars = parameters.parameters()
    pars.loadparameters(parfile)
    if splinefile is None:
        spatial = blobcorrector.perfect()
    else:
        spatial = blobcorrector.correctorclass(splinefile)
    xim, yim = spatial.make_pixel_lut(dims)
    peaks = [np.ravel(xim), np.ravel(yim)]
    tth, eta = transform.compute_tth_eta(peaks, **pars.get_parameters())
    tth.shape = dims
    eta.shape = dims
    # Assume a circle geometry for now
    # tth * eta ~ length on detector
    # lim = tth * eta
    # need some idea how to cut it up...
    # degree bins (note: ndeg is not used yet; bins are fixed at 1 degree)
    m = (eta.astype(int) % 2) == 0
    return m
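# Usage sketch (filenames are hypothetical): build the alternating
# 1-degree azimuth mask and apply it to a detector frame.
#
# import fabio
# mask = make_powder_mask("powder.par", dims=(2048, 2048))
# frame = fabio.open("powder_0000.edf").data
# masked = frame * mask   # keeps every other eta bin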
def __init__(self, dims, pars, ubi, splinefile=None, np=16, border=10,
             omegarange=list(range(360)), maxpix=None, mask=None):
    """
    Create a new mapper instance. It will transform images into
    reciprocal space (has its own rsv object holding the space)
    dims       - image dimensions
    pars       - ImageD11 parameters object for the experiment
    ubi        - Orientation matrix (ImageD11 style)
    np         - Number of pixels per hkl index [16]
    border     - amount to add around edge of images [10]
    omegarange - omega values to be mapped (0->360)
    maxpix     - value for saturated pixels to be ignored
    mask       - fit2d style mask for removing bad pixels / border
    """
    if len(dims) != 2:
        raise Exception("For 2D dims!")
    self.dims = dims
    # Experiment parameters
    if not isinstance(pars, parameters.parameters):
        raise Exception("Pars should be an ImageD11 parameters object")
    for key in ["distance", "wavelength"]:  # etc
        assert key in pars.parameters
    self.pars = pars
    # Orientation matrix
    self.ubi = ubi
    # Saturation
    self.maxpix = maxpix
    # Mask
    self.mask = mask
    if self.mask is not None:
        assert self.mask.shape == self.dims, \
            "Mask dimensions must match image"
    # spatial
    if splinefile is None:
        self.spatial = blobcorrector.perfect()
    else:
        self.spatial = blobcorrector.correctorclass(splinefile)
    # npixels
    self.np = np
    self.uspace = np * ubi
    self.find_vol(border=border, omegarange=omegarange)
    self.rsv.metadata['ubi'] = ubi
    self.rsv.metadata['uspace'] = self.uspace
    # Make and cache the k vectors
    self.make_k_vecs()
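# Construction sketch, assuming this __init__ belongs to ImageD11's
# rsv_mapper class (the filenames below are hypothetical):
#
# from ImageD11 import parameters, rsv_mapper
# import numpy
# pars = parameters.parameters()
# pars.loadparameters("experiment.par")
# ubi = numpy.loadtxt("grain.ubi")   # 3x3 ImageD11 UBI matrix
# mapper = rsv_mapper.rsv_mapper((2048, 2048), pars, ubi,
#                                np=16, border=10)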
def compute_rad_arc(self):
    """
    This part needs more work - how to properly define the output
    after spd is patched. For now we aim to make a triangle filling
    the same array size.
    FIXME - this remains unclear to me, what are the output units
    supposed to be??
    """
    tth_rad = self.tth * math.pi / 180.0
    eta_rad = self.eta * math.pi / 180.0
    arclength = tth_rad * eta_rad
    # x-axis, eg [0], is tth
    tthmax = numpy.max(self.tth)
    tthmin = numpy.min(self.tth)
    tthstep = (tthmax - tthmin) / (self.dims[0] - 1)
    self.tthbin = numpy.floor((self.tth - tthmin) / tthstep)
    self.tthvals = numpy.arange(tthmin, tthmax + tthstep * 0.5, tthstep)
    # Ideally we want the arc bins to vary with tth?
    # arcmax = numpy.max( arclength )
    # arcmin = numpy.min( arclength )  # 4 corners of image
    arcmin = arclength.min()
    arcmax = arclength.max()
    arcstep = (arcmax - arcmin) / (self.dims[1] - 1)
    arcmid = 0.5 * (arcmax + arcmin)
    # Make integer pixel id images
    self.arcbin = numpy.floor((arclength - arcmid) / arcstep) \
        + self.dims[1] // 2
    assert self.tthbin.min() >= 0
    assert self.tthbin.max() < self.dims[0], self.tthbin.max()
    assert self.arcbin.min() >= 0, self.arcbin.min()
    assert self.arcbin.max() < self.dims[1]
    # Now convert these to displacements compared to the input image.
    # Use the same code as for the spline case to get the "x/y" images.
    ideal = blobcorrector.perfect()
    idealx, idealy = ideal.make_pixel_lut(self.dims)
    self.dx = self.tthbin - idealx
    self.dy = self.arcbin - idealy
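# Self-contained sketch (illustrative values) of the binning rule used
# above: a linear map of a value range onto integer pixel bins so that
# the minimum lands in bin 0 and the maximum in bin nbins-1.
import numpy
vals = numpy.linspace(3.0, 17.0, 1000)   # e.g. two-theta angles in degrees
nbins = 256                              # stands in for self.dims[0]
step = (vals.max() - vals.min()) / (nbins - 1)
bins = numpy.floor((vals - vals.min()) / step)
assert bins.min() >= 0 and bins.max() < nbins   # same checks as above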
def __init__(self,
             shape,
             fileout=sys.stdout,
             spatial=blobcorrector.perfect(),
             flipper=flip2,
             sptfile=sys.stdout):
    """
    shape   - image dimensions
    fileout - writeable stream for merged peaks
    spatial - correction of peak positions
    """
    self.shape = shape                  # Array shape
    if not hasattr(sptfile, "write"):
        self.sptfile = open(sptfile, "w")
    else:
        self.sptfile = sptfile          # place for peaksearch to print - file object
    self.corrector = spatial            # applies spatial distortion
    self.fs2yz = flipper                # generates y/z
    self.onfirst = 1                    # Flag for first image in series
    self.onlast = 0                     # Flag for last image in series
    self.blim = np.zeros(shape, int)    # 'current' blob image
    self.npk = 0                        # Number of peaks on current
    self.res = None                     # properties of current
    self.threshold = None               # cache for writing files
    self.lastbl = np.zeros(shape, int)  # 'previous' blob image
    self.lastres = None
    self.lastnp = "FIRST"               # Flags initial state
    self.verbose = 0                    # For debugging
    if hasattr(fileout, "write"):
        self.outfile = fileout
    else:
        self.outfile = open(fileout, "w")
    self.spot3d_id = 0                  # counter for printing
    try:
        self.outfile.write(self.titles)
    except:
        print(type(self.outfile), self.outfile)
        raise
def mymain():
    # If we are running from a command line:
    if len(sys.argv) < 4 or not os.path.exists(sys.argv[1]):
        help()
        sys.exit()
    inname = sys.argv[1]
    outname = sys.argv[2]
    if os.path.exists(outname):
        ans = input("Sure you want to overwrite %s ?" % (outname))
        if ans.strip().lower()[:1] != 'y':
            sys.exit()
    splinename = sys.argv[3]
    if splinename == 'perfect':
        cor = blobcorrector.perfect()
    else:
        cor = blobcorrector.correctorclass(splinename)
    fix_flt(inname, outname, cor)
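# Example invocation (script and file names are illustrative):
#   python fix_flt.py peaks_in.flt peaks_out.flt distortion.spline
#   python fix_flt.py peaks_in.flt peaks_out.flt perfect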
def __init__(self, splinefile=None, parfile=None):
    """
    splinefile = fit2d spline file, or None for images that are
                 already corrected
    parfile    = ImageD11 parameter file. Can be fitted using the old
                 ImageD11_gui.py or the newer fable.transform plugin.
    """
    self.splinefile = splinefile
    if self.splinefile is None:
        self.spatial = blobcorrector.perfect()
    else:
        self.spatial = blobcorrector.correctorclass(splinefile)
    self.parfile = parfile
    self.pars = parameters.parameters()
    self.pars.loadparameters(parfile)
    for key in self.required_pars:
        if key not in self.pars.parameters:
            raise Exception("Missing parameter " + str(key))
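# Usage sketch, assuming this __init__ belongs to a setup class with a
# required_pars attribute (called "fixture" here for illustration; the
# filenames are hypothetical):
#
# fix = fixture(splinefile="distortion.spline", parfile="experiment.par")
# fix_perfect = fixture(splinefile=None, parfile="experiment.par")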
def peaksearch_driver(options, args):
    """
    To be called with options from command line
    """
    ################## debugging still
    for a in args:
        print("arg: " + str(a) + "," + str(type(a)))
    for o in list(options.__dict__.keys()):  # FIXME
        if getattr(options, o) in ["False", "FALSE", "false"]:
            setattr(options, o, False)
        if getattr(options, o) in ["True", "TRUE", "true"]:
            setattr(options, o, True)
        print("option:", str(o), str(getattr(options, o)), ",",
              str(type(getattr(options, o))))
    ###################
    print("This peaksearcher is from", __file__)
    if options.killfile is not None and os.path.exists(options.killfile):
        print("The purpose of the killfile option is to create that file")
        print("only when you want peaksearcher to stop")
        print("If the file already exists when you run peaksearcher it is")
        print("never going to get started")
        raise ValueError("Your killfile " + options.killfile +
                         " already exists")
    if options.thresholds is None:
        raise ValueError("No thresholds supplied [-t 1234]")
    if len(args) == 0 and options.stem is None:
        raise ValueError("No files to process")
    # What to do about spatial
    if options.perfect == "N" and os.path.exists(options.spline):
        print("Using spatial from", options.spline)
        corrfunc = blobcorrector.correctorclass(options.spline)
    else:
        print("Avoiding spatial correction")
        corrfunc = blobcorrector.perfect()
    # This is always the case now
    corrfunc.orientation = "edf"
    import fabio  # needed by both the interlaced and plain branches below
    scan = None
    if options.format in ['bruker', 'BRUKER', 'Bruker']:
        extn = ""
        if options.perfect != "N":
            print("WARNING: Your spline file is ImageD11 specific")
            print("... from a fabio converted to edf first")
    elif options.format == 'GE':
        extn = ""
        # KE: This seems to be a mistake and keeps PeakSearch from working in
        # some cases. Should be revisited if commenting it out causes problems.
        # options.ndigits = 0
    elif options.format == 'py':
        import importlib
        sys.path.append('.')
        scan = importlib.import_module(options.stem)
        first_image = scan.first_image
        file_series_object = scan.file_series_object
    else:
        extn = options.format
    if scan is None:
        if options.interlaced:
            f0 = ["%s0_%04d%s" % (options.stem, i, options.format)
                  for i in range(options.first, options.last + 1)]
            f1 = ["%s1_%04d%s" % (options.stem, i, options.format)
                  for i in range(options.first, options.last + 1)]
            if options.iflip:
                f1 = [a for a in f1[::-1]]

            def fso(f0, f1):
                for a, b in zip(f0, f1):
                    try:
                        yield fabio.open(a)
                        yield fabio.open(b)
                    except:
                        print(a, b)
                        raise
            file_series_object = fso(f0, f1)
            first_image = openimage(f0[0])
        else:
            if options.ndigits > 0:
                file_name_object = fabio.filename_object(
                    options.stem,
                    num=options.first,
                    extension=extn,
                    digits=options.ndigits)
            else:
                file_name_object = options.stem
            first_image = openimage(file_name_object)
            import fabio.file_series
            # Use traceback = True for debugging
            file_series_object = fabio.file_series.new_file_series(
                first_image,
                nimages=options.last - options.first + 1,
                traceback=True)
    # Output files:
    if options.outfile[-4:] != ".spt":
        options.outfile = options.outfile + ".spt"
        print("Your output file must end with .spt, changing to",
              options.outfile)
    # Omega overrides
    # global OMEGA, OMEGASTEP, OMEGAOVERRIDE
    OMEGA = options.OMEGA
    OMEGASTEP = options.OMEGASTEP
    OMEGAOVERRIDE = options.OMEGAOVERRIDE
    # Make a blobimage the same size as the first image to process.
    # List comprehension - convert remaining args to floats;
    # must be a unique list so go via a set
    thresholds_list = list(set([float(t) for t in options.thresholds]))
    thresholds_list.sort()
    li_objs = {}                # label image objects, keyed by threshold
    s = first_image.data.shape  # data array shape
    # Create label images
    for t in thresholds_list:
        # the last 4 chars are guaranteed to be .spt above
        mergefile = "%s_t%d.flt" % (options.outfile[:-4], t)
        spotfile = "%s_t%d.spt" % (options.outfile[:-4], t)
        li_objs[t] = labelimage(shape=s,
                                fileout=mergefile,
                                spatial=corrfunc,
                                sptfile=spotfile)
        print("make labelimage", mergefile, spotfile)
    # Not sure why that was there (I think if glob was used)
    # files.sort()
    if options.dark is not None:
        print("Using dark (background)", options.dark)
        darkimage = openimage(options.dark).data.astype(numpy.float32)
    else:
        darkimage = None
    if options.darkoffset != 0:
        print("Adding darkoffset", options.darkoffset)
        if darkimage is None:
            darkimage = options.darkoffset
        else:
            darkimage += options.darkoffset
    if options.flood is not None:
        floodimage = openimage(options.flood).data
        cen0 = int(floodimage.shape[0] / 6)
        cen1 = int(floodimage.shape[1] / 6)
        middle = floodimage[cen0:-cen0, cen1:-cen1]
        nmid = middle.shape[0] * middle.shape[1]
        floodavg = numpy.mean(middle)
        print("Using flood", options.flood, "average value", floodavg)
        if floodavg < 0.7 or floodavg > 1.3:
            print("Your flood image does not seem to be normalised!!!")
    else:
        floodimage = None
    start = time.time()
    print("File being treated in -> out, elapsed time")
    # If we want to do read-ahead threading we fill up a Queue object
    # with data objects.
    # THERE MUST BE ONLY ONE peaksearching thread for 3D merging to work;
    # there could be several read_and_correct threads, but they would
    # have to keep the order right - for now there is only one.
    if options.oneThread:
        # Wrap in a function to allow profiling (perhaps? what about globals??)
        def go_for_it(file_series_object, darkimage, floodimage,
                      corrfunc, thresholds_list, li_objs,
                      OMEGA, OMEGASTEP, OMEGAOVERRIDE):
            for data_object in file_series_object:
                t = timer()
                if not hasattr(data_object, "data"):
                    # Is usually an IOError
                    if isinstance(data_object[1], IOError):
                        sys.stdout.write(data_object[1].strerror + '\n')
                        # data_object[1].filename
                    else:
                        import traceback
                        traceback.print_exception(data_object[0],
                                                  data_object[1],
                                                  data_object[2])
                        sys.exit()
                    continue
                filein = data_object.filename
                if OMEGAOVERRIDE or "Omega" not in data_object.header:
                    data_object.header["Omega"] = OMEGA
                    OMEGA += OMEGASTEP
                    OMEGAOVERRIDE = True  # once you do it once, continue
                if not OMEGAOVERRIDE and options.omegamotor != "Omega":
                    data_object.header["Omega"] = float(
                        data_object.header[options.omegamotor])
                data_object = correct(data_object, darkimage, floodimage,
                                      do_median=options.median,
                                      monitorval=options.monitorval,
                                      monitorcol=options.monitorcol,
                                      )
                t.tick(filein + " io/cor")
                peaksearch(filein, data_object, corrfunc,
                           thresholds_list, li_objs)
            for t in thresholds_list:
                li_objs[t].finalise()

        go_for_it(file_series_object, darkimage, floodimage,
                  corrfunc, thresholds_list, li_objs,
                  OMEGA, OMEGASTEP, OMEGAOVERRIDE)
    else:
        print("Going to use threaded version!")
        try:
            # TODO move this to a module ?
            class read_only(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue, file_series_obj,
                             myname="read_only",
                             OMEGA=0, OMEGAOVERRIDE=False, OMEGASTEP=1):
                    """ Reads files in file_series_obj, writes to queue """
                    self.queue = queue
                    self.file_series_obj = file_series_obj
                    self.OMEGA = OMEGA
                    self.OMEGAOVERRIDE = OMEGAOVERRIDE
                    self.OMEGASTEP = OMEGASTEP
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname)
                    print("Reading thread initialised", end=' ')

                def ImageD11_run(self):
                    """ Read images and copy them to self.queue """
                    for data_object in self.file_series_obj:
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                        if not hasattr(data_object, "data"):
                            # Is usually an IOError
                            if isinstance(data_object[1], IOError):
                                sys.stdout.write(
                                    str(data_object[1].strerror) + '\n')
                            else:
                                import traceback
                                traceback.print_exception(data_object[0],
                                                          data_object[1],
                                                          data_object[2])
                                sys.exit()
                            continue
                        ti = timer()
                        filein = data_object.filename + "[%d]" % (
                            data_object.currentframe)
                        try:
                            if self.OMEGAOVERRIDE:
                                # Override due to the command line option
                                data_object.header["Omega"] = self.OMEGA
                                self.OMEGA += self.OMEGASTEP
                            else:
                                if options.omegamotor != 'Omega' and \
                                   options.omegamotor in data_object.header:
                                    data_object.header["Omega"] = float(
                                        data_object.header[options.omegamotor])
                                if "Omega" not in data_object.header:
                                    # Compute omega as it is not in the header
                                    data_object.header["Omega"] = self.OMEGA
                                    self.OMEGA += self.OMEGASTEP
                                    self.OMEGAOVERRIDE = True
                        except KeyboardInterrupt:
                            raise
                        except:
                            continue
                        ti.tick(filein)
                        self.queue.put((filein, data_object), block=True)
                        ti.tock(" enqueue ")
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                    # Flag the end of the series
                    self.queue.put((None, None), block=True)

            class correct_one_to_many(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue_read, queues_out, thresholds_list,
                             dark=None, flood=None, myname="correct_one",
                             monitorcol=None, monitorval=None,
                             do_median=False):
                    """ Using a single reading queue retains a global
                    ordering; corrects and copies images to output queues,
                    doing the correction once """
                    self.queue_read = queue_read
                    self.queues_out = queues_out
                    self.dark = dark
                    self.flood = flood
                    self.do_median = do_median
                    self.monitorcol = monitorcol
                    self.monitorval = monitorval
                    self.thresholds_list = thresholds_list
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname)

                def ImageD11_run(self):
                    while not self.ImageD11_stop_now():
                        ti = timer()
                        filein, data_object = self.queue_read.get(block=True)
                        if filein is None:
                            for t in self.thresholds_list:
                                self.queues_out[t].put((None, None),
                                                       block=True)
                            # exit the while loop
                            break
                        data_object = correct(data_object, self.dark,
                                              self.flood,
                                              do_median=self.do_median,
                                              monitorval=self.monitorval,
                                              monitorcol=self.monitorcol,
                                              )
                        ti.tick(filein + " correct ")
                        for t in self.thresholds_list:
                            # Hope that data object is read only
                            self.queues_out[t].put((filein, data_object),
                                                   block=True)
                        ti.tock(" enqueue ")
                    print("Corrector thread stopping")

            class peaksearch_one(ImageD11_thread.ImageD11_thread):
                def __init__(self, q, corrfunc, threshold, li_obj,
                             myname="peaksearch_one"):
                    """ This will handle a single threshold and
                    labelimage object """
                    self.q = q
                    self.corrfunc = corrfunc
                    self.threshold = threshold
                    self.li_obj = li_obj
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname + "_" + str(threshold))

                def ImageD11_run(self):
                    while not self.ImageD11_stop_now():
                        filein, data_object = self.q.get(block=True)
                        if not hasattr(data_object, "data"):
                            break
                        peaksearch(filein, data_object, self.corrfunc,
                                   [self.threshold],
                                   {self.threshold: self.li_obj})
                    self.li_obj.finalise()

            # 8 MB images - max 40 MB in this queue
            read_queue = queue.Queue(5)
            reader = read_only(read_queue, file_series_object,
                               OMEGA=OMEGA,
                               OMEGASTEP=OMEGASTEP,
                               OMEGAOVERRIDE=OMEGAOVERRIDE)
            reader.start()
            queues = {}
            searchers = {}
            for t in thresholds_list:
                print("make queue and peaksearch for threshold", t)
                queues[t] = queue.Queue(3)
                searchers[t] = peaksearch_one(queues[t], corrfunc,
                                              t, li_objs[t])
            corrector = correct_one_to_many(read_queue,
                                            queues,
                                            thresholds_list,
                                            dark=darkimage,
                                            flood=floodimage,
                                            do_median=options.median,
                                            monitorcol=options.monitorcol,
                                            monitorval=options.monitorval)
            corrector.start()
            my_threads = [reader, corrector]
            for t in thresholds_list[::-1]:
                searchers[t].start()
                my_threads.append(searchers[t])
            nalive = len(my_threads)

            def empty_queue(q):
                while 1:
                    try:
                        q.get(block=False)
                    except:
                        break
                q.put((None, None), block=False)

            while nalive > 0:
                try:
                    nalive = 0
                    for thr in my_threads:
                        if thr.is_alive():
                            nalive += 1
                    if options.killfile is not None and \
                       os.path.exists(options.killfile):
                        raise KeyboardInterrupt()
                    time.sleep(1)
                except KeyboardInterrupt:
                    print("Got keyboard interrupt in waiting loop")
                    ImageD11_thread.stop_now = True
                    try:
                        time.sleep(1)
                    except:
                        pass
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    print("finishing from waiting loop")
                except:
                    print("Caught exception in waiting loop")
                    ImageD11_thread.stop_now = True
                    time.sleep(1)
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    raise
        except ImportError:
            print("Probably no threading module present")
            raise
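# Example command line (the option letters follow the usual ImageD11
# peaksearch.py conventions; treat them as illustrative):
#   peaksearch.py -n data_ -f 0 -l 899 -t 1500 -t 5000 \
#                 -s distortion.spline -d dark.edf -o peaks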
def main():
    """
    A CLI user interface
    """
    import sys, time, os, logging
    start = time.time()
    root = logging.getLogger('')
    root.setLevel(logging.WARNING)
    try:
        from optparse import OptionParser
        parser = OptionParser()
        parser = get_options(parser)
        options, args = parser.parse_args()
    except SystemExit:
        raise
    except:
        parser.print_help()
        print("\nProblem with your options:")
        raise
    if options.mask is not None:
        fit2dmask = (1 - openimage(options.mask).data).ravel()
    else:
        fit2dmask = 1.0
    first_image = True
    imagefiles = ImageD11_file_series.get_series_from_options(options, args)
    tthvals = numpy.load(options.lookup + "_tth.npy")
    try:
        for fim in imagefiles:
            dataim = fim.data
            print(fim.filename)
            if first_image:
                # allocate volume, compute k etc
                first_image = False
                dxim = openimage(options.lookup + "_dx.edf").data
                dyim = openimage(options.lookup + "_dy.edf").data
                outsum = numpy.ravel(numpy.zeros(dataim.shape,
                                                 numpy.float32))
                outnp = numpy.ravel(numpy.zeros(dataim.shape,
                                                numpy.float32))
                e = edfimage()
                # C code from rsv_mapper (not intended to be obfuscated)
                o = blobcorrector.perfect()
                idealx, idealy = o.make_pixel_lut(dataim.shape)
                destx, desty = idealx + dxim, idealy + dyim
                assert destx.min() >= 0
                assert destx.max() < dataim.shape[0]
                assert desty.min() >= 0
                assert desty.max() < dataim.shape[1]
                imageshape = dataim.shape
                # Flattened pixel indices: row * ncols + col
                indices = numpy.ravel(destx).astype(numpy.intp)
                numpy.multiply(indices, dataim.shape[1], indices)
                numpy.add(indices,
                          numpy.ravel(desty).astype(numpy.intp),
                          indices)
                assert indices.min() >= 0
                assert indices.max() < dataim.shape[0] * dataim.shape[1]
                on = numpy.ones(len(outnp), numpy.float32)
                if fit2dmask is not None:
                    on = on * fit2dmask
                # Number of pixels and mask are constant
                cImageD11.put_incr(outnp, indices, on)
                mask = outnp < 0.1
                scalar = (1.0 - mask) / (outnp + mask)
                flatshape = outsum.shape
                arsorted = mask.copy()
                outmask = mask.copy()
                outmask = outmask * 1e6
                outmask.shape = imageshape
                arsorted.shape = imageshape
                arsorted.sort(axis=1)
                minds = numpy.array([l.searchsorted(0.5) for l in arsorted])
            # ENDIF firstimage
            start = time.time()
            numpy.multiply(outsum, 0, outsum)
            outsum.shape = flatshape
            dm = (dataim.ravel() * fit2dmask).astype(numpy.float32)
            cImageD11.put_incr(outsum, indices, dm)
            # outsum = outsum.reshape( dataim.shape )
            # outnp = outnp.reshape( dataim.shape ).astype(numpy.int32)
            # Normalise
            numpy.multiply(outsum, scalar, outsum)
            print(dataim.max(), dataim.min(), end=' ')
            print(scalar.max(), scalar.min(), outsum.min(), outsum.max())
            outsum.shape = imageshape
            # saving edf
            e.data = outsum
            e.write("r_" + fim.filename, force_type=numpy.float32)
            print(time.time() - start)
    except:
        raise
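# cImageD11.put_incr accumulates values into a flat array at (possibly
# repeated) indices; plain fancy assignment would keep only one value
# per duplicated index. A pure-numpy illustration of the same idea:
import numpy
out = numpy.zeros(4, numpy.float32)
idx = numpy.array([0, 1, 1, 3], numpy.intp)
numpy.add.at(out, idx, numpy.float32(1.0))
print(out)   # -> [1. 2. 0. 1.]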
def peaksearch_driver(options, args):
    """
    To be called with options from command line
    """
    ################## debugging still
    for a in args:
        print("arg: " + str(a) + "," + str(type(a)))
    for o in list(options.__dict__.keys()):  # FIXME
        if getattr(options, o) in ["False", "FALSE", "false"]:
            setattr(options, o, False)
        if getattr(options, o) in ["True", "TRUE", "true"]:
            setattr(options, o, True)
        print("option:", str(o), str(getattr(options, o)), ",",
              str(type(getattr(options, o))))
    ###################
    print("This peaksearcher is from", __file__)
    if options.killfile is not None and os.path.exists(options.killfile):
        print("The purpose of the killfile option is to create that file")
        print("only when you want peaksearcher to stop")
        print("If the file already exists when you run peaksearcher it is")
        print("never going to get started")
        raise ValueError("Your killfile " + options.killfile +
                         " already exists")
    if options.thresholds is None:
        raise ValueError("No thresholds supplied [-t 1234]")
    if len(args) == 0 and options.nexusfile is None:
        raise ValueError("No files to process")
    # What to do about spatial
    if options.perfect == "N" and os.path.exists(options.spline):
        print("Using spatial from", options.spline)
        corrfunc = blobcorrector.correctorclass(options.spline)
    else:
        print("Avoiding spatial correction")
        corrfunc = blobcorrector.perfect()
    # Get list of filenames to process
    # if len(args) > 0:
    #     # We no longer assume unlabelled arguments are filenames
    #     file_series_object = file_series.file_series(args)
    # This is always the case now
    corrfunc.orientation = "edf"
    import h5py
    # Read list of files and list of motor positions from Nexus file:
    nexus_path = options.nexusfile
    nexus_file = h5py.File(nexus_path, "r")
    group = nexus_file[options.group_path]
    omega_dset = group.get(options.omega_dset)
    image_dset = group.get(options.image_dset)
    omega_list = [x for x in omega_dset[..., :]]
    image_list = [x.decode("utf-8") for x in image_dset[..., :]]
    nexus_file.close()
    import fabio
    import fabio.file_series
    # Use traceback = True for debugging
    first_image = openimage(image_list[0])
    file_series_object = fabio.file_series.new_file_series(
        first_image, nimages=len(image_list), traceback=True)
    # Output files:
    if options.outfile[-4:] != ".spt":
        options.outfile = options.outfile + ".spt"
        print("Your output file must end with .spt, changing to",
              options.outfile)
    # Make a blobimage the same size as the first image to process.
    # List comprehension - convert remaining args to floats;
    # must be a unique list so go via a set
    thresholds_list = list(set([float(t) for t in options.thresholds]))
    thresholds_list.sort()
    li_objs = {}                # label image objects, keyed by threshold
    s = first_image.data.shape  # data array shape
    # Create label images
    for t in thresholds_list:
        # the last 4 chars are guaranteed to be .spt above
        mergefile = "%s_t%d.flt" % (options.outfile[:-4], t)
        spotfile = "%s_t%d.spt" % (options.outfile[:-4], t)
        li_objs[t] = labelimage(shape=s, fileout=mergefile,
                                spatial=corrfunc, sptfile=spotfile)
        print("make labelimage", mergefile, spotfile)
    if options.dark is not None:
        print("Using dark (background)", options.dark)
        darkimage = openimage(options.dark).data.astype(numpy.float32)
    else:
        darkimage = None
    if options.darkoffset != 0:
        print("Adding darkoffset", options.darkoffset)
        if darkimage is None:
            darkimage = options.darkoffset
        else:
            darkimage += options.darkoffset
    if options.flood is not None:
        floodimage = openimage(options.flood).data
        cen0 = int(floodimage.shape[0] / 6)
        cen1 = int(floodimage.shape[1] / 6)
        middle = floodimage[cen0:-cen0, cen1:-cen1]
        nmid = middle.shape[0] * middle.shape[1]
        floodavg = numpy.mean(middle)
        print("Using flood", options.flood, "average value", floodavg)
        if floodavg < 0.7 or floodavg > 1.3:
            print("Your flood image does not seem to be normalised!!!")
    else:
        floodimage = None
    start = time.time()
    print("File being treated in -> out, elapsed time")
    # If we want to do read-ahead threading we fill up a Queue object
    # with data objects.
    # THERE MUST BE ONLY ONE peaksearching thread for 3D merging to work;
    # there could be several read_and_correct threads, but they would
    # have to keep the order right - for now there is only one.
    if options.oneThread:
        # Wrap in a function to allow profiling (perhaps? what about globals??)
        def go_for_it(file_series_object, darkimage, floodimage,
                      corrfunc, thresholds_list, li_objs):
            for inc, data_object in enumerate(file_series_object):
                t = timer()
                if not isinstance(data_object, fabio.fabioimage.fabioimage):
                    # Is usually an IOError
                    if isinstance(data_object[1], IOError):
                        sys.stdout.write(data_object[1].strerror + '\n')
                        # data_object[1].filename
                    else:
                        import traceback
                        traceback.print_exception(data_object[0],
                                                  data_object[1],
                                                  data_object[2])
                        sys.exit()
                    continue
                filein = data_object.filename
                data_object.header["Omega"] = float(omega_list[inc])
                data_object = correct(data_object, darkimage, floodimage,
                                      do_median=options.median,
                                      monitorval=options.monitorval,
                                      monitorcol=options.monitorcol,
                                      )
                t.tick(filein + " io/cor")
                peaksearch(filein, data_object, corrfunc,
                           thresholds_list, li_objs)
            for t in thresholds_list:
                li_objs[t].finalise()

        go_for_it(file_series_object, darkimage, floodimage,
                  corrfunc, thresholds_list, li_objs)
    else:
        print("Going to use threaded version!")
        try:
            # TODO move this to a module ?
            class read_only(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue, file_series_obj,
                             myname="read_only"):
                    """ Reads files in file_series_obj, writes to queue """
                    self.queue = queue
                    self.file_series_obj = file_series_obj
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname)
                    print("Reading thread initialised", end=' ')

                def ImageD11_run(self):
                    """ Read images and copy them to self.queue """
                    for inc, data_object in enumerate(self.file_series_obj):
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                        if not isinstance(data_object,
                                          fabio.fabioimage.fabioimage):
                            # Is usually an IOError
                            if isinstance(data_object[1], IOError):
                                sys.stdout.write(
                                    str(data_object[1].strerror) + '\n')
                            else:
                                import traceback
                                traceback.print_exception(data_object[0],
                                                          data_object[1],
                                                          data_object[2])
                                sys.exit()
                            continue
                        ti = timer()
                        filein = data_object.filename + "[%d]" % (
                            data_object.currentframe)
                        try:
                            data_object.header["Omega"] = float(
                                omega_list[inc])
                        except KeyboardInterrupt:
                            raise
                        except:
                            continue
                        ti.tick(filein)
                        self.queue.put((filein, data_object), block=True)
                        ti.tock(" enqueue ")
                        if self.ImageD11_stop_now():
                            print("Reader thread stopping")
                            break
                    # Flag the end of the series
                    self.queue.put((None, None), block=True)

            class correct_one_to_many(ImageD11_thread.ImageD11_thread):
                def __init__(self, queue_read, queues_out, thresholds_list,
                             dark=None, flood=None, myname="correct_one",
                             monitorcol=None, monitorval=None,
                             do_median=False):
                    """ Using a single reading queue retains a global
                    ordering; corrects and copies images to output queues,
                    doing the correction once """
                    self.queue_read = queue_read
                    self.queues_out = queues_out
                    self.dark = dark
                    self.flood = flood
                    self.do_median = do_median
                    self.monitorcol = monitorcol
                    self.monitorval = monitorval
                    self.thresholds_list = thresholds_list
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname)

                def ImageD11_run(self):
                    while not self.ImageD11_stop_now():
                        ti = timer()
                        filein, data_object = self.queue_read.get(block=True)
                        if filein is None:
                            for t in self.thresholds_list:
                                self.queues_out[t].put((None, None),
                                                       block=True)
                            # exit the while loop
                            break
                        data_object = correct(data_object, self.dark,
                                              self.flood,
                                              do_median=self.do_median,
                                              monitorval=self.monitorval,
                                              monitorcol=self.monitorcol,
                                              )
                        ti.tick(filein + " correct ")
                        for t in self.thresholds_list:
                            # Hope that data object is read only
                            self.queues_out[t].put((filein, data_object),
                                                   block=True)
                        ti.tock(" enqueue ")
                    print("Corrector thread stopping")

            class peaksearch_one(ImageD11_thread.ImageD11_thread):
                def __init__(self, q, corrfunc, threshold, li_obj,
                             myname="peaksearch_one"):
                    """ This will handle a single threshold and
                    labelimage object """
                    self.q = q
                    self.corrfunc = corrfunc
                    self.threshold = threshold
                    self.li_obj = li_obj
                    ImageD11_thread.ImageD11_thread.__init__(
                        self, myname=myname + "_" + str(threshold))

                def ImageD11_run(self):
                    while not self.ImageD11_stop_now():
                        filein, data_object = self.q.get(block=True)
                        if not isinstance(data_object,
                                          fabio.fabioimage.fabioimage):
                            break
                        peaksearch(filein, data_object, self.corrfunc,
                                   [self.threshold],
                                   {self.threshold: self.li_obj})
                    self.li_obj.finalise()

            # 8 MB images - max 40 MB in this queue
            read_queue = queue.Queue(5)
            reader = read_only(read_queue, file_series_object)
            reader.start()
            queues = {}
            searchers = {}
            for t in thresholds_list:
                print("make queue and peaksearch for threshold", t)
                queues[t] = queue.Queue(3)
                searchers[t] = peaksearch_one(queues[t], corrfunc,
                                              t, li_objs[t])
            corrector = correct_one_to_many(read_queue,
                                            queues,
                                            thresholds_list,
                                            dark=darkimage,
                                            flood=floodimage,
                                            do_median=options.median,
                                            monitorcol=options.monitorcol,
                                            monitorval=options.monitorval)
            corrector.start()
            my_threads = [reader, corrector]
            for t in thresholds_list[::-1]:
                searchers[t].start()
                my_threads.append(searchers[t])
            nalive = len(my_threads)

            def empty_queue(q):
                while 1:
                    try:
                        q.get(block=False)
                    except:
                        break
                q.put((None, None), block=False)

            while nalive > 0:
                try:
                    nalive = 0
                    for thr in my_threads:
                        if thr.is_alive():
                            nalive += 1
                    if options.killfile is not None and \
                       os.path.exists(options.killfile):
                        raise KeyboardInterrupt()
                    time.sleep(1)
                except KeyboardInterrupt:
                    print("Got keyboard interrupt in waiting loop")
                    ImageD11_thread.stop_now = True
                    try:
                        time.sleep(1)
                    except:
                        pass
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    print("finishing from waiting loop")
                except:
                    print("Caught exception in waiting loop")
                    ImageD11_thread.stop_now = True
                    time.sleep(1)
                    empty_queue(read_queue)
                    for t in thresholds_list:
                        q = queues[t]
                        empty_queue(q)
                    for thr in my_threads:
                        if thr.is_alive():
                            thr.join(timeout=1)
                    raise
        except ImportError:
            print("Probably no threading module present")
            raise
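# Sketch of the Nexus layout this driver expects; the group and dataset
# names are whatever options.group_path / options.omega_dset /
# options.image_dset point at (the names below are hypothetical).
import h5py
import numpy
with h5py.File("scan.nxs", "w") as nx:
    g = nx.create_group("entry/scan")
    g.create_dataset("omega", data=numpy.arange(0.0, 90.0, 0.1))
    g.create_dataset("images",
                     data=numpy.array(["img_%04d.edf" % i
                                       for i in range(900)], dtype="S"))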
import fabio, numpy as np
from ImageD11.labelimage import labelimage
from ImageD11.peaksearcher import peaksearch
from ImageD11.blobcorrector import correctorclass, perfect

lines = [("tiftest%04d.tif" % (i), i - 6) for i in range(6, 17)]

# give some minimal options
thresholds = [1, 10, 100]
corrector = perfect()   # no spatial distortion
dims = fabio.open(lines[0][0]).data.shape
label_ims = {t: labelimage(shape=dims,
                           fileout="peaks_t%d.flt" % (t),
                           sptfile="peaks_t%d.spt" % (t),
                           spatial=corrector)
             for t in thresholds}

for filename, omega in lines:
    frame = fabio.open(filename)
    frame.header['Omega'] = omega
    frame.data = frame.data.astype(np.float32)
    # corrections like dark/flat/normalise would be added here
    peaksearch(filename, frame, corrector, thresholds, label_ims)

for t in thresholds:
    label_ims[t].finalise()
            continue
        if inscan(s, f, o, detector_size):
            line = fmt % (gid, allhkls[i][0], allhkls[i][1], allhkls[i][2],
                          alltth[i], alleta[i], allomega[i],
                          allsc[i], allfc[i], sraw[i], fraw[i])
            strs.append((allomega[i], line))  # to be omega sortable
    return strs


if __name__ == "__main__":
    grains = grain.read_grain_file(sys.argv[1])
    pars = parameters.read_par_file(sys.argv[2])
    detector_size = (2048, 2048)
    spline = sys.argv[3]
    if spline == "perfect":
        spatial = blobcorrector.perfect()
    else:
        spatial = blobcorrector.correctorclass(spline)
    outfile = sys.argv[4]
    tthmax, dsmax = tth_ds_max(pars, detector_size)
    print("# id h k l tth eta omega sc fc s_raw f_raw")
    peaks = []
    for gid, gr in enumerate(grains):
        newpeaks = forwards_project(gr, pars, detector_size, spatial,
                                    dsmax, gid)
        peaks += newpeaks
        print("# Grain", gid, "npks", len(newpeaks))
    peaks.sort()
    out = open(outfile, "w")
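# Example invocation (script and file names are illustrative):
#   python forward_project.py grains.map experiment.par distortion.spline fproj.flt
#   python forward_project.py grains.map experiment.par perfect fproj.flt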