def getFromWorker(self):
    """
    Receive results from worker threads. Returns a (dataid, data) tuple,
    or (False, False) if there are no outstanding requests.
    """
    # TODO: fix ugly (False, False) return values
    log.prNot(log.INFO, "getFromWorker(): Getting data from worker.")
    
    # Check if there are requests left on the buffer:
    if (len(self.rreqbuf) == 0):
        # No requests left, so we don't expect data to return
        return (False, False)
    
    # Wait for data
    rstat = MPI.Status()
    MPI.Request.Waitany(self.rreqbuf.values(), status=rstat)
    log.prNot(log.INFO, "getFromWorker(): worker %d done crunching" % \
        (rstat.source))
    # Construct return value
    retval = (self.idbuf[rstat.source], self.recvbuf[rstat.source])
    # Remove entries from buffer lists
    del self.recvbuf[rstat.source]
    del self.idbuf[rstat.source]
    del self.rreqbuf[rstat.source]
    # Add the now-idle worker back to the list
    self.iworkers.append(rstat.source)
    # Return results
    return retval
def getFromMaster(self):
    """
    Receive data from the master in a worker MPI thread. Returns a
    (data, task) tuple.
    """
    log.prNot(log.INFO, "getFromMaster(): waiting for metadata.")
    # First we receive some metadata on the data we're getting after this.
    # The buffer has the following structure:
    # meta[0]: number of dimensions of the data coming
    # meta[1:1+meta[0]]: size of each dimension
    # meta[10]: datatype
    # meta[11]: task identifier
    meta = N.empty((32), dtype=N.int32)
    self.comm.Recv([meta, MPI.INT], source=0, tag=self.COMM_WD_W)
    # Now we set up the buffer to receive the actual data
    databuf = N.empty(tuple(meta[1:1+meta[0]]), \
        dtype=self.tmetatonp(meta[10]))
    log.prNot(log.INFO, "getFromMaster(): got metadata.")
    # And block to receive the data
    self.comm.Recv([databuf, self.tnptompi(databuf.dtype.type)], \
        source=0, tag=self.COMM_WD_W)
    return (databuf, meta[11])
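# Hedged sketch of the 32-int metadata header that getFromMaster() decodes.
# The actual producer is self.getMeta() (not shown here); 'nptometa' is a
# hypothetical numpy-dtype-to-typecode mapper standing in for the conversion
# tables set up in setupTypes().
def _packMeta(data, task, nptometa):
    import numpy as N
    meta = N.zeros((32), dtype=N.int32)
    meta[0] = data.ndim                   # number of dimensions
    meta[1:1+data.ndim] = data.shape      # size of each dimension
    meta[10] = nptometa(data.dtype.type)  # datatype code
    meta[11] = task                       # task identifier
    return meta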
def gnuplotInit(gp, hardcopy=False, verb=False, rmfile=False):
    """
    Set some default gnuplot options for Gnuplot instance 'gp'. If
    'hardcopy' is set, an EPS file will be written to that location instead
    of displaying the plot on screen. If 'verb' is True, display some debug
    information. If 'rmfile' is True, delete a pre-existing hardcopy file
    first.
    """
    # First reset gnuplot completely
    gp.reset()
    # If we want a hardcopy, set up the EPS terminal
    if (hardcopy is not False):
        hc = os.path.realpath(hardcopy)
        log.prNot(log.INFO, "Saving hardcopy to '%s'" % (hc))
        # Make sure the directory exists
        if (not os.path.exists(os.path.dirname(hc))):
            os.makedirs(os.path.dirname(hc))
        if (rmfile == True and os.path.isfile(hc)):
            os.remove(hc)
        gp('set terminal postscript eps enhanced color size 8.8cm,5.44cm "Palatino-Roman" 10')
        gp('set output "%s"' % (hardcopy))
    gp('set key on top left box spacing 2 samplen 6')
    gp('set bmargin 3.5')
    gp('set rmargin 2')
    gp('set style line 1 lt 1 lw 2.2 lc rgb "red"')
    gp('set style line 2 lt 2 lw 2.2 lc rgb "blue"')
    gp('set style line 3 lt 3 lw 2.2 lc rgb "purple"')
    gp('set style line 4 lt 4 lw 2.2 lc rgb "cyan"')
    gp('set style line 5 lt 5 lw 2.2 lc rgb "orange"')
    gp('set style line 6 lt 6 lw 2.2 lc rgb "black"')
    gp('set style line 7 lt 7 lw 2.2 lc rgb "green"')
    gp('set style line 8 lt 8 lw 2.2 lc rgb "brown"')
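# Minimal usage sketch for gnuplotInit(), assuming the Gnuplot-py package is
# available; the EPS path and the plotted function are illustrative.
def _gnuplotDemo():
    import Gnuplot
    gp = Gnuplot.Gnuplot()
    gnuplotInit(gp, hardcopy='/tmp/example.eps', rmfile=True)
    gp('plot sin(x) ls 1 title "example"')
    # Gnuplot writes asynchronously, wait for the file to appear
    _waitForFile('/tmp/example.eps')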
def rmFiles(filelist):
    """
    Remove files in 'filelist', if they exist.
    """
    for _f in filelist:
        if os.path.exists(_f):
            log.prNot(log.INFO, "rmFiles(): Removing %s" % (_f))
            os.remove(_f)
def _waitForFile(fname, delay=0.3, maxw=5):
    """
    Wait until 'fname' exists. This works around the asynchronous Gnuplot
    calls: we never know when Gnuplot is finished writing, so we cannot
    convert the output to PDF right away.
    'delay' is the check interval in seconds
    'maxw' is the maximum time this function will wait
    """
    while (not os.path.exists(fname) and maxw >= 0):
        log.prNot(log.INFO, "waitForFile(): Waiting for '%s'" % (fname))
        maxw -= delay
        time.sleep(delay)
    # Wait one more interval to be sure the file is complete
    time.sleep(delay)
def saveOldFile(uri, postfix='.old', maxold=5):
    """
    If 'uri' exists, rename it to make room for a new file. Backups are
    rotated as 'uri' + 'postfix' + <n>, keeping at most 'maxold' copies;
    maxold=0 disables backups.
    """
    if (maxold == 0):
        return
    if (os.path.exists(uri)):
        # Find the first free backup slot, capped at maxold-1
        app = 0
        while (os.path.exists(uri + postfix + str(app))):
            app += 1
            if (app >= maxold-1):
                break
        # Shift the existing backups up one slot, oldest first.
        # NB: range(app-1, -1, -1) for app = 5 gives [4, 3, 2, 1, 0]
        for i in range(app-1, -1, -1):
            os.rename(uri+postfix+str(i), uri+postfix+str(i+1))
        # Now rename the original file to originalfile + '.old0':
        os.rename(uri, uri+postfix+str(0))
        # the file 'uri' is now free
        log.prNot(log.INFO, "saveOldFile(): renamed file (%s) to prevent overwriting" % (uri))
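# Worked illustration of the rotation saveOldFile() performs, assuming
# 'data.npy' and two backups already exist. One call renames, in order:
#   data.npy.old1 -> data.npy.old2
#   data.npy.old0 -> data.npy.old1
#   data.npy      -> data.npy.old0
# leaving 'data.npy' free to be rewritten; with maxold=5 at most
# data.npy.old0 .. data.npy.old4 are kept. The filename is illustrative.
def _rotateDemo():
    saveOldFile('data.npy', postfix='.old', maxold=5)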
def find_close(text, close=10, minlen=4):
    """
    Find words of at least 'minlen' characters that recur within 'close'
    words of each other in 'text'.
    """
    log.prNot(log.INFO, "find_close(text)")
    _text = sanitize(text)
    # Sliding window of the last 'close' words seen
    prev = ['?']*close
    outlist = []
    outcount = 0
    for word in re.compile(r'\s').split(_text):
        if (len(word) >= minlen and word in prev):
            # Distance to the most recent occurrence
            dist = prev[::-1].index(word)
            outlist.append([outcount, dist, word, ' '.join(prev)])
            outcount += 1
        prev.append(word)
        prev.pop(0)
    print_close_words(outlist)
    return outlist
def broadcastWorkers(self, data, task):
    """
    Broadcast something to all workers. Will block until all messages are
    received.
    """
    log.prNot(log.INFO, "broadcastWorkers(): Broadcasting data.")
    # Send metadata first, don't track those requests because we don't
    # need to
    meta = self.getMeta(data, task)
    sreqs = []
    for worker in self.workers:
        self.comm.Isend([meta, MPI.INT], dest=worker, tag=self.COMM_WD_W)
        sreq = self.comm.Isend([data, self.tnptompi(data.dtype.type)], \
            dest=worker, tag=self.COMM_WD_W)
        sreqs.append(sreq)
    # Wait for all data sends to complete
    MPI.Request.Waitall(sreqs)
def main(*argv):
    # Set up the command-line parser for this program
    parser = argparse.ArgumentParser(\
        description='Analyze text for inconsistencies, such as words close together, or repeating groups of words.')
    # Add positional argument (filepath)
    parser.add_argument('file', action='store', metavar='filename',\
        type=str, help='file to analyze')
    # Add optional arguments
    parser.add_argument('-c', '--cutoff', action='store', metavar='N',\
        type=int, help='cutoff value for histogram counting', default=2,
        dest='cutoff')
    parser.add_argument('-d', '--mindist', action='store', metavar='L',\
        type=int, help='minimum distance between words', default=10,
        dest='mindist')
    parser.add_argument('-l', '--minlen', action='store', metavar='L',\
        type=int, help='minimum word length to parse', default=4,
        dest='minlen')
    parser.add_argument('-g', '--groupsize', action='store', metavar='S',\
        type=int, help='number of words per group to compare for recurrence',
        default=6, dest='grpsize')
    parser.add_argument('-s', '--grpsentence', action='store_true', \
        help='group words by sentence (overrides groupsize)', default=False,
        dest='grpsent')
    args = parser.parse_args()
    
    log.prNot(log.NOTICE, "%s: analyzing file %s" % (argv[0], args.file))
    fd = open(args.file)
    text = fd.read()
    fd.close()
    
    # Histogram all words
    histwords = mk_word_histo(text, cutoff=args.cutoff)
    # Find words close together
    closewords = find_close(text, close=args.mindist, minlen=args.minlen)
    # Find recurring groups of words
    similgroups = find_similar(text, groupsize=args.grpsize,
        bysentence=args.grpsent)
def mk_word_histo(text, cutoff=2):
    """
    Build a word histogram of 'text', keeping only words that occur more
    than 'cutoff' times.
    """
    log.prNot(log.INFO, "mk_word_histo(text)")
    _text = sanitize(text)
    hist = {}
    for word in re.compile(r'\s').split(_text):
        if hist.has_key(word):
            hist[word] += 1
        else:
            hist[word] = 1
    # Convert to a list of tuples, filter out rare words
    hist_filt = filter(lambda x: x[1] > cutoff, hist.iteritems())
    # Sort the list by word count
    hist_filtsort = sorted(hist_filt, key=operator.itemgetter(1))
    print_word_histo(hist_filtsort)
    log.prNot(log.INFO, "mk_word_histo(text) done")
    return hist_filtsort
def cacheSvd(lhs, lsizes, lorigs, lcells, sasize, sapos, sfang, sffov, matroot='./matrices'):
    """
    Precompute the reconstruction matrices, which are the singular value
    decomposed forward matrices.
    
    @param lhs List of layer height configurations
    @param lsizes List of layer sizes
    @param lorigs List of layer origins
    @param lcells List of layer cells
    @param sasize Subaperture size (m)
    @param sapos Subaperture positions (m)
    @param sfang Subfield pointing angle (radian)
    @param sffov Subfield field of view (radian)
    """
    log.prNot(log.NOTICE, "cacheSvd(): Building up SVD cache.")
    # Init memory to hold all SVDs
    svdCache = []
    for i in range(len(lhs)):
        log.prNot(log.NOTICE, "cacheSvd(): Computing forward matrix %d/%d" % (i+1, len(lhs)))
        # For each reconstruction geometry, calculate the forward matrix:
        (fwdmat, mattag) = computeFwdMatrix(lhs[i], lsizes[i], lorigs[i], \
            lcells, sasize, sapos, sfang, sffov, matroot=matroot)
        log.prNot(log.NOTICE, "cacheSvd(): Computing SVD for forward matrix...")
        # SVD this forward matrix, and store the result
        svdCache.append(computeSvd(fwdmat, mattag=mattag, matroot=matroot))
    # Return the SVD cache
    return svdCache
def find_similar(text, groupsize=5, bysentence=False):
    """
    Find groups of words that occur more than once in 'text'. Groups are
    either sentences (if 'bysentence' is True) or consecutive runs of
    'groupsize' words.
    """
    log.prNot(log.INFO, "find_similar(text)")
    if (bysentence):
        groups = re.compile(r'[\.?!] ').split(text)
        _text = text
    else:
        groups = []
        _text = sanitize(text)
        words = re.compile(r'\s').split(_text)
        for i in range(0, len(words), groupsize):
            groups.append(' '.join(words[i:i+groupsize]))
    simillist = []
    for group in groups:
        c = _text.count(group)
        if (c > 1):
            simillist.append([c, group])
    print_simil(simillist)
    return simillist
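# Hedged usage sketch tying the text-analysis helpers together; 'draft.txt'
# is an illustrative filename, sanitize() and print_word_histo() are defined
# elsewhere in this module.
def _analyzeDemo():
    text = open('draft.txt').read()
    hist = mk_word_histo(text, cutoff=2)           # words used > 2 times
    close = find_close(text, close=10, minlen=4)   # repeats within 10 words
    simil = find_similar(text, groupsize=6)        # recurring 6-word groups
    return (hist, close, simil)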
def deploy(self, ack=None):
    """
    Commit the various MPI threads to their respective tasks as defined
    earlier.
    """
    if (ack is not None):
        log.prNot(log.NOTICE, "deploy(): %s" % (ack))
    if (self.rank == self.watchdog):
        log.prNot(log.NOTICE, "deploy(): MPI thread %d running watchdog." %\
            (self.rank))
        self.watchdogfunc(self.watchdogargs)
    else:
        log.prNot(log.NOTICE, "deploy(): MPI thread %d running worker." %\
            (self.rank))
        self.workerfunc(self.workerargs)
    log.prNot(log.NOTICE, "deploy(): MPI thread %d done working." %\
        (self.rank))
def __init__(self, cfgfile):
    # Static configuration options
    # Various communication tags
    self.COMM_WD_W = 1            # watchdog to worker
    self.COMM_W_WD = 2            # worker to watchdog
    # Configuration defaults
    self.cfgdef = { \
        'masterhost' : 'dhcp-179.astro.su.se',\
        }
    
    log.prNot(log.NOTICE, "Initializing CompGrid()")
    # Load configuration from cfgfile
    self.cfg = ConfigParser.SafeConfigParser(self.cfgdef)
    self.cfg.read(cfgfile)
    # Save config filename
    self.cfgfile = cfgfile
    # Change directory to that of the configfile
    self.curdir = os.path.realpath(os.path.curdir)
    os.chdir(os.path.realpath(os.path.dirname(cfgfile)))
    
    # Parse data format and structure variables
    self.masterhost = self.cfg.get('compgrid', 'masterhost')
    # The buffer lengths are used in integer comparisons, so parse them
    # as integers
    self.sbuflen = self.cfg.getint('compgrid', 'sbuflen')
    self.rbuflen = self.cfg.getint('compgrid', 'rbuflen')
    
    # These will hold the various different processes
    self.procs = []               # List for all MPI threads
    self.watchdog = -1            # watchdog MPI rank
    self.workers = []             # list of all worker MPI threads
    self.iworkers = []            # list of idle worker MPI threads
    # Send & receive request buffers, indexed by worker rank
    self.sreqbuf = {}
    self.rreqbuf = {}
    # Send & receive data buffers
    self.sendbuf = {}
    self.recvbuf = {}
    # Buffer for keeping track of the data ID (i.e. what data is stored in
    # each buffer?)
    self.idbuf = {}
    
    self.comm = MPI.COMM_WORLD            # world communicator
    self.size = self.comm.Get_size()      # number of MPI threads
    self.rank = self.comm.Get_rank()      # current rank
    
    # Only do checks in one MPI thread
    if (self.rank == 0):
        # Check configuration file for sanity
        self.checkSetupSanity()
    
    # Change directory back
    os.chdir(self.curdir)
    # Setup datatype conversion
    self.setupTypes()
    # Divide tasks over MPI threads
    self.divideTasks()
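# A minimal configuration file CompGrid.__init__() could parse: the
# [compgrid] section name and option names follow the cfg.get() calls above,
# the values are illustrative only.
_EXAMPLE_CFG = """
[compgrid]
masterhost = dhcp-179.astro.su.se
sbuflen = 4
rbuflen = 4
"""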
def divideTasks(self):
    """
    Divide the tasks over the various MPI threads.
    """
    log.prNot(log.NOTICE, "divideTasks(): dividing tasks over threads.")
    # Everyone sends their IP to the master thread
    locip = socket.gethostbyname(socket.gethostname())
    sreq = self.comm.Isend([locip, MPI.CHARACTER], dest=0)
    
    # Store groups per IP here.
    # NB: although the workers are still sorted per IP, this is not used
    # for anything. Sort-of deprecated.
    groups = {}
    
    # All threads besides rank 0 do not continue.
    if (self.rank != 0):
        # wait until we're sure the message was received
        log.prNot(log.INFO, "divideTasks(): waiting for send")
        sreq.Wait()
        log.prNot(log.INFO, "divideTasks(): success.")
        return
    
    # Only MPI thread rank 0 remains here:
    for proc in xrange(self.size):
        # Receive the IP from all MPI threads, store with thread ID ('rank')
        buf = array.array('c', '0'*15)
        stat = MPI.Status()
        self.comm.Recv([buf, MPI.CHARACTER], source=proc, \
            status=stat)
        ip = buf.tostring()
        self.procs.append([proc, ip])
        log.prNot(log.INFO, "divideTasks(): received '%s' from %d." % \
            (ip, proc))
        # Now make groups per IP
        if (not groups.has_key(ip)):
            groups[ip] = []
        groups[ip].append(proc)
    
    log.prNot(log.INFO, "divideTasks(): making task groups.")
    foundmaster = False
    # Loop over groups, divide tasks
    for ip in groups:
        if (ip == self.masterhost):
            log.prNot(log.INFO, "divideTasks(): found master ip.")
            foundmaster = True
            # We need at least one MPI thread for the watchdog
            if (len(groups[ip]) < 1):
                raise RuntimeError("Master machine must have at least one MPI thread!")
            # Sort the group. The lowest rank on the master host will be
            # the watchdog.
            groups[ip].sort()
            self.watchdog = groups[ip][0]
            self.workers.extend(groups[ip][1:])
            self.iworkers.extend(groups[ip][1:])
        else:
            self.workers.extend(groups[ip])
            self.iworkers.extend(groups[ip])
        log.prNot(log.INFO, "divideTasks(): group %s has %d nodes." % \
            (ip, len(groups[ip])))
    
    if (not foundmaster):
        raise RuntimeError("Master machine '%s' not found!" % \
            self.masterhost)
    
    log.prNot(log.NOTICE, "divideTasks(): watchdog @ %d, workers:" % \
        (self.watchdog))
    log.prNot(log.NOTICE, self.workers)
def saveData(path, data, asnpy=False, aspickle=False, asfits=False, ascsv=False, explicit=False, csvfmt='%g', csvhdr=None, old=3):
    """
    Save 'data' to 'path' in one or more formats (numpy, pickle, FITS,
    CSV). If 'explicit' is set, use 'path' as-is, otherwise append a
    format-specific extension. Keep at most 'old' backups of pre-existing
    files. Returns a dict of the basenames written, keyed by format.
    """
    # Init empty dict of files written
    flist = {}
    # Expand path
    path = os.path.realpath(path)
    log.prNot(log.INFO, "saveData(): file '%s', fits: %d, npy: %d, pickle: %d, csv: %d" %\
        (os.path.basename(path), asfits, asnpy, aspickle, ascsv))
    # Make dir if necessary
    outdir = os.path.dirname(path)
    if (not os.path.isdir(outdir)):
        log.prNot(log.INFO, "saveData(): making directory '%s'" % (outdir))
        os.makedirs(outdir)
    # If every format is disabled, enable asnpy as the default
    if (not asnpy and not aspickle and not ascsv and not asfits):
        asnpy = True
    
    if (asnpy):
        import numpy as N
        # Save data in numpy format
        if (explicit): uri = path
        else: uri = path + '.npy'
        log.prNot(log.INFO, "saveData(): storing numpy to '%s'" % (uri))
        # Save old file, if present
        saveOldFile(uri, postfix='.old', maxold=old)
        N.save(uri, data)
        flist['npy'] = os.path.basename(uri)
    if (ascsv):
        import numpy as N
        # Save data in csv format
        if (explicit): uri = path
        else: uri = path + '.csv'
        log.prNot(log.INFO, "saveData(): storing csv to '%s'" % (uri))
        # Save old file, if present
        saveOldFile(uri, postfix='.old', maxold=old)
        if (csvhdr is not None):
            data = N.vstack((N.array(csvhdr), data))
        N.savetxt(uri, data, fmt=csvfmt, delimiter=', ')
        flist['csv'] = os.path.basename(uri)
    if (aspickle):
        import cPickle as pickle
        if (explicit): uri = path
        else: uri = path + '.pickle'
        log.prNot(log.INFO, "saveData(): storing pickle to '%s'" % (uri))
        # Save old file, if present
        saveOldFile(uri, postfix='.old', maxold=old)
        pickle.dump(data, open(uri, 'w'))
        flist['pickle'] = os.path.basename(uri)
    if (asfits):
        import pyfits
        if (explicit): uri = path
        else: uri = path + '.fits'
        log.prNot(log.INFO, "saveData(): storing fits to '%s'" % (uri))
        # Save old file, if present
        saveOldFile(uri, postfix='.old', maxold=old)
        pyfits.writeto(uri, data)
        flist['fits'] = os.path.basename(uri)
    
    return flist
def loadData(path, asnpy=False, aspickle=False, ascsv=False, auto=False, shape=None):
    """
    Load data from 'path' in numpy, pickle or CSV format. If 'shape' is
    given, verify that the loaded data has that shape. Returns the data,
    or False on failure.
    """
    import numpy as N
    log.prNot(log.INFO, "loadData(): loading '%s', npy: %d pickle: %d, csv: %d" % (os.path.split(path)[1], asnpy, aspickle, ascsv))
    if (N.sum([asnpy, aspickle, ascsv]) > 0 and auto):
        log.prNot(log.WARNING, "loadData(): auto-guessing and a specific format both set, using the specific format.")
        auto = False
    # Make sure only one format is enabled
    if (N.sum([asnpy, aspickle, ascsv]) > 1):
        aspickle = False
        ascsv = False
        log.prNot(log.ERR, "loadData(): Cannot load more than one format at a time.")
    elif (N.sum([asnpy, aspickle, ascsv]) < 1 and not auto):
        asnpy = True
        log.prNot(log.WARNING, "loadData(): No format selected, enabling npy.")
    
    if (asnpy):
        import numpy as N
        # Check if file exists
        if (not os.path.isfile(path)):
            log.prNot(log.WARNING, "loadData(): numpy file '%s' does not exist." %\
                (os.path.split(path)[1]))
            return False
        # Load results
        results = N.load(path)
    if (aspickle):
        import cPickle as pickle
        # Check if file exists
        if (not os.path.isfile(path)):
            log.prNot(log.WARNING, "loadData(): pickle file '%s' does not exist." %\
                (os.path.split(path)[1]))
            return False
        # Load results
        results = pickle.load(open(path))
    if (ascsv):
        # Check if file exists
        if (not os.path.isfile(path)):
            log.prNot(log.WARNING, "loadData(): csv file '%s' does not exist." %\
                (os.path.split(path)[1]))
            return False
        # Load results as csv
        results = N.loadtxt(path, delimiter=',')
    
    # Check if shape matches
    if (shape is not None and results.shape != shape):
        log.prNot(log.WARNING, "loadData(): shapes do not match.")
        return False
    
    return results
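# Hedged round-trip sketch for saveData()/loadData(); the path is
# illustrative.
def _roundtripDemo():
    import numpy as N
    arr = N.arange(12.).reshape(3, 4)
    saveData('/tmp/example', arr, asnpy=True, ascsv=True)
    return loadData('/tmp/example.npy', asnpy=True, shape=(3, 4))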
def print_close_words(wordlist):
    for (c, d, word, contxt) in wordlist:
        log.prNot(log.WARNING, "%d %d %s (%s)" % (c, d, word, contxt))
def print_simil(simillist):
    for sim in simillist:
        log.prNot(log.WARNING, "%d %s" % (sim[0], sim[1]))
def showSaSfLayout(outfile, sapos, sasize, sfpos=[], sfsize=[], method='ccd', coord=True, number=True, plrange=None, aptr=None, aptp=None):
    """
    Show the subaperture/subfield layout in Gnuplot.
    
    'outfile' will be the output EPS file of Gnuplot
    'sapos' is the subaperture positions in some units
    'sasize' is the subaperture size in the same units
    'sfpos' is the subfield positions in the same units
    'sfsize' is the subfield size in the same units
    'method' is the meaning of the positions: 'ccd' or 'll'
    'coord' if True, add coordinates to the subapertures
    'number' if True, add numbers to the subapertures
    """
    # TODO: implement 'll' method
    if (method != 'ccd'):
        raise NotImplementedError("Method '%s' not implemented yet" % (method))
    
    log.prNot(log.INFO, "showSaSfLayout(): Saving plot to '%s'." % (outfile))
    # Initiate Gnuplot
    gp = Gnuplot.Gnuplot()
    # Make a nice path
    outfile = os.path.realpath(outfile)
    # Set default settings
    gnuplotInit(gp, hardcopy=outfile, rmfile=True)
    
    # Set plotting range
    if plrange is None:
        xran = (min(sapos[:,0]) - 0.5*sasize[0], \
            max(sapos[:,0]) + 1.5*sasize[0])
        yran = (min(sapos[:,1]) - 0.5*sasize[1], \
            max(sapos[:,1]) + 1.5*sasize[1])
    else:
        xran = tuple(N.array(plrange[0])*1.0)
        yran = tuple(N.array(plrange[1])*1.0)
    gp('set xrange [%f:%f]' % xran)
    gp('set yrange [%f:%f]' % yran)
    # Make the aspect ratio square
    ar = (yran[1] - yran[0])/(xran[1] - xran[0])
    gp('set size ratio %f' % (ar))
    # No legend
    gp('set key off')
    # Object counter
    obj = 0
    # # Draw circular aperture if wanted
    # if aptr != None and aptp != None:
    #     gp('set obj %d circle at %f,%f size %f back' % \
    #         (obj, aptp[0], aptp[1], aptr))
    #     obj += 1
    
    # Loop over the subapertures
    sanum = -1
    for sa in sapos:
        obj += 1
        sanum += 1
        # Set subimage box
        gp('set obj %d rect from %f,%f to %f,%f fs empty lw 0.8' % \
            (obj, sa[0], sa[1], sa[0]+sasize[0], sa[1]+sasize[1]))
        caption = ''
        # Add number, if requested
        if (number):
            caption += '#%d' % (sanum)
        # Add coordinate, if requested
        if (coord):
            caption += ' (%.4g,%.4g)' % (sa[0], sa[1])
        # Set caption
        gp('set label %d at %f,%f "%s" font "Palatino,2.5"' % \
            (obj, sa[0] + sasize[0]*0.06, sa[1] + sasize[1]*0.1, caption))
        # Add subfields
        for sf in sfpos:
            _sf = sa + sf
            obj += 1
            gp('set obj %d rect from %f,%f to %f,%f fs empty lw 0.4' % \
                (obj, _sf[0], _sf[1], _sf[0]+sfsize[0], _sf[1]+sfsize[1]))
    
    # Finish the plot by plotting a value outside the plot range
    # (TODO: ugly hack, how to do this nicer?)
    gp('plot -99999')
    _waitForFile(outfile)
    _convertPdf(outfile)
def show_help(err=""):
    log.prNot(log.NOTICE, "syntax: %s <file>" % (sys.argv[0]))
    if (len(err)):
        log.prNot(log.ERR, err)
    sys.exit(-1)
def computeFwdMatrix(lh, lsize, lorig, lcells, sasize, sapos, sfang, sffov, matroot='./matrices/', engine='c'):
    """
    Calculate a model forward matrix giving the WFWFS output for a given
    atmosphere geometry.
    
    The model is constructed as follows: for each subaperture and each
    subfield, trace a cone through the atmosphere. At each atmospheric
    layer, calculate the size and position of the meta-pupil of the cone.
    Using this information, calculate where the cone intersects the
    atmospheric layer. Since each layer consists of a finite number of
    cells, the intersection can be expressed as the fractional area
    intersected for each cell. These intersections are stored in the
    matrix such that when multiplying it with a model atmosphere, each
    subaperture-subfield pair gets a linear sum of the intersected cells
    in the different atmospheric layers as measurement.
    
    @param lh Height for each layer (km)
    @param lsize Size for each layer (m)
    @param lorig Origin for each layer (m)
    @param lcells Number of cells for each layer
    @param sasize Subaperture size (m)
    @param sapos Subaperture positions (m)
    @param sfang Subfield pointing angle (radian)
    @param sffov Subfield field of view (radian)
    @param engine Code to use for matrix computation, 'c' or 'py' ('c')
    """
    # Store all relevant parameters for this matrix in matconf:
    matconf = {'lh': lh, 'lsize': lsize, 'lorig': lorig, 'lcells': lcells,
        'sasize': sasize, 'sapos': sapos, 'sfang': sfang, 'sffov': sffov}
    # Calculate a unique md5sum for this configuration
    mattag = calcTag(matconf)
    # See if this matrix is already stored on disk using the unique tag
    matdir = matroot + mattag
    matfile = 'fwdmatrix'
    # If the file exists, load it from disk and return
    if os.path.exists(os.path.join(matdir, matfile)+'.npy'):
        log.prNot(log.NOTICE, "computeFwdMatrix(): Matrix cached on disk, restoring.")
        fwdmatrix = loadData(os.path.join(matdir, matfile)+'.npy', asnpy=True)
        return (fwdmatrix, mattag)
    
    log.prNot(log.NOTICE, "computeFwdMatrix(): Calculating forward matrix '%s'" % (mattag))
    # Number of subaps, subfields, layers
    nsa = len(sapos)
    nsf = len(sfang)
    nl = len(lh)
    log.prNot(log.INFO, "computeFwdMatrix(): Got %d subaps, %d subfields and %d layers." % (nsa, nsf, nl))
    
    # Matrix width (size of input vector, the atmosphere):
    n = N.product(lcells) * nl
    # Matrix height (size of output vector, the wfwfs data):
    m = nsa * nsf
    fwdmatrix = N.zeros((m, n), dtype=N.float32)
    
    if (engine == 'py'):
        # Base cell positions (lower-left and upper-right) for the
        # different fields of view:
        basecbl = (N.tan(sfang - sffov/2.) * lh.reshape(-1,1,1) - sasize/2. - \
            lorig.reshape(-1,1,2) + lsize.reshape(-1,1,2)) * \
            lcells.reshape(1,1,2) / (lsize.reshape(-1,1,2)*2)
        basecur = (N.tan(sfang + sffov/2.) * lh.reshape(-1,1,1) + sasize/2. - \
            lorig.reshape(-1,1,2) + lsize.reshape(-1,1,2)) * \
            lcells.reshape(1,1,2) / (lsize.reshape(-1,1,2)*2)
        # Loop over all subapertures:
        for sa, csapos in zip(range(nsa), sapos):
            # Offset base positions for this subap:
            subcbl = basecbl + (csapos.reshape(1,1,-1) * lcells.reshape(1,1,2) / \
                (lsize.reshape(-1,1,2)*2))
            subcur = basecur + (csapos.reshape(1,1,-1) * lcells.reshape(1,1,2) / \
                (lsize.reshape(-1,1,2)*2))
            # Make sure all coordinates are within the range [0,0] -- lcells
            blFix = len(subcbl[subcbl < lcells*0])
            urFix = len(subcur[subcur > lcells])
            if (blFix + urFix > 0):
                log.prNot(log.WARNING, "computeFwdMatrix(): Fixing %d and %d coordinates @ sa %d." % (blFix, urFix, sa))
            subcbl[subcbl[:,:,0] < 0, 0] = 0
            subcbl[subcbl[:,:,1] < 0, 1] = 0
            subcur[subcur[:,:,0] > lcells[0], 0] = lcells[0]
            subcur[subcur[:,:,1] > lcells[1], 1] = lcells[1]
            # Loop over all subfields:
            for sf, csfang in zip(range(nsf), sfang):
                # Offset in the model matrix
                woff = sa * nsf + sf
                # Loop over all atmosphere layers:
                for lay in range(nl):
                    # Find which atmospheric cells in layer 'lay' influence
                    # subfield 'sf' in subaperture 'sa':
                    xr0 = subcbl[lay,sf,0]
                    xr1 = subcur[lay,sf,0]
                    yr0 = subcbl[lay,sf,1]
                    yr1 = subcur[lay,sf,1]
                    off = (lay * lcells[0] * lcells[1])
                    # Calculate the fractional intersection of each cell in
                    # this range with the projected subfield cone
                    for cx in xrange(int(xr0), int(N.ceil(xr1))):
                        for cy in xrange(int(yr0), int(N.ceil(yr1))):
                            isect = (min(cx+1, xr1) - max(cx, xr0)) * \
                                (min(cy+1, yr1) - max(cy, yr0))
                            fwdmatrix[woff, off + cy * lcells[0] + cx] = isect
    elif (engine == 'c'):
        code = """
        #line 175 "libtomo.py"
        #ifndef max
        #define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
        #endif
        #ifndef min
        #define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
        #endif
        int sa, sf, lay;
        int cx, cy;
        int off, woff;
        double xll, yll, xur, yur;
        double xllc, yllc, xurc, yurc;
        double isect;
        int lcellsx = lcells(0);
        int lcellsy = lcells(1);
        
        // Loop over all subapertures
        for (sa=0; sa<nsa; sa++) {
            // Loop over all subfields
            for (sf=0; sf<nsf; sf++) {
                woff = sa * nsf + sf;
                // Loop over all layers
                for (lay=0; lay<nl; lay++) {
                    // Calculate the intersection of subfield 'sf' in
                    // subaperture 'sa' with atmospheric layer 'lay'.
                    // Lower-left position of the subfield-subaperture pair
                    // at this layer:
                    xll = tan(sfang(sf, 0) - sffov(0)/2.0) * lh(lay) +
                        sapos(sa, 0) - sasize(0)/2.0;
                    yll = tan(sfang(sf, 1) - sffov(1)/2.0) * lh(lay) +
                        sapos(sa, 1) - sasize(1)/2.0;
                    // Upper-right position:
                    xur = tan(sfang(sf, 0) + sffov(0)/2.0) * lh(lay) +
                        sapos(sa, 0) + sasize(0)/2.0;
                    yur = tan(sfang(sf, 1) + sffov(1)/2.0) * lh(lay) +
                        sapos(sa, 1) + sasize(1)/2.0;
                    // Now convert to cell coordinates
                    xllc = (xll - lorig(lay, 0) + lsize(lay, 0)) /
                        (lsize(lay, 0)*2.0) * lcellsx;
                    yllc = (yll - lorig(lay, 1) + lsize(lay, 1)) /
                        (lsize(lay, 1)*2.0) * lcellsy;
                    xurc = (xur - lorig(lay, 0) + lsize(lay, 0)) /
                        (lsize(lay, 0)*2.0) * lcellsx;
                    yurc = (yur - lorig(lay, 1) + lsize(lay, 1)) /
                        (lsize(lay, 1)*2.0) * lcellsy;
                    // Clip ranges to (0,0) -- (lcellsx, lcellsy)
                    xllc = max(0, xllc);
                    yllc = max(0, yllc);
                    xurc = min(lcellsx, xurc);
                    yurc = min(lcellsy, yurc);
                    // Now we know that our subfield cone intersects the
                    // area (xllc, yllc) -- (xurc, yurc) in cell
                    // coordinates. Calculate the fractional intersection
                    // for each cell in this range.
                    off = (lay * lcellsx * lcellsy);
                    for (cx = floor(xllc); cx < ceil(xurc); cx++) {
                        for (cy = floor(yllc); cy < ceil(yurc); cy++) {
                            isect = (min(cx+1, xurc) - max(cx, xllc)) *
                                (min(cy+1, yurc) - max(cy, yllc));
                            fwdmatrix(woff, off + (cy * lcellsx) + cx) = isect;
                        }
                    }
                }
            }
        }
        """
        S.weave.inline(code, \
            ['fwdmatrix', 'nsa', 'nsf', 'nl', 'sfang', 'sffov', 'sapos',
            'sasize', 'lh', 'lorig', 'lsize', 'lcells'], \
            extra_compile_args=[__COMPILE_OPTS], \
            type_converters=S.weave.converters.blitz)
    
    # Simple sanity checks (elements should be between 0 and 1)
    if N.amax(fwdmatrix) > 1.001:
        raise ArithmeticError("Maximum in forward matrix greater than 1:", \
            N.amax(fwdmatrix), "at: ", N.where(fwdmatrix == N.amax(fwdmatrix)))
    if N.amin(fwdmatrix) < 0:
        raise ArithmeticError("Minimum in forward matrix smaller than 0:", \
            N.amin(fwdmatrix), "at: ", N.where(fwdmatrix == N.amin(fwdmatrix)))
    log.prNot(log.INFO, "computeFwdMatrix(): Simple sanity checks passed.")
    
    # Store the matrix to disk for later use
    try:
        os.makedirs(matdir)
    except OSError:
        pass
    except:
        log.prNot(log.ERR, "computeFwdMatrix(): Error creating directory")
    saveData(os.path.join(matdir, matfile), fwdmatrix, asnpy=True)
    saveData(os.path.join(matdir, 'matconf'), matconf, aspickle=True)
    
    return (fwdmatrix, mattag)
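# Worked illustration of the fractional-overlap rule used by both engines in
# computeFwdMatrix() above: a 1-d interval [0.3, 2.5] crossing unit cells 0,
# 1 and 2 overlaps them by 0.7, 1.0 and 0.5 (the fractions sum to the
# interval length, 2.2). The endpoints are illustrative.
def _overlapDemo(x0=0.3, x1=2.5):
    import numpy as N
    fracs = []
    for cx in xrange(int(x0), int(N.ceil(x1))):
        # Same formula as 'isect' above, in one dimension
        fracs.append(min(cx+1, x1) - max(cx, x0))
    return fracs    # [0.7, 1.0, 0.5] for the defaults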
def sendToWorker(self, data, dataid, task, rbuf):
    """
    Send a frame out to an idle worker thread. This should be called from
    the watchdog MPI thread parsing the files.
    
    'task' should be a valid task identifier (integer) and describe one of
    the tasks that a worker can perform on a frame. The task is sent along
    in the metadata, and a receive for the result is scheduled. 'rbuf'
    should be a receive buffer for the data expected.
    
    Returns True on a successful submit, or False if the workers or
    receive buffers are currently exhausted.
    """
    # If there are no idle workers, everyone is busy and we should get
    # results back first.
    if (len(self.iworkers) == 0):
        return False
    # If the receive buffer is full, we should wait for data first
    if (len(self.recvbuf) >= self.rbuflen):
        return False
    # If the send buffer is full, wait until a send operation has
    # completed before continuing
    if (len(self.sendbuf) >= self.sbuflen):
        log.prNot(log.INFO, "sendToWorker(): %d: sendbuf full (%d elems)" %\
            (self.rank, len(self.sendbuf)))
        sstat = MPI.Status()
        MPI.Request.Waitany(self.sreqbuf.values(), status=sstat)
        # At least one request must be done now, test all to see which
        # request that is
        for sreq in self.sreqbuf:
            if MPI.Request.Test(self.sreqbuf[sreq]):
                log.prNot(log.INFO, "sendToWorker(): %d: found '%d' is done, removing" % (self.rank, sreq))
                del self.sreqbuf[sreq]
                del self.sendbuf[sreq]
                break
    
    # Get an idle worker
    w = self.iworkers.pop(0)
    # Send metadata first, don't track the request because we don't need to
    meta = self.getMeta(data, task)
    log.prNot(log.INFO, "sendToWorker(): Sending out data.")
    self.comm.Isend([meta, MPI.INT], dest=w, tag=self.COMM_WD_W)
    # Request async send for the data
    sreq = self.comm.Isend([data, self.tnptompi(data.dtype.type)], \
        dest=w, tag=self.COMM_WD_W)
    # Store request and data in buffers
    self.sreqbuf[w] = sreq
    self.sendbuf[w] = data
    # Request async receive from the worker
    rdtype = self.tnptompi(rbuf.dtype.type)
    rreq = self.comm.Irecv([rbuf, rdtype], source=w, tag=self.COMM_W_WD)
    # Keep track of what we're receiving and what buffers we use
    self.rreqbuf[w] = rreq
    self.recvbuf[w] = rbuf
    self.idbuf[w] = dataid
    # Submit was successful, return True
    log.prNot(log.INFO, "sendToWorker(): Success.")
    return True
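# Hedged sketch of how a watchdog thread might drive sendToWorker() and
# getFromWorker(); 'grid' is a CompGrid instance, 'frames' an iterable of
# numpy arrays, and 'outshape' the expected result shape -- all illustrative
# assumptions, not part of the class above.
def _dispatchAll(grid, frames, task, outshape):
    import numpy as N
    results = {}
    pending = list(enumerate(frames))
    while pending or len(grid.rreqbuf) > 0:
        if pending:
            fid, frame = pending[0]
            rbuf = N.empty(outshape, dtype=N.float32)
            # False means workers/buffers are busy: collect results first
            if grid.sendToWorker(frame, fid, task, rbuf):
                pending.pop(0)
                continue
        dataid, res = grid.getFromWorker()
        if dataid is not False:
            results[dataid] = res
    return results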
def optSubapConf(img, sapos, sasize, saifac):
    """
    Optimize subaperture mask position using a (flatfield) image.
    
    To optimize the subaperture pattern, take the initial origin positions
    of the subapertures (from 'sapos'), and cut a horizontal and a
    vertical slice of pixels out of the image, each twice as large as the
    subaperture size (from 'sasize'). These two slices give an intensity
    profile across the subimage, and since there is a dark band between
    the subimages, the dimensions of each subimage can be determined by
    finding the minimum intensity in the slices.
    
    Positions will be rounded to whole pixels.
    
    N.B.: This method is slightly sensitive to specks of dust on
    flatfields.
    
    @param img a 2-d numpy array
    @param sapos a list of lower-left pixel positions of the subapertures
    @param sasize a list of the pixel size of the subapertures
    @param saifac the intensity reduction factor counting as 'dark'
    @return (<# of subaps>, <subap pixelpos on CCD>, <subap pixelsize on CCD>)
    """
    # Init optimum position and size variables
    optsapos = []               # Store the optimized subap positions
    allsizes = []               # Store all optimized subaperture sizes
    
    for pos in sapos:
        # Calculate the ranges for the slices (make sure we don't get
        # negative indices and stuff like that). The position (pos)
        # is the origin of the subap. This means we take origin +
        # width*1.5 pixels to the right and origin - width*0.5 pixels to
        # the left to get a slice across the subap. Same for height.
        ## SAORIGIN
        slxran = N.array([max(0, pos[0]-sasize[0]*0.5), \
            min(img.shape[0], pos[0]+sasize[0]*1.5)])
        slyran = N.array([max(0, pos[1]-sasize[1]*0.5), \
            min(img.shape[1], pos[1]+sasize[1]*1.5)])
        # Get two slices in horizontal and vertical direction. Slices are
        # the same width and height as the subapertures are estimated to
        # be, then averaged down to a one-pixel profile.
        # NB: image indexing goes reverse: pixel (x,y) is at img[y,x]
        ## SAORIGIN
        xslice = img[pos[1]:pos[1]+sasize[1], slxran[0]:slxran[1]]
        xslice = xslice.mean(axis=0)
        yslice = img[slyran[0]:slyran[1], pos[0]:pos[0]+sasize[0]]
        yslice = yslice.mean(axis=1)
        # Find the first index where the intensity drops below saifac
        # times the mean intensity at the subaperture center, *in slice
        # coordinates*.
        cutoff = N.mean(img[pos[1]+0.4*sasize[1]:pos[1]+0.6*sasize[1], \
            pos[0]+0.4*sasize[0]:pos[0]+0.6*sasize[0]]) * saifac
        saxran = N.array([ \
            N.argwhere(xslice[:slxran.ptp()/2.] < cutoff)[-1,0], \
            N.argwhere(xslice[slxran.ptp()/2.:] < cutoff)[0,0] + \
            slxran.ptp()/2. ])
        sayran = N.array([ \
            N.argwhere(yslice[:slyran.ptp()/2.] < cutoff)[-1,0], \
            N.argwhere(yslice[slyran.ptp()/2.:] < cutoff)[0,0] + \
            slyran.ptp()/2. ])
        log.prNot(log.DEBUG, "optSubapConf(): ranges: (%d,%d), (%d,%d) cutoff: %g" % \
            (slxran[0], slxran[1], slyran[0], slyran[1], cutoff))
        # The size of the subaperture is sa[x|y]ran[1] - sa[x|y]ran[0]:
        _sass = N.array([saxran.ptp(), sayran.ptp()])
        # The final origin pixel position in the large image (img) of
        # the subaperture is the position we found in the slice
        # (saxran[0], sayran[0]), plus the coordinate where the slice
        # began in the big dataset (slxran[0], slyran[0]).
        ## SAORIGIN
        _sapos = N.array([saxran[0] + slxran[0], \
            sayran[0] + slyran[0]])
        log.prNot(log.INFO, \
            "optSubapConf(): subap@(%d, %d), size: (%d, %d), pos: (%d, %d) end: (%d, %d)" % \
            (pos[0], pos[1], _sass[0], _sass[1], _sapos[0], _sapos[1], \
            _sapos[0] + _sass[0], _sapos[1] + _sass[1]))
        # The subimage size should be the same for all subimages. Store
        # all subaperture sizes found during looping and then take the
        # mean afterwards.
        allsizes.append(_sass)
        optsapos.append(_sapos)
    
    # Calculate the average optimal subaperture size in pixels + std. dev.
    optsize = N.array(allsizes).mean(axis=0)
    tmpstddev = (N.array(allsizes)).std(axis=0)
    
    optsapos = N.round(optsapos).astype(N.int)
    optsize = N.round(optsize).astype(N.int)
    
    log.prNot(log.NOTICE, "optSubapConf(): subimage size optimized to (%d,%d), stddev: (%.3g, %.3g) (was (%.3g, %.3g))" % \
        (optsize[0], optsize[1], tmpstddev[0], tmpstddev[1], \
        sasize[0], sasize[1]))
    if (tmpstddev.mean() > 1.0):
        log.prNot(log.WARNING, \
            "optSubapConf(): size standard deviation rather high, check results!")
    
    return (len(optsapos), optsapos, optsize)
def calcSubaptConf(rad, size, pitch, shape='circular', xoff=[0,0.5], disp=(0,0), scl=1.0):
    """
    Generate subaperture (sa) positions for a given configuration.
    
    @param rad radius of the sa pattern (before scaling) (in pixels)
    @param size size of the sa's (in pixels)
    @param pitch pitch of the sa's (in pixels)
    @param shape shape of the sa pattern ('circular' or 'square')
    @param xoff the horizontal position offset of odd rows (in units of 'size')
    @param disp global displacement of the sa positions (in pixels)
    @param scl global scaling of the sa positions
    @return (<# of subaps>, <subap LL pixelpos on CCD>, <subap centroid pixelpos on CCD>, <subap pixelsize on CCD>)
    
    Raises ValueError if shape is unknown and RuntimeError if no
    subapertures were found using the specified configuration.
    """
    disp = N.array(disp)
    # (half) width and height of the subaperture array
    sa_arr = (N.ceil(rad/pitch)+1).astype(int)
    # Init empty list to store positions
    pos = []
    # Loop over all possible subapertures and see if they fit inside the
    # aperture shape. We loop y from positive to negative (top to bottom
    # in image coordinates) and x from negative to positive (left to
    # right)
    for say in range(sa_arr[1], -sa_arr[1]-1, -1):
        for sax in range(-sa_arr[0], sa_arr[0]+1, 1):
            # Centroid coordinate for this possible subaperture is:
            sac = [sax, say] * pitch
            # 'say % 2' gives 0 for even rows and 1 for odd rows. Use this
            # to apply the row offset 'xoff' to even and odd rows
            sac[0] -= xoff[say % 2] * pitch[0]
            # Check if we're within the aperture bounds, and store the
            # subapt position in that case
            if (shape == 'circular'):
                if (sum((abs(sac)+size/2.0)**2) < rad**2):
                    pos.append(sac)
                    log.prNot(log.INFO, "calcSubaptConf(): adding sa @ (%.3g, %.3g)." % \
                        (sac[0], sac[1]))
            elif shape == 'square':
                if ((abs(sac)+size/2.0 < rad).all()):
                    pos.append(sac)
                    log.prNot(log.INFO, "calcSubaptConf(): adding sa @ (%.3g, %.3g)." % \
                        (sac[0], sac[1]))
            else:
                raise ValueError("Unknown aperture shape '%s'" % (shape))
    
    if (len(pos) <= 0):
        raise RuntimeError("Didn't find any subapertures for this configuration.")
    
    # Apply scaling and displacement to the pattern before returning.
    # NB: pos gives the *centroid* position of the subapertures here
    cpos = (N.array(pos) * scl) + disp
    # Convert symmetric centroid positions to origin (lower-left) positions:
    llpos = cpos - size/2.0
    nsa = len(llpos)
    
    log.prNot(log.NOTICE, "calcSubaptConf(): found %d subapertures." % (nsa))
    return (nsa, llpos, cpos, size)
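# Hedged usage sketch for calcSubaptConf(): a circular pattern of
# 16x16-pixel subapertures on a ~256-pixel-radius aperture centered at
# (256, 256). All numbers are illustrative.
def _subaptDemo():
    import numpy as N
    return calcSubaptConf(256, N.array([16, 16]), N.array([18, 18]),
        shape='circular', xoff=[0, 0.5], disp=(256, 256), scl=1.0)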
def calcShifts(img, saccdpos, saccdsize, sfccdpos, sfccdsize, method=COMPARE_ABSDIFFSQ, extremum=EXTREMUM_2D9PTSQ, refmode=REF_BESTRMS, refopt=1, shrange=[3,3], mask=None, refaps=None):
    """
    Calculate the subaperture/subfield image shifts in 'img'. Parameters
    are pre-processed here and then passed on to _libshifts.calcShifts().
    """
    # Make sure datatypes are correct
    img = img.astype(N.float32)
    saccdpos = saccdpos.astype(N.int32)
    saccdsize = saccdsize.astype(N.int32)
    sfccdpos = sfccdpos.astype(N.int32)
    sfccdsize = sfccdsize.astype(N.int32)
    shrange = N.array(shrange, dtype=N.int32)
    
    # Refopt should *always* be a scalar. This trick ensures that
    refopt = N.array([refopt]).flatten()[0]
    
    # Sanity checks for the subfield windows
    if (N.min(sfccdpos, 0) - shrange < 0).any():
        log.prNot(log.NOTICE, "%d,%d - %d,%d < 0,0" % \
            (tuple(N.min(sfccdpos, 0)) + tuple(shrange)))
        log.prNot(log.ERR, "calcShifts(): Error, subfield position - shift range smaller than 0!")
    if (N.max(sfccdpos, 0) + sfccdsize + shrange > saccdsize).any():
        log.prNot(log.NOTICE, "%d,%d + %d,%d + %d,%d > %d,%d" % \
            (tuple(N.max(sfccdpos, 0)) + tuple(sfccdsize) + tuple(shrange) + \
            tuple(saccdsize)))
        log.prNot(log.ERR, "calcShifts(): Error, subfield position + subfield size + shift range bigger than subaperture!")
    
    # Check & setup a mask if we need it ('circular' or none)
    if (mask == MASK_CIRC):
        maskc = N.indices(sfccdsize) - ((sfccdsize-1)/2.).reshape(2,1,1)
        mask = (N.sum(maskc**2.0, 0) < (sfccdsize[0]/2.0)**2.0).astype(N.int32)
    elif (mask):
        log.prNot(log.ERR, "calcShifts(): Error, unknown mask!")
    else:
        mask = N.ones(sfccdsize, dtype=N.int32)
    
    # Call the C library with the pre-processed parameters
    ret = _libshifts.calcShifts(img, saccdpos, saccdsize, sfccdpos,
        sfccdsize, shrange, mask, method, extremum, refmode, refopt)
    # Return reference apertures used, if requested
    if (refaps is not None):
        refaps.extend(ret['refapts'])
    # Clip shifts; use a float32 shift range, otherwise the shifts are
    # upcast to float64.
    clrn = shrange.astype(N.float32)
    ret['shifts'] = N.clip(ret['shifts'], -clrn, clrn)
    # Give stats on the shifts just calculated
    log.prNot(log.INFO, "calcShifts(): shift: (%g,%g) +- (%g,%g)." % \
        (tuple(ret['shifts'].reshape(-1,2).mean(0)) + \
        tuple(ret['shifts'].reshape(-1,2).std(0))) )
    log.prNot(log.INFO, "calcShifts(): clipped: %d/%d, %g%%, refaps used: %s" %\
        (N.sum(abs(ret['shifts']) >= shrange), ret['shifts'].size, \
        100.0*N.sum(abs(ret['shifts']) >= shrange)/ret['shifts'].size, \
        str(ret['refapts'])) )
    # Return shifts
    return ret['shifts']
def loadSaSfConf(safile):
    """
    Try to load 'safile', if it exists. This file should hold information
    on the subaperture or subfield positions and sizes on the CCD.
    Positions should be the absolute lower-left corner of the subaperture,
    or the relative LL corner of the subfield. The syntax of the file
    should be:
    
    1: <INT number of coordinates>
    2: <FLOAT xsize [m]> <FLOAT ysize [m]> <FLOAT xoff [m]> <FLOAT yoff [m]>
    3: <INT xsize [pix]> <INT ysize [pix]> <INT xoff [pix]> <INT yoff [pix]>
    4--: <INT xpos n [pix]> <INT ypos n [pix]>
    
    with 'xoff' and 'yoff' global offsets for the positions, if necessary.
    N.B.: Line 2 is currently not used, but provided for backwards
    compatibility.
    
    @param safile file holding the configuration as CSV.
    @return (<# of coords>, <pixelpos on CCD>, <pixelsize on CCD>)
    
    Raises IOError if the file could not be found, or RuntimeError if
    parsing did not go as expected.
    """
    if (not os.path.isfile(safile)):
        raise IOError("loadSaSfConf(): File '%s' does not exist." % (safile))
    
    reader = csv.reader(open(safile), delimiter=',')
    try:
        # Number of coordinates [int]
        nsa = int((reader.next())[0])
        # Box size at the aperture [float, float]
        line = reader.next()
        sallsize = N.array([float(line[0]), float(line[1])])
        # Try to read the lenslet offset, set to 0 if not present
        try:
            salloff = N.array([float(line[2]), float(line[3])])
        except:
            salloff = N.array([0, 0])
        # Box pixel size [int, int] and offset [int, int]
        line = reader.next()
        ccdsize = N.array([float(line[0]), float(line[1])])
        # Try to read the ccd offset, set to 0 if not present
        try:
            saccdoff = N.array([float(line[2]), float(line[3])])
        except:
            saccdoff = N.array([0, 0])
    except:
        raise RuntimeError("loadSaSfConf(): Could not parse file header.")
    
    ccdpos = []
    for line in reader:
        try:
            _pos = [float(line[0]), float(line[1])]
            ccdpos.append(_pos)
        except:
            raise RuntimeError("loadSaSfConf(): Could not parse file.")
    
    log.prNot(log.INFO, "loadSaSfConf(): In '%s': found %d coordinates (expected %d)." % (os.path.split(safile)[1], len(ccdpos), nsa))
    if (len(ccdpos) != nsa):
        log.prNot(log.WARNING, "loadSaSfConf(): Found %d coordinates, expected %d. Using all positions found (%d)." % (len(ccdpos), nsa, len(ccdpos)))
        nsa = len(ccdpos)
    
    ccdsize = (N.array(ccdsize)).astype(N.float)
    ccdpos = (N.array(ccdpos)+saccdoff).astype(N.float)
    
    return (nsa, ccdpos, ccdsize)
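# An example file in the format loadSaSfConf() parses (cf. the docstring
# above): four coordinates, 16x16-pixel boxes, no offsets. The values are
# illustrative only.
_EXAMPLE_SACONF = """4
0.098, 0.098, 0.0, 0.0
16, 16, 0, 0
0, 0
32, 0
0, 32
32, 32"""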
def computeSvd(matrix, mattag=None, matroot='./matrices/', checkSanity=True, eps=0.1):
    """
    Compute the singular value decomposition of 'matrix' and optionally do
    some sanity checking of the decomposition.
    
    @param matrix Matrix to SVD
    @param mattag Unique tag for 'matrix', can be used to load the cache
    @param matroot Directory where to save/load the matrix cache
    @param checkSanity Do some sanity checks if set to True
    @param eps Cut-off value for singular values
    @return Dict with U, S, 1/S and V^H as values.
    """
    if (mattag is not None):
        matdir = os.path.join(matroot, mattag)
        if os.path.exists(os.path.join(matdir, "fwdmatrix-svd-s.npy")):
            log.prNot(log.NOTICE, "computeSvd(): SVD stored on disk, restoring")
            s = loadData(os.path.join(matdir, "fwdmatrix-svd-s.npy"), asnpy=True)
            s_inv = loadData(os.path.join(matdir, "fwdmatrix-svd-s-inv.npy"), \
                asnpy=True)
            u = loadData(os.path.join(matdir, "fwdmatrix-svd-u.npy"), asnpy=True)
            vh = loadData(os.path.join(matdir, "fwdmatrix-svd-vh.npy"), asnpy=True)
            return {'u': u, 's': s, 's_inv': s_inv, 'vh': vh}
    else:
        log.prNot(log.WARNING, "computeSvd(): Please supply the parameter 'mattag' to prevent unnecessary recomputation of the SVD.")
    
    # Perform the decomposition. Do not use full_matrices: that takes up
    # *a lot* of memory for very non-square matrices.
    (u, s, vh) = N.linalg.svd(matrix, full_matrices=False)
    # Exclude (potentially) bad singular values when inverting them
    goodVals = N.where(s > eps)
    s_inv = N.zeros(len(s))
    s_inv[goodVals] = 1.0/s[goodVals]
    
    # In numpy:
    # matrix == dot(u, dot(identity(len(s)) * s, vh))
    # matrix^-1 == dot(vh.T, dot(identity(len(s)) * s_inv, u.T))
    if checkSanity:
        # Calculate diagonal matrices for s and 1/s
        sd = N.identity(len(s)) * s
        si = N.identity(len(s)) * s_inv
        log.prNot(log.INFO, "computeSvd(): SVD shapes: u: (%dx%d) s: (%d,%d) vh: (%d,%d)." % (u.shape + si.shape + vh.shape))
        if N.allclose(matrix, N.dot(u, N.dot(sd, vh))):
            log.prNot(log.NOTICE, "computeSvd(): Reconstruction seems to have worked")
        else:
            log.prNot(log.WARNING, "computeSvd(): Reconstruction did not work, got inaccurate SVD.")
        # Try to obtain the identity matrix through matrix * matrix^-1,
        # using the SVD components to calculate the inverse.
        idresid = N.dot(N.dot(vh.T, N.dot(si, u.T)), matrix) - N.identity(len(s))
        log.prNot(log.NOTICE, "computeSvd(): Reconstruction residual, sum: %0.4g avg: %0.4g +- %0.4g" % (idresid.sum(), idresid.mean(), idresid.std()))
    
    if (mattag is not None):
        # Save the SVD components to disk
        matdir = os.path.join(matroot, mattag)
        try:
            os.makedirs(matdir)
        except OSError:
            pass
        except:
            log.prNot(log.ERR, "computeSvd(): Error creating directory '%s'" % (matdir))
        saveData(os.path.join(matdir, "fwdmatrix-svd-s"), s, asnpy=True)
        saveData(os.path.join(matdir, "fwdmatrix-svd-s-inv"), s_inv, asnpy=True)
        saveData(os.path.join(matdir, "fwdmatrix-svd-u"), u, asnpy=True)
        saveData(os.path.join(matdir, "fwdmatrix-svd-vh"), vh, asnpy=True)
    
    # Return SVD components as a dict
    return {'u': u, 's': s, 's_inv': s_inv, 'vh': vh}
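# Hedged sketch of applying the cached SVD as a regularized pseudo-inverse,
# following the identities noted in computeSvd(): x ~ V diag(1/s) U^T b,
# where 'svd' is the dict returned above and 'b' a measurement vector.
def _svdSolve(svd, b):
    import numpy as N
    si = N.identity(len(svd['s'])) * svd['s_inv']
    return N.dot(svd['vh'].T, N.dot(si, N.dot(svd['u'].T, b)))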
def overlayMask(img, saccdpos, saccdsize, filename, number=True, coord=True, norm=0.5, crop=False, border=True):
    """
    Generate a 'fancy' image from 'img' with an overlay of the subaperture
    mask (positions in 'saccdpos' and size in 'saccdsize'). Optional
    parameters include:
    'number' to number the subapertures [True]
    'coord' to show the subaperture coordinates [True]
    'crop' sets everything outside the subapertures to white [False]
    'norm' scales all the data outside the subapertures to the top 'norm'
    part of the range of the data within the subapertures [0.5]
    """
    log.prNot(log.INFO, "overlayMask(): rendering subap mask over image.")
    
    ### Process data
    ### ============
    # Image should be float during processing
    img = img.astype(N.float)
    # Make a mask first
    mask = N.zeros((img.shape), dtype=N.bool)
    maskborder = N.zeros((img.shape), dtype=N.bool)
    for pos in saccdpos:
        # Again remember the reverse indexing of numpy arrays:
        mask[\
            pos[1]:pos[1]+saccdsize[1], \
            pos[0]:pos[0]+saccdsize[0]] = 1
        maskborder[\
            pos[1]-1:pos[1]+saccdsize[1]+1, \
            pos[0]-1:pos[0]+saccdsize[0]+1] = 1
        maskborder[\
            pos[1]:pos[1]+saccdsize[1], \
            pos[0]:pos[0]+saccdsize[0]] = 0
    # Get the range of the interesting part:
    maxval = N.max(img[mask])
    minval = N.min(img[mask])
    # Work on the image ('img' is already a local float copy here)
    masked = img
    # If we're cropping, set everything outside the subapertures to the
    # maximum value found inside the subapertures, 'maxval' (making it
    # white)
    if (crop):
        masked[mask == False] = maxval
    # Normalize the data outside the subapertures if we're not cropping:
    elif (norm is not False):
        # Set everything *outside* the mask to the range of everything
        # inside the mask, but scaled down by a factor 'norm', using the
        # upper part of the range. If the interesting data has a range
        # 0-1, and norm is 0.7, everything outside the subapertures will
        # be scaled to 0.3--1.0
        # Absolute dynamic range for outside the subaps
        crange = (maxval-minval) * norm
        # Minimum value (offset)
        cmin = minval + ((maxval-minval) * (1-norm))
        # Old minimum and maximum
        omin = N.min(masked[mask == False])
        omax = N.max(masked[mask == False])
        # Scale data
        masked[mask == False] = ((masked[mask == False] - omin) * crange / \
            (omax - omin)) + cmin
    else:
        # If norm is False, use the whole image: don't crop and don't
        # rescale
        maxval = N.max(img)
        minval = N.min(img)
    # Draw white borders around the subapertures
    if (border):
        masked[maskborder] = maxval
    
    ### Use Cairo to make the image
    ### ===========================
    import cairo             # For making nice PNG stuff
    # Scale the values to 0-255
    masked = (255*(masked - minval)/(maxval - minval))
    # Init a new empty Cairo surface as target surface
    caidata = N.empty(masked.shape, dtype=N.uint8)
    destsurf = cairo.ImageSurface.create_for_data(caidata, \
        cairo.FORMAT_A8, caidata.shape[0], caidata.shape[1])
    # Create a context from the empty surface
    ctx = cairo.Context(destsurf)
    # Init a new Cairo surface from the masked image
    imgsurf = cairo.ImageSurface.create_for_data(masked.astype(N.uint8), \
        cairo.FORMAT_A8, masked.shape[0], masked.shape[1])
    # Mirror the image vertically, so we use a FITS-like origin (first
    # quadrant of a graph) instead of an image-like origin (where we see
    # the 2nd quadrant of a graph)
    ctx.save()
    mat = cairo.Matrix(1, 0, 0, -1, 0, imgsurf.get_height())
    ctx.transform(mat)
    # Use the image as source, paint it
    ctx.set_source_surface(imgsurf, 0, 0)
    ctx.paint()
    ctx.restore()
    # Choose a font
    ctx.set_font_size(12)
    ctx.select_font_face('Serif', cairo.FONT_SLANT_NORMAL, \
        cairo.FONT_WEIGHT_NORMAL)
    
    # Loop over the subapertures and put some text there
    sanum = 0
    for pos in saccdpos:
        # Move the 'cursor', show some text
        # NOTE: we have to perform the position transform ourselves here,
        # because if we would use ctx.transform(), the text would be
        # transformed as well (which we do not want)
        ctx.move_to(pos[0] + 1, imgsurf.get_height() - (pos[1]+1))
        txt = ''
        if (number is True):
            txt += '%d' % (sanum)
            sanum += 1
        if (coord is True):
            txt += ' @ (%d,%d)' % (pos[0], pos[1])
        ctx.show_text(txt)
    
    # Done, save as PNG
    destsurf.write_to_png(filename + '.png')
    # And as FITS file
    import pyfits
    pyfits.writeto(filename + '.fits', masked, clobber=True)
    log.prNot(log.INFO, "overlayMask(): done, wrote image as fits and png.")
def calcShifts(img, saccdpos, saccdsize, sfccdpos, sfccdsize, method=COMPARE_ABSDIFFSQ, extremum=EXTREMUM_2D9PTSQ, refmode=REF_BESTRMS, refopt=None, shrange=[3,3], subfields=None, corrmaps=None, refaps=None):
    """
    Calculate the image shifts for subapertures/subfields in 'img'.
    
    Subapertures must be located at pixel positions 'saccdpos' with sizes
    'saccdsize'. The subfields are located at 'sfccdpos' (relative to
    'saccdpos'), with sizes 'sfccdsize' in pixels.
    
    'method' defines the method to compare the subimages, 'extremum'
    defines the method to find the best subpixel shift, i.e. what
    interpolation should be used. 'shrange' defines the possible shifts to
    test (the actual number of distances checked is 2*shrange+1).
    'refmode' sets the method to choose a reference subaperture, 'refopt'
    is the reference subaperture used (index) if 'refmode' is set to
    REF_STATIC. If an empty list is passed to 'subfields' and/or
    'corrmaps', these will contain the subfields analysed and the
    correlation maps calculated on return.
    
    For regular (non-wide-field) SH WFS, set 'saccdpos' to the subaperture
    positions, 'sfccdpos' to [[0,0]], and 'sfccdsize' to the subimage
    pixel size. For wide-field SH WFS, set 'saccdpos' similarly, but set
    'sfccdpos' to an array of pixel positions relative to 'saccdpos' for
    the subfields to compare. Set 'sfccdsize' not to the complete subimage
    size, but to the size of the subfield you want to use.
    """
    #===============
    # Initialisation
    #===============
    
    beg = time.time()
    
    # Parse the 'method' argument
    if (method == COMPARE_XCORR):
        log.prNot(log.INFO, "calcShifts(): Using direct cross correlation")
        mfunc = crossCorrWeave
    elif (method == COMPARE_SQDIFF):
        log.prNot(log.INFO, "calcShifts(): Using square difference")
        mfunc = sqDiffWeave
    elif (method == COMPARE_ABSDIFFSQ):
        log.prNot(log.INFO, "calcShifts(): Using absolute difference squared")
        mfunc = absDiffSqWeave
    elif (hasattr(method, '__call__')):
        log.prNot(log.INFO, "calcShifts(): Using custom image comparison")
        mfunc = method
    else:
        raise RuntimeError("'method' must be either one of the predefined image comparison methods, or a function doing that.")
    
    # Parse the 'extremum' argument
    if (extremum == EXTREMUM_2D9PTSQ):
        log.prNot(log.INFO, "calcShifts(): Using 2d parabola interpolation")
        extfunc = quadInt2dWeave
    elif (extremum == EXTREMUM_MAXVAL):
        log.prNot(log.INFO, "calcShifts(): Using maximum value")
        extfunc = maxValPython
    elif (hasattr(extremum, '__call__')):
        log.prNot(log.INFO, "calcShifts(): Using custom interpolation")
        extfunc = extremum
    else:
        raise RuntimeError("'extremum' must be either one of the predefined extremum finding methods, or a function doing that.")
    
    # Convert the image to float32 for easier processing
    img = img.astype(N.float32)
    
    # Find the reference subaperture(s)
    reflist = findRefIdx(img, saccdpos, saccdsize, refmode=refmode, \
        refopt=refopt)
    if (refaps is not None):
        refaps.extend(reflist)
    
    # Init shift vectors (use a list so we can append())
    # Final shape will be: (len(reflist), len(saccdpos), len(sfccdpos), 2)
    disps = []
    shrange = N.array(shrange, dtype=N.int32)
    
    #=========================
    # Begin shift measurements
    #=========================
    
    # Loop over the reference subapertures
    #-------------------------------------
    for _refsa in reflist:
        log.prNot(log.INFO, "calcShifts(): Using subap #%d as reference [%d:%d, %d:%d]" % \
            (_refsa, saccdpos[_refsa][0], saccdpos[_refsa][0]+saccdsize[0], \
            saccdpos[_refsa][1], saccdpos[_refsa][1]+saccdsize[1]))
        # Cut out the reference subaperture and normalize it to its mean
        ref = img[saccdpos[_refsa][1]:saccdpos[_refsa][1]+saccdsize[1], \
            saccdpos[_refsa][0]:saccdpos[_refsa][0]+saccdsize[0]]
        ref = (ref/N.float32(ref.mean()))
        
        # Expand lists to store measurements in
        disps.append([])
        if (subfields is not None): subfields.append([])
        if (corrmaps is not None): corrmaps.append([])
        
        # Loop over the subapertures
        #---------------------------
        for _sapos in saccdpos:
            log.prNot(log.DEBUG, "calcShifts(): -Subimage @ (%d, %d), (%dx%d)" % \
                (_sapos[0], _sapos[1], saccdsize[0], saccdsize[1]))
            # Expand lists to store measurements in
            disps[-1].append([])
            if (subfields is not None): subfields[-1].append([])
            if (corrmaps is not None): corrmaps[-1].append([])
            # Cut out the subimage and normalize it to its mean
            _subimg = img[_sapos[1]:_sapos[1]+saccdsize[1], \
                _sapos[0]:_sapos[0]+saccdsize[0]]
            _subimg = (_subimg/N.float32(_subimg.mean()))
            
            # Loop over the subfields
            #------------------------
            for _sfpos in sfccdpos:
                # Get the current subfield (remember, the pixel at (x,y)
                # is img[y,x])
                _subfield = _subimg[_sfpos[1]:_sfpos[1]+sfccdsize[1], \
                    _sfpos[0]:_sfpos[0]+sfccdsize[0]]
                # Compare the subfield with the reference image
                diffmap = mfunc(_subfield, ref, _sfpos, shrange)
                # Find the extremum, store to the list
                shift = extfunc(diffmap, range=shrange, limit=shrange)
                disps[-1][-1].append(shift[::-1])
                # Store subfield and correlation map, if requested
                if (subfields is not None): subfields[-1][-1].append(_subfield)
                if (corrmaps is not None): corrmaps[-1][-1].append([diffmap])
    
    # Reform the shift vectors to a numpy array and return it
    ret = N.array(disps, dtype=N.float32)
    dur = time.time() - beg
    log.prNot(log.INFO, "calcShifts(): done, took %.3g seconds." % (dur))
    return ret
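# Hedged usage sketch for a regular (non-wide-field) SH WFS, following the
# docstring above: one subfield at (0,0) covering the whole subimage. The
# image and subaperture grid are illustrative random data.
def _shiftsDemo():
    import numpy as N
    img = N.random.random((256, 256)).astype(N.float32)
    sapos = N.array([[32, 32], [96, 32], [32, 96], [96, 96]])
    sasize = N.array([32, 32])
    sfpos = N.array([[0, 0]])
    return calcShifts(img, sapos, sasize, sfpos, sasize, shrange=[3, 3])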