def generate_stackable_id( dataset, version = "1_0" ): ''' Generate an ID from which all similar stackable data will have in common. @param dataset: Input AstroData instance or fits filename. @type dataset: AstroData instances or str @param version: The version from which to run. @type version: string @return: A stackable id. @rtype: string ''' if version != version_index['stackID']: try: # designed to call generateStackableID_ idFunc = getattr( globals()['IDFactory'], 'generateStackableID_' + version ) except: raise "Version: '" + version + "' is either invalid or not supported." return idFunc( inputf, version ) """ shaObj = hashlib.sha1() phu = pf.getheader( inputf[0], 0 ) shaObj.update( phu['OBSID'] ) shaObj.update( phu['OBJECT'] ) """ # print "IDF57: %s"%type(dataset) try: if type(dataset) == str: ad = AstroData(dataset) ID = version + str(ad.group_id()) elif type(dataset) == AstroData.AstroData: ID = version + str(dataset.group_id()) except: print "Filename:", dataset.filename ID = make_id_safe_for_filename(ID) return ID
def generate_astro_data_id( dataset, version="1_0" ):
    """
    An id to be used to identify AstroData types. This is used for:

    1) Calibrations:

    Let's say a recipe performs

    getProcessedBias
    prepare
    biasCorrect

    Because of the prepare step, the calibration key determined at
    getProcessedBias will not match biasCorrect because (N2009..., bias)
    will not match (gN2009..., bias). By using an astro_id, you can avoid
    this issue as you will have (DATALAB, bias). So, any steps inbetween
    getProcessedBias and biasCorrect will have no impact.

    2) Fringe:

    Fringe uses this as a FringeID, which is based off the first input of
    the list.

    @param dataset: Input AstroData instance or fits filename.
    @type dataset: AstroData instances or str

    @param version: The version from which to run.
    @type version: string

    @return: An astrodata id.
    @rtype: string

    @raise TypeError: if dataset is neither a str nor an AstroData instance.
    """
    if isinstance( dataset, str ):
        ad = AstroData( dataset )
        return ad.data_label().as_pytype()
    elif isinstance( dataset, AstroData.AstroData):
        return dataset.data_label().as_pytype()
    else:
        # BUG FIX: the original executed a bare ``print`` and then raised a
        # string (invalid), and "..." + type(dataset) is itself a TypeError.
        raise TypeError("BAD ARGUMENT TYPE: " + str(type(dataset)))
def datasetwalk(self, directory = ".", only = "all", pheads = None,
                showinfo = False,
                onlyStatus = False,
                onlyTypology = False,
                # generic descriptors interface
                showDescriptors = None, # string of comma separated descriptor names (function names!)
                filemask = None,
                showCals = False,
                incolog = True,
                stayTop = False,
                recipe = None,
                raiseExcept = False,
                where = None,
                batchnum = None,
                opti = None):
    """
    Recursively walk a given directory and put type information to stdout

    Builds and returns a DirDict of every file matching ``filemask``
    (default: names ending in .fits/.FITS) whose discovered AstroData
    types pass the ``only`` / ``where`` filters.  Optionally, per matching
    file, prints descriptor values (``showDescriptors``), PHU headers
    (``pheads``), calibration search results (``showCals``) and/or runs a
    reduce ``recipe`` via a shell call.

    NOTE(review): ``incolog`` and ``showinfo`` are accepted but never read
    in this body; ``raiseExcept`` is only consulted in the descriptor
    error paths.
    """
    # About the DirDict class
    """
    The DirDict class represents a single directory, and all it's contents
    that are relevant. It is filled by the client code (datasetwalk) so
    that only "relevant" files are added, and only directories containing
    relevant files are shown. Allows iteration to, for example, populate a
    tree control.

    Note, the path given is the root path, the user has no access to any
    parent or sibling directories. However... also note, it is a locally
    running action, it just happens to use a web interface rather than tk,
    qt, etc. Actions may be final.
    """
    dirdict = DirDict(os.path.abspath(directory))
    global verbose
    global debug
    global batchno
    if batchnum != None:
        batchno = batchnum

    onlylist = only.split(",")
    if (verbose):
        print "onlylist:",repr(onlylist)

    print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> DATA SPI'
    # NOTE(review): this function forces the module-global ``verbose`` on
    # for the remainder of the process.
    verbose = True
    ldebug = True
    dirnum = 0
    # Select the directory iterator: shallow (no recursion) or full os.walk.
    if stayTop == True:
        walkfunc = shallow_walk
        if opti:
            print "Doing a shallow walk"
    else:
        walkfunc = os.walk
        if opti:
            print "Doing an os.walk"
    for root,dirn,files in walkfunc(directory):
        #dirdict.adddir(root)
        if opti:
            print "Analyzing:", root
        dirnum += 1
        if (verbose) :
            print "root:", root
            print "dirn:", dirn

        if verbose:
            # NOTE(review): ``repr(file)`` references the builtin ``file``
            # type, not the loop's ``files`` — presumably a typo; confirm.
            print "DS92:",root, repr(dirn), repr(file)
        if (".svn" not in root):
            width = 10
            ## !!!!!
            ## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
            ## !!!!!
            fullroot = os.path.abspath(root)
            if root == ".":
                rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
            else:
                rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
            firstfile = True
            # print "DS472:", repr(files)
            for tfile in files:
                if tfile == None:
                    # NOTE(review): raising a string is invalid in modern
                    # Python; this would itself raise a TypeError.
                    raise str(files)
                # we have considered removing this check in place of a
                # pyfits open but that was not needed, the pyfits open
                # is down lower, this is just to avoid checking files
                # that are not named correctly to be FITS, so why check them?
                # especially on a command recursing directories and potentially
                # looking at a lot of files.
                if filemask == None:
                    # @@NAMING: fits file mask for typewalk
                    mask = r".*?\.(fits|FITS)$"
                else:
                    mask = filemask
                try:
                    matched = re.match(mask, tfile)
                except:
                    # A bad user-supplied regex aborts the whole walk.
                    print "BAD FILEMASK (must be a valid regular expression):", mask
                    return str(sys.exc_info()[1])
                # Progress indicator: one dot per file examined.
                sys.stdout.write(".")
                if (re.match(mask, tfile)) :
                    if (ldebug) :
                        print "FITS:", tfile
                    fname = os.path.join(root, tfile)
                    try:
                        fl = AstroData(fname)
                    except KeyboardInterrupt:
                        raise
                    except:
                        # NOTE(review): ``mes`` is built but never printed;
                        # unreadable files are skipped silently.
                        mes = "Could not open %s as AstroData" % fname
                        continue

                    gain = 0
                    stringway = False
                    # Two equivalent classification paths; ``stringway`` is
                    # hard-coded off, so the AstroData-instance path runs.
                    if (stringway):
                        if (onlyTypology == onlyStatus):
                            dtypes = self.classification_library.discover_types(fname)
                        elif (onlyTypology):
                            dtypes = self.classification_library.discover_typology(fname)
                        elif (onlyStatus):
                            dtypes = self.classification_library.discover_status(fname)
                    else:
                        # this is the AstroData Class way
                        # to ask the file itself
                        if (onlyTypology == onlyStatus):
                            dtypes = fl.discover_types()
                        elif (onlyTypology):
                            dtypes = fl.discover_typology()
                        elif (onlyStatus):
                            dtypes = fl.discover_status()
                        if verbose:
                            print "DS130:", repr(dtypes)

                    # print "after classification"
                    if (dtypes != None) and (len(dtypes)>0):
                        #check to see if only is set
                        #only check for given type
                        found = False
                        if (only == "all"):
                            found=True
                        else:
                            # note: only can be split this way with no worry about
                            # whitespace because it's from the commandline, no whitespace
                            # allowed in that argument, just "," as a separator
                            ol = only.split(",")
                            # print ol
                            found = False
                            for tpname in dtypes:
                                if (verbose):
                                    print "DS148", " in ", repr(ol),
                                if (tpname in ol):
                                    found = True
                                    break
                            if (verbose):
                                print "yes, found = ", str(found)

                        if (found == True):
                            if where != None:
                                # let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
                                cleanwhere = re.sub("_"," ", where)
                                # ``ad`` is bound for use inside the eval'd
                                # ``where`` expression.
                                ad = fl
                                try:
                                    found = eval(cleanwhere)
                                except:
                                    print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
                                    print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
                                    sys.exit(1)

                            if (found != True):
                                continue

                            if (firstfile == True):
                                pass # print rootln
                            firstfile = False

                            #dirdict tending
                            dirdict.add_dir(fullroot)
                            dirdict.add_file(tfile, root=fullroot)
                            # Progress indicator: "+" per accepted file.
                            sys.stdout.write("+")
                            sys.stdout.flush()
                            if tfile != "":
                                dirdict.add_file_prop(tfile, root= fullroot, propname="types", propval=dtypes)

                            # new line at the end of the output
                            # print ""

                            # show descriptors
                            if (showDescriptors != None):
                                sdl = showDescriptors.split(",")
                                # print ol
                                # get maxlen (pad descriptor names to align output)
                                maxlen = 0
                                for sd in sdl:
                                    maxlen = max(len(sd),maxlen)
                                # print "DS595:", repr(fl.gain(as_dict=True))
                                # print "DS596:", repr(fl.amp_read_area(asList = True))
                                for sd in sdl:
                                    #print "DS242:", sd
                                    try:
                                        # Names without "(" are called as
                                        # descriptor functions; otherwise the
                                        # text is eval'd verbatim on ``fl``.
                                        if "(" not in sd:
                                            dval = eval("fl."+sd+"(asList=True)")
                                        else:
                                            dval = eval("fl."+sd)
                                        pad = " " * (maxlen - len(sd))
                                        sd = str(sd) + pad
                                        print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
                                    except AttributeError:
                                        pad = " " * (maxlen - len(sd))
                                        sd = str(sd) + pad
                                        exinfo = sys.exc_info()
                                        print " ${BOLD}%s${NORMAL} = ${RED}NO SUCH DESCRIPTOR${NORMAL}" % (sd)
                                        if raiseExcept:
                                            raise
                                    except:
                                        pad = " " * (maxlen - len(sd))
                                        sd = str(sd) + pad
                                        print (" ${BOLD}%s${NORMAL} = ${RED}FAILED${NORMAL}: %s") % (sd, str(sys.exc_info()[1]))
                                        raise
                                        # NOTE(review): unreachable — follows
                                        # an unconditional raise.
                                        if raiseExcept:
                                            raise

                            # if phead then there are headers to print per file
                            if (pheads != None):
                                #print " -----------"sys.exec
                                print " ${UNDERLINE}PHU Headers${NORMAL}"
                                #print " -----------"
                                #print "pheads", pheads
                                hlist = pyfits.open(fname)
                                pheaders = pheads.split(",")
                                for headkey in pheaders:
                                    #if in phu, this is the code
                                    try:
                                        print " %s = (%s)" % (headkey, hlist[0].header[headkey])
                                    except KeyError:
                                        print " %s not present in PHU of %s" % (headkey, tfile)
                                hlist.close()
                            if (showCals == True):
                                adr = AstroDataRecord(fl)
                                for caltyp in ["bias", "twilight"]:
                                    rq = self.calDefLib.get_cal_req([adr],caltyp)[0]
                                    try:
                                        cs = "%s" % (str(self.calService.search(rq)[0]))
                                    except:
                                        # NOTE(review): the re-raise means the
                                        # "No ... found" message is never shown.
                                        cs = "No %s found, %s " % ( caltyp, str(sys.exc_info()[1]))
                                        raise
                                    print " %10s: %s" % (caltyp, cs)
                            if (recipe):
                                banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
                                print "${REVERSE}${RED}" + " "*len(banner)
                                print banner
                                print " "*len(banner)+"${NORMAL}"
                                if recipe == "default":
                                    rs = ""
                                else:
                                    rs = "-r %s" % recipe
                                # Shell out to the reduce command per file.
                                subprocess.call("reduce %s %s" % (rs, fname), shell=True)
                else:
                    if (verbose) :
                        print "%s is not a FITS file" % tfile

        if False: # done with walk function switching
            if stayTop == True:
                # cheap way to not recurse.
                break;
    print ""
    return dirdict
def testNside(lower, upper):
    """
    Test different Nside resolutions of the maps, see what is best.

    For each power p in [lower, upper], reads (or creates) the Healpix
    pixel-coordinate file for Nside = 2**p, builds the star-count map and
    saves a Mollweide plot of it.

    Input:
    - lower, int-scalar, lower power p in Nside = 2**p
    - upper, int-scalar, upper power p in Nside = 2**p
    Both must follow 2**p.
    """
    # BUG FIX: use 'is False' so a legitimate p == 0 (Nside = 1) is not
    # mistaken for the missing-argument sentinel (0 == False is True).
    if lower is False and upper is False:
        print('No input! Need lower and upper power of 2, in Nside=2**p')
        sys.exit()
    elif lower is False:
        print('No lower limit, type in first argument. Nside=2**p')
        sys.exit()
    elif upper is False:
        print('No upper limit, type in second argument. Nside=2**p')
        sys.exit()

    if lower < 0 or upper < 0:
        print('Power must be positive')
        sys.exit()
    # BUG FIX: was 'lower >= 12', which wrongly rejected the documented
    # maximum p = 12; a warning only — the loop still runs, as before.
    elif lower > 12 or upper > 12:
        print(
            'Power too high, max p=12. Will use too long time to generate maps'
        )

    savepath = ''
    f1 = 'RightAscension.h5'
    f2 = 'Declination.h5'
    for p in range(lower, upper + 1):
        t1 = time.time()
        Ns = 2**p
        print('Nside={}'.format(Ns))

        # Reuse a cached pixel-coordinate file for this Nside when present.
        fil = Path("SPC_Nside_{}.h5".format(Ns))
        print(fil)
        if fil.exists():
            # read the pixel file:
            print('Read coordinate file for Nside = {}'.format(Ns))
            f = h5py.File(fil, 'r')
            pixpos = np.asarray(f['Healpix coordinates'])
            f.close()
        else:
            print('Create coordinate file for Nside = {}'.format(Ns))
            # f1/f2 were previously defined but unused; pass them here.
            pixpos = ad.PixelCoord(Ns, savepath + f1, savepath + f2, savepath)

        Npix = hp.nside2npix(Ns)
        # Renamed from 'map' to avoid shadowing the builtin.
        skymap, b = Tools.Map(pixpos, Npix)

        # plot
        hp.mollview(skymap,
                    coord=['C', 'G'],
                    nest=False,
                    title='All stars with Nside={}'.format(Ns),
                    unit='N_stars')
        plt.savefig('Figures2/AllStars_diff_Nside{}.png'.format(Ns))
        t2 = time.time()
        print('Time used for Nside {} is: {}'.format(Ns, t2 - t1))
        print('_______________')
    #####
    plt.show()
def typewalk(self, directory = ".", only = "all", pheads = None,
             showinfo = False,
             onlyStatus = False,
             onlyTypology = False,
             # generic descriptors interface
             showDescriptors = None, # string of comma separated descriptor names (function names!)
             filemask = None,
             showCals = False,
             incolog = True,
             stayTop = False,
             recipe = None,
             raiseExcept = False,
             where = None,
             batchnum = None,
             opti = None):
    """
    Recursively walk a given directory and put type information to stdout

    For each file matching ``filemask`` (default: names ending in
    .fits/.FITS) whose discovered AstroData types pass the
    ``only``/``where`` filters, prints the filename and its types, and
    optionally: AstroData/pyfits info (``showinfo``), descriptor values
    (``showDescriptors``), PHU headers (``pheads``), local and Gemini
    calibration search results (``showCals``), and/or runs a reduce
    ``recipe`` via a shell call.

    NOTE(review): ``incolog`` is accepted but never read in this body.
    """
    global verbose
    global debug
    global batchno
    if batchnum != None:
        batchno = batchnum

    if raiseExcept:
        from astrodata.debugmodes import set_descriptor_throw
        set_descriptor_throw(True)

    onlylist = only.split(",")
    if (verbose):
        print "onlylist:",repr(onlylist)

    verbose = False
    ldebug = False
    dirnum = 0
    # Select the directory iterator: shallow (no recursion) or full os.walk.
    if stayTop == True:
        walkfunc = shallow_walk
        if opti:
            print "Doing a shallow walk"
    else:
        walkfunc = os.walk
        if opti:
            print "Doing an os.walk"

    for root,dirn,files in walkfunc(directory):
        verbose = False
        if opti:
            print "Analyzing:", root
        dirnum += 1
        if (verbose) :
            print "DS90:",root,dirn,files
            #print "root:", root
            #print "dirn:", dirn

        #if verbose:
        #    print "DS92:",root, repr(dirn), repr(file)
        if (".svn" not in root):
            width = 10
            ## !!!!!
            ## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
            ## !!!!!
            fullroot = os.path.abspath(root)
            if verbose:
                print 'DS91:',fullroot
            if root == ".":
                rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
            else:
                rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
            firstfile = True
            for tfile in files:
                # we have considered removing this check in place of a
                # pyfits open but that was not needed, the pyfits open
                # is down lower, this is just to avoid checking files
                # that are not named correctly to be FITS, so why check them?
                # especially on a command recursing directories and potentially
                # looking at a lot of files.
                if filemask == None:
                    # @@NAMING: fits file mask for typewalk
                    mask = r".*?\.(fits|FITS)$"
                else:
                    mask = filemask
                try:
                    matched = re.match(mask, tfile)
                except:
                    # A bad user-supplied regex aborts the whole walk.
                    print "BAD FILEMASK (must be a valid regular expression):", mask
                    return str(sys.exc_info()[1])

                if (re.match(mask, tfile)) :
                    if (ldebug) :
                        print "FITS:", tfile
                    fname = os.path.join(root, tfile)

                    try:
                        # NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
                        # fl is the astrodata instance of tfile/fname
                        fl = AstroData(fname)
                        #
                        # NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
                    except KeyboardInterrupt:
                        raise
                    except:
                        mes = "Could not open file: %s as AstroData" % fname
                        print mes
                        # raise Errors.AstroDataError(mes)
                        continue

                    gain = 0
                    stringway = False
                    # Two equivalent classification paths; ``stringway`` is
                    # hard-coded off, so the AstroData-instance path runs.
                    if (stringway):
                        if (onlyTypology == onlyStatus):
                            dtypes = self.classification_library.discover_types(fname)
                        elif (onlyTypology):
                            dtypes = self.classification_library.discover_typology(fname)
                        elif (onlyStatus):
                            dtypes = self.classification_library.discover_status(fname)
                    else:
                        # this is the AstroData Class way
                        # to ask the file itself
                        if (onlyTypology == onlyStatus):
                            dtypes = fl.discover_types()
                        elif (onlyTypology):
                            dtypes = fl.discover_typology()
                        elif (onlyStatus):
                            dtypes = fl.discover_status()
                        if verbose:
                            print "DS130:", repr(dtypes)

                    # print "after classification"
                    if (dtypes != None) and (len(dtypes)>0):
                        #check to see if only is set
                        #only check for given type
                        found = False
                        if (only == "all"):
                            found=True
                        else:
                            # note: only can be split this way with no worry about
                            # whitespace because it's from the commandline, no whitespace
                            # allowed in that argument, just "," as a separator
                            ol = only.split(",")
                            # print ol
                            found = False
                            for tpname in dtypes:
                                if (verbose):
                                    print "DS148", " in ", repr(ol),
                                if (tpname in ol):
                                    found = True
                                    break
                            if (verbose):
                                print "yes, found = ", str(found)

                        if (found == True):
                            if where != None:
                                # let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
                                cleanwhere = re.sub("_"," ", where)
                                # ``ad`` is bound for use inside the eval'd
                                # ``where`` expression.
                                ad = fl
                                try:
                                    found = eval(cleanwhere)
                                except:
                                    print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
                                    print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
                                    sys.exit(1)

                            if (found != True):
                                continue

                            if (firstfile == True):
                                print rootln
                            firstfile = False

                            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                            # !!!!PRINTING OUT THE FILE AND TYPE INFO!!!!
                            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                            indent = 5
                            pwid = 40
                            fwid = pwid - indent
                            # print start of string
                            #print "DS270:", len(tfile)
                            # Overlong filenames are printed on their own line;
                            # the if False branch (chunked wrapping) is disabled.
                            while len(tfile)>= fwid-1:
                                if False:
                                    part = tfile[:fwid]
                                    print " ${BG_WHITE}%s${NORMAL}" % part
                                    tfile = tfile[fwid-1:]
                                else:
                                    print " ${BG_WHITE}%s${NORMAL}" % tfile
                                    tfile = ""
                            if len(tfile)>0:
                                prlin = " %s " % tfile
                                prlincolor = " ${BG_WHITE}%s${NORMAL} " % tfile
                            else:
                                prlin = " "
                                prlincolor = " "
                            empty = " "*indent + "."*fwid
                            fwid = pwid+indent
                            lp = len(prlin)
                            nsp = pwid - ( lp % pwid )

                            # print out indent, filename, and "..." to justify types area"
                            # there is a way to do with with a comprehension?

                            print prlincolor+("."*nsp)+"${NORMAL}",

                            # print dtypes, wrapping to the terminal width
                            tstr = ""
                            termsize = terminal.getTerminalSize()
                            maxlen = termsize[0] - pwid -1
                            printed = False
                            dtypes.sort()
                            for dtype in dtypes:
                                if (dtype != None):
                                    newtype = "(%s) " % dtype
                                else:
                                    newtype = "(Unknown) "

                                # print "(%s)N20091027S0133.fits" % dtype ,
                                astr = tstr + newtype
                                if len(astr) >= maxlen:
                                    print "${BLUE}"+ tstr + "${NORMAL}"
                                    tstr = newtype
                                    print empty,
                                else:
                                    tstr = astr
                            if tstr != "":
                                print "${BLUE}"+ tstr + "${NORMAL}"
                            tstr = ""
                            astr = ""
                            printed = True
                            # new line at the end of the output
                            # print ""

                            if (showinfo == True):
                                print "-"*40
                                print "AstroData.info():"
                                fl.info()
                                print "-"*40
                                print "pyfits.info():"
                                fl.hdulist.info()
                                print "-"*40
                                #hlist = pyfits.open(fname)
                                #hlist.info()
                                #hlist.close()

                            # print descriptors
                            # show descriptors
                            if (showDescriptors != None):
                                sdl = showDescriptors.split(",")
                                if verbose:
                                    print "DS320:", repr(sdl)
                                # print ol
                                # get maxlen (pad descriptor names to align output)
                                # "err" is a flag, not a descriptor: show
                                # errors only.
                                if "err" in sdl:
                                    errOnly = True
                                    sdl.remove("err")
                                else:
                                    errOnly = False
                                maxlen = 0
                                for sd in sdl:
                                    maxlen = max(len(sd),maxlen)

                                for sd in sdl:
                                    #print "DS242:", sd
                                    try:
                                        # Names without "(" are called as
                                        # descriptor functions; otherwise the
                                        # text is eval'd verbatim on ``fl``.
                                        if "(" not in sd:
                                            dval = eval("fl."+sd+"(asList=True)")
                                        else:
                                            #print "DS333:", repr(sd)
                                            dval = eval("fl."+sd)
                                        pad = " " * (maxlen - len(sd))
                                        sd = str(sd) + pad
                                        if dval:
                                            if (not errOnly):
                                                print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
                                        else:
                                            print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}returned None${NORMAL}' % (sd)
                                    except AttributeError:
                                        exinfo = sys.exc_info()
                                        print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}NO SUCH DESCRIPTOR${NORMAL}' % (sd)
                                        #if raiseExcept:
                                        #    raise
                                    except KeyboardInterrupt:
                                        raise
                                    except:
                                        # pad = " " * (maxlen - len(sd))
                                        # sd = str(sd) + pad
                                        exinfo = sys.exc_info()
                                        print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}%s${NORMAL}' % (sd, repr(exinfo[1]).strip())
                                        #if raiseExcept:
                                        #    raise

                            # if phead then there are headers to print per file
                            if (pheads != None):
                                #print " -----------"sys.exec
                                print " ${UNDERLINE}PHU Headers${NORMAL}"
                                #print " -----------"
                                #print "pheads", pheads
                                hlist = pyfits.open(fname)
                                pheaders = pheads.split(",")
                                for headkey in pheaders:
                                    #if in phu, this is the code
                                    try:
                                        print " %s = (%s)" % (headkey, hlist[0].header[headkey])
                                    except KeyError:
                                        print " %s not present in PHU of %s" % (headkey, tfile)
                                hlist.close()
                            if (showCals == True):
                                from astrodata.adutils.adccutils.calutil import localCalibrationSearch
                                from astrodata.adutils.adccutils.calutil import geminiCalibrationSearch
                                calurls = localCalibrationSearch(fl)
                                print " ${BOLD}Local Calibration Search${NORMAL}"
                                if calurls != None:
                                    for caltyp in calurls.keys():
                                        print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
                                else:
                                    print " ${RED}No Calibrations Found${NORMAL}"
                                calurls = geminiCalibrationSearch(fl)
                                print " ${BOLD}Gemini Calibration Search${NORMAL}"
                                if calurls != None:
                                    for caltyp in calurls.keys():
                                        print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
                                else:
                                    print " ${RED}No Calibrations Found${NORMAL}"
                            if (recipe):
                                banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
                                print "${REVERSE}${RED}" + " "*len(banner)
                                print banner
                                print " "*len(banner)+"${NORMAL}"
                                if recipe == "default":
                                    rs = ""
                                else:
                                    rs = "-r %s" % recipe
                                # Shell out to the reduce command per file.
                                subprocess.call("reduce %s %s" % (rs, fname), shell=True)
                else:
                    if (verbose) :
                        print "%s is not a FITS file" % tfile

        if False: # done with walk function switching
            if stayTop == True:
                # cheap way to not recurse.
                break;