def photzpmain():
    """Command-line entry point: run photometric zeropoint processing on a set of images.

    Work selection is driven entirely by the parsed command line:
    per camera type, per single camera, or a crawl of a local directory.
    Exits the process with status 0 when done.
    """
    args = parseCommandLine()

    # Site list: explicit comma-separated list from the command line, or all known sites.
    if args.site is not None:
        sites = [site for site in args.site.split(',')]
    else:
        sites = ('lsc', 'cpt', 'ogg', 'coj', 'tfn', 'elp')
    print('DATES: ', args.date)

    for date in args.date:
        _logger.info("Processing DAY-OBS {}".format(date))
        if args.cameratype is not None:
            # crawl by camera type: every requested type at every selected site
            cameratypes = [x for x in args.cameratype.split(',')]
            for site in sites:
                for cameratype in cameratypes:
                    inputlist = es_aws_imagefinder.get_frames_for_photometry(date, site,
                                                                             cameratype=cameratype,
                                                                             mintexp=args.mintexp)
                    if inputlist is None:
                        _logger.info("None list returned for date {}. Nothing to do here.".format(date))
                        continue
                    # NOTE(review): a fresh DB handle is opened and closed per
                    # (site, cameratype) pair — presumably intentional; confirm.
                    imagedb = photdbinterface(args.imagedbPrefix)
                    _logger.info("Processing image list N={} for type {} at site {} for date {}".format(
                        len(inputlist), cameratype, site, date))
                    process_imagelist(inputlist, imagedb, args)
                    imagedb.close()
        elif args.camera is not None:
            # crawl for a specific camera; site is not constrained here
            inputlist = es_aws_imagefinder.get_frames_for_photometry(date, site=None, camera=args.camera,
                                                                     mintexp=args.mintexp)
            if inputlist is None:
                _logger.info("None list returned for date {}. Nothing to do here.".format(date))
                continue
            _logger.info(
                "Processing image list N={} for camera {} at for date {}".format(len(inputlist), args.camera, date))
            imagedb = photdbinterface(args.imagedbPrefix)
            process_imagelist(inputlist, imagedb, args)
            imagedb.close()
        else:
            print("Need to specify either a camera, or a camera type.")

    # Local-directory crawl; independent of the per-date loop above (it does
    # not reference `date`). Results go into a sqlite DB inside that directory.
    if args.crawldirectory is not None:
        # Crawl files in a local directory
        print(f"Not tested {args.crawldirectory}")
        # Reduction level encoded in the file name: "91" = reduced, "00" = raw.
        redlevel = "91" if not args.fromraw else "00"
        inputlist = (glob.glob(f"{args.crawldirectory}/*[es]{redlevel}.fits.fz"))
        # frameid -1 presumably marks "local file, not an archive frame" — confirm downstream.
        inputlist = Table([inputlist, [-1] * len(inputlist)], names=['filename', 'frameid'])
        imagedb = photdbinterface("sqlite:///%s/%s" % (args.crawldirectory, 'imagezp.db'))
        process_imagelist(inputlist, imagedb, args, rewritetoarchivename=False)
        imagedb.close()

    sys.exit(0)
def longtermphotzp():
    """Entry point for long-term zeropoint trending.

    Produces per-telescope trend plots, combined mirror-model summary plots,
    and optionally an HTML report, as requested on the command line.
    Exits the process with status 0 when done.
    """
    # Global plot appearance for every figure generated below.
    plt.style.use('ggplot')
    matplotlib.rcParams['savefig.dpi'] = 300
    matplotlib.rcParams['figure.figsize'] = (8.0, 6.0)

    args = parseCommandLine()
    generated = []

    # Either the single requested site, or every site we know about.
    sites_to_crawl = [args.site, ] if args.site is not None else telescopedict

    if args.pertelescopeplots:
        # One shared DB handle for the whole crawl; closed when done.
        db = photdbinterface(args.database)
        for site in sites_to_crawl:
            scopes = telescopedict[site] if args.telescope is None else [args.telescope, ]
            for telescope in scopes:
                _logger.info(
                    "Now plotting and fitting mirror model for %s %s in filter %s" % (site, telescope, args.filter))
                plots = plotlongtermtrend(site, telescope, args.filter, args, cacheddb=db)
                if plots is not None:
                    generated += plots
        db.close()

    # Generate mirror model plots for all telscopes in a single plot
    if args.createsummaryplots:
        generated += plotallmirrormodels(args, type=['2m0', '1m0'])
        generated += plotallmirrormodels(args, type=['0m4'], range=[20, 23])

    # Make a fancy HTML page
    if args.renderhtml:
        renderHTMLPage(args, generated)

    sys.exit(0)
def getCombineddataByTelescope(site, telescope, context, instrument=None, cacheddb=None):
    """Fetch all zeropoint records for one site / telescope (and optionally one camera).

    :param site: site code, e.g. 'lsc'
    :param telescope: dome and telescope joined by a dash, e.g. 'domb-1m0a'
    :param context: parsed command-line arguments; context.database is used
        only when no cached database handle is supplied
    :param instrument: optional camera name to narrow the selection
    :param cacheddb: optional already-open photdbinterface to reuse
    :return: concatenated records for the site / telescope / instrument selection
    """
    db = photdbinterface(context.database) if cacheddb is None else cacheddb
    _logger.debug("Getting all photometry data for %s %s %s" % (site, telescope, instrument))
    dome, tel = telescope.split("-")
    results = db.readRecords(site, dome, tel, instrument)
    # Only close a handle we opened ourselves; a cached handle belongs to the caller.
    if cacheddb is None:
        db.close()
    return results
def plotallmirrormodels(context, type=['2m0a', '1m0a'], range=[22.5, 25.5], cacheddb=None):
    ''' Fetch mirror model from database for a selected class of telescopes, and put them all into one single plot.
    Returns a list of figure names (S3 keys)

    NOTE(review): the parameters `type` and `range` shadow builtins and use
    mutable default lists; callers pass them as keywords, so renaming would
    break the interface — left as-is.
    '''
    filenames = []
    # Reuse the caller's DB handle when given; otherwise open (and later close) our own.
    if cacheddb is None:
        db = photdbinterface(context.database)
    else:
        db = cacheddb
    myfilter = context.filter
    modellist = []
    for t in type:
        modellist.extend(db.findmirrormodels(t, myfilter))
        # Overlay the reference throughput for this telescope class (first 3 chars, e.g. '2m0').
        plot_referencethoughput(starttime, endtime, myfilter, t[0:3])
    # NOTE(review): x[-1:-5] is an empty slice for every string (start after stop,
    # positive step), so the middle key component is always ''. The .replace('2', '0')
    # therefore does nothing — a non-empty slice such as x[-5:-1] was probably
    # intended. Confirm desired sort order before changing.
    modellist.sort(key=lambda x: x[0:3] + x[-1:-5].replace('2', '0') + x[4:8])
    _logger.info("Plotting several models in a single plot. These are the models returned from search %s: %s" % (
        type, modellist))
    plt.rc('lines', linewidth=1)
    # Alternate line styles so overlapping models remain distinguishable.
    prop_cycle = cycle(['-', '-.'])
    for model in modellist:
        _logger.debug("Plotting mirror model %s" % model)
        data = db.readmirrormodel(model, myfilter)
        plt.gcf().autofmt_xdate()
        plt.plot(data['dateobs'], data['zp'], next(prop_cycle), label=model.replace('-', ':'), )
    plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', ncol=1)
    plt.xlabel('DATE-OBS')
    plt.ylabel("phot zeropoint %s" % myfilter)
    dateformat(starttime, endtime)
    plt.ylim(range)
    plt.title("Photometric zeropoint model in filter %s" % myfilter)
    plt.grid(True, which='both')
    # File name suffix is the concatenation of all requested telescope classes.
    name = ""
    for ii in type:
        name += str(ii)
    with io.BytesIO() as fileobj:
        # create the plot into an in-memory Fileobj
        plt.gcf().set_size_inches(12, 6)
        plt.savefig(fileobj, format='png', bbox_inches='tight')
        plt.close()
        # save the plot onto stable storage
        filename = 'allmodels_{}_{}.png'.format(name, context.filter)
        filenames.append(filename)
        write_to_storage_backend(context.imagedbPrefix, filename, fileobj.getvalue())
    if cacheddb is None:
        db.close()
    return filenames
def plotlongtermtrend(select_site, select_telescope, select_filter, context, instrument=None, cacheddb=None):
    """Plot the long-term photometric zeropoint trend for one telescope / filter.

    Fits and stores an upper-envelope mirror model, then renders diagnostic
    plots (zeropoint trend, optional error histogram, airmass sanity check,
    color-term trend) and writes each to the storage backend.

    :param select_site: site code, e.g. 'lsc'
    :param select_telescope: 'dome-telescope' identifier, e.g. 'doma-1m0a'
    :param select_filter: filter name; 'zp' / 'zs' are treated as one band
    :param context: parsed command-line arguments (database, imagedbPrefix, ...)
    :param instrument: optional camera name to restrict the data
    :param cacheddb: optional already-open photdbinterface to reuse
    :return: list of generated file names, or None if there was nothing to plot
    """
    filenames = []
    data = getCombineddataByTelescope(select_site, select_telescope, context, instrument, cacheddb=cacheddb)
    mystarttime = starttime
    if data is None:
        return

    # down-select data by viability and camera / filter combination
    selection = np.ones(len(data['name']), dtype=bool)

    if select_filter is not None:
        if (select_filter == 'zp') or (select_filter == 'zs'):
            # zs and zp are the same physical band under two names; accept either.
            selection = selection & (
                    (data['filter'] == 'zs') | (data['filter'] == 'zp'))
        else:
            selection = selection & (data['filter'] == select_filter)
    if instrument is not None:
        selection = selection & (data['camera'] == instrument)

    # weed out bad data
    selection = selection & np.logical_not(np.isnan(data['zp']))
    selection = selection & np.logical_not(np.isnan(data['airmass']))

    # BUG FIX: `selection` is a fixed-length boolean mask, so len(selection)
    # could never be 0 once any data existed; count the selected rows instead.
    if np.count_nonzero(selection) == 0:
        _logger.warning("No data points left after down selection. Not wasting time on empty plots.")
        return

    zpselect = data['zp'][selection]
    dateselect = data['dateobs'][selection]
    airmasselect = data['airmass'][selection]
    cameraselect = data['camera'][selection]
    zpsigselect = data['zpsig'][selection]

    ymax = 25.5  # good starting point for 2m:spectral cameras
    photzpmaxnoise = 0.2
    if select_telescope is not None:
        if '0m4' in select_telescope:
            # 0.4m sbigs: fainter zeropoint ceiling, noisier measurements
            ymax = 22.5
            photzpmaxnoise = 0.5

    # Calculate air-mass corrected photometric zeropoint; corrected to airmass of 1
    zp_air = zpselect + airmasscorrection[select_filter] * airmasselect - airmasscorrection[select_filter]

    # find the overall trend of zeropoint variations, save to output file.
    if len(dateselect[zpsigselect < photzpmaxnoise]) > 0:
        _x, _y = findUpperEnvelope(dateselect[zpsigselect < photzpmaxnoise],
                                   zp_air[zpsigselect < photzpmaxnoise], ymax=ymax)
        db = photdbinterface(context.database) if cacheddb is None else cacheddb
        db.storemirrormodel("%s-%s" % (select_site, select_telescope), select_filter, _x, _y)
        if cacheddb is None:
            db.close()
    else:
        _x = None
        _y = None

    # now we are starting to plot stuff
    plt.figure()
    plot_referencethoughput(mystarttime, endtime, select_filter, select_telescope[-4:-1])

    # mark mirror cleaning events.
    for telid in telescopecleaning:
        _site, _enc, _tel = telid.split("-")
        if (_site == select_site) and (select_telescope == '%s-%s' % (_enc, _tel)):
            for event in telescopecleaning[telid]:
                plt.axvline(x=event, color='grey', linestyle=':')

    # mark mirror replacement events.
    for telid in mirrorreplacmenet:
        _site, _enc, _tel = telid.split("-")
        if (_site == select_site) and (select_telescope == '%s-%s' % (_enc, _tel)):
            for event in mirrorreplacmenet[telid]:
                plt.axvline(x=event, color='orange', linestyle='--')

    # plot all the zeropoint measurements, but label different cameras differently.
    uniquecameras = np.unique(cameraselect)
    for uc in uniquecameras:
        plt.plot(dateselect[(zpsigselect <= photzpmaxnoise) & (cameraselect == uc)],
                 zp_air[(zpsigselect <= photzpmaxnoise) & (cameraselect == uc)],
                 'o', markersize=2, label=uc)
    # noisy measurements in grey, unlabelled
    plt.plot(dateselect[zpsigselect > photzpmaxnoise], zp_air[zpsigselect > photzpmaxnoise],
             '.', markersize=1, c="grey", label='_nolegend_')

    if _x is not None:
        plt.plot(_x, _y, "-", c='red', label='upper envelope')
        # Fit a trend to the mirror model between consecutive mirror replacements.
        for telid in mirrorreplacmenet:
            _site, _enc, _tel = telid.split("-")
            if (_site == select_site) and (select_telescope == '%s-%s' % (_enc, _tel)):
                # BUG FIX: the original appended utcnow() to the module-level
                # mirrorreplacmenet[telid] list in place, growing it on every
                # call. Work on a fresh list instead.
                events = mirrorreplacmenet[telid] + [datetime.datetime.utcnow()]
                print(events)
                for ii in range(len(events) - 1):
                    start = events[ii]
                    end = events[ii + 1]
                    fittrendtomirrormodel(_x, _y, start, end, plot=True)
    else:
        _logger.warning("Mirror model failed to compute. not plotting !")

    # prettify, decorations, etc
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.ylim([ymax - 4.5, ymax])
    dateformat(mystarttime, endtime)
    plt.xlabel("DATE-OBS")
    plt.ylabel("Photometric Zeropoint %s" % select_filter)
    plt.title("Long term throughput %s:%s in %s" % (select_site, select_telescope, select_filter))
    plt.gcf().set_size_inches(12, 6)

    # and finally safe the plot.
    with io.BytesIO() as fileobj:
        plt.savefig(fileobj, format='png', bbox_inches='tight')
        plt.close()
        filename = 'photzptrend-{}-{}-{}.png'.format(select_site, select_telescope, select_filter)
        filenames.append(filename)
        write_to_storage_backend(context.imagedbPrefix, filename, fileobj.getvalue())

    # for internal use: generate error plots.
    if context.errorhistogram:
        plt.figure()
        # NOTE(review): `normed` was removed in matplotlib 3.1; `density=True`
        # is the supported equivalent (same normalization).
        plt.hist(zpsigselect, 50, range=[0, 1], density=True)
        with io.BytesIO() as fileobj:
            plt.savefig(fileobj, format='png')
            plt.close()
            # BUG FIX: the original mixed %-placeholders with str.format(),
            # producing the literal file name 'errorhist-%s-%s-%s.png'.
            filename = 'errorhist-{}-{}-{}.png'.format(select_site, select_telescope, select_filter)
            filenames.append(filename)
            write_to_storage_backend(context.imagedbPrefix, filename, fileobj.getvalue())

    # plot airmass vs zeropoint as a sanity check tool
    plt.figure()
    plt.plot(airmasselect, zpselect, ".", c="grey")
    plt.plot(airmasselect, zp_air, ".", c="blue")
    plt.xlabel("Airmass")
    plt.ylabel("Photomertic Zeropoint %s" % select_filter)
    plt.title("Global airmass trend and correction check")
    meanzp = np.nanmedian(zpselect)
    if math.isfinite(meanzp):
        plt.ylim([meanzp - 0.5, meanzp + 0.5])
    else:
        # all-NaN zeropoints: fall back to a generic plotting window
        plt.ylim([20, 26])
    with io.BytesIO() as fileobj:
        plt.savefig(fileobj, format='png')
        plt.close()
        filename = 'airmasstrend-{}-{}-{}.png'.format(select_site, select_telescope, select_filter)
        filenames.append(filename)
        write_to_storage_backend(context.imagedbPrefix, filename, fileobj.getvalue())

    # Color terms
    plt.figure()
    selection = selection & np.logical_not(np.isnan(data['colorterm']))
    selection = selection & (np.abs(data['colorterm']) < 0.3)
    selection_lonoise = selection & (data['zpsig'] < 0.2)
    selection_hinoise = selection & (data['zpsig'] >= 0.2)

    plt.plot(data['dateobs'][selection_hinoise], data['colorterm'][selection_hinoise],
             '.', markersize=2, c="grey", label="color term [ hi sigma] %s " % select_filter)

    colortermselect = data['colorterm'][selection_lonoise]
    dateselect = data['dateobs'][selection_lonoise]
    meancolorterm = np.median(colortermselect)
    plt.plot(dateselect, colortermselect, 'o', markersize=2, c="blue",
             label="color term [low sigma] %s " % select_filter)
    plt.axhline(y=meancolorterm, color='r', linestyle='-')
    _logger.info("Color term in filter %s : % 5.3f" % (select_filter, meancolorterm))

    # store the color terms in the module-level cache, keyed by filter then camera
    if select_filter not in colorterms:
        colorterms[select_filter] = {}
    colorterms[select_filter][instrument] = meancolorterm

    dateformat(mystarttime, endtime)
    plt.ylim([-0.2, 0.2])
    plt.title("Color term (g-r) %s:%s in %s" % (select_site, select_telescope, select_filter))
    plt.xlabel("DATE-OBS")
    plt.ylabel("Color term coefficient (g-r)")
    with io.BytesIO() as fileobj:
        plt.savefig(fileobj, format='png')
        plt.close()
        filename = 'colortermtrend-{}-{}-{}.png'.format(select_site, select_telescope, select_filter)
        filenames.append(filename)
        write_to_storage_backend(context.imagedbPrefix, filename, fileobj.getvalue())

    # thats it. Some day please refactor this into smaller chunks.
    return filenames
    # NOTE(review): dangling tail of a function whose definition begins before
    # this chunk (presumably parseCommandLine); left untouched.
    return args


def divide_chunks(l, n):
    """Yield successive slices of length n from sequence l (the last may be shorter)."""
    # looping till length l
    for i in range(0, len(l), n):
        yield l[i:i + n]


if __name__ == '__main__':
    # Ad-hoc copy tool: duplicate every PhotZPMeasurement record from one
    # database into another, inserting in bulk chunks of 1000.
    args = parseCommandLine()
    # NOTE(review): hardcoded resume index — chunks 0..434 are skipped.
    # Presumably a leftover from restarting an interrupted copy; confirm
    # before reusing (set to 0 for a full copy).
    start = 435
    print(f"Copy from {args.inputurl} -> {args.outputurl}")
    input = photdbinterface(args.inputurl)  # NOTE(review): shadows the builtin input()
    output = photdbinterface(args.outputurl)
    q = input.session.query(PhotZPMeasurement)
    print("Found {} records to copy.".format(q.count()))
    # Re-wrap each source entity so it can be inserted into the target session.
    newdata = [PhotZPMeasurement(e) for e in q.all()]
    print("Now doing bulk insert in chunks")
    chunks = list(divide_chunks(newdata, 1000))
    print(f'Divided data ino {len(chunks)} chunks')  # NOTE(review): typo "ino" -> "into" in message
    for ii in range(start, len(chunks)):
        print(f'Start write chunk {ii} at {datetime.datetime.utcnow()}')
        output.session.bulk_save_objects(chunks[ii])
        # Commit per chunk so an interrupted run can resume at a chunk boundary.
        output.session.commit()
        print(f'Done write chunk {ii} at {datetime.datetime.utcnow()}')