def Compare(filename): '''For a downloaded PAIRITEL quick reduced file, compare the location that the telescope thinks it is at to the actual location determined by astrometry.net. ''' # Load file header info try: header = pyfits.getheader(filename) except: print 'Cannot obtain header info. Exiting' return # Grab telescope pointing RA and Dec from header # in the quickreduced file - RA and DEC are overwritten by some # untrustworthy wcs fit attempt. Instead, take the RAS and DECS and # Convert by using q.sex2dec. Verified from raw file that this is the # same that the RA, DEC used to be try: point_ra_s = header['RAS'] point_dec_s = header['DECS'] except: print 'File does not contain RAS and DECS (pointing) keywords.' return point_pos = sex2dec((point_ra_s,point_dec_s)) point_ra = point_pos[0] point_dec = point_pos[1] try: anet_id = header['AN_JOBID'] except: print "Astrometry.net failed to solve the field. You're on your own." return # Obtain RA and Dec of center pixel? from astrometry.net info # Use xy2sky from wcstools. -d option outputs in decimal degrees. xy2sky_command = "xy2sky -n 6 -d %s 128 128" % (filename) try: (a,b,c) = os.popen3(xy2sky_command) except: print "Cannot run xy2sky. Make sure wcstools installed." return a.close() c.close() xy2sky_output=b.readlines() ref_radec = " ".join([x.split("J2000")[0] for x in xy2sky_output]) ref_xy = " ".join([x.split("J2000")[1] for x in xy2sky_output]) fit_ra = float(ref_radec.split(" ")[0]) fit_dec = float(ref_radec.split(" ")[1]) print "Telescope pointing: ", point_ra, point_dec print "Actual location: ", fit_ra, fit_dec dist_asec = sphere_dist(fit_ra,fit_dec,point_ra,point_dec) dist_amin = dist_asec/60.0 print "Distance: %f arcsecs" % (dist_asec) print "This is %f arcmin away; half the FOV is 4.25'." % (dist_amin)
def Compare(filename): '''For a downloaded PAIRITEL quick reduced file, compare the location that the telescope thinks it is at to the actual location determined by astrometry.net. ''' # Load file header info try: header = pyfits.getheader(filename) except: print 'Cannot obtain header info. Exiting' return # Grab telescope pointing RA and Dec from header # in the quickreduced file - RA and DEC are overwritten by some # untrustworthy wcs fit attempt. Instead, take the RAS and DECS and # Convert by using q.sex2dec. Verified from raw file that this is the # same that the RA, DEC used to be try: point_ra_s = header['RAS'] point_dec_s = header['DECS'] except: print 'File does not contain RAS and DECS (pointing) keywords.' return point_pos = sex2dec((point_ra_s, point_dec_s)) point_ra = point_pos[0] point_dec = point_pos[1] try: anet_id = header['AN_JOBID'] except: print "Astrometry.net failed to solve the field. You're on your own." return # Obtain RA and Dec of center pixel? from astrometry.net info # Use xy2sky from wcstools. -d option outputs in decimal degrees. xy2sky_command = "xy2sky -n 6 -d %s 128 128" % (filename) try: (a, b, c) = os.popen3(xy2sky_command) except: print "Cannot run xy2sky. Make sure wcstools installed." return a.close() c.close() xy2sky_output = b.readlines() ref_radec = " ".join([x.split("J2000")[0] for x in xy2sky_output]) ref_xy = " ".join([x.split("J2000")[1] for x in xy2sky_output]) fit_ra = float(ref_radec.split(" ")[0]) fit_dec = float(ref_radec.split(" ")[1]) print "Telescope pointing: ", point_ra, point_dec print "Actual location: ", fit_ra, fit_dec dist_asec = sphere_dist(fit_ra, fit_dec, point_ra, point_dec) dist_amin = dist_asec / 60.0 print "Distance: %f arcsecs" % (dist_asec) print "This is %f arcmin away; half the FOV is 4.25'." % (dist_amin)
def MakeFindingChart(ra=198.40130, dec=8.09730, uncertainty=1.8, src_name='GRB090313', pos_label='XRT', survey='dss2red', cont_str='Contact: Test', size=3.0, err_shape='cross', incl_scale=True, return_svg=False): '''if return_svg, actually return the svg text rather than saving it to file. Used for the online finding chart generator.''' fc = qImage() # define pixel scale from side size try: uncertainty = float(uncertainty) ra = float(ra) dec = float(dec) except: try: float_ra_dec = sex2dec((ra, dec)) uncertainty = float(uncertainty) ra = float(float_ra_dec[0]) dec = float(float_ra_dec[1]) except: raise ValueError('RA/Dec or uncertainty misformatted.') try: size = float(size) except: pass #assume we wanted auto if size: try: if size.upper() == 'AUTO' and uncertainty > 0: # Auto sets error circle size to ~10 pixels if not uncertainty < 1.0: side_size_arcmin = round(uncertainty / 10.0 / 60.0 * 600.0) else: side_size_arcmin = 1.0 else: print 'Invalid string for size; defaulting to 3 arcmin.' 
side_size_arcmin = 3.0 except (AttributeError): try: side_size_arcmin = float(size) except: print 'Invalid entry for size; defaulting to 3 arcmin' side_size_arcmin = 3.0 # There's a limit to how big an image you can request; set it here to 20' if side_size_arcmin > 20.0: side_size_arcmin = 20.0 side_size = side_size_arcmin * 60.0 # arcseconds img_size = 600 #pixels pixel_scale = side_size / img_size #arcsec/pixel if survey != 'sdss': fc.dss_grab(ra, dec, side_size_arcmin, survey) fc.invert_and_resize(img_size) else: fc.sdss_grab(ra, dec, pixel_scale, img_size) fc.invert_and_resize(img_size) if str(incl_scale).lower() == 'false' or str(incl_scale).lower() == 'no': suppress_scale = True else: suppress_scale = False if not return_svg: svgout = fc.overlay_finding_chart(ra, dec, uncertainty, src_name, pos_label, cont_str, err_shape, suppress_scale) outname = storepath + src_name + '_fc' outnamesvg = outname + '.svg' outnamepng = outname + '.png' f = open(outnamesvg, 'w') f.write(svgout) f.close() magickcommand = "convert %s %s" % (outnamesvg, outnamepng) try: os.system(magickcommand) except: print "Do not have Imagemagick, cannot convert svg to png" print storepath + outname + '*' outlist = glob.glob(outname + '*') return outlist if return_svg: return fc.overlay_finding_chart(ra, dec, uncertainty, src_name, pos_label, cont_str, err_shape, suppress_scale)
def _parse_psn_format(psn_string):
    '''Parse one fixed-width transient line in the CBAT TOCP "PSN" format.

    Column layout (1-indexed, per the CBAT unconfirmed-objects page):
      1-21   designation; the 3-letter prefix gives the type
             (PSN = (possible) supernova, PNV = (possible) nova,
              TCP = some other type of variable, or unknown).
      25-39  date in Universal Time, "Year Month Day.dd".
      40     note column (* = discovery observation; # or lower-case
             letters indicate follow-up observations).
      43-65  position (RA to 0s.01, Decl. to 0".1, equinox 2000.0).
      68-71  magnitude at the specified time; column 73 is the bandpass
             (U = unfiltered CCD, v = visual, V = CCD V-band, R = CCD
             R-band, etc.).
      75-79  offset from presumed host in RA, arcsec (max 9999), with
             column 79 the direction (E/W).
      80-84  offset from presumed host in Decl., arcsec (max 9999), with
             column 84 the direction (N/S).
      87-95  "Locale": presumed host galaxy (catalogue letter M/N/I/U/G/
             P/E plus digits) or the 3-letter IAU constellation
             abbreviation for a Milky-Way variable.
      97     discoverer key: a digit gives the number of previous
             CBAT-confirmed discoveries (9 = nine or more); a letter is
             a CBAT-assigned group code (see disc_dict below).
      99     arc key: number of separate nights with positive images
             (- = single image on a single night, 0 = multiple images on
              a single night, 1-8 = arc of that many days, 9 = nine or
              more days).

    Example line:
             1         2         3         4         5         6         7         8         9
    123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789
    PSN J15111485+4609115  2013 07 13.91 *  15 11 14.85 +46 09 11.5  17.8 U             15E   8N  U9761     9 1

    Returns a dict with both the raw substrings and the parsed values.
    '''
    prefix = psn_string[0:3]
    if prefix == 'PSN':
        obj_type = '(possible) supernova'
    elif prefix == 'PNV':
        obj_type = '(possible) nova'
    else:
        obj_type = 'unknown'
    designation = psn_string[4:21]

    # Date: "2013 07 13.91" -> Y/M/D part plus fractional day.
    date_string = psn_string[24:39]
    date_split = date_string.split('.')
    date_ymd = date_split[0]
    if len(date_split) == 2:
        # BUGFIX: build the fraction as "0.<digits>" -- the old
        # int(frac)/100. mis-scaled fractions not given to exactly two
        # digits (e.g. "13.9" parsed as 0.09 instead of 0.9).
        try:
            fraction_of_day = float('0.' + date_split[-1].strip())
        except ValueError:
            fraction_of_day = 0
    else:
        # BUGFIX: previously a date field without exactly one or zero
        # '.' characters left fraction_of_day undefined -> NameError.
        fraction_of_day = 0
    try:
        date_parsed = datetime.datetime.strptime(date_ymd, '%Y %m %d') + datetime.timedelta(fraction_of_day)
    except (ValueError, OverflowError):
        date_parsed = 'ParseERROR'

    ra = psn_string[42:54]
    dec = psn_string[54:65]
    ra_deg, dec_deg = sex2dec((ra, dec))

    mag = psn_string[67:71]
    filt = psn_string[72]
    if filt == 'U':
        filt = 'unfiltered'

    # Host-galaxy offsets: numeric value plus a direction letter.
    ra_offset_string = psn_string[74:79].strip()
    try:
        ra_offset_value = int(psn_string[74:78])
    except ValueError:
        ra_offset_value = 'Unknown'
    ra_offset_direction = psn_string[78]
    dec_offset_string = psn_string[79:84].strip()
    try:
        dec_offset_value = int(psn_string[79:83])
    except ValueError:
        dec_offset_value = 'Unknown'
    dec_offset_direction = psn_string[83]

    locale = psn_string[86:95]
    if locale.strip() == '':
        locale = 'UNKNOWN'

    discoverer_key = psn_string[96]
    # Letter codes assigned by CBAT to known discovery groups.
    disc_dict = {
        "B": "Tom Boles (Coddenham, England)",
        "C": "CHASE program (Cerro Tololo, Chile)",
        "D": "Catalina Real-time Transient Survey",
        "H": "Kamil Hornoch (Ondrejov Observatory, Czech Rep.)",
        "I": "Koichi Itagaki (Yamagata, Japan)",
        "J": "Brazilian Supernovae Search (Cristovao Jacques et al.)",
        "L": "Lick Observatory Supernova Search",
        "M": "Berto Monard (Pretoria, South Africa)",
        "N": "Koichi Nishiyama and Fujio Kabashima (Japan)",
        "P": "Tim Puckett's Supernova Search Program",
        "R": "Guoyuo Sun and Jiangao Ruan (China)",
        "S": "La Sagra Sky Survey (Spain)"
    }
    if discoverer_key in disc_dict:
        discoverer_string = disc_dict[discoverer_key]
    elif discoverer_key == '9':
        discoverer_string = 'Unknown observer with nine or more previous CBAT-confirmed discoveries'
    elif discoverer_key in '012345678':
        discoverer_string = 'Unknown observer with %s previous CBAT-confirmed discoveries' % (discoverer_key)
    else:
        discoverer_string = 'Unknown discoverer; cannot parse discoverer_key'

    arc_key = psn_string[98]
    if arc_key == '-':
        arc_string = 'A single image on a single night'
    elif arc_key == '0':
        arc_string = 'Multiple images on a single night'
    elif arc_key == '9':
        arc_string = 'An arc of nine or more days'
    elif arc_key in '12345678':
        arc_string = 'A %s day arc' % (arc_key)
    else:
        arc_string = 'Unknown arc; cannot parse arc_key'

    psn_dict = {
        'ra': ra,
        'dec': dec,
        'ra_deg': ra_deg,
        'dec_deg': dec_deg,
        'prefix': prefix,
        'obj_type': obj_type,
        'designation': designation,
        'date_string': date_string.strip(),
        'date_parsed': date_parsed,
        'mag': mag,
        'filter': filt,
        'locale': locale,
        'discoverer': discoverer_string,
        'arc': arc_string,
        'psn_string': psn_string,
        'dec_offset': dec_offset_string,
        'ra_offset': ra_offset_string
    }
    return psn_dict
def MakeFindingChart(ra=198.40130,dec=8.09730,uncertainty=1.8,src_name='GRB090313',pos_label='XRT',survey='dss2red',cont_str='Contact: Test', size=3.0,err_shape='cross',incl_scale=True,return_svg=False): '''if return_svg, actually return the svg text rather than saving it to file. Used for the online finding chart generator.''' fc = qImage() # define pixel scale from side size try: uncertainty = float(uncertainty) ra = float(ra) dec = float(dec) except: try: float_ra_dec = sex2dec((ra,dec)) uncertainty = float(uncertainty) ra=float(float_ra_dec[0]) dec=float(float_ra_dec[1]) except: raise ValueError('RA/Dec or uncertainty misformatted.') try: size = float(size) except: pass #assume we wanted auto if size: try: if size.upper() == 'AUTO' and uncertainty > 0: # Auto sets error circle size to ~10 pixels if not uncertainty < 1.0: side_size_arcmin = round(uncertainty/10.0/60.0*600.0) else: side_size_arcmin = 1.0 else: print 'Invalid string for size; defaulting to 3 arcmin.' side_size_arcmin = 3.0 except(AttributeError): try: side_size_arcmin = float(size) except: print 'Invalid entry for size; defaulting to 3 arcmin' side_size_arcmin = 3.0 # There's a limit to how big an image you can request; set it here to 20' if side_size_arcmin > 20.0: side_size_arcmin = 20.0 side_size = side_size_arcmin * 60.0 # arcseconds img_size = 600 #pixels pixel_scale = side_size/img_size #arcsec/pixel if survey != 'sdss': fc.dss_grab(ra,dec,side_size_arcmin,survey) fc.invert_and_resize(img_size) else: fc.sdss_grab(ra,dec,pixel_scale,img_size) fc.invert_and_resize(img_size) if str(incl_scale).lower()=='false' or str(incl_scale).lower()=='no': suppress_scale=True else: suppress_scale=False if not return_svg: svgout = fc.overlay_finding_chart(ra,dec,uncertainty,src_name,pos_label,cont_str,err_shape,suppress_scale) outname = storepath+src_name+'_fc' outnamesvg = outname+'.svg' outnamepng = outname+'.png' f=open(outnamesvg,'w') f.write(svgout) f.close() magickcommand = "convert %s %s" % 
(outnamesvg, outnamepng) try: os.system(magickcommand) except: print "Do not have Imagemagick, cannot convert svg to png" print storepath+outname+'*' outlist = glob.glob(outname+'*') return outlist if return_svg: return fc.overlay_finding_chart(ra,dec,uncertainty,src_name,pos_label,cont_str,err_shape,suppress_scale)
def parseswiftcat(swiftcat=loadpath + 'grb_table_current.txt'):
    '''Parse the tab-delimited Swift GRB table into a dict of dicts.

    Each GRB name maps to a sub-dict holding the raw ``*_str`` columns
    plus, where parseable, converted values (xrt_pos tuple, float time
    deltas, t90, fluence, peakflux, xrt_column, magnitude upper-limit
    flags, redshift and its flags).  Every field that cannot be parsed
    is reported on stdout.  Returns the dict.
    '''
    # Read a tab delimited file
    print "Opening %s" % swiftcat
    bork = csv.reader(open(swiftcat), delimiter='\t')
    borklist = []
    for row in bork:
        borklist.append(row)
    # This creates a list of all the GRBs
    # Check that there are the right number of entries in each list:
    # for entries in bork:
    #     print len(entries)  # all should come out as the same number
    # Format:
    # GRB, T90, Fluence, PeakFlux, XRT_RA, XRT_Dec, XRT_Column, UVOT V Mag,
    # UVOT Other Mags, Redshift
    grbdict = {}
    # Now go through the list of objects read in and put them into a dictionary.
    # Rows whose first cell starts with 'GRB' are table-header rows and are
    # skipped silently; data rows shorter than 30 cells are reported.
    for grbs in borklist:
        if len(grbs) >= 30 and grbs[0][0:3] != 'GRB':
            subdict = {grbs[0]: {'burst_time_str': grbs[1],
                                 'triggerid_str': grbs[2],
                                 't90_str': grbs[6],
                                 'fluence_str': grbs[7],
                                 'peakflux_str': grbs[9],
                                 'xrt_ra_str': grbs[13],
                                 'xrt_dec_str': grbs[14],
                                 'xrt_time_delta_str': grbs[16],
                                 'xrt_column_str': grbs[21],
                                 'uvot_time_delta_str': grbs[25],
                                 'v_mag_str': grbs[26],
                                 'uvot_list': grbs[27],
                                 'z_str': grbs[29]}}
            grbdict.update(subdict)
        elif grbs[0][0:3] != 'GRB':
            print "line length is not what is expected for line:"
            print grbs
            print "Expected length of 34 (at least 31), got %i. Not including" % len(grbs)
        else:
            pass
    # Update the dictonary to parse the crap and make it better
    for entry in grbdict.keys():
        print entry
        # z_str must exist for every row built above; a miss indicates a
        # malformed dict, which is fatal.
        try:
            z_str = grbdict[entry]['z_str']
        except:
            print grbdict[entry]
            sys.exit(1)
        # Make XRT RA, Dec into decimal degree tuple. This will create a tuple
        # keyword called 'xrt_pos' which is in decimal degrees.
        sex_pos_tup = (grbdict[entry]['xrt_ra_str'], grbdict[entry]['xrt_dec_str'])
        if sex_pos_tup[0] != 'TBD' and sex_pos_tup[0] != 'n/a':
            xrt_pos = {'xrt_pos': sex2dec(sex_pos_tup)}
            grbdict[entry].update(xrt_pos)
        else:
            print 'COULD NOT PARSE XRT_POS for entry %s' % entry
        # TODO: Convert into a distance above the galactic plane
        # Convert time_deltas to a float
        try:
            # Try to convert uvot_time_delta to a float
            uvot_time_delta = {'uvot_time_delta': float(grbdict[entry]['uvot_time_delta_str'])}
            grbdict[entry].update(uvot_time_delta)
        except:
            print 'COULD NOT PARSE UVOT_TIME_DELTA for entry %s' % entry
        try:
            # Try to convert xrt_time_delta to a float
            xrt_time_delta = {'xrt_time_delta': float(grbdict[entry]['xrt_time_delta_str'])}
            grbdict[entry].update(xrt_time_delta)
        except:
            print 'COULD NOT PARSE XRT_TIME_DELTA for entry %s' % entry
        # Convert T90 to float
        try:
            # Try to convert t90 to a float
            t90 = {'t90': float(grbdict[entry]['t90_str'])}
            grbdict[entry].update(t90)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['t90_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE t90 TO ABSOLUTE for %s' % entry
                if grbdict[entry]['t90_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT t90 TO ABSOLUTE for %s' % entry
                t90 = {'t90': float(grbdict[entry]['t90_str'][1:])}
                grbdict[entry].update(t90)
            except:
                print 'COULD NOT PARSE T90 for entry %s' % entry
        # Convert fluence to a float
        try:
            # Try to convert fluence to a float
            fluence = {'fluence': float(grbdict[entry]['fluence_str'])}
            grbdict[entry].update(fluence)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['fluence_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE fluence TO ABSOLUTE for %s' % entry
                if grbdict[entry]['fluence_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT fluence TO ABSOLUTE for %s' % entry
                fluence = {'fluence': float(grbdict[entry]['fluence_str'][1:])}
                grbdict[entry].update(fluence)
            except:
                print 'COULD NOT PARSE fluence for entry %s' % entry
        # Convert peakflux to a float
        try:
            # Try to convert peakflux to a float
            peakflux = {'peakflux': float(grbdict[entry]['peakflux_str'])}
            grbdict[entry].update(peakflux)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['peakflux_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE peakflux TO ABSOLUTE for %s' % entry
                if grbdict[entry]['peakflux_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT peakflux TO ABSOLUTE for %s' % entry
                peakflux = {'peakflux': float(grbdict[entry]['peakflux_str'][1:])}
                grbdict[entry].update(peakflux)
            except:
                print 'COULD NOT PARSE peakflux for entry %s' % entry
        # Convert xrt_column to a float
        try:
            # Try to convert xrt_column to a float
            xrt_column = {'xrt_column': float(grbdict[entry]['xrt_column_str'])}
            grbdict[entry].update(xrt_column)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or < and try again
                if grbdict[entry]['xrt_column_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE xrt_column TO ABSOLUTE for %s' % entry
                if grbdict[entry]['xrt_column_str'][0] == '<':
                    print 'CONVERTING UPPER LIMIT xrt_column TO ABSOLUTE for %s' % entry
                xrt_column = {'xrt_column': float(grbdict[entry]['xrt_column_str'][1:])}
                grbdict[entry].update(xrt_column)
            except:
                print 'COULD NOT PARSE xrt_column for entry %s' % entry
        # Convert UVOT_V_Mag to float & check if its upper limit
        # *** Maybe include time to observation, and if not within a certain
        # time, then don't include it in the training set. ***
        # NOTE(review): assumes v_mag_str is non-empty; an empty string would
        # raise IndexError here -- confirm the table always fills this column.
        if grbdict[entry]['v_mag_str'][0] == 'V':
            if grbdict[entry]['v_mag_str'][1] == '>':
                v_mag_isupper = {'v_mag_isupper': 'yes'}
                grbdict[entry].update(v_mag_isupper)
            elif grbdict[entry]['v_mag_str'][1] == '=':
                v_mag_isupper = {'v_mag_isupper': 'no'}
                grbdict[entry].update(v_mag_isupper)
            elif grbdict[entry]['v_mag_str'][1] == '<':
                v_mag_isupper = {'v_mag_isupper': 'no'}
                grbdict[entry].update(v_mag_isupper)
                print 'We have a LOWER limit for v_mag for grb %s' % entry
            else:
                print 'COULD NOT PARSE v_mag_str for entry %s. Starts with V though.' % entry
        else:
            print 'COULD NOT PARSE v_mag_str for entry %s' % entry
        # Convert Other UVOT magnitudes to a list and check if upper limit
        # *** Maybe include time to observation, and if not within a certain
        # time, then don't include it in the training set. ***
        uvot_split = grbdict[entry]['uvot_list'].split('|')
        for uvot_ent in uvot_split:  # Loop through each entry
            # Looks like someone typoed in the catalog and put in UWM2
            # instead of UVM2 for 3 entries, hence the UW exclusion below.
            if uvot_ent.find('U') != -1 and uvot_ent.find('UV') == -1 and uvot_ent.find('UW') == -1:
                if uvot_ent.find('U>') != -1:
                    u_mag_isupper = {'u_mag_isupper': 'yes'}
                    grbdict[entry].update(u_mag_isupper)
                elif uvot_ent.find('U=') != -1:
                    u_mag_isupper = {'u_mag_isupper': 'no'}
                    grbdict[entry].update(u_mag_isupper)
                elif uvot_ent.find('U<') != -1:
                    u_mag_isupper = {'u_mag_isupper': 'no'}
                    grbdict[entry].update(u_mag_isupper)
                    print 'We have a LOWER limit for u_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE u_mag for entry %s. Starts with U though.' % entry
            if uvot_ent.find('B') != -1 and uvot_ent.find('TBD') == -1:
                if uvot_ent.find('B>') != -1:
                    b_mag_isupper = {'b_mag_isupper': 'yes'}
                    grbdict[entry].update(b_mag_isupper)
                elif uvot_ent.find('B=') != -1:
                    b_mag_isupper = {'b_mag_isupper': 'no'}
                    grbdict[entry].update(b_mag_isupper)
                elif uvot_ent.find('B<') != -1:
                    b_mag_isupper = {'b_mag_isupper': 'no'}
                    grbdict[entry].update(b_mag_isupper)
                    print 'We have a LOWER limit for b_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE b_mag for entry %s. Starts with B though.' % entry
            if uvot_ent.find('W1') != -1:
                if uvot_ent.find('W1>') != -1:
                    w1_mag_isupper = {'w1_mag_isupper': 'yes'}
                    grbdict[entry].update(w1_mag_isupper)
                elif uvot_ent.find('W1=') != -1:
                    w1_mag_isupper = {'w1_mag_isupper': 'no'}
                    grbdict[entry].update(w1_mag_isupper)
                elif uvot_ent.find('W1<') != -1:
                    w1_mag_isupper = {'w1_mag_isupper': 'no'}
                    grbdict[entry].update(w1_mag_isupper)
                    print 'We have a LOWER limit for w1_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE w1_mag for entry %s. Starts with W1 though.' % entry
            if uvot_ent.find('W2') != -1:
                if uvot_ent.find('W2>') != -1:
                    w2_mag_isupper = {'w2_mag_isupper': 'yes'}
                    grbdict[entry].update(w2_mag_isupper)
                elif uvot_ent.find('W2=') != -1:
                    w2_mag_isupper = {'w2_mag_isupper': 'no'}
                    grbdict[entry].update(w2_mag_isupper)
                elif uvot_ent.find('W2<') != -1:
                    w2_mag_isupper = {'w2_mag_isupper': 'no'}
                    grbdict[entry].update(w2_mag_isupper)
                    print 'We have a LOWER limit for w2_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE w2_mag for entry %s. Starts with W2 though.' % entry
            # Note 3 typos in catalog: UWM2 instead of UVM2.
            # Ignore this by just searching for M2
            if uvot_ent.find('M2') != -1:
                if uvot_ent.find('M2>') != -1:
                    m2_mag_isupper = {'m2_mag_isupper': 'yes'}
                    grbdict[entry].update(m2_mag_isupper)
                elif uvot_ent.find('M2=') != -1:
                    m2_mag_isupper = {'m2_mag_isupper': 'no'}
                    grbdict[entry].update(m2_mag_isupper)
                # One instance of typo: 'UVM2 =' instead of 'UVM2='
                elif uvot_ent.find('M2 =') != -1:
                    m2_mag_isupper = {'m2_mag_isupper': 'no'}
                    grbdict[entry].update(m2_mag_isupper)
                elif uvot_ent.find('M2<') != -1:
                    m2_mag_isupper = {'m2_mag_isupper': 'no'}
                    grbdict[entry].update(m2_mag_isupper)
                    print 'We have a LOWER limit for m2_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE m2_mag for entry %s. Starts with M2 though.' % entry
            if uvot_ent.find('White') != -1:
                if uvot_ent.find('White>') != -1:
                    wh_mag_isupper = {'wh_mag_isupper': 'yes'}
                    grbdict[entry].update(wh_mag_isupper)
                elif uvot_ent.find('White=') != -1:
                    wh_mag_isupper = {'wh_mag_isupper': 'no'}
                    grbdict[entry].update(wh_mag_isupper)
                # There was one instance of typo: + instead of =
                elif uvot_ent.find('White+') != -1:
                    wh_mag_isupper = {'wh_mag_isupper': 'no'}
                    grbdict[entry].update(wh_mag_isupper)
                elif uvot_ent.find('White<') != -1:
                    wh_mag_isupper = {'wh_mag_isupper': 'no'}
                    grbdict[entry].update(wh_mag_isupper)
                    print 'We have a LOWER limit for wh_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE wh_mag for entry %s. Starts with White though.' % entry
        # Convert z to float and check if it is photometric
        # First we see if there's an absolute redshift available that's not
        # upper limit or approximate. Preferentially use abs or emis over photo.
        # First split up via the | parser that the table gives for different z entries
        # Make sure not a blank z_str
        if z_str != '':
            z_split = z_str.split('|')
            for z_ent in z_split:
                # if it's a photometric redshift and there's already a redshift, don't update
                if z_ent.find('photometric') != -1 and grbdict[entry].has_key('z'):
                    continue
                else:
                    # Split further into a list
                    z_split_split = z_ent.split(' ')
                    for z_ent_ent in z_split_split:
                        # If there is a number in the sub string, assume it is the z
                        try:
                            z = {'z': float(z_ent_ent)}
                            grbdict[entry].update(z)
                            # If the redshift is photometric, mark it as such.
                            if z_ent.find('photometric') == -1:
                                z_isphot = {'z_isphot': 'no'}
                            else:
                                z_isphot = {'z_isphot': 'yes'}
                            grbdict[entry].update(z_isphot)
                            z_isupper = {'z_isupper': 'no'}
                            grbdict[entry].update(z_isupper)
                        except:
                            pass
                    # If that didn't find us a redshift, try again accepting ~,>,<
                    if not grbdict[entry].has_key('z'):
                        z_split_split = z_ent.split(' ')
                        for z_ent_ent in z_split_split:
                            if z_ent_ent == '':
                                # entry has no length; continue
                                print 'z_ent_ent has no length; continuing'
                                continue
                            # iii indexes past an optional leading 'Z'/'z'.
                            iii = 0
                            if z_ent_ent[0] == 'Z' or z_ent_ent[0] == 'z':
                                iii = 1
                            if z_ent_ent[iii] == '~':
                                print 'CONVERTING APPROXIMATE redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper': 'no'}
                            if z_ent_ent[iii] == '<':
                                print 'CONVERTING UPPER LIMIT redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper': 'yes'}
                            if z_ent_ent[iii] == '>':
                                print 'CONVERTING LOWER LIMIT redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper': 'islower'}
                            # If the redshift is photometric, mark it as such
                            if z_ent.find('photometric') == -1:
                                z_isphot = {'z_isphot': 'no'}
                            if z_ent.find('photometric') != -1:
                                z_isphot = {'z_isphot': 'yes'}
                            # NOTE(review): if the token starts with a bare
                            # digit, z_isupper may be unbound here; the bare
                            # except below silently absorbs that NameError.
                            try:
                                iii += 1
                                z = {'z': float(z_ent_ent[iii:])}
                                grbdict[entry].update(z)
                                grbdict[entry].update(z_isphot)
                                grbdict[entry].update(z_isupper)
                            except:
                                # cannot do anything..
                                pass
        # Manually insert for the ultra-high-z 090423
        if entry == '090423':
            grbdict[entry]['z'] = 8.2
            grbdict[entry]['z_isphot'] = 0
            grbdict[entry]['z_isupper'] = 0
        # Now assign the appropriate z_class
        if grbdict[entry].has_key('z'):
            if grbdict[entry]['z'] > 0:
                z_class = {'z_class': 'unknown'}
                # if grbdict[entry]['z'] > 2:  # commented out; moved class assignment to CollectGRBInfo
                #     z_class = {'z_class':'medium_z'}
                # if grbdict[entry]['z'] > 4:
                #     z_class = {'z_class':'high_z'}
                grbdict[entry].update(z_class)
        else:
            if not grbdict[entry].has_key('z_class'):
                print '**COULD NOT PARSE REDSHIFT for entry %s** This is bad' % entry
    print '**Finished reading in Swift Catalog**'
    return grbdict
def parseswiftcat(swiftcat=loadpath+'grb_table_current.txt'):
    '''Parse the tab-delimited Swift GRB catalog file into a dictionary.

    Reads the catalog at `swiftcat`, keeps every data row with at least 30
    tab-separated columns, and returns a dict keyed by GRB name. Each value
    is a dict of raw string columns ('*_str' keys) plus, where parseable,
    typed versions: 'xrt_pos' (decimal-degree tuple), floats for
    't90'/'fluence'/'peakflux'/'xrt_column'/'uvot_time_delta'/
    'xrt_time_delta', per-band '*_mag_isupper' flags ('yes'/'no'), and
    redshift fields 'z' (float), 'z_isphot', 'z_isupper', 'z_class'.

    Unparseable fields are reported on stdout and simply left absent, so
    callers must treat every derived key as optional.

    NOTE(review): Python 2 code (print statements, dict.has_key). The bare
    `except:` clauses are deliberate best-effort parsing, not bugs to fix
    blindly — a failed conversion skips that field only. Exception: the
    missing-'z_str' case calls sys.exit(1), which kills the whole process.
    '''
    # Read a tab delimited file
    print "Opening %s" % swiftcat
    bork=csv.reader(open(swiftcat),delimiter='\t')
    borklist=[]
    for row in bork:
        borklist.append(row)
    # This creates a list of all the GRBs.
    # Check that there are the right number of entries in each list:
    #   for entries in bork: print len(entries)
    # all should come out as the same number.
    # Format:
    # GRB, T90, Fluence, PeakFlux, XRT_RA, XRT_Dec, XRT_Column, UVOT V Mag,
    # UVOT Other Mags, Redshift
    grbdict = {}
    # Now go through the list of objects read in and put them into a dictionary.
    # Header/continuation rows are skipped by the "starts with 'GRB'" test;
    # column indices below are fixed positions in the Swift table layout.
    for grbs in borklist:
        if len(grbs) >= 30 and grbs[0][0:3] != 'GRB':
            subdict={grbs[0]:{'burst_time_str':grbs[1],'triggerid_str':grbs[2],'t90_str':grbs[6],'fluence_str':grbs[7], 'peakflux_str':grbs[9], \
                     'xrt_ra_str':grbs[13], 'xrt_dec_str':grbs[14], 'xrt_time_delta_str':grbs[16], 'xrt_column_str':grbs[21], \
                     'uvot_time_delta_str':grbs[25], 'v_mag_str':grbs[26], 'uvot_list':grbs[27], 'z_str':grbs[29]}}
            grbdict.update(subdict)
        elif grbs[0][0:3] != 'GRB':
            # NOTE(review): message says "at least 31" but the test above
            # accepts >= 30 — confirm which bound is intended.
            print "line length is not what is expected for line:"
            print grbs
            print "Expected length of 34 (at least 31), got %i. Not including" % len(grbs)
        else:
            pass
    # Update the dictonary to parse the crap and make it better
    for entry in grbdict.keys():
        print entry
        try:
            z_str = grbdict[entry]['z_str']
        except:
            # Every accepted row sets 'z_str', so this indicates a corrupt
            # catalog; dump the entry and abort the whole run.
            print grbdict[entry]
            sys.exit(1)
        # Make XRT RA, Dec into decimal degree tuple. This will create a tuple
        # keyword called 'xrt_pos' which is in decimal degrees.
        sex_pos_tup = (grbdict[entry]['xrt_ra_str'],grbdict[entry]['xrt_dec_str'])
        # 'TBD'/'n/a' are the catalog's placeholders for a missing position.
        if sex_pos_tup[0] != 'TBD' and sex_pos_tup[0] != 'n/a':
            xrt_pos = {'xrt_pos':sex2dec(sex_pos_tup)}
            grbdict[entry].update(xrt_pos)
        else:
            print 'COULD NOT PARSE XRT_POS for entry %s' % entry
        # TODO: Convert into a distance above the galactic plane
        # Convert time_deltas to a float
        try:
            # Try to convert uvot_time_delta to a float
            uvot_time_delta = {'uvot_time_delta':float(grbdict[entry]['uvot_time_delta_str'])}
            grbdict[entry].update(uvot_time_delta)
        except:
            print 'COULD NOT PARSE UVOT_TIME_DELTA for entry %s' % entry
        try:
            # Try to convert xrt_time_delta to a float
            xrt_time_delta = {'xrt_time_delta':float(grbdict[entry]['xrt_time_delta_str'])}
            grbdict[entry].update(xrt_time_delta)
        except:
            print 'COULD NOT PARSE XRT_TIME_DELTA for entry %s' % entry
        # Convert T90 to float
        try:
            # Try to convert t90 to a float
            t90 = {'t90':float(grbdict[entry]['t90_str'])}
            grbdict[entry].update(t90)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['t90_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE t90 TO ABSOLUTE for %s' % entry
                if grbdict[entry]['t90_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT t90 TO ABSOLUTE for %s' % entry
                t90 = {'t90':float(grbdict[entry]['t90_str'][1:])}
                grbdict[entry].update(t90)
            except:
                print 'COULD NOT PARSE T90 for entry %s' % entry
        # Convert fluence to a float
        try:
            # Try to convert fluence to a float
            fluence = {'fluence':float(grbdict[entry]['fluence_str'])}
            grbdict[entry].update(fluence)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['fluence_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE fluence TO ABSOLUTE for %s' % entry
                if grbdict[entry]['fluence_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT fluence TO ABSOLUTE for %s' % entry
                fluence = {'fluence':float(grbdict[entry]['fluence_str'][1:])}
                grbdict[entry].update(fluence)
            except:
                print 'COULD NOT PARSE fluence for entry %s' % entry
        # Convert peakflux to a float
        try:
            # Try to convert peakflux to a float
            peakflux = {'peakflux':float(grbdict[entry]['peakflux_str'])}
            grbdict[entry].update(peakflux)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or > and try again
                if grbdict[entry]['peakflux_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE peakflux TO ABSOLUTE for %s' % entry
                if grbdict[entry]['peakflux_str'][0] == '>':
                    print 'CONVERTING LOWER LIMIT peakflux TO ABSOLUTE for %s' % entry
                peakflux = {'peakflux':float(grbdict[entry]['peakflux_str'][1:])}
                grbdict[entry].update(peakflux)
            except:
                print 'COULD NOT PARSE peakflux for entry %s' % entry
        # Convert xrt_column to a float
        try:
            # Try to convert xrt_column to a float
            xrt_column = {'xrt_column':float(grbdict[entry]['xrt_column_str'])}
            grbdict[entry].update(xrt_column)
        except:
            try:
                # If that didn't work, assume that it starts with a ~ or < and try again
                if grbdict[entry]['xrt_column_str'][0] == '~':
                    print 'CONVERTING APPROXIMATE xrt_column TO ABSOLUTE for %s' % entry
                if grbdict[entry]['xrt_column_str'][0] == '<':
                    print 'CONVERTING UPPER LIMIT xrt_column TO ABSOLUTE for %s' % entry
                xrt_column = {'xrt_column':float(grbdict[entry]['xrt_column_str'][1:])}
                grbdict[entry].update(xrt_column)
            except:
                print 'COULD NOT PARSE xrt_column for entry %s' % entry
        # Convert UVOT_V_Mag to float & check if its upper limit
        # *** Maybe include time to observation, and if not within a certain
        # time, then don't include it in the training set. ***
        # NOTE(review): despite the comment, only the limit flag is stored
        # here — the magnitude value itself is never converted to a float.
        if grbdict[entry]['v_mag_str'][0] == 'V':
            if grbdict[entry]['v_mag_str'][1] == '>':
                v_mag_isupper = {'v_mag_isupper':'yes'}
                grbdict[entry].update(v_mag_isupper)
            elif grbdict[entry]['v_mag_str'][1] == '=':
                v_mag_isupper = {'v_mag_isupper':'no'}
                grbdict[entry].update(v_mag_isupper)
            elif grbdict[entry]['v_mag_str'][1] == '<':
                v_mag_isupper = {'v_mag_isupper':'no'}
                grbdict[entry].update(v_mag_isupper)
                print 'We have a LOWER limit for v_mag for grb %s' % entry
            else:
                print 'COULD NOT PARSE v_mag_str for entry %s. Starts with V though.' % entry
        else:
            print 'COULD NOT PARSE v_mag_str for entry %s' % entry
        # Convert Other UVOT magnitudes to a list and check if upper limit
        # *** Maybe include time to observation, and if not within a certain
        # time, then don't include it in the training set. ***
        uvot_split = grbdict[entry]['uvot_list'].split('|')
        for uvot_ent in uvot_split:
            # Loop through each entry
            # Looks like someone typoed in the catalog and put in UWM2 instead of UVM2 for 3 entries
            if uvot_ent.find('U') != -1 and uvot_ent.find('UV') == -1 and uvot_ent.find('UW') == -1:
                if uvot_ent.find('U>') != -1:
                    u_mag_isupper = {'u_mag_isupper':'yes'}
                    grbdict[entry].update(u_mag_isupper)
                elif uvot_ent.find('U=') != -1:
                    u_mag_isupper = {'u_mag_isupper':'no'}
                    grbdict[entry].update(u_mag_isupper)
                elif uvot_ent.find('U<') != -1:
                    u_mag_isupper = {'u_mag_isupper':'no'}
                    grbdict[entry].update(u_mag_isupper)
                    print 'We have a LOWER limit for u_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE u_mag for entry %s. Starts with U though.' % entry
            if uvot_ent.find('B') != -1 and uvot_ent.find('TBD') == -1:
                if uvot_ent.find('B>') != -1:
                    b_mag_isupper = {'b_mag_isupper':'yes'}
                    grbdict[entry].update(b_mag_isupper)
                elif uvot_ent.find('B=') != -1:
                    b_mag_isupper = {'b_mag_isupper':'no'}
                    grbdict[entry].update(b_mag_isupper)
                elif uvot_ent.find('B<') != -1:
                    b_mag_isupper = {'b_mag_isupper':'no'}
                    grbdict[entry].update(b_mag_isupper)
                    print 'We have a LOWER limit for b_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE b_mag for entry %s. Starts with B though.' % entry
            if uvot_ent.find('W1') != -1:
                if uvot_ent.find('W1>') != -1:
                    w1_mag_isupper = {'w1_mag_isupper':'yes'}
                    grbdict[entry].update(w1_mag_isupper)
                elif uvot_ent.find('W1=') != -1:
                    w1_mag_isupper = {'w1_mag_isupper':'no'}
                    grbdict[entry].update(w1_mag_isupper)
                elif uvot_ent.find('W1<') != -1:
                    w1_mag_isupper = {'w1_mag_isupper':'no'}
                    grbdict[entry].update(w1_mag_isupper)
                    print 'We have a LOWER limit for w1_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE w1_mag for entry %s. Starts with W1 though.' % entry
            if uvot_ent.find('W2') != -1:
                if uvot_ent.find('W2>') != -1:
                    w2_mag_isupper = {'w2_mag_isupper':'yes'}
                    grbdict[entry].update(w2_mag_isupper)
                elif uvot_ent.find('W2=') != -1:
                    w2_mag_isupper = {'w2_mag_isupper':'no'}
                    grbdict[entry].update(w2_mag_isupper)
                elif uvot_ent.find('W2<') != -1:
                    w2_mag_isupper = {'w2_mag_isupper':'no'}
                    grbdict[entry].update(w2_mag_isupper)
                    print 'We have a LOWER limit for w2_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE w2_mag for entry %s. Starts with W2 though.' % entry
            # Note 3 typos in catalog: UWM2 instead of UVM2.
            # Ignore this by just searching for M2
            if uvot_ent.find('M2') != -1:
                if uvot_ent.find('M2>') != -1:
                    m2_mag_isupper = {'m2_mag_isupper':'yes'}
                    grbdict[entry].update(m2_mag_isupper)
                elif uvot_ent.find('M2=') != -1:
                    m2_mag_isupper = {'m2_mag_isupper':'no'}
                    grbdict[entry].update(m2_mag_isupper)
                # One instance of typo: 'UVM2 =' instead of 'UVM2='
                elif uvot_ent.find('M2 =') != -1:
                    m2_mag_isupper = {'m2_mag_isupper':'no'}
                    grbdict[entry].update(m2_mag_isupper)
                elif uvot_ent.find('M2<') != -1:
                    m2_mag_isupper = {'m2_mag_isupper':'no'}
                    grbdict[entry].update(m2_mag_isupper)
                    print 'We have a LOWER limit for m2_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE m2_mag for entry %s. Starts with M2 though.' % entry
            if uvot_ent.find('White') != -1:
                if uvot_ent.find('White>') != -1:
                    wh_mag_isupper = {'wh_mag_isupper':'yes'}
                    grbdict[entry].update(wh_mag_isupper)
                elif uvot_ent.find('White=') != -1:
                    wh_mag_isupper = {'wh_mag_isupper':'no'}
                    grbdict[entry].update(wh_mag_isupper)
                # There was one instance of typo: + instead of =
                elif uvot_ent.find('White+') != -1:
                    wh_mag_isupper = {'wh_mag_isupper':'no'}
                    grbdict[entry].update(wh_mag_isupper)
                elif uvot_ent.find('White<') != -1:
                    wh_mag_isupper = {'wh_mag_isupper':'no'}
                    grbdict[entry].update(wh_mag_isupper)
                    print 'We have a LOWER limit for wh_mag for grb %s' % entry
                else:
                    print 'COULD NOT PARSE wh_mag for entry %s. Starts with White though.' % entry
        # Convert z to float and check if it is photometric
        # First we see if there's an absolute redshift available that's not
        # upper limit or approximate. Preferentially use abs or emis over photo.
        # First split up via the | parser that the table gives for different z entries
        # Make sure not a blank z_str
        if z_str != '':
            z_split = z_str.split('|')
            for z_ent in z_split:
                # if it's a photometric redshift and there's already a redshift, don't update
                if z_ent.find('photometric') != -1 and grbdict[entry].has_key('z'):
                    continue
                else:
                    # Split further into a list
                    z_split_split = z_ent.split(' ')
                    for z_ent_ent in z_split_split:
                        # If there is a number in the sub string, assume it is the z
                        try:
                            z={'z':float(z_ent_ent)}
                            grbdict[entry].update(z)
                            # If the redshift is photometric, mark it as such.
                            if z_ent.find('photometric') == -1:
                                z_isphot = {'z_isphot':'no'}
                            else:
                                z_isphot = {'z_isphot':'yes'}
                            grbdict[entry].update(z_isphot)
                            z_isupper = {'z_isupper':'no'}
                            grbdict[entry].update(z_isupper)
                        except:
                            pass
                    # If that didn't find us a redshift, try again accepting ~,>,<
                    if not grbdict[entry].has_key('z'):
                        z_split_split = z_ent.split(' ')
                        for z_ent_ent in z_split_split:
                            if z_ent_ent == '':
                                # entry has no length; continue
                                print 'z_ent_ent has no length; continuing'
                                continue
                            # iii points at the char to test for ~/</>; skip
                            # a leading 'z'/'Z' (e.g. 'z~1.5') if present.
                            iii = 0
                            if z_ent_ent[0] == 'Z' or z_ent_ent[0] == 'z':
                                iii = 1
                            if z_ent_ent[iii] == '~':
                                print 'CONVERTING APPROXIMATE redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper':'no'}
                            if z_ent_ent[iii] == '<':
                                print 'CONVERTING UPPER LIMIT redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper':'yes'}
                            if z_ent_ent[iii] == '>':
                                print 'CONVERTING LOWER LIMIT redshift TO ABSOLUTE for %s' % entry
                                z_isupper = {'z_isupper':'islower'}
                            # If the redshift is photometric, mark it as such
                            if z_ent.find('photometric') == -1:
                                z_isphot = {'z_isphot':'no'}
                            if z_ent.find('photometric') != -1:
                                z_isphot = {'z_isphot':'yes'}
                            try:
                                # Skip past the ~/</> marker and parse the rest.
                                iii += 1
                                z = {'z':float(z_ent_ent[iii:])}
                                grbdict[entry].update(z)
                                grbdict[entry].update(z_isphot)
                                grbdict[entry].update(z_isupper)
                            except:
                                # cannot do anything..
                                pass
        # Manually insert for the ultra-high-z 090423
        # NOTE(review): flags set to integer 0 here, unlike the 'yes'/'no'
        # strings used everywhere else — downstream code must tolerate both.
        if entry == '090423':
            grbdict[entry]['z'] = 8.2
            grbdict[entry]['z_isphot'] = 0
            grbdict[entry]['z_isupper'] = 0
        # Now assign the appropriate z_class
        if grbdict[entry].has_key('z'):
            if grbdict[entry]['z'] > 0:
                z_class = {'z_class':'unknown'}
            # if grbdict[entry]['z'] > 2:  # commented out; moved class assignment to CollectGRBInfo
            #     z_class = {'z_class':'medium_z'}
            # if grbdict[entry]['z'] > 4:
            #     z_class = {'z_class':'high_z'}
            grbdict[entry].update(z_class)
        else:
            if not grbdict[entry].has_key('z_class'):
                print '**COULD NOT PARSE REDSHIFT for entry %s** This is bad' % entry
    print '**Finished reading in Swift Catalog**'
    return grbdict
def _parse_psn_format(psn_string):
    '''Parse one fixed-width CBAT transient-report line into a dictionary.

    Column layout (1-indexed, per the CBAT "Transient Object Followup
    Reports" format):
      1-21   Designation; cols 1-3 give the type: PSN = (possible)
             supernova, PNV = (possible) nova, TCP = other/unknown.
      25-39  UT date as "Year Month Day[.dd]".
      40     Note column (* = discovery observation; #/a/b/... = follow-up).
      43-65  Position (RA and Decl., equinox 2000.0).
      68-71  Magnitude at the given time; col 73 is the bandpass
             (U = unfiltered CCD, v = visual, V/R = CCD V/R-band, ...).
      75-79  RA offset from presumed host, arcsec; col 79 = E/W direction.
      80-84  Decl. offset from presumed host, arcsec; col 84 = N/S direction.
      87-95  Locale: presumed host galaxy (M/N/I/U/G/P/E + digits) or the
             3-letter IAU constellation for Milky-Way variables.
      97     Discoverer: digit 0-9 = count of previous CBAT-confirmed
             discoveries, or a letter code for a survey/observer group.
      99     Arc: '-' = single image, '0' = multiple images one night,
             '1'-'8' = arc of that many days, '9' = nine or more.

             1         2         3         4         5         6         7         8         9
    123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789
    Object Designation      Date (UT)      R.A. (2000.0) Decl.     Mag. p  Offset     Locale     D A
    PSN J15111485+4609115   2013 07 13.91 *15 11 14.85 +46 09 11.5 17.8 U     15E  8N U9761      9 1

    Returns a dict with keys: ra, dec, ra_deg, dec_deg, prefix, obj_type,
    designation, date_string, date_parsed (datetime, or 'ParseERROR'), mag
    (raw string), filter, locale, discoverer, arc, psn_string, ra_offset,
    dec_offset. Raises IndexError on lines shorter than the fixed format.
    '''
    # Object type from the three-letter prefix.
    prefix = psn_string[0:3]
    if prefix == 'PSN':
        obj_type = '(possible) supernova'
    elif prefix == 'PNV':
        obj_type = '(possible) nova'
    else:
        obj_type = 'unknown'
    designation = psn_string[4:21]

    # --- Date: "YYYY MM DD[.frac]" -------------------------------------
    date_string = psn_string[24:39]
    date_split = date_string.split('.')
    date_ymd = date_split[0]
    if len(date_split) == 2 and date_split[1].strip():
        # BUGFIX: scale by the actual number of fractional digits instead
        # of dividing by 100 — ".9" means 0.9 of a day, not 0.09.
        try:
            fraction_of_day = float('0.' + date_split[1].strip())
        except ValueError:
            fraction_of_day = 0
    else:
        # BUGFIX: previously only len == 1 was handled, so any other split
        # length left fraction_of_day unbound and raised NameError below.
        fraction_of_day = 0
    try:
        date_parsed = datetime.datetime.strptime(
            date_ymd, '%Y %m %d') + datetime.timedelta(fraction_of_day)
    except ValueError:
        # Keep the original sentinel so downstream string checks still work.
        date_parsed = 'ParseERROR'

    # --- Position -------------------------------------------------------
    ra = psn_string[42:54]
    dec = psn_string[54:65]
    # sex2dec (defined elsewhere in this module) converts the sexagesimal
    # pair to decimal degrees.
    ra_deg, dec_deg = sex2dec((ra, dec))

    # Magnitude is deliberately kept as the raw 4-char string.
    mag = psn_string[67:71]
    filt = psn_string[72]
    if filt == 'U':
        filt = 'unfiltered'

    # --- Offsets from the presumed host galaxy --------------------------
    # The *_value / *_direction fields are parsed for completeness but, as
    # in the original, only the raw offset strings go into the result.
    ra_offset_string = psn_string[74:79].strip()
    try:
        ra_offset_value = int(psn_string[74:78])
    except ValueError:
        ra_offset_value = 'Unknown'
    ra_offset_direction = psn_string[78]
    dec_offset_string = psn_string[79:84].strip()
    try:
        dec_offset_value = int(psn_string[79:83])
    except ValueError:
        dec_offset_value = 'Unknown'
    dec_offset_direction = psn_string[83]

    # --- Locale ---------------------------------------------------------
    locale = psn_string[86:95]
    if locale.strip() == '':
        locale = 'UNKNOWN'

    # --- Discoverer (column 97) -----------------------------------------
    discoverer_key = psn_string[96]
    disc_dict = {
        "B": "Tom Boles (Coddenham, England)",
        "C": "CHASE program (Cerro Tololo, Chile)",
        "D": "Catalina Real-time Transient Survey",
        "H": "Kamil Hornoch (Ondrejov Observatory, Czech Rep.)",
        "I": "Koichi Itagaki (Yamagata, Japan)",
        "J": "Brazilian Supernovae Search (Cristovao Jacques et al.)",
        "L": "Lick Observatory Supernova Search",
        "M": "Berto Monard (Pretoria, South Africa)",
        "N": "Koichi Nishiyama and Fujio Kabashima (Japan)",
        "P": "Tim Puckett's Supernova Search Program",
        "R": "Guoyuo Sun and Jiangao Ruan (China)",
        "S": "La Sagra Sky Survey (Spain)"
    }
    if discoverer_key in disc_dict:
        discoverer_string = disc_dict[discoverer_key]
    elif discoverer_key == '9':
        discoverer_string = 'Unknown observer with nine or more previous CBAT-confirmed discoveries'
    elif discoverer_key in '012345678':
        discoverer_string = 'Unknown observer with %s previous CBAT-confirmed discoveries' % (
            discoverer_key)
    else:
        discoverer_string = 'Unknown discoverer; cannot parse discoverer_key'

    # --- Arc (column 99): how many nights the object has been imaged ----
    arc_key = psn_string[98]
    if arc_key == '-':
        arc_string = 'A single image on a single night'
    elif arc_key == '0':
        arc_string = 'Multiple images on a single night'
    elif arc_key == '9':
        arc_string = 'An arc of nine or more days'
    elif arc_key in '12345678':
        arc_string = 'A %s day arc' % (arc_key)
    else:
        arc_string = 'Unknown arc; cannot parse arc_key'

    psn_dict = {
        'ra': ra,
        'dec': dec,
        'ra_deg': ra_deg,
        'dec_deg': dec_deg,
        'prefix': prefix,
        'obj_type': obj_type,
        'designation': designation,
        'date_string': date_string.strip(),
        'date_parsed': date_parsed,
        'mag': mag,
        'filter': filt,
        'locale': locale,
        'discoverer': discoverer_string,
        'arc': arc_string,
        'psn_string': psn_string,
        'dec_offset': dec_offset_string,
        'ra_offset': ra_offset_string
    }
    return psn_dict