Example #1
 def loadMultipleVoTables(self,locations):
     #Loads the data of multiple VOTables, generating the definitions from the first VOTable in the location list.
     #It assumes every VOTable has the same columns.
     tbl = parse_single_table(locations[0])
     if isinstance(tbl,pTable):
     # tbl.array contains the data
     # tbl.fields contains the column metadata
         self.loadFields(tbl.fields)
         self.genTable()
         for place in locations:
             currentTable = parse_single_table(place)
             self.insertData(currentTable.array)
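A minimal standalone sketch of the parse_single_table access pattern these snippets rely on (the file name is hypothetical):

from astropy.io.votable import parse_single_table

tbl = parse_single_table("catalog.vot")   # hypothetical local VOTable
print([f.name for f in tbl.fields])       # tbl.fields holds the column metadata
print(tbl.array[:5])                      # tbl.array holds the data rows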
Example #2
def getHifiMeta(obsid, cache = False, useTemp=True):
    #
    # Only metadata search is performed and raNominal,decNominal are extracted from the VOTable
    #
    archive = 'hsa'
    archiveMetaUrl = "http://archives.esac.esa.int/%s/aio/jsp/metadata.jsp"%archive
    haioMetaRequest = "%s?RESOURCE_CLASS=OBSERVATION&INSTRUMENT=%%27HIFI%%27&OBSERVATION_ID=%i"%(archiveMetaUrl,obsid)
    #
    # VOTable to keep the metadata queryresults
    #
    if (useTemp):
        voFile = "/Users/ivaltchanov/Tmp/hifi_meta_temp.vot"
    else:
        voFile =  "/Users/ivaltchanov/Tmp/%i_hifi_meta.vot"%obsid
    if cache and (not useTemp) and os.path.isfile(voFile):
        print("Found an already existing VOTable with metadata: %s"%voFile)
    else:
        r = requests.get(haioMetaRequest)
        with open(voFile, "wb") as tmp:
            tmp.write(r.content)
    #
    # now read the table
    #
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        votable = parse_single_table(voFile, pedantic=False)
    #
    # extract the data
    data = votable.array
    #
    return data
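Hedged usage sketch: the raNominal/decNominal column names come from the docstring above; the obsid is illustrative.

meta = getHifiMeta(1342190183)    # hypothetical HIFI observation id
ra = meta["raNominal"].data[0]
dec = meta["decNominal"].data[0]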
Example #3
def IRSA_TAP(ra,dec,search_radius,irsa_table,pandas=False,sql_SELECT=None,sql_WHERE=None,verbose=False):
    '''
    Query the designated IRSA table via the TAP sync endpoint (see Example #10 for the documented variant).
    '''
    j2000 ="'"+"J2000"+"'"
    sURL = 'http://irsa.ipac.caltech.edu/TAP/sync?QUERY='
    str_from = 'FROM+'+irsa_table+'+'
    if sql_SELECT is None:
        str_select = 'SELECT+*+' #select all
    else:
        str_select = 'SELECT+'+sql_SELECT+'+'
    if sql_WHERE is None:
        str_where = 'WHERE+'
    else:
        str_where = 'WHERE+'+sql_WHERE+'+'
    str_coord1 = 'CONTAINS(POINT('+j2000+',ra,dec),'
    str_coord2 = 'CIRCLE('+j2000+','+str(ra)+','+str(dec)+','+str(search_radius)+'))=1'
    str_coord=str_coord1+str_coord2
    urlquery = sURL+str_select+str_from+str_where+str_coord
    if verbose:
        print(urlquery)
    response=urllib2.urlopen(urlquery)
    if pandas:
        tab_out = votable.parse_single_table(response).to_table().to_pandas()
    else:
        tab_out = votable.parse_single_table(response)
    return tab_out
Example #4
 def getTableFromCDS(self, url):
     '''
     It retrieves a votable from CDS. 
     example: "url=http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=J/A+A/534/A102"
     '''
     self.url_cds = url
     self.cdstable = parse_single_table(url)
Example #5
 def table(self):
     if self.__table is None:
         self.bytes = BytesIO(self.data.encode('utf8'))
         tbl = votable.parse_single_table(self.bytes, pedantic=False)
         self.__table = tbl.to_table()
         self.__table.convert_bytestring_to_unicode()
     return self.__table
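The same lazy-parse-and-cache pattern as a self-contained sketch (the class name and data attribute are hypothetical):

from io import BytesIO
from astropy.io import votable

class CachedVOTable:
    def __init__(self, data):
        self.data = data        # VOTable XML as a unicode string
        self.__table = None     # parsed astropy Table, filled on first access

    @property
    def table(self):
        if self.__table is None:
            buf = BytesIO(self.data.encode('utf8'))
            self.__table = votable.parse_single_table(buf, pedantic=False).to_table()
            self.__table.convert_bytestring_to_unicode()
        return self.__table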
Example #6
File: moc.py Project: tboch/mocpy
    def _query(self, resource_id, max_rows):
        """
        Internal method to query Simbad or a VizieR table
        for sources in the coverage of the MOC instance
        """
        from astropy.io.votable import parse_single_table

        if max_rows is not None and max_rows >= 0:
            max_rows_str = str(max_rows)
        else:
            max_rows_str = str(9999999999)

        tmp_moc = tempfile.NamedTemporaryFile(delete=False)

        self.write(tmp_moc.name)
        r = requests.post('http://cdsxmatch.u-strasbg.fr/QueryCat/QueryCat',
                          data={'mode': 'mocfile',
                                'catName': resource_id,
                                'format': 'votable',
                                'limit': max_rows_str},
                          files={'moc': open(tmp_moc.name, 'rb')},
                          headers={'User-Agent': 'MOCPy'},
                          stream=True)

        tmp_vot = BytesIO()
        tmp_vot.write(r.content)
        tmp_vot.seek(0)  # rewind so the parser reads from the start of the buffer

        table = parse_single_table(tmp_vot).to_table()

        # finally delete temp files
        os.unlink(tmp_moc.name)

        return table
Example #7
    def query_usnob1(self):
    
        #ra, dec = coordinates_conversor.hour2deg(f[0].header['RA'], f[0].header['DEC'])
        #SEDM FoV is 6.5 arcmin, due to uncertainties in the position, 4 arcmin radius assumed.
        # Download USNO-B1 catalog for the position
    
        timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
    
    
        catalog_url = 'http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1&RA=%.5f&DEC=%.5f&SR=%.4f&VERB=1' % (self.ra, self.dec, self.rad)
        self.logger.info( "Downloading USNO-B1 catalog...")
        self.logger.info(catalog_url)
        
        tmp_file = '/tmp/tmp_usnob1_%s.cat'%timestamp
        
        urlretrieve(catalog_url, tmp_file)
        
        # Read RA, Dec and magnitude from XML format USNO catalog
        catalog = votable.parse_single_table(tmp_file).to_table()
        
        #Clean temporary file.
        if (os.path.isfile(tmp_file)):
            os.remove(tmp_file)

        return catalog.as_array().data
Example #8
    def __init__(self, url=None, votable=None, protocol=None, version=None):
        """
        Initialize the result set.  This constructor is not typically called 
        by directly applications; rather an instance is obtained from calling 
        a DALQuery's execute().  An exception is when creating a DALResults
        instance from a saved VOTable using the "votable" parameter.
        """
        self._queryurl = url
        self._protocol = protocol
        self._version = version
	self._rowcount = 0
        table = libvot.simpleGetURL(url, 'dummy.xml')
	try :
	    from astropy.io import votable
	    self._use_astropy = True
	except :
	    print  "failed to load astropy"
	if(self._use_astropy) :
	    self.votable = votable.parse_single_table('dummy.xml')
	
	votab = libvot.openVOTABLE('dummy.xml')
	rc = libvot.getRESOURCE(votab)
	if(votable != 0) :
	    self._tc = libvot.getTABLE(rc)
	    if(self._tc != 0) :
	        dc = libvot.getDATA(self._tc) 
	        if(dc != 0) :
	            self._tdc = libvot.getTABLEDATA(dc)
                    self._rowcount = libvot.getNRows(self._tdc)
Example #9
def load_votable(votable):
    if isinstance(votable,basestring):
        base,ext = os.path.splitext(votable)
        if ext == '.npy':
            votable = np.load(votable)
        elif ext == '.vot':
            votable = vot.parse_single_table(votable)
    return vot2npy(votable)
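A common pitfall with extension checks like the above: ('.npy') is just a parenthesised string, so the `in` operator does substring matching; a one-element tuple needs a trailing comma.

>>> '.n' in ('.npy')     # substring test against the string '.npy'
True
>>> '.n' in ('.npy',)    # real tuple membership
False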
Example #10
def IRSA_TAP(ra,dec,search_radius,irsa_table,pandas=True,sql_SELECT=None,sql_WHERE=None,verbose=False):
    '''
    Querying the designated IRSA table using the native IRSA API
    This would result in tables with all of the columns available.
    Use astroquery if you're only interested in typical queries.
    input: ra, dec(J2000) & search_radius (deg)
    Some of the most commonly used tables are:
    wise_allwise_p3as_psd (ALL WISE)
    fp_xsc (2MASS extended source catalog)
    fp_psc (2MASS point source catalog)
    '''
    j2000 ="'"+"J2000"+"'"
    sURL = 'http://irsa.ipac.caltech.edu/TAP/sync'
    str_from = 'FROM+'+irsa_table+'+'
    if sql_SELECT is None:
        str_select = 'SELECT+*+' #select all
    else:
        str_select = 'SELECT+'+sql_SELECT+'+'
    if sql_WHERE is None:
        str_where = 'WHERE+'
    else:
        str_where = 'WHERE+'+sql_WHERE+'+'
    str_coord1 = 'CONTAINS(POINT('+j2000+',ra,dec),'
    str_coord2 = 'CIRCLE('+j2000+','+str(ra)+','+str(dec)+','+str(search_radius)+'))=1'
    str_coord=str_coord1+str_coord2
    str_sql = 'QUERY='+str_select+str_from+str_where+str_coord+'&FORMAT=CSV'
    r = requests.get(sURL,params=str_sql)
    if verbose:print(r.url)
    csvtxt = StringIO(r.text)
    out = pd.read_csv(csvtxt,comment='#')
    if len(out) == 0:
        print('No objects have been found for RA='+str(ra)+', DEC='+str(dec)+' within '+str(search_radius)+' deg')
        return out
    elif not pandas:
        out = tab.from_pandas(out)
        return out
    else:
        return out
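Hedged usage sketch (coordinates illustrative; fp_psc is one of the table names listed in the docstring):

df = IRSA_TAP(10.684, 41.269, 0.05, 'fp_psc')   # 2MASS point sources near M31
print(df.head())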
Example #11
 def loadVoTable(self,location,allowed = []):
     #Generates the tables in the DB and loads the data.
     tbl = parse_single_table(location)
     if isinstance(tbl,pTable):
     # tbl.array contains the data
     # tbl.fields contains the metadata
         self.loadFields(tbl.fields)
         self.genTable(allowed)
         self.insertData(tbl.array,allowed)
Example #12
def get_votable(query):
    """ Get the VOTable from NOAO. """
    data = request_votable(query)
    
    fileobj = BytesIO()
    fileobj.write(data)
    fileobj.seek(0)  # rewind before handing the buffer to the parser
    votable = vot.parse_single_table(fileobj)

    return votable
Example #13
    def parse_votable(self, text):
        """extract file names form votable."""

        _file = StringIO(text)  # create file for parse_single_table func
        _table = parse_single_table(_file, pedantic=False)
        _date = (_table.array["ssa_dateObs"]).data
        _url = (_table.array["accref"]).data
        _name = (_table.array["ssa_dstitle"]).data
        return _date, _url, _name
Example #14
def twomass_conesearch(ra, dec, radius):
    """
    Returns arrays of ra and dec from a 2MASS conesearch
    """
    #url = 'http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=psc'
    url = 'http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_psc&'
    url += 'RA={0}&DEC={1}&SR={2}'.format(ra, dec, radius)
    response = urllib2.urlopen(url)
    tf = tempfile.NamedTemporaryFile()
    tf.write(response.read())
    tf.flush()
    tf.seek(0)  # rewind so the parser sees the downloaded bytes
    table = votable.parse_single_table(tf)
    return table.array['ra'], table.array['dec']
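Illustrative call (position and radius in degrees, values arbitrary):

ra_arr, dec_arr = twomass_conesearch(10.684, 41.269, 0.1)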
Example #15
    def get_data_urls(self, query_result, include_auxiliaries=False):
        """
        Function to map the results of a CADC query into URLs to
        corresponding data that can be later downloaded.

        The function uses the IVOA DataLink Service
        (http://www.ivoa.net/documents/DataLink/) implemented at the CADC.
        It works directly with the results produced by Cadc.query_region and
        Cadc.query_name but in principle it can work with other query
        results produced with the Cadc query as long as the results
        contain the 'caomPublisherID' column. This column is part of the
        caom2.Plane table.

        Parameters
        ----------
        query_result : result returned by Cadc.query_region() or
                    Cadc.query_name(). In general, the result of any
                    CADC TAP query that contains the 'caomPublisherID' column
                    can be used here.
        include_auxiliaries : boolean
                    True to return URLs to auxiliary files such as
                    previews, False otherwise

        Returns
        -------
        A list of URLs to data.
        """

        if not query_result:
            raise AttributeError('Missing metadata argument')

        try:
            publisher_ids = query_result['caomPublisherID']
        except KeyError:
            raise AttributeError(
                'caomPublisherID column missing from query_result argument')
        result = []
        for pid in publisher_ids:
            response = self._request('GET', self.data_link_url,
                                     params={'ID': pid})
            response.raise_for_status()
            buffer = BytesIO(response.content)

            # at this point we don't need cutouts or other SODA services so
            # just get the urls from the response VOTable
            tb = parse_single_table(buffer)
            for row in tb.array:
                semantics = row['semantics'].decode('ascii')
                if semantics == '#this':
                    result.append(row['access_url'].decode('ascii'))
                elif row['access_url'] and include_auxiliaries:
                    result.append(row['access_url'].decode('ascii'))
        return result
Example #16
    def _process(self, data):
        count, ra, dec, votable_xml = data
        table = parse_single_table(StringIO(votable_xml))
        results = [count, ra, dec]

        for c in self.columns:
            try:
                value = table.array[c].data[0] # we assume that there's only one row in the table
            except Exception:
                value = None
            results.append(value)
            self.log('extracted column: %s = %s' % (c, value))
        return results
Example #17
def vot2npy(votable):
    if isinstance(votable,basestring):
        data = vot.parse_single_table(votable).array
    else:
        data = votable

    expnum = create_expnum(data)
    nite = create_nite(data)
    out = np.empty(len(data),dtype=data.dtype.descr+[('expnum',int),('nite',int)])
    out[:] = data[:]
    out['expnum'] = expnum
    out['nite'] = nite
    return out
Example #18
def load_UDFmastertable(votablename):
    """

    Loading the master ID list VOTable
    NB that there is now also a fits version of this table

    --- EXAMPLE OF USE ---
    import MUSE_TDOSEvsUDF as mtu
    votablename = '/Volumes/DATABCKUP1/UDFvsMUSEWide/master_idlist_20170214.vot'
    table = mtu.load_UDFmastertable(votablename)

    """
    table = parse_single_table(votablename)

    return table.array
Example #19
 def setUp(self):
     ### Online version of the AMIGA VOTable
     # self.vo_url = "http://amiga.iaa.csic.es/amigasearch/search?RA=180.&DEC=90.&SR=180."
     # self.vo_table = parse_single_table(url_file(self.vo_url)).array
     
     ### Local cached version of the AMIGA VOTable
     self.vo_table = parse_single_table("../data/amigasearch.xml").array
     
     # Create temporary storage directory for online AMIGA CDS data
     self.local_cache = "data_AMIGA_CDS"
     if not os.path.exists(self.local_cache):
         os.makedirs(self.local_cache)
     
     #Load AMIGA0 data
     url_coordinates = "http://amiga.iaa.es/FCKeditor/UserFiles/File/ASCII/AMIGA_0/table1.dat"
     url_coordinates_readme = "http://amiga.iaa.es/FCKeditor/UserFiles/File/ASCII/AMIGA_0/ReadMe"
     local_coordinates = "%s/table1.dat"%self.local_cache
     local_coordinates_readme = "%s/Readme"%self.local_cache
     get_url(url_coordinates,local_coordinates)
     get_url(url_coordinates_readme,local_coordinates_readme)
     self.table = ascii.read(local_coordinates,readme=local_coordinates_readme)
Example #20
def load_catalog(filename):
    """
    Load a catalogue and extract the source positions (only)

    Parameters
    ----------
    filename : str
        Filename to read. Supported types are csv, tab, tex, vo, vot, and xml.

    Returns
    -------
    catalog : list
        A list of [ (ra, dec), ...]

    """
    supported = get_table_formats()

    fmt = os.path.splitext(filename)[-1][1:].lower()  # extension sans '.'

    if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = ascii.read(filename)
        catalog = list(zip(t.columns['ra'], t.columns['dec']))

    elif fmt in ['vo', 'vot', 'xml'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = parse_single_table(filename)
        catalog = list(zip(t.array['ra'].tolist(), t.array['dec'].tolist()))

    else:
        log.info("Assuming ascii format, reading first two columns")
        lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')]
        try:
            catalog = [(float(a[0]), float(a[1])) for a in lines]
        except (ValueError, IndexError):
            log.error("Expecting two columns of floats but failed to parse")
            log.error("Catalog file {0} not loaded".format(filename))
            raise Exception("Could not determine file format")

    return catalog
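Illustrative call (file name hypothetical):

positions = load_catalog('sources.vot')   # -> [(ra1, dec1), (ra2, dec2), ...]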
Example #21
def votable_parse(page):
    '''Given HTML output from CRTS with VOTable requested, find
    the download link, extract the VOTable, and return a 2D array.'''

    # create BeautifulSoup object
    soup = BeautifulSoup(page)
    
    # get href of download link
    for link in soup.find_all("a"):
        if link.string == "download":
            url = link.get("href")

    # get XML output
    request = urllib2.Request(url)
    response = urllib2.urlopen(request)
    votable_xml = response.read()
    
    # get numpy array - convert string to file for votable function
    table = parse_single_table(StringIO.StringIO(votable_xml))
    data = table.array

    return data
Example #22
def get_votable(ra, dec, size=1):

	"""
	Returns an astropy Table object containing the result of a
	query of the UCAC4 catalog, centered on <ra>, <dec> 
	(in decimal degrees), for the given <size> box in square degrees.
	"""

	filename = '{0:0.4f}_{1:0.4f}_{2:0.2f}.vot'.format(ra, dec, size)
	filepath = 'tmp/'+filename
	# if filename in os.listdir('tmp'):
	# 	votable = parse_single_table(filepath)
	# 	return votable
	url = get_url(ra, dec, size)
	handler = urllib2.urlopen(url)
	raw = handler.read()
	if 'tmp' not in os.listdir('.'):
		os.mkdir('tmp')
	with open(filepath,'wb') as f:
		f.write(raw)
	votable = parse_single_table(filepath)
	return votable
Example #23
def load_catalog(filename):
    """
    load a catalog and extract the source positions
    acceptable formats are:
    csv,tab,tex - from astropy.io.ascii
    vo,vot,xml - votable format
    cat - format created by Aegean
    returns [(ra,dec),...]
    """
    supported = get_table_formats()

    fmt = os.path.splitext(filename)[-1][1:].lower()  #extension sans '.'

    if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = ascii.read(filename)
        catalog = zip(t.columns['ra'], t.columns['dec'])

    elif fmt in ['vo', 'vot', 'xml'] and fmt in supported:
        log.info("Reading file {0}".format(filename))
        t = parse_single_table(filename)
        catalog = zip(t.array['ra'].tolist(), t.array['dec'].tolist())

    elif fmt == 'cat':
        log.info("Reading ra/dec columns of Aegean catalog")
        lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')]
        catalog = [(float(a[5]), float(a[7])) for a in lines]
    else:
        log.info("Assuming ascii format, reading first two columns")
        lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')]
        try:
            catalog = [(float(a[0]), float(a[1])) for a in lines]
        except (ValueError, IndexError):
            log.error("Expecting two columns of floats but failed to parse")
            log.error("Catalog file {0} not loaded".format(filename))
            sys.exit()

    return catalog
Example #24
def fetch_filter(telescope,instrument,filt):

    # Downloads filter transmission curves as VOTables from Spanish Virtual Observatory

    # Use -81C for HST, operating temperature since 2006
    if instrument == 'ACS_WFC':
        filt += '_81'

    url = 'http://svo2.cab.inta-csic.es/svo/theory/fps3/fps.php?ID={0}/{1}.{2}'.format(telescope,instrument,filt)

    if not os.path.exists('downloads'):
        os.makedirs('downloads')

    loc = os.path.join('downloads', '{0}.{1}.{2}.vot'.format(telescope,instrument,filt))
    if not os.path.exists(loc):
        print "downloading from %s" % url
        F = urllib2.urlopen(url)
        open(loc, 'w').write(F.read())

    table = parse_single_table(loc)
    data = table.array
        
    return data
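Hedged usage sketch; the identifiers plug into the SVO URL pattern above, and the Wavelength/Transmission column names are the usual SVO VOTable fields (assumed here):

data = fetch_filter('HST', 'ACS_WFC', 'F606W')
wave, trans = data['Wavelength'], data['Transmission']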
Example #25
def query(ra, dec, rad=0.1, query=None):
    """Query the CADC TAP service to determine the list of images for the
NewHorizons Search.  Things to determine:
   

   a- Images to have the reference subtracted from.
   b- Image to use as the 'REFERENCE' image.
   c- Images to be used for input into the reference image

Logic: Given a particular Image/CCD find all the CCDs of the same field that
overlap that CCD but are taken more than 7 days later or earlier than
that image.

    """
    if query is None:
       query=( """ SELECT """
               """ "II/246/out".raj2000 as ra, "II/246/out".dej2000 as dec, "II/246/out".jmag as jmag """
               """ FROM "II/246/out" """
               """ WHERE """
               """ CONTAINS(POINT('ICRS', raj2000, dej2000), CIRCLE('ICRS', {}, {}, {})) = 1 """.format(ra,dec,rad) )

    tapURL = "http://TAPVizieR.u-strasbg.fr/TAPVizieR/tap/sync"


    ## Some default parameters for that TAP service queries.
    tapParams={'REQUEST': 'doQuery',
               'LANG':    'ADQL',
               'FORMAT':  'votable',
               'QUERY':   query}

    response = requests.get(tapURL, params=tapParams)
    data = StringIO(response.text)
    data.seek(0)
    T = votable.parse_single_table(data).to_table()

    return T
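Illustrative call (coordinates arbitrary); the default ADQL pulls 2MASS positions and J magnitudes from VizieR:

T = query(185.0, 15.0, rad=0.05)
print(T['ra', 'dec', 'jmag'])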
Example #26
def vot2points(filename):
    '''
    It produces a dictionary with the arrays for each of the fields we use in the other functions
    '''
    # Work around for AMDA tables
    vot = votable.parse(filename, pedantic = False)
    if vot.description == 'Generated by CDPP/AMDA':
        return _vot2points_amda(vot)
    # back to normal...
    vot = votable.parse_single_table(filename, pedantic = False)
    axis = ['x', 'y', 'z']
    types = ['pos.cartesian.'+l for l in axis] + ['phys.veloc', 'phys.mass', 'phys.atmol.charge', 'time']
    points = {}
    for column in vot.iter_fields_and_params():
        ucd = str(column.ucd)
        if ucd.lower() in types:
            if ucd.lower() == types[3]:
                # Create the points for the velocity 
                findaxis = lambda x: x in column.name.lower()
                mask = list(map(findaxis, axis))
                if True in mask:
                    column_name = 'v'+axis[mask.index(True)]
                    column_unit = column.unit if column.unit is not None else fields_props[column_name]['units']
                    column_values = vot.array[column.ID].data * column_unit
                    points[column_name] = column_values.si.value.reshape((len(column_values),))
            else:
                # Create the points for the rest
                for key in fields_props.keys():
                    if ucd.lower() == fields_props[key]['ucd']:
                        #column_unit = column.unit if column.unit is not None else fields_props[key]['units']
                        if (column.unit is not None and ucd.lower() != 'time'):
                            column_values = (vot.array[column.ID].data * column.unit).si.value  
                        else:
                            column_values = vot.array[column.ID].data
                        points[key] = column_values.reshape((len(column_values),))
    return points
Example #27
    help="The name of the spectral index alpha-term column (default=alpha).",
    default="alpha")
# parser.add_option('--betacol',type="string", dest="betacol",
#                    help="The name of the spectral index beta-term column (default=beta).", default="beta")
(options, args) = parser.parse_args()

if options.catalogue is None:
    print "must specify input catalogue"
    sys.exit(1)
else:
    filename, file_extension = os.path.splitext(options.catalogue)
    if file_extension == ".fits":
        temp = fits.open(options.catalogue)
        data = temp[1].data
    elif file_extension == ".vot":
        temp = parse_single_table(options.catalogue)
        data = temp.array

# Generate an output in Andre's sky model format
gformatter = "source {{\n  name \"{Name:s}\"\n  component {{\n    type {shape:s}\n    position {RA:s} {Dec:s}\n    shape {a:2.1f} {b:2.1f} {pa:4.1f}\n    sed {{\n      frequency {freq:3.0f} MHz\n      fluxdensity Jy {flux:4.7f} 0 0 0\n      spectral-index {{ {alpha:2.2f} {beta:2.2f} }}\n    }}\n  }}\n}}\n"
pformatter = "source {{\n  name \"{Name:s}\"\n  component {{\n    type {shape:s}\n    position {RA:s} {Dec:s}\n    sed {{\n      frequency {freq:3.0f} MHz\n      fluxdensity Jy {flux:4.7f} 0 0 0\n      spectral-index {{ {alpha:2.2f} {beta:2.2f} }}\n    }}\n  }}\n}}\n"

shape = np.empty(shape=data[options.namecol].shape, dtype="S8")
shape.fill("gaussian")

indices = np.where(data[options.acol] == 0.)
shape[indices] = "point"

bigzip = zip(data[options.namecol], data[options.racol], data[options.decol],
             data[options.acol], data[options.bcol], data[options.pacol],
             data[options.fluxcol], data[options.alphacol], shape,
Example #28
sources = query_info['Source']
radii = query_info['Radius']

see_list = raw_input(
    "Would you like to view your list of sources? (yes/no[DEFAULT]) ")
if see_list == "yes":
    print sources

#Define path to your VOTable data file(s).
data_path = ''
star = raw_input("Which star would you like to look at? ")

#Define data variable and save information to a table.
#THIS IS CASE SENSITIVE. MAKE SURE FILE NAME HAS PROPER CAPITALIZATION
data_orig = parse_single_table(data_path + star + ".vot").to_table()

filt_RA = data_orig['_RAJ2000'][0]
filt_DEC = data_orig['_DEJ2000'][0]

#Create array to store names [Not entirely necessary...]
names = data_orig.colnames

#Define variable from Table of data
freq_orig = data_orig['sed_freq']  #in GHz
lam_orig = 299792.458 / freq_orig  #convert to microns
flux_orig = data_orig['sed_flux']  #in Jansky
eflux_orig = data_orig['sed_eflux']  #ERROR in Jansky
f_lam_orig = 3.e-9 * flux_orig / (lam_orig * lam_orig)  #units of erg/s/cm2/micron
ef_lam_orig = 3.e-9 * eflux_orig / (lam_orig * lam_orig
Example #29
def search(image, headinfo, target_coords, syntax, catalog_syntax, filter_):
    """
    Search area around transient/target location in photometric catalogs

    Current catalog (selectable in syntax):

        - Skymapper: Southern Hemisphere
        - Pan Starrs: North of declination -30 degree
        - Apass: All-sky survey
        - 2MASS: JHK all sky survey

    Future:
        - SDSS: Future implementation
        - Ability to make custom catalog from different surveys


    Input:

        - Image: Numpy 2D array
        - headinfo: astropy.io.fits.header.Header
        - target_coords: astropy.coordinates.sky_coordinate.SkyCoord
        - syntax: dict
        - catalog_syntax: dict
        - filter_: str

    Output:

        - data:  pandas DataFrame

    """

    import warnings

    if not syntax['catalog_warnings'] or syntax['master_warnings']:
        warnings.filterwarnings("ignore")

    import numpy as np
    import os, sys
    import requests
    import pathlib
    import shutil
    import os.path
    import logging
    from functools import reduce
    import pandas as pd
    from autophot.packages.functions import gauss_sigma2fwhm, gauss_2d, gauss_fwhm2sigma

    from autophot.packages.functions import moffat_2d, moffat_fwhm

    from astropy.table import Table
    from astropy.wcs import wcs
    from astroquery.vizier import Vizier
    from astropy.io.votable import parse_single_table
    from astropy.coordinates import Angle

    # from autophot.packages.functions import pix_dist
    logger = logging.getLogger(__name__)

    try:

        # Get wcs information
        w1 = wcs.WCS(headinfo)

        # Radius around target
        radius = float(syntax['radius'])

        # Target name, if applicable
        target = syntax['target_name']

        # Get work directory location, create directory if needed
        dirname = os.path.join(syntax['wdir'], 'catalog_queries')
        pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
        '''
        Getting target Ra and Dec

        - if target is none but a Ra and DEC is given, create new target name

        - if ra and dec not given, use the center of the image as the location - for quick reduction of the image

        '''
        # if target or its ra/dec is given - set target name
        if target is None:
            if syntax['target_ra'] is not None and syntax['target_dec'] is not None:
                target = 'target_ra_' + str(round(
                    syntax['target_ra'])) + '_dec_' + str(
                        round(syntax['target_dec']))
                logger.info('New target name: %s' % target)
            else:
                #  if not just call target
                target = 'target'

        # Search limitation with Pan Starrs limited to 0.5 deg
        if radius > 0.5 and syntax['catalog'] == 'pan_starrs':
            logger.warning(
                'Search Limitation with PanStarrs API -> Radius = 0.5 [deg] ')
            radius = 0.5

        # Chosen catalog from input.yml, create directory for catalog if needed
        catalog_dir = syntax['catalog']
        pathlib.Path(os.path.join(dirname, catalog_dir)).mkdir(parents=True,
                                                               exist_ok=True)

        # Folder for target, create directory if needed
        target_dir = reduce(
            os.path.join,
            [dirname, catalog_dir, target.lower()])
        pathlib.Path(target_dir).mkdir(parents=True, exist_ok=True)

        # Filename of fetched catalog
        fname = str(target) + '_r_' + str(radius)

        # Can force to use certain catalog - untested 03-10-19
        if syntax['force_catalog_csv']:
            logger.info('Using ' + syntax['force_catalog_csv_name'] +
                        ' as catalog')
            fname = str(syntax['force_catalog_csv_name']) + '_r_' + str(radius)

        # if syntax['catalog'] == 'custom':
        #     dir_name = os.path.join(syntax['wdir'],'catalog_queries')
        #     catalog_dir = syntax['catalog']
        #     target = syntax['target_name']
        #     target_dir =  dir_name + '/' + catalog_dir+'/'+target.lower()
        #     fname = str(target) + '_RAD_' + str(float(syntax['radius']))
        #     data =pd.read_csv(target_dir +'/'+ fname+'.csv')

        #  If catalog set to custom
        if syntax['catalog'] == 'custom':
            target = syntax['target_name']
            fname = str(target) + '_RAD_' + str(float(syntax['radius']))

            if not syntax['catalog_custom_fpath']:
                logger.critical(
                    'Custom catalog selected but "catalog_custom_fpath" not defined'
                )
                exit()
            else:
                fname = syntax['catalog_custom_fpath']

            data = pd.read_csv(fname)

        # if catalog is found via its filename - use this and return data
        if os.path.isfile(os.path.join(target_dir, fname + '.csv')):
            logger.info(
                'Catalog found for Target: %s\nCatalog: %s \nFile: %s' %
                (target, str(catalog_dir).upper(), fname))
            data = Table.read(os.path.join(target_dir, fname + '.csv'),
                              format='csv')
            data = data.to_pandas()

        else:
            # If no previous catalog found - look for one
            logger.info('Searching for new catalog: %s ' % syntax['catalog'])

            if syntax['catalog'] in ['gaia']:

                import astropy.units as u
                from astroquery.gaia import Gaia
                import warnings
                warnings.filterwarnings('ignore')

                width = u.Quantity(radius, u.deg)
                height = u.Quantity(radius, u.deg)

                data = Gaia.query_object_async(coordinate=target_coords,
                                               width=width,
                                               height=height)

                data = data.to_pandas()
                data.to_csv(fname + '.csv', sep=',', index=False)

                # Move file to new location - 'catalog queries'
                shutil.move(os.path.join(os.getcwd(), fname + '.csv'),
                            os.path.join(target_dir, fname + '.csv'))

                warnings.filterwarnings('default')

            if syntax['catalog'] in ['apass', '2mass', 'sdss']:

                # No row limit
                Vizier.ROW_LIMIT = -1
                catalog_search = Vizier.query_region(target_coords,
                                                     radius=Angle(
                                                         radius, 'deg'),
                                                     catalog=syntax['catalog'])

                # Select first catalog from list
                data = catalog_search[0].to_pandas()
                data.to_csv(fname + '.csv', sep=',', index=False)

                # Move file to new location - 'catalog queries'
                shutil.move(os.path.join(os.getcwd(), fname + '.csv'),
                            os.path.join(target_dir, fname + '.csv'))

            # some catalogs need specific download path using 'requests'
            if syntax['catalog'] in ['pan_starrs', 'skymapper']:

                mindet = 1

                if syntax['catalog'] == 'pan_starrs':

                    server = ('https://archive.stsci.edu/' +
                              'panstarrs/search.php')
                    params = {
                        'RA': target_coords.ra.degree,
                        'DEC': target_coords.dec.degree,
                        'SR': radius,
                        'max_records': 10000,
                        'outputformat': 'VOTable',
                        'ndetections': ('>%d' % mindet)
                    }

                if syntax['catalog'] == 'skymapper':

                    server = (
                        'http://skymapper.anu.edu.au/sm-cone/public/query?')
                    params = {
                        'RA': target_coords.ra.degree,
                        'DEC': target_coords.dec.degree,
                        'SR': radius,
                        'RESPONSEFORMAT': 'VOTABLE'
                    }

                with open('temp.xml', "wb") as f:

                    logger.info('Downloading from %s' % syntax['catalog'])
                    response = requests.get(server, params=params)
                    f.write(response.content)

                # Parse local file into astropy.table object
                data = parse_single_table('temp.xml')

                # Delete temporary file
                os.remove('temp.xml')

                # Convert table to dataframe
                data_table = data.to_table(use_names_over_ids=True)
                data = data_table.to_pandas()

                # invalid entries in panstarrs are -999 - change to nans
                if syntax['catalog'] == 'pan_starrs':
                    data = data.replace(-999, np.nan)

                # No sources in field - temporary fix - will add "check different catalog"
                if len(data) == 0:
                    logger.critical('Catalog: %s : does not cover field' %
                                     syntax['catalog'])
                    sys.exit()

                # Save to csv and move to 'catalog_queries'
                data.to_csv(fname + '.csv', index=False)

                shutil.move(os.path.join(os.getcwd(), fname + '.csv'),
                            os.path.join(target_dir, fname + '.csv'))

        # Add in x and y pixel locations under wcs
        x_pix, y_pix = w1.wcs_world2pix(data[catalog_syntax['RA']],
                                        data[catalog_syntax['DEC']], 1)

        data.insert(loc=5, column='x_pix', value=x_pix)
        data.insert(loc=6, column='y_pix', value=y_pix)

        # Remove boundary sources
        data = data[data.x_pix < image.shape[1] - syntax['pix_bound']]
        data = data[data.x_pix > syntax['pix_bound']]
        data = data[data.y_pix < image.shape[0] - syntax['pix_bound']]
        data = data[data.y_pix > syntax['pix_bound']]

        logger.info('Catalog length: %d' % len(data))

        warnings.filterwarnings("default")

    except Exception as e:
        logger.exception(e)
        data = None

    return data
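The syntax dict drives everything above; a minimal illustrative set of the keys this function actually reads (all values are placeholders):

syntax = {
    'catalog': '2mass',            # gaia | apass | 2mass | sdss | pan_starrs | skymapper | custom
    'radius': 0.25,                # search radius in degrees
    'target_name': None,
    'target_ra': 14.18,
    'target_dec': -7.32,
    'wdir': '/tmp/autophot',
    'force_catalog_csv': False,
    'force_catalog_csv_name': '',
    'catalog_custom_fpath': None,
    'pix_bound': 50,
    'catalog_warnings': False,
    'master_warnings': False,
}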
Example #30
def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :param update: re-download the filter file even if it already exists
    :return:
    """

    # make a directory for this observatory and instrument

    filter_path = os.path.join(get_speclite_filter_path(),
                               to_valid_python_name(observatory))

    if_directory_not_existing_then_make(filter_path)

    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it

    if observatory == 'TwoMASS':
        observatory = '2MASS'

    if not file_existing_and_readable(
            os.path.join(
                filter_path, "%s-%s.ecsv" %
                (to_valid_python_name(instrument),
                 to_valid_python_name(ffilter)))) or update:

        url_response = urllib.request.urlopen(
            'http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB'
            % (observatory, instrument, ffilter))
        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data['Wavelength'].unit

        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
        # so we add a zero on the boundaries

        if data['Transmission'][0] != 0.:

            w1 = data['Wavelength'][0] * .9
            data.insert_row(0, [w1, 0])

        if data['Transmission'][-1] != 0.:

            w2 = data['Wavelength'][-1] * 1.1
            data.add_row([w2, 0])

        # filter any negative values

        idx = data['Transmission'] < 0
        data['Transmission'][idx] = 0

        # build the transmission. # we will force all the wavelengths
        # to Angstroms because sometimes AA is misunderstood

        try:

            transmission = spec_filter.FilterResponse(
                wavelength=data['Wavelength'] * waveunit.to('Angstrom') *
                u.Angstrom,
                response=data['Transmission'],
                meta=dict(group_name=to_valid_python_name(instrument),
                          band_name=to_valid_python_name(ffilter)))

            # save the filter

            transmission.save(filter_path)

            success = True

        except ValueError:

            success = False

            print('%s:%s:%s has an invalid wave table, SKIPPING' %
                  (observatory, instrument, ffilter))

        return success

    else:

        return True
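Hedged usage sketch; the 2MASS identifiers follow SVO's PhotCalID naming, which the TwoMASS alias handling above expects:

success = add_svo_filter_to_speclite('TwoMASS', '2MASS', 'J')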
Example #31
def main():
    path = "/Users/boyangliu/Dropbox/ATCA_SMC/190624/"
    plotpath = path  #+"plot_files/"

    filelist = []

    for root, dirs, files in os.walk(plotpath):
        for file in files:
            # change the extension from '.mp3' to
            # the one of your choice.
            if file.endswith('_region.png'):
                #print(str(file))
                filelist.append(str(file))

    print('Table 1_no_warning')
    print("Field", "Source_Name", "Scont", "tau_peak", "sigma_tau_peak", "EW",
          "EW_err", "N_H_uncor", "N_H_uncor_error", "f_H_cor_iso", "Ts_aver",
          "Ts_aver_err")
    for n in range(len(filelist)):
        #for n in [2]:
        Field = filelist[n][0:9]
        # Source_Name=filelist[n][14:33]
        Source_Name = filelist[n][14:22] + filelist[n][23:30]
        RAHH = filelist[n][14:16]
        RAMM = filelist[n][16:18]
        RASS = filelist[n][18:23]
        DecDD = filelist[n][23:26]
        DecMM = filelist[n][26:28]
        DecSS = filelist[n][28:33]
        RAHMS = RAHH + ':' + RAMM + ':' + RASS
        DecDMS = DecDD + ':' + DecMM + ':' + DecSS

        votablefile = path + filelist[n][0:33] + "_opacity.votable.xml"
        table = parse_single_table(votablefile)
        velo = table.array['velocity']
        smooth_opacity = table.array['smooth_opacity']  #exp(-tau)
        sigma_tau_smooth = table.array['sigma_tau_smooth']
        emission = table.array['em_mean']
        emission_error = table.array['em_std']

        votablefileobj = parse(votablefile)
        Scont = float(votablefileobj.infos[3].value)

        # if len(np.where(smooth_opacity < 0)[0]) >0:
        # print("For "+Source_Name+" the smoothed opacity is sometimes < 0!")
        tau = -1 * np.log(smooth_opacity)
        tau_peak = max(tau)
        tau_peak_channel = np.where(tau == tau_peak)
        sigma_at_tau_peak = sigma_tau_smooth[tau_peak_channel[0][0]]
        sigma_tau_cont = sigma_tau_smooth[0]

        channel_width = (velo[1] - velo[0]) / 1000
        abs = 1 - smooth_opacity  #1-exp(-tau)
        EW = np.sum(abs) * channel_width
        smooth_opacity_err = sigma_tau_smooth
        abserr = smooth_opacity_err
        EWerr = np.sqrt(np.sum(abserr * abserr)) * channel_width

        if math.isnan(emission[500]):
            print("No emission spectra found!")

        N_H_uncor = 1.823 * np.power(10, 18) * np.sum(emission) * channel_width
        N_H_uncor_error = 1.823 * np.power(
            10, 18) * np.sum(emission_error) * channel_width

        N_H_cor_iso = 1.823 * np.power(10, 18) * np.sum(
            emission * tau / abs) * channel_width
        f_H_cor_iso = N_H_cor_iso / N_H_uncor

        Ts_aver = np.sum(emission) * channel_width / EW

        Emierr = np.sqrt(np.sum(emission_error * emission_error))
        tauerr = np.sqrt(np.sum(sigma_tau_smooth * sigma_tau_smooth))
        sumemi = np.sum(emission)
        sumabs = np.sum(abs)
        Ts_aver_err = sumemi / sumabs * (Emierr / sumemi + tauerr / sumabs)

        emiwitherr = unumpy.uarray(emission, emission_error)
        abswitherr = unumpy.uarray(abs, abserr)
        Ts_aver_unp = np.sum(emiwitherr) / np.sum(abswitherr)
        # print(str(format(Ts_aver_unp, '.f')).replace("+/-", "$\pm$"))

        EW_unp = np.sum(abswitherr) * channel_width
        # print(Source_Name, str(format(EW_unp, '.f')).replace("+/-", "$\pm$"))

        N_H_uncor_unp = 1.823 * np.sum(
            emiwitherr) * channel_width / 1000.  #divided by 1E21
        # print(Source_Name, str(format(N_H_uncor_unp, '.f')).replace("+/-", "$\pm$"))

        tau_peak_unp = ufloat(tau_peak, sigma_at_tau_peak)
        # print(Source_Name, str(format(tau_peak_unp, '.f')).replace("+/-", "$\pm$"))

        # print(Field, format(Scont*1000, '.1f'))#, Source_Name, format(tau_peak_unp, '.f'), format(EW_unp, '.f'), format(N_H_uncor_unp, '.f'), format(Ts_aver_unp, '.1f'))
        # print(Source_Name, sigma_tau_cont)
        EW_unp_err = float(format(EW_unp, '.3f').split("+/-")[1])

        # Table 1 for LaTex:
        if EW_unp < (3 * EW_unp_err):
            Tslowerlimit = sumemi * channel_width / (3 * EW_unp_err)
            # print(Field, '&', Source_Name, '&', format(Scont*1000, '.1f'), '&', str(format(tau_peak_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(EW_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(N_H_uncor_unp, '.f')).replace("+/-", "$\pm$"), '&', format(f_H_cor_iso, '.2f'), '&', '$> $'+str(format(Tslowerlimit,'.0f')), '\\\\')
            print(Field, Source_Name, RAHMS, DecDMS, Scont, tau_peak,
                  sigma_at_tau_peak, EW, N_H_uncor, N_H_uncor_error,
                  f_H_cor_iso, Ts_aver, Ts_aver_err, EWerr, Tslowerlimit)

        else:
            # print(Field, '&', Source_Name, '&', format(Scont*1000, '.1f'), '&', str(format(tau_peak_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(EW_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(N_H_uncor_unp, '.f')).replace("+/-", "$\pm$"), '&', format(f_H_cor_iso, '.2f'), '&', str(format(Ts_aver_unp, '.f')).replace("+/-", "$\pm$"), '\\\\')
            print(Field, Source_Name, RAHMS, DecDMS, Scont, tau_peak,
                  sigma_at_tau_peak, EW, N_H_uncor, N_H_uncor_error,
                  f_H_cor_iso, Ts_aver, Ts_aver_err, EWerr, 0)

        #print(Source_Name, smooth_opacity_err[0])

        # print(Source_Name, float(str(N_H_uncor_unp).split("+/-")[0])*10, float(str(N_H_uncor_unp).split("+/-")[1])*10, str(EW_unp).split("+/-")[0], str(EW_unp).split("+/-")[1])

        #fig = plt.figure()
        #plt.plot(velo,tau)
        #plt.show()
        #print(Field, '&', Source_Name, '&', RAHMS, '&', DecDMS, '&', format(Scont*1000, '.1f'), '&', format(tau_peak, '.1f'), '&', format(sigma_at_tau_peak, '.1f'), '&', format(EW, '.2f')+'$\pm$'+format(EWerr, '.2f'), '&', format(N_H_uncor/1E21, '.2f')+'$\pm$'+format(N_H_uncor_error/1E21, '.2f'), '&', format(f_H_cor_iso, '.2f'), '&', format(Ts_aver, '.1f')+'$\pm$'+format(Ts_aver_err, '.1f'), '\\\\')
        # print(Field, '&', Source_Name, '&', format(Scont*1000, '.1f'), '&', format(tau_peak, '.2f'), '&', format(sigma_at_tau_peak, '.2f'), '&', format(EW, '.2f')+'$\pm$'+format(EWerr, '.2f'), '&', format(N_H_uncor/1E21, '.2f')+'$\pm$'+format(N_H_uncor_error/1E21, '.2f'), '&', format(f_H_cor_iso, '.2f'), '&', format(Ts_aver, '.1f')+'$\pm$'+format(Ts_aver_err, '.1f'), '\\\\')
        # print(Field,  Source_Name,  format(Scont*1000, '.1f'),  format(tau_peak, '.2f'),  format(sigma_at_tau_peak, '.2f'),  format(EW, '.2f'),format(EWerr, '.2f'),  format(N_H_uncor/1E21, '.2f'),format(N_H_uncor_error/1E21, '.2f'),  format(f_H_cor_iso, '.2f'),  format(Ts_aver, '.1f'),format(Ts_aver_err, '.1f'))
        # print(Field,  Source_Name, RAHMS, DecDMS, Scont, tau_peak, sigma_at_tau_peak, EW, N_H_uncor, N_H_uncor_error, f_H_cor_iso, Ts_aver, Ts_aver_err, EWerr, Tslowerlimit)
        #print(Field, '&', Source_Name, '&', format(Scont*1000, '.1f'), '&', str(format(tau_peak_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(EW_unp, '.f')).replace("+/-", "$\pm$"), '&', str(format(N_H_uncor_unp, '.f')).replace("+/-", "$\pm$"), '&', format(f_H_cor_iso, '.2f'), '&', str(format(Ts_aver_unp, '.f')).replace("+/-", "$\pm$"), '\\\\')

    print("Finished")
Example #32
    def query_catalogue(self,
                        catalogue="PS1V3OBJECTS",
                        filtered=True,
                        tmpdir="/tmp"):
        '''
        Sends a VO query to the PS1 catalogue.
        Filters the result by magnitude.
        
        From: http://gsss.stsci.edu/Software/WebServices.htm
        
        General Catalog Access : http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?Parameters...

        Required Parameter List
        1 of the following 3 queries - VO ConeSearch, BoxSearch, IDsearch
        
            RA=ra(deg) &DEC=dec(deg) &SR=search radius(deg)
            BBOX=raMin(deg),decMin(deg),raMax(deg),decMax(deg)
            ID=catID
        
        Optional Parameters
        
            FORMAT= VOTABLE(default) | HTML | KML | CSV | TSV | JSON | TEXT(limited set of catalogs)
            CATALOG=GSC23(default) | GSC11 | GSC12 | USNOB | SDSS | FIRST | 2MASS | IRAS | GALEX | GAIA | TGAS | WISE
            | CAOM_OBSCORE | CAOM_OBSPOINTING | PS1V3OBJECTS | PS1V3DETECTIONS
            FILENAME=outputname (directs output to file)
            MAXOBJ=n (limits number of entries returned by brightest magnitude)
            MAGRANGE=bright,faint (limits number of entries returned by limits)
            MINDET=n (minimum number of detections PanSTARRS only)


        '''

        timestamp = datetime.datetime.isoformat(datetime.datetime.utcnow())

        url = "http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?CAT=%s&RA=%.5f&DEC=%.5f&SR=%.5f&MAGRANGE=%.3f,%.3f" % (
            catalogue, self.ra, self.dec, self.rad, self.minmag, self.maxmag)

        self.logger.info("URL queried: %s" % url)

        tmp_file = os.path.join(tmpdir, 'ps1_cat_%s.xml' % timestamp)

        with open(tmp_file, "wb") as f:
            page = urlopen(url)
            f.write(page.read())

        # Read RA, Dec and magnitude from XML format USNO catalog
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                catalog = votable.parse_single_table(tmp_file).to_table()
            except ValueError:
                self.logger.warn(
                    "The search radius was too large for the service. Reducing to 0.25 deg."
                )
                self.rad = 0.25
                return self.query_catalogue(catalogue=catalogue,
                                            filtered=filtered,
                                            tmpdir=tmpdir)

        if catalog.as_array() is None:
            #Clean temporary file.
            if (os.path.isfile(tmp_file)):
                os.remove(tmp_file)
            return None

        catalog = catalog.as_array().data

        #If it is PS1, we know what fields we want.
        #Otherwise, we just return everything.
        if (catalogue == "PS1V3OBJECTS"):

            if (filtered):
                #Filter spurious sources/ Objects where the majority of pixels were not masked (QFperfect >=0.9) and likely stars (rmeanpsfmag - rmeankronmag < 0.5)
                catalog = catalog[ (catalog["ng"]>3)*(catalog["nr"]>3)* (catalog["ni"]>3)\
                *(catalog["gQfPerfect"]>=0.95) *(catalog["rQfPerfect"]>=0.95)*(catalog["iQfPerfect"]>=0.95) * (catalog["rMeanPSFMag"] - catalog["rMeanKronMag"] < 0.5)]

            newcat = np.zeros(len(catalog), dtype=[("ra", np.double), ("dec", np.double), ("objid", np.int64), ("mag", np.float64), \
                ("g", np.float64), ("r", np.float64), ("i", np.float64), ("z", np.float64), ("y", np.float64), \
                ("Err_g", np.float64), ("Err_r", np.float64), ("Err_i", np.float64), ("Err_z", np.float64), ("Err_y", np.float64), ("distance", np.double)])
            newcat["objid"] = catalog["objID"]
            newcat["ra"] = catalog["RAmean"]
            newcat["dec"] = catalog["DECmean"]
            newcat["mag"] = catalog["rMeanPSFMag"]
            newcat["g"] = catalog["gMeanPSFMag"]
            newcat["r"] = catalog["rMeanPSFMag"]
            newcat["i"] = catalog["iMeanPSFMag"]
            newcat["z"] = catalog["zMeanPSFMag"]
            newcat["y"] = catalog["yMeanPSFMag"]
            newcat["Err_g"] = catalog["gMeanPSFMagErr"]
            newcat["Err_r"] = catalog["rMeanPSFMagErr"]
            newcat["Err_i"] = catalog["iMeanPSFMagErr"]
            newcat["Err_z"] = catalog["zMeanPSFMagErr"]
            newcat["Err_y"] = catalog["yMeanPSFMagErr"]
            newcat["distance"] = catalog["distance"]
        else:
            newcat = catalog

        #Clean temporary file.
        if (os.path.isfile(tmp_file)):
            os.remove(tmp_file)

        return newcat
Example #33
def vo_get(ra,
           dec,
           ang_size,
           proj_opt='ZEA',
           download_dir=None,
           vo_host='gleam-vo.icrar.org',
           freq=[],
           clobber=True,
           file_name_func=None,
           alter_cmd=None,
           **kwargs):
    """
    ra, dec,
    ang_size:	    Position and angular size in degrees (float)

    download_dir:	Directory where images will be saved to (String).
			        Leaving it None (default) will not download any images

    proj_opt:   	Legitimate values (String):
                	'ZEA'   (default)
                	'ZEA_regrid'
                	'SIN'

    freq:       	A list of frequencies, e.g. ['223-231' '216-223']
                	An empty list means ALL

    clobber:		Overwrite existing images? (Boolean)

    file_name_func:
                	An optional function to create file name as you like.
                	Leaving it None will use the default "create_filename()"
    """
    if (ang_size > 5.0):
        raise GleamClientException("Angular size %.1f > 5.0 (degrees)"\
         % ang_size)

    if (download_dir and (not os.path.exists(download_dir))):
        raise GleamClientException("Invalid download dir: {0}"\
              .format(download_dir))

    if (not proj_opt in PROJ_OPTS):
        raise GleamClientException("Invalid projection: '{0}'."\
              " Should be one of {1}"\
              .format(proj_opt, PROJ_OPTS))

    url = VO_URL.format(vo_host)
    pos_p = 'POS=%s' % quote('{0},{1}'.format(ra, dec))
    proj_opt_p = 'proj_opt=%s' % proj_opt
    size_p = 'SIZE=%f' % (float(ang_size))
    url += '&'.join([pos_p, proj_opt_p, size_p])
    print(url)

    u = urlopen(url, timeout=200)
    warnings.simplefilter("ignore")
    if (2 == python_ver):
        try:
            tbl = parse_single_table(u.fp).array
            #print(tbl)
        except IndexError as ierr:
            raise GleamClientException('No results in the VO query: %s' %
                                       str(ierr))
    elif (3 == python_ver):
        buf = []
        while True:
            buff = u.read(1024)
            if not buff:
                break
            buf.append(buff.decode("utf-8"))
        vo_table = ''.join(buf)
        #print(vo_table)
        fp = BytesIO(vo_table.encode('utf-8'))
        tbl = parse_single_table(fp).array
        #print(tbl)
    else:
        raise Exception('Unknown Python version %d' % python_ver)
    warnings.simplefilter("default")
    ignore_freq = len(freq) == 0
    c = 0
    if (len(kwargs) > 0):
        tail = '&'.join(['{0}={1}'.format(k, v) for k, v in kwargs.items()])
    else:
        tail = None
    for row in tbl:
        r_freq = row[0]
        r_url = row[1]
        if (3 == python_ver):
            r_freq = r_freq.decode("utf-8")
            r_url = r_url.decode("utf-8")
        if (ignore_freq or r_freq in freq):
            if (tail):
                r_url += '&%s' % tail
            if (alter_cmd is not None and len(alter_cmd) > 0):
                r_url = r_url.replace('GLEAMCUTOUT', alter_cmd)
            if (download_dir):
                download_file(r_url,
                              ra,
                              dec,
                              ang_size,
                              r_freq,
                              download_dir,
                              clobber=clobber,
                              file_name_func=file_name_func)
            else:
                print(r_freq, r_url)
            c += 1
    if (c == 0):
        warnings.warn("No results from the VO query")
Example #34
def cmdplot(clustname='', magshift=0, colshift=0):

    path = 'Project_clusters/'

    #read in target cluster
    filename = path + clustname + ' 20arcmin-result.vot'
    table = parse_single_table(filename)
    cldat = table.array

    #read in reference cluster (M 45, the Pleiades)
    filename = path + 'M 45 20arcmin-result.vot'
    table = parse_single_table(filename)
    refcldat = table.array

    #read in pleiades
    #reffilename = path + 'pleiadesdata.txt'
    #refcldat = ascii.read(reffilename)

    #set plotting parameters
    plotsize_single = (9, 7)
    params = {
        'backend': 'pdf',
        'axes.labelsize': 18,
        'font.size': 18,
        'legend.fontsize': 12,
        'xtick.labelsize': 14,
        'ytick.labelsize': 14,
        #'lines.markeredgecolor'  : 'k',
        #'figure.titlesize': 20,
        'mathtext.fontset': 'cm',
        'mathtext.rm': 'serif',
        #'text.usetex': True,
        'figure.figsize': plotsize_single
    }
    plt.rcParams.update(params)

    #apply the magnitude offset
    Vshift = cldat['phot_rp_mean_mag'] + magshift
    BVshift = cldat['bp_rp'] + colshift

    #plot clusters
    plt.plot(BVshift, Vshift, 'ko', markersize=3, alpha=0.8, label=clustname)
    plt.plot(refcldat['bp_rp'],
             refcldat['phot_rp_mean_mag'],
             'ro',
             markersize=3,
             alpha=0.8,
             label='Pleiades')
    plt.ylim(18, 2)
    plt.xlim(-0.5, 3.5)
    plt.xlabel('GBP-GRP')
    plt.ylabel('GRP')
    plt.title(clustname)
    plt.legend(loc='upper left')

    s = 'Magnitude Shift =  %.2f' % (magshift)
    scol = 'Color Shift =  %.2f' % (colshift)

    plt.text(1.5, 4, s, horizontalalignment='left', fontsize=20)
    plt.text(1.5, 3, scol, horizontalalignment='left', fontsize=20)

    plt.show()
Example #35
import numpy as np
from sklearn.linear_model import Lasso
import matplotlib.pyplot as plt
from astropy.io.votable import parse_single_table
from sklearn.neighbors import KNeighborsRegressor


def find_error(y_true, y_pred):
    error = np.median(np.abs((y_true - y_pred) / (1 + y_true)))
    return error


"""""" """
No test set
""" """"""
table = parse_single_table("Tables/PhotoZFileA.vot")
data = table.array

X = np.empty([0, len(data)])
features = ['mag_r', 'u-g', 'g-r', 'r-i', 'i-z']
for feat in features:
    X = np.append(X, [data[feat]], axis=0)
X = X.T
y_true = data['z_spec']
errors = np.array([])

alphas = np.arange(0, 1, .0001)
for alpha in alphas:
    model = Lasso(alpha=alpha)
    model.fit(X, y_true)
    y_pred = model.predict(X)
    errors = np.append(errors, find_error(y_true, y_pred))
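
A possible continuation (not in the original snippet): pick the alpha with the smallest median error and plot the error curve, reusing the alphas and errors arrays built above.

best_alpha = alphas[np.argmin(errors)]
print("best alpha: %.4f, error: %.4f" % (best_alpha, errors.min()))

plt.plot(alphas, errors)
plt.xlabel("alpha")
plt.ylabel("median normalized error")
plt.show()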
Example #36
0
            plt.xticks(fontsize=20)
            plt.yticks(fontsize=20)
            # plt.title('sample of J-H color vs the Y-J color for 100,000 stars as 2D distribution',fontsize=40)
            plt.show()
            plt.close()
        con.commit()


# createandfilltables()

givenfield = 3
doqueries(givenfield, 6)

tableA = parse_single_table(
    '/home/hartsuiker/Documents/dbdm/DDM2017/FinalProject/Q2/Tables/PhotoZFileA.vot'
)
arrayA = np.array([
    tableA.array['mag_r'], tableA.array['u-g'], tableA.array['g-r'],
    tableA.array['r-i'], tableA.array['i-z'], tableA.array['z_spec']
]).T

g = sns.pairplot(
    pd.DataFrame(arrayA[:500, 0:5],
                 columns=('mag_r', 'u-g', 'g-r', 'r-i', 'i-z')))
plt.show()
plt.close()


def linearregressiontrain():
    regr = LinearRegression()
Example #37
0
    def from_registry(cls, registry_url, timeout=60, **kwargs):
        """
        Create a database of VO services from VO registry URL.

        This is described in detail in :ref:`vo-sec-validator-build-db`,
        except for the ``validate_xxx`` keys that are added by the
        validator itself.

        Parameters
        ----------
        registry_url : str
            URL of VO registry that returns a VO Table.
            For example, see
            ``astroquery.vo_conesearch.validator.conf.cs_mstr_list``.
            Pedantic is automatically set to `False` for parsing.

        timeout : number
            Temporarily set ``astropy.utils.data.conf.remote_timeout``
            to this value to avoid time out error while reading the
            entire registry.

        kwargs : dict
            Keywords accepted by
            :func:`~astropy.utils.data.get_readable_fileobj`.

        Returns
        -------
        db : `VOSDatabase`
            Database from given registry.

        Raises
        ------
        VOSError
            Invalid VO registry.

        """
        # Download registry as VO table
        with data_conf.set_temp('remote_timeout', timeout):
            with get_readable_fileobj(registry_url, **kwargs) as fd:
                tab_all = parse_single_table(fd, pedantic=False)

        # Registry must have these fields
        compulsory_fields = ['res_title', 'access_url']
        cat_fields = tab_all.array.dtype.names
        for field in compulsory_fields:
            if field not in cat_fields:  # pragma: no cover
                raise VOSError('"{0}" is missing from registry.'.format(field))

        title_counter = defaultdict(int)
        title_fmt = '{0} {1}'
        db = cls.create_empty()

        # Each row in the table becomes a catalog
        for arr in tab_all.array.data:
            cur_cat = {}
            cur_key = ''

            # Process each field and build the catalog.
            # Catalog is completely built before being thrown out
            # because codes need less changes should we decide to
            # allow duplicate URLs in the future.
            for field in cat_fields:

                # For primary key, a number needs to be appended to the title
                # because registry can have multiple entries with the same
                # title but different URLs.
                if field == 'res_title':
                    cur_title = arr['res_title']
                    title_counter[cur_title] += 1  # Starts with 1

                    if isinstance(cur_title, bytes):  # pragma: py3
                        cur_key = title_fmt.format(cur_title.decode('utf-8'),
                                                   title_counter[cur_title])
                    else:  # pragma: py2
                        cur_key = title_fmt.format(cur_title,
                                                   title_counter[cur_title])

                # Special handling of title and access URL,
                # otherwise no change.
                if field == 'access_url':
                    s = unescape_all(arr['access_url'])
                    if isinstance(s, six.binary_type):
                        s = s.decode('utf-8')
                    cur_cat['url'] = s
                elif field == 'res_title':
                    cur_cat['title'] = arr[field]
                else:
                    cur_cat[field] = arr[field]

            # New field to track duplicate access URLs.
            cur_cat['duplicatesIgnored'] = 0

            # Add catalog to database, unless duplicate access URL exists.
            # In that case, the entry is thrown out and the associated
            # counter is updated.
            dup_keys = db._url_keys[cur_cat['url']]
            if len(dup_keys) < 1:
                db.add_catalog(
                    cur_key, VOSCatalog(cur_cat), allow_duplicate_url=False)
            else:
                db._catalogs[dup_keys[0]]['duplicatesIgnored'] += 1
                warnings.warn(
                    '{0} is thrown out because it has same access URL as '
                    '{1}.'.format(cur_key, dup_keys[0]), AstropyUserWarning)

        return db
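
A usage sketch; the registry URL below is a placeholder, and list_catalogs() is assumed to be the usual VOSDatabase accessor.

registry_url = 'https://example.org/registry/conesearch.xml'  # placeholder
db = VOSDatabase.from_registry(registry_url, timeout=120)
print(db.list_catalogs()[:5])  # assumed accessor for the stored catalog keys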
Example #38
0
def download_filter(
    filter_id: str,
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[str]]:
    """
    Function for downloading filter profile data
    from the SVO Filter Profile Service.

    Parameters
    ----------
    filter_id : str
        Filter name as listed on the website of the SVO
        Filter Profile Service (see
        http://svo2.cab.inta-csic.es/svo/theory/fps/).

    Returns
    -------
    np.ndarray
        Wavelength (um).
    np.ndarray
        Fractional transmission.
    str
        Detector type ('energy' or 'photon').
    """

    if filter_id == "Magellan/VisAO.rp":
        url = "https://xwcl.science/magao/visao/VisAO_rp_filter_curve.dat"
        urllib.request.urlretrieve(url, "VisAO_rp_filter_curve.dat")

        wavelength, transmission, _, _ = np.loadtxt(
            "VisAO_rp_filter_curve.dat", unpack=True)

        # Not sure if energy- or photon-counting detector
        det_type = "photon"

        os.remove("VisAO_rp_filter_curve.dat")

    elif filter_id == "Magellan/VisAO.ip":
        url = "https://xwcl.science/magao/visao/VisAO_ip_filter_curve.dat"
        urllib.request.urlretrieve(url, "VisAO_ip_filter_curve.dat")

        wavelength, transmission, _, _ = np.loadtxt(
            "VisAO_ip_filter_curve.dat", unpack=True)

        # Not sure if energy- or photon-counting detector
        det_type = "photon"

        os.remove("VisAO_ip_filter_curve.dat")

    elif filter_id == "Magellan/VisAO.zp":
        url = "https://xwcl.science/magao/visao/VisAO_zp_filter_curve.dat"
        urllib.request.urlretrieve(url, "VisAO_zp_filter_curve.dat")

        wavelength, transmission, _, _ = np.loadtxt(
            "VisAO_zp_filter_curve.dat", unpack=True)

        # Not sure if energy- or photon-counting detector
        det_type = "photon"

        os.remove("VisAO_zp_filter_curve.dat")

    elif filter_id == "Keck/NIRC2.NB_4.05":
        # The filter profile of Br_alpha has been digitized from
        # https://www2.keck.hawaii.edu/inst/nirc2/filters.html

        url = "https://home.strw.leidenuniv.nl/~stolker/species/Keck_NIRC2.NB_4.05.dat"
        urllib.request.urlretrieve(url, "Keck_NIRC2.NB_4.05.dat")

        wavelength, transmission = np.loadtxt("Keck_NIRC2.NB_4.05.dat",
                                              unpack=True)

        # Not sure if energy- or photon-counting detector
        det_type = "photon"

        os.remove("Keck_NIRC2.NB_4.05.dat")

    elif filter_id in ["LCO/VisAO.Ys", "Magellan/VisAO.Ys"]:
        url = "https://xwcl.science/magao/visao/VisAO_Ys_filter_curve.dat"
        urllib.request.urlretrieve(url, "VisAO_Ys_filter_curve.dat")

        wavelength, transmission, _, _ = np.loadtxt(
            "VisAO_Ys_filter_curve.dat", unpack=True)

        # Remove wavelengths with zero transmission
        wavelength = wavelength[:-7]
        transmission = transmission[:-7]

        # Not sure if energy- or photon-counting detector
        det_type = "photon"

        os.remove("VisAO_Ys_filter_curve.dat")

    elif filter_id == "ALMA/band6":
        url = "https://home.strw.leidenuniv.nl/~stolker/species/alma_band6.dat"
        urllib.request.urlretrieve(url, "alma_band6.dat")

        wavelength, transmission = np.loadtxt("alma_band6.dat", unpack=True)

        det_type = "photon"

        os.remove("alma_band6.dat")

    elif filter_id == "ALMA/band7":
        url = "https://home.strw.leidenuniv.nl/~stolker/species/alma_band7.dat"
        urllib.request.urlretrieve(url, "alma_band7.dat")

        wavelength, transmission = np.loadtxt("alma_band7.dat", unpack=True)

        det_type = "photon"

        os.remove("alma_band7.dat")

    else:
        url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?ID=" + filter_id
        urllib.request.urlretrieve(url, "filter.xml")

        try:
            table = parse_single_table("filter.xml")

            wavelength = table.array["Wavelength"]
            transmission = table.array["Transmission"]

        except IndexError:
            wavelength = None
            transmission = None
            det_type = None

            warnings.warn(f"Filter '{filter_id}' is not available "
                          f"on the SVO Filter ProfileService.")

        except:
            os.remove("filter.xml")

            raise ValueError(
                f"The filter data of '{filter_id}' could not "
                f"be downloaded. Perhaps the website of the "
                f"SVO Filter Profile Service (http://svo2.cab."
                f"inta-csic.es/svo/theory/fps/) is not available?")

        if transmission is not None:
            det_type = table.get_field_by_id("DetectorType").value

            # For backward compatibility
            if not isinstance(det_type, str):
                det_type = det_type.decode("utf-8")

            if int(det_type) == 0:
                det_type = "energy"

            elif int(det_type) == 1:
                det_type = "photon"

            else:
                # Warn before overwriting det_type so the message
                # reports the unrecognized value, not "photon"
                warnings.warn(f"Detector type ({det_type}) not "
                              f"recognized. Setting detector "
                              f"type to photon-counting detector.")

                det_type = "photon"

            wavelength *= 1e-4  # (um)

        os.remove("filter.xml")

    if wavelength is not None:
        indices = []

        for i in range(transmission.size):
            if i == 0 and transmission[i] == 0.0 and transmission[i +
                                                                  1] == 0.0:
                indices.append(i)

            elif (i == transmission.size - 1 and transmission[i - 1] == 0.0
                  and transmission[i] == 0.0):
                indices.append(i)

            elif (transmission[i - 1] == 0.0 and transmission[i] == 0.0
                  and transmission[i + 1] == 0.0):
                indices.append(i)

        wavelength = np.delete(wavelength, indices)
        transmission = np.delete(transmission, indices)

        if np.amin(transmission) < 0.0:
            warnings.warn(f"The minimum transmission value of {filter_id} is "
                          f"smaller than zero ({np.amin(transmission):.2e}). "
                          f"Wavelengths with negative transmission "
                          f"values will be removed.")

            indices = []

            for i, item in enumerate(transmission):
                if item > 0.0:
                    indices.append(i)

            wavelength = wavelength[indices]
            transmission = transmission[indices]

    return wavelength, transmission, det_type
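
A usage sketch with a real SVO identifier (2MASS/2MASS.Ks); the printout is illustrative.

wavelength, transmission, det_type = download_filter("2MASS/2MASS.Ks")

if wavelength is not None:
    print(f"{wavelength.size} points, "
          f"{wavelength.min():.2f}-{wavelength.max():.2f} um, "
          f"detector type: {det_type}")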
Example #39
0
# %%
if(source == 'ESA'):
    tables = Gaia.load_tables(only_names=True)
    job = Gaia.launch_job_async("SELECT * FROM gaiadr2.gaia_source \
                                WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec),CIRCLE('ICRS', 56.75, 24.1167, 180))=1 \
                                AND phot_g_mean_mag<11.0 \
                                AND parallax IS NOT NULL \
                                AND pmra IS NOT NULL \
                                AND pmdec IS NOT NULL \
                                AND phot_g_mean_mag IS NOT NULL \
                                AND phot_bp_mean_mag IS NOT NULL \
                                AND phot_rp_mean_mag IS NOT NULL \
                                ;",dump_to_file=True)
    data = job.get_results()
else:
    table = parse_single_table(source+'.vot')        
    data = table.array
# %%
c              = np.asarray(data['bp_rp'])
G              = np.asarray(data['phot_g_mean_mag'])
BP             = np.asarray(data['phot_bp_mean_mag'])
RP             = np.asarray(data['phot_rp_mean_mag'])
ra             = np.asarray(data['ra'])
ra_error       = np.asarray(data['ra_error'])
dec            = np.asarray(data['dec'])
dec_error      = np.asarray(data['dec_error'])
parallax       = np.asarray(data['parallax'])
parallax_error = np.asarray(data['parallax_error'])
pmra           = np.asarray(data['pmra'])
pmra_error     = np.asarray(data['pmra_error'])
pmdec          = np.asarray(data['pmdec'])
Example #40
0
def conversionVOTable(archivosdat, flag):
    try:
        os.makedirs(directorioTransformadosVOTable + flag)

        for i in range(len(archivosdat)):

            nombre = archivosdat[i][32:]
            nombreFits = nombre.replace('.dat', '')
            nombreVOTable = nombre.replace('.dat', '')

            fichero_fits = directorioTransformadosFits + flag + "/" + nombreFicheros + nombreFits + '.fits'
            votable_output = directorioTransformadosVOTable + flag + "/" + nombreFicheros + nombreVOTable + '.vot'
            votable_output2 = directorioTransformadosVOTable + flag + "/" + nombreFicheros + nombreVOTable + '_2.vot'

            fichero = nombreFicheros + nombreFits + '.fits'

            t = tb.read(fichero_fits, 2)
            t2 = tb.read(fichero_fits, 3)
            os.remove(fichero_fits)

            votable = from_table(t[0:1])
            votable2 = from_table(t2[0:1])

            writeto(votable, votable_output)
            writeto(votable2, votable_output2)

            tabla2 = parse_single_table(votable_output2)
            os.remove(votable_output2)

            votable = parse(votable_output)
            resource = votable.resources[0]
            resource.description = "Fichero " + fichero + " " + DescripcionVOTable
            resource.tables.append(tabla2)

            param = Param(votable,
                          name="TITLE",
                          datatype="char",
                          arraysize=str(len(fichero)),
                          value=fichero)
            param.description = "nombre del fichero"
            resource.params.append(param)

            param = Param(votable,
                          name="DATE",
                          datatype="char",
                          arraysize=str(len(diaExtraido)),
                          value=diaExtraido)
            param.description = "fecha de la deteccion"
            resource.params.append(param)

            for n in range(len(totalValores)):
                if (totalValores[n].isdigit()
                        or (totalValores[n].startswith('-')
                            and totalValores[n][1:].isdigit())):
                    param = Param(votable,
                                  name=totalCabeceras[n],
                                  datatype="int",
                                  value=totalValores[n])
                    param.description = totalDescripciones[n]
                    resource.params.append(param)
                elif (totalValores[n] == "True" or totalValores[n] == "False"):
                    param = Param(votable,
                                  name=totalCabeceras[n],
                                  datatype="boolean",
                                  value=totalValores[n])
                    param.description = totalDescripciones[n]
                    resource.params.append(param)
                else:
                    try:
                        # raises ValueError if the value is not numeric
                        float(totalValores[n])
                        param = Param(votable,
                                      name=totalCabeceras[n],
                                      datatype="float",
                                      value=totalValores[n])
                        param.description = totalDescripciones[n]
                        resource.params.append(param)
                    except ValueError:
                        if (totalCabeceras[n][0:7] == "COMMENT"
                                or totalCabeceras[n][0:7] == "HISTORY"):
                            info = Info(name=totalCabeceras[n][0:7],
                                        value=totalValores[n])
                            resource.infos.append(info)
                        else:
                            param = Param(votable,
                                          name=totalCabeceras[n],
                                          datatype="char",
                                          arraysize=str(len(totalValores[n])),
                                          value=totalValores[n])
                            param.description = totalDescripciones[n]
                            resource.params.append(param)

            votable.to_xml(votable_output)
            stri = '    <FITS extnum="2">\n     <STREAM encoding="gzip" href="' + enlaces[
                i] + '"/>\n    </FITS>\n'
            stri2 = '    <FITS extnum="3">\n     <STREAM encoding="gzip" href="' + enlaces[
                i] + '"/>\n    </FITS>\n'

            f = open(votable_output, "r")
            leido = f.readlines()
            f.close()
            os.remove(votable_output)

            hayIni = 0
            hayFin = 0
            for n in range(len(leido)):
                if (leido[n][4:8] == "DATA"):
                    if (hayIni == 0):
                        Ini = n + 1
                        hayIni = 1
                    else:
                        Ini2 = n + 1
                if (leido[n][5:9] == "DATA"):
                    if (hayFin == 0):
                        Fin = n
                        hayFin = 1
                    else:
                        Fin2 = n

            parte1 = leido[:Ini]
            parte2 = leido[Fin:Ini2]
            parte3 = leido[Fin2:]

            file2 = open(votable_output, "w")

            for p1 in range(len(parte1)):
                if (parte1[p1][3:10] == "TABLE n"):
                    file2.write("  <TABLE nrows=\"" + str(len(t)) + "\">" +
                                "\n")
                else:
                    file2.write(parte1[p1])

            file2.write(stri)

            for p2 in range(len(parte2)):
                if (parte2[p2][3:10] == "TABLE n"):
                    file2.write("  <TABLE nrows=\"" + str(len(t2)) + "\">" +
                                "\n")
                else:
                    file2.write(parte2[p2])

            file2.write(stri2)

            for p3 in range(len(parte3)):
                file2.write(parte3[p3])

            file2.close()
    except:
        flogs.write("LOG: ERROR en la conversion a  VOTable de los " + flag +
                    "\n")
        flogs.close()
        shutil.rmtree(directorio)
        sys.exit(1)
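
A usage sketch; conversionVOTable reads several module-level globals (output directories, header/value lists, enlaces), so the input listing and flag value below are placeholders.

archivosdat = glob.glob('datos/entrada/*.dat')  # placeholder; requires `import glob`
conversionVOTable(archivosdat, 'calibrados')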
Example #41
0
    def from_registry(cls, registry_url, timeout=60, **kwargs):
        """
        Create a database of VO services from VO registry URL.

        This is described in detail in :ref:`vo-sec-validator-build-db`,
        except for the ``validate_xxx`` keys that are added by the
        validator itself.

        Parameters
        ----------
        registry_url : str
            URL of VO registry that returns a VO Table.
            For example, see
            ``astroquery.vo_conesearch.validator.conf.cs_mstr_list``.
            Pedantic is automatically set to `False` for parsing.

        timeout : number
            Temporarily set ``astropy.utils.data.conf.remote_timeout``
            to this value to avoid time out error while reading the
            entire registry.

        kwargs : dict
            Keywords accepted by
            :func:`~astropy.utils.data.get_readable_fileobj`.

        Returns
        -------
        db : `VOSDatabase`
            Database from given registry.

        Raises
        ------
        VOSError
            Invalid VO registry.

        """
        # Download registry as VO table
        with data_conf.set_temp('remote_timeout', timeout):
            with get_readable_fileobj(registry_url, **kwargs) as fd:
                tab_all = parse_single_table(fd, pedantic=False)

        # Registry must have these fields
        compulsory_fields = ['title', 'accessURL']
        cat_fields = tab_all.array.dtype.names
        for field in compulsory_fields:
            if field not in cat_fields:  # pragma: no cover
                raise VOSError('"{0}" is missing from registry.'.format(field))

        title_counter = defaultdict(int)
        title_fmt = '{0} {1}'
        db = cls.create_empty()

        # Each row in the table becomes a catalog
        for arr in tab_all.array.data:
            cur_cat = {}
            cur_key = ''

            # Process each field and build the catalog.
            # Catalog is completely built before being thrown out
            # because codes need less changes should we decide to
            # allow duplicate URLs in the future.
            for field in cat_fields:

                # For primary key, a number needs to be appended to the title
                # because registry can have multiple entries with the same
                # title but different URLs.
                if field == 'title':
                    cur_title = arr['title']
                    title_counter[cur_title] += 1  # Starts with 1

                    if isinstance(cur_title, bytes):  # pragma: py3
                        cur_key = title_fmt.format(cur_title.decode('utf-8'),
                                                   title_counter[cur_title])
                    else:  # pragma: py2
                        cur_key = title_fmt.format(cur_title,
                                                   title_counter[cur_title])

                # Special handling of access URL, otherwise no change.
                if field == 'accessURL':
                    s = unescape_all(arr['accessURL'])
                    if isinstance(s, six.binary_type):
                        s = s.decode('utf-8')
                    cur_cat['url'] = s
                else:
                    cur_cat[field] = arr[field]

            # New field to track duplicate access URLs.
            cur_cat['duplicatesIgnored'] = 0

            # Add catalog to database, unless duplicate access URL exists.
            # In that case, the entry is thrown out and the associated
            # counter is updated.
            dup_keys = db._url_keys[cur_cat['url']]
            if len(dup_keys) < 1:
                db.add_catalog(cur_key,
                               VOSCatalog(cur_cat),
                               allow_duplicate_url=False)
            else:
                db._catalogs[dup_keys[0]]['duplicatesIgnored'] += 1
                warnings.warn(
                    '{0} is thrown out because it has same access URL as '
                    '{1}.'.format(cur_key, dup_keys[0]), AstropyUserWarning)

        return db
Example #42
0
    'Rad': 0.3,
    'OUT': 'vot',
    'DB': "photcat",
    'SHORT': 'long'
}

data = urllib.urlencode(values)
data = data.encode('ascii')  # data should be bytes
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
the_page = response.read()

xml = the_page.split("(right-mouse-click and save as to <a href=")[1].split(
    ">download")[0]

table = parse_single_table(xml)

mag = table.array['Mag']
magerr = table.array['Magerr']
mjd = table.array['ObsTime']

plt.figure()
plt.title('Light Curves')
plt.xlabel('Time (MJD)')
plt.ylabel('Magnitude')
plt.errorbar(mjd,
             mag,
             yerr=magerr,
             fmt='o',
             ecolor='r',
             markersize=2,
Example #43
0
def make_pix_models(
    fname,
    ra1="ra",
    dec1="dec",
    ra2="RAJ2000",
    dec2="DEJ2000",
    fitsname=None,
    plots=False,
    smooth=300.0,
    sigcol=None,
    noisecol=None,
    SNR=10,
    latex=False,
    max_sources=None,
):
    """
    Read a fits file which contains the crossmatching results for two catalogues.
    Catalogue 1 is the source catalogue (positions that need to be corrected)
    Catalogue 2 is the reference catalogue (correct positions)
    return rbf models for the ra/dec corrections
    :param fname: filename for the crossmatched catalogue
    :param ra1: column name for the ra degrees in catalogue 1 (source)
    :param dec1: column name for the dec degrees in catalogue 1 (source)
    :param ra2: column name for the ra degrees in catalogue 2 (reference)
    :param dec2: column name for the dec degrees in catalogue 2 (reference)
    :param fitsname: fitsimage upon which the pixel models will be based
    :param plots: True = Make plots
    :param smooth: smoothing radius (in pixels) for the RBF function
    :param max_sources: Maximum number of sources to include in the construction of the warping model (defaults to None, use all sources)
    :return: (dxmodel, dymodel)
    """
    filename, file_extension = os.path.splitext(fname)
    if file_extension == ".fits":
        raw_data = fits.open(fname)[1].data
    elif file_extension == ".vot":
        raw_data = parse_single_table(fname).array

    # get the wcs
    hdr = fits.getheader(fitsname)
    imwcs = wcs.WCS(hdr, naxis=2)

    raw_nsrcs = len(raw_data)

    # filter the data to only include SNR>10 sources
    if sigcol is not None and noisecol is not None:
        flux_mask = np.where(raw_data[sigcol] / raw_data[noisecol] > SNR)
        data = raw_data[flux_mask]
    else:
        data = raw_data

    if max_sources is not None:
        if sigcol is not None:
            # argsort goes in ascending order, so select from the end
            sort_idx = np.squeeze(np.argsort(data[sigcol]))[-max_sources:]
            print(
                "Selecting the {0} brightest of {1} sources...".format(
                    max_sources, raw_nsrcs
                )
            )
        else:
            if max_sources > len(data):
                print("Maximum number of sources larger than number of available. ")
                max_sources = len(data) - 1
            # This really should not be used...
            sort_idx = np.random.choice(
                np.arange(len(data)), size=max_sources, replace=False
            )
            print("Randomly selecting {0} sources...".format(max_sources))

        data = data[sort_idx]

    print(
        "Selected {0} of {1} available sources to construct the pixel offset model".format(
            len(data), raw_nsrcs
        ),
    )

    start = time()

    cat_xy = imwcs.all_world2pix(list(zip(data[ra1], data[dec1])), 1)
    ref_xy = imwcs.all_world2pix(list(zip(data[ra2], data[dec2])), 1)

    diff_xy = ref_xy - cat_xy

    global dxmodel
    dxmodel = interpolate.Rbf(
        cat_xy[:, 0], cat_xy[:, 1], diff_xy[:, 0], function="linear", smooth=smooth
    )
    global dymodel
    dymodel = interpolate.Rbf(
        cat_xy[:, 0], cat_xy[:, 1], diff_xy[:, 1], function="linear", smooth=smooth
    )

    print("Model created in {0} seconds".format(time() - start))

    if plots:
        import matplotlib

        # Super-computer-safe
        matplotlib.use("Agg")
        from matplotlib import pyplot
        from matplotlib import gridspec

        # Perceptually uniform cyclic color schemes
        try:
            import seaborn as sns

            cmap = matplotlib.colors.ListedColormap(sns.color_palette("husl", 256))
        except ImportError:
            print("seaborne not detected; using hsv color scheme")
            cmap = "hsv"
        # Attractive serif fonts
        if latex is True:
            if which("latex"):
                try:
                    from matplotlib import rc

                    rc("text", usetex=True)
                    rc("font", **{"family": "serif", "serif": ["serif"]})
                except:
                    print("rc not detected; using sans serif fonts")
            else:
                print("latex not detected; using sans serif fonts")
        xmin, xmax = 0, hdr["NAXIS1"]
        ymin, ymax = 0, hdr["NAXIS2"]

        gx, gy = np.mgrid[
            xmin : xmax : (xmax - xmin) / 50.0, ymin : ymax : (ymax - ymin) / 50.0
        ]
        mdx = dxmodel(np.ravel(gx), np.ravel(gy))
        mdy = dymodel(np.ravel(gx), np.ravel(gy))
        x = cat_xy[:, 0]
        y = cat_xy[:, 1]

        # plot w.r.t. centre of image, in degrees
        try:
            delX = abs(hdr["CD1_1"])
        except:
            delX = abs(hdr["CDELT1"])
        try:
            delY = hdr["CD2_2"]
        except:
            delY = hdr["CDELT2"]
        # shift all co-ordinates and put them in degrees
        x -= hdr["NAXIS1"] / 2
        gx -= hdr["NAXIS1"] / 2
        xmin -= hdr["NAXIS1"] / 2
        xmax -= hdr["NAXIS1"] / 2
        x *= delX
        gx *= delX
        xmin *= delX
        xmax *= delX
        y -= hdr["NAXIS2"] / 2
        gy -= hdr["NAXIS2"] / 2
        ymin -= hdr["NAXIS2"] / 2
        ymax -= hdr["NAXIS2"] / 2
        y *= delY
        gy *= delY
        ymin *= delY
        ymax *= delY
        scale = 1

        dx = diff_xy[:, 0]
        dy = diff_xy[:, 1]

        fig = pyplot.figure(figsize=(12, 6))
        gs = gridspec.GridSpec(100, 100)
        gs.update(hspace=0, wspace=0)
        kwargs = {
            "angles": "xy",
            "scale_units": "xy",
            "scale": scale,
            "cmap": cmap,
            "clim": [-180, 180],
        }
        angles = np.degrees(np.arctan2(dy, dx))
        ax = fig.add_subplot(gs[0:100, 0:48])
        cax = ax.quiver(x, y, dx, dy, angles, **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.set_ylabel("Distance from pointing centre / degrees")
        ax.set_title("Source position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='horizontal')

        ax = fig.add_subplot(gs[0:100, 49:97])
        cax = ax.quiver(gx, gy, mdx, mdy, np.degrees(np.arctan2(mdy, mdx)), **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.tick_params(axis="y", labelleft="off")
        ax.set_title("Model position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='vertical')
        # Color bar
        ax2 = fig.add_subplot(gs[0:100, 98:100])
        cbar3 = pyplot.colorbar(cax, cax=ax2, use_gridspec=True)
        cbar3.set_label("Angle CCW from West / degrees")  # ,labelpad=-75)
        cbar3.ax.yaxis.set_ticks_position("right")

        outname = os.path.splitext(fname)[0] + ".png"
        #        pyplot.show()
        pyplot.savefig(outname, dpi=200)
Example #44
0
    def _parse_result(self, response, verbose=False):
        """
        Parse the result of a `~requests.Response` (from API) or `pyvo.dal.tap.TAPResults` (from TAP) object
        and return an `~astropy.table.Table`

        Parameters
        ----------
        response : `~requests.Response` or `pyvo.dal.tap.TAPResults`
            The response from the server.
        verbose : bool
            Currently has no effect.

        Returns
        -------
        data : `~astropy.table.Table` or `~astropy.table.QTable`
        """

        if isinstance(response, pyvo.dal.tap.TAPResults):
            data = response.to_table()
            # TODO: implement format conversion for TAP return
        else:
            # Extract the decoded body of the response
            text = response.text

            # Raise an exception if anything went wrong
            self._handle_error(text)

            # Parse the requested format to figure out how to parse the returned data.
            fmt = response.requested_format.lower()
            if "ascii" in fmt or "ipac" in fmt:
                data = ascii.read(text,
                                  format="ipac",
                                  fast_reader=False,
                                  converters=CONVERTERS)
            elif "csv" in fmt:
                data = ascii.read(text,
                                  format="csv",
                                  fast_reader=False,
                                  converters=CONVERTERS)
            elif "bar" in fmt or "pipe" in fmt:
                data = ascii.read(text,
                                  fast_reader=False,
                                  delimiter="|",
                                  converters=CONVERTERS)
            elif "xml" in fmt or "table" in fmt:
                data = parse_single_table(io.BytesIO(
                    response.content)).to_table()
            else:
                data = ascii.read(text,
                                  fast_reader=False,
                                  converters=CONVERTERS)

        # Fix any undefined units
        data = self._fix_units(data)

        # For backwards compatibility, add a `sky_coord` column with the coordinates of the object
        # if possible
        if "ra" in data.columns and "dec" in data.columns:
            data["sky_coord"] = SkyCoord(ra=data["ra"],
                                         dec=data["dec"],
                                         unit=u.deg)

        if not data:
            warnings.warn("Query returned no results.", NoResultsWarning)

        return data
Example #45
0
def V(B_V,RP):
    a = 0.1245
    b = 1.0147
    c = 0.1329
    d = -0.0044
    V = RP + a + b*(B_V) + c*(B_V)**2 + d*(B_V)**3
    return V
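
# Quick sanity check with illustrative values (not from the original script):
# B-V = 0.65 and G_RP = 11.2 give V ~= 12.04.
print(V(0.65, 11.2))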

# ---------------------------------------------------------------------
# ---------------------------------------------------------------------



# Read the table from the given file name
data = parse_single_table("alfaPersei-result.vot").array

G = data['phot_g_mean_mag']    # Gaia mean G magnitude
bp_rp = data['bp_rp']          # Gaia colour index
temp = data['teff_val']        # Effective temperature
rp = data['phot_rp_mean_mag']  # G_RP: band centred at 797 nm
d = 1000/data['parallax']      # Distance in parsec (d = 1/parallax)
E = 0.09                       # Reddening value (taken from WEBDA)


# Create empty arrays to be filled with the Johnson-Cousins
# colour indices
b_v = np.zeros(len(G))  # Will hold the colour index          (B-V)
v   = np.zeros(len(G))  # Will hold the visible-band filter   (V)
Mv  = np.zeros(len(G))  # Will hold the absolute magnitude    (Mv)
Example #46
0
 def table(self):
     if self.__table is None:
         self.__table = votable.parse_single_table(BytesIO(self.data.encode('utf8')), pedantic=False).to_table()
     return self.__table
Example #47
0
def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :return:
    """

    # make a directory for this observatory and instrument

    filter_path = os.path.join(get_speclite_filter_path(), to_valid_python_name(observatory))

    if_directory_not_existing_then_make(filter_path)

    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it
    if observatory == 'TwoMASS':
        observatory = '2MASS'

    if not file_existing_and_readable(os.path.join(filter_path,
                                                   "%s-%s.ecsv"%(to_valid_python_name(instrument),
                                                                             to_valid_python_name(ffilter)))) or update:

        url_response = urllib2.urlopen(
            'http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB' % (observatory,
                                                                                           instrument,
                                                                                           ffilter))
        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data['Wavelength'].unit


        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
        # so we add a zero on the boundaries

        if data['Transmission'][0] != 0.:

            w1 = data['Wavelength'][0] * .9
            data.insert_row(0, [w1, 0])

        if data['Transmission'][-1] != 0.:

            w2 = data['Wavelength'][-1]* 1.1
            data.add_row([w2, 0])


        # filter any negative values

        idx = data['Transmission'] < 0
        data['Transmission'][idx] = 0

        # build the transmission; we will force all the wavelengths
        # to Angstroms because sometimes AA is misunderstood

        try:

            transmission = spec_filter.FilterResponse(
                wavelength=data['Wavelength'] * waveunit.to('Angstrom') * u.Angstrom,
                response=data['Transmission'],
                meta=dict(group_name=to_valid_python_name(instrument),
                          band_name=to_valid_python_name(ffilter)))


            # save the filter

            transmission.save(filter_path)

            success = True

        except ValueError:

            success = False

            print('%s:%s:%s has an invalid wave table, SKIPPING'% (observatory, instrument, ffilter))

        return success

    else:

        return True
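
A usage sketch (the observatory is spelled 'TwoMASS' so the function reconverts it to SVO's '2MASS'; the filter choice is illustrative).

success = add_svo_filter_to_speclite('TwoMASS', '2MASS', 'Ks')
print('registered' if success else 'skipped')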
Example #48
0
    def __init__(self,
                 file,
                 wave_units=None,
                 flux_units=None,
                 ext=0,
                 survey=None,
                 name=None,
                 **kwargs):
        """Create a spectrum from an ASCII or FITS file

        Parameters
        ----------
        file: str
            The path to the ascii or FITS file
        wave_units: astropy.units.quantity.Quantity
            The wavelength units
        flux_units: astropy.units.quantity.Quantity
            The flux units
        ext: int, str
            The FITS extension name or index
        survey: str (optional)
            The name of the survey
        """
        # Read the fits data...
        if file.endswith('.fits'):

            raw = fits.getdata(file, ext=ext)

            # Check if it's an SDSS spectrum
            if survey == 'SDSS':
                head = fits.getheader(file)
                flux_units = 1E-17 * q.erg / q.s / q.cm**2 / q.AA
                wave_units = q.AA
                log_w = head['COEFF0'] + head['COEFF1'] * np.arange(
                    len(raw.flux))
                data = [10**log_w, raw.flux, raw.ivar]

            # Check if it is a recarray
            elif isinstance(raw, fits.fitsrec.FITS_rec):
                data = raw['WAVELENGTH'], raw['FLUX'], raw['ERROR']

            # Otherwise use the generic FITS reader
            else:
                data = u.spectrum_from_fits(file, ext=ext)

        # ...or the ascii data...
        elif file.endswith('.txt'):
            data = np.genfromtxt(file, unpack=True)

        # ...or the VO Table
        elif file.endswith('.xml'):
            vot = vo.parse_single_table(file)
            data = np.array([list(i) for i in vot.array]).T

        else:
            raise IOError('The file needs to be ASCII, XML, or FITS.')

        # Apply units
        wave = data[0] * wave_units
        flux = data[1] * flux_units
        if len(data) > 2:
            unc = data[2] * flux_units
        else:
            unc = None

        if name is None:
            name = file

        super().__init__(wave, flux, unc, name=name, **kwargs)
Example #49
0
def sed(epic='211800191',ra='132.884782',dec='17.319834'):
	os.chdir('outputs/')

	urllib.urlretrieve('http://vizier.u-strasbg.fr/viz-bin/sed?-c='+ra+"%2C"+dec+'&-c.rs=1', epic+'_sed.vot')
	print 'http://vizier.u-strasbg.fr/viz-bin/sed?-c='+ra+"%2C"+dec+'&-c.rs=0.005'

	tb = votable.parse_single_table(epic+'_sed.vot')
	data = tb.array
	wav_all = 3e5 * 1e4 / data['_sed_freq'].data #angstrom
	f_all = data['_sed_flux'].data
	unc_all = data['_sed_eflux'].data
	filters = data['_sed_filter'].data

	filter_dict = {'2MASS:Ks':'2MASS Ks','2MASS:J':'2MASS J','2MASS:H':'2MASS H','WISE:W1':'WISE-1','WISE:W2':'WISE-2','SDSS:u':'SDSS u','SDSS:g':'SDSS g',\
	'SDSS:r':'SDSS r','SDSS:i':'SDSS i','SDSS:z':'SDSS z'}

	c = coord.SkyCoord(float(ra)*u.deg,float(dec)*u.deg,frame='icrs')
	tb = IrsaDust.get_extinction_table(c)
	filters2 = tb['Filter_name']
	allA = tb['A_SandF']
	A = []
	f_ob_orig = []
	wav_ob = []
	unc = []

	for f in filters:
		if f in filter_dict.keys():
			filtmatch = filter_dict[f]
			ind = where(filters2==filtmatch)[0]
			A.append(mean(allA[ind]))
			ind = where(filters==f)[0]
			f_ob_orig.append(mean(f_all[ind]))
			wav_ob.append(mean(wav_all[ind]))
			unc.append(mean(unc_all[ind]))

	f_ob_orig = array(f_ob_orig)
	A = array(A)
	wav_ob = array(wav_ob)
	unc = array(unc)
	f_ob = (f_ob_orig*10**(A/2.5))

	metallicity = ['ckp00']
	m = [0.0]
	t = arange(3500,13000,250)
	t2 = arange(14000,50000,1000)
	t = concatenate((t, t2))  # 1-D arrays: join along the only axis

	log_g = ['g20','g25','g30','g35','g40','g45','g50']
	g = arange(2.,5.,0.5)

	best_m =0
	best_t =0
	best_g =0
	best_off =0.0
	chi2_rec = 1e6
	os.chdir('..')

	for im, mval in enumerate(m):
		for it, tval in enumerate(t):
			for ig, gval in enumerate(g):
				#load model
				hdulist = pyfits.open('fits/'+metallicity[im]+'/'+metallicity[im]+'_'+str(tval)+'.fits')
				data = hdulist[1].data
				wmod = data['WAVELENGTH']
				fmod = data[log_g[ig]]*3.34e4*wmod**2

				#fit observations
				f_int = exp( interp(log(wav_ob), log(wmod), log(fmod)) )
				offsets = linspace(log(min(f_ob/f_int)), log(max(f_ob/f_int)), 51)
				for i_off, offset in enumerate(offsets):
					chi2 = sum((f_int*exp(offset)-f_ob)**2)

					print 'chi2=', chi2, mval, tval, gval
					if chi2 < chi2_rec:
						chi2_rec = chi2
						best_m = im
						best_g = ig
						best_t = it
						best_off = offset

	print 'best fit: m=', m[best_m], 'T=', t[best_t], 'log g=', g[best_g]

	hdulist = pyfits.open('fits/'+metallicity[best_m]+'/'+metallicity[best_m]+'_'+str(t[best_t])+'.fits')
	data = hdulist[1].data
	wmod = data['WAVELENGTH']
	fmod = data[log_g[best_g]]*3.34e4*wmod**2
	fmod *= exp(best_off)

	plt.close('all')
	fig = plt.figure(figsize=(8,5))
	plt.plot(wmod/1e4, fmod,label='Castelli & Kurucz model')
	plt.xscale('log')
	plt.plot(wav_ob/1e4, f_ob_orig, lw=0, marker='s', label='Uncorrected',ms=10)
	plt.plot(wav_ob/1e4, f_ob, lw=0, marker='o', label='Corrected for extinction',ms=10)
	plt.xlabel(r'${\rm Wavelength} \ (\mu m)}$',fontsize=18)
	plt.xlim(0.1,max(wmod)/1e4)
	plt.ylabel(r'$F_{\nu} \ {\rm (Jy)}$',fontsize=18)
	plt.legend()
	plt.savefig('outputs/'+epic+'_sed.pdf',dpi=150)
	return t[best_t]
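
A usage sketch with the defaults from the signature; it assumes the outputs/ and fits/ directory layout the function expects.

best_teff = sed(epic='211800191', ra='132.884782', dec='17.319834')
print 'best-fit temperature:', best_teff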
Example #50
0
    def __init__(self,
                 band,
                 filter_directory=pkg_resources.resource_filename(
                     'svo_filters', 'data/filters/'),
                 wl_units=q.um,
                 zp_units=q.erg / q.s / q.cm**2 / q.AA,
                 DELETE=False,
                 guess=False,
                 **kwargs):
        """
        Loads the bandpass data into the Filter object
        
        Parameters
        ----------
        band: str
            The bandpass filename (e.g. 2MASS.J)
        filter_directory: str
            The directory containing the filter files
        wl_units: str, astropy.units.core.PrefixUnit  (optional)
            The wavelength units
        zp_units: str, astropy.units.core.PrefixUnit  (optional)
            The zeropoint flux units
        DELETE: bool
            Delete the given filter
        guess: bool
            Guess the filter if no match
        """
        self.filterID = band

        # If the filter_directory is an array, load it as the filter
        if isinstance(filter_directory, (list, np.ndarray)):

            self.filter_from_array(filter_directory)

        else:

            # Check if TopHat
            if band.lower().replace('-', '').replace(' ', '') == 'tophat':

                # check kwargs for limits
                wl_min = kwargs.get('wl_min')
                wl_max = kwargs.get('wl_max')
                filepath = ''

                if not wl_min and not wl_max:
                    print(
                        "Please provide **{'wl_min','wl_max'} to create top hat filter."
                    )
                    return
                else:
                    # Load the filter
                    self.load_TopHat(wl_min, wl_max,
                                     kwargs.get('n_pixels', 100))

            else:

                # Get list of filters
                files = glob(os.path.join(filter_directory, '*'))
                bands = [os.path.basename(b) for b in files]
                filepath = os.path.join(filter_directory, band)

                if band + '.txt' in bands:

                    filepath = filepath + '.txt'
                    data = np.genfromtxt(filepath, unpack=True)
                    self.filter_from_array(data)

                # If the filter is missing, ask what to do
                elif filepath not in files:

                    if guess:

                        # Guess the band they meant to use, e.g. 'SDSS_z' => 'SDSS.z'
                        band = min(bands,
                                   key=lambda v: len(set(band) ^ set(v)))
                        filepath = os.path.join(filter_directory, band)

                    else:

                        print('Current filters:', ', '.join(bands), '\n')

                        print('No filters match', filepath)
                        dl = input('Would you like me to download it? [y/n] ')

                        if dl.lower() == 'y':

                            # Prompt for new filter
                            print('\nA full list of available filters from the\n'\
                                  'SVO Filter Profile Service can be found at\n'\
                                  'http://svo2.cab.inta-csic.es/theory/fps3/\n')
                            band = input(
                                'Enter the band name to retrieve (e.g. 2MASS/2MASS.J): '
                            )

                            # Download the XML (VOTable) file
                            baseURL = 'http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?ID='
                            filepath = filter_directory + os.path.basename(
                                band)
                            _ = urllib.request.urlretrieve(
                                baseURL + band, filepath)

                            # Print the new filepath
                            print('Band stored as', filepath)

                            # Update the table of filters
                            print('Indexing the master table of filters...')
                            filters(update=True)

                        else:
                            return

                # Try to read filter info
                else:
                    try:

                        # Parse the XML file
                        vot = vo.parse_single_table(filepath)
                        self.rsr = np.array([list(i) for i in vot.array]).T

                        # Parse the filter metadata
                        for p in [str(p).split() for p in vot.params]:

                            # Extract the key/value pairs
                            key = p[1].split('"')[1]
                            val = p[-1].split('"')[1]

                            # Do some formatting
                            if p[2].split('"')[1]=='float'\
                            or p[3].split('"')[1]=='float':
                                val = float(val)

                            else:
                                val = val.replace('b&apos;','')\
                                         .replace('&apos','')\
                                         .replace('&amp;','&')\
                                         .strip(';')

                            # Set the attribute
                            if key != 'Description':
                                setattr(self, key, val)

                        # Create some attributes
                        self.path = filepath
                        self.pixels_per_bin = self.rsr.shape[-1]
                        self.n_pixels = self.rsr.shape[-1]
                        self.n_bins = 1
                        self.raw = self.rsr.copy()
                        self.wl_min = self.WavelengthMin
                        self.wl_max = self.WavelengthMax

                    # If empty, delete XML file
                    except IOError:

                        print('No filter named', band)
                        # if os.path.isfile(filepath):
                        #     os.remove(filepath)

                        return

        # Get the bin centers
        w_cen = np.nanmean(self.rsr[0])
        f_cen = np.nanmean(self.rsr[1])
        self.centers = np.asarray([[w_cen], [f_cen]])

        # Set the wavelength units
        if wl_units:
            self.set_wl_units(wl_units)

        # Set zeropoint flux units
        if zp_units:
            self.set_zp_units(zp_units)

        # Get references
        try:
            self.refs = [self.CalibrationReference.split('=')[-1]]
        except:
            self.refs = []

        # Bin
        if kwargs:
            bwargs = {k:v for k,v in kwargs.items() if k in \
                      inspect.signature(self.bin).parameters.keys()}
            self.bin(**bwargs)
Example #51
0
def sed_from_vizier(vizier_fn,
                    from_file=False,
                    radius=2.0,
                    refine=False,
                    variable=0.0):
    """
    Generate spectral energy distribution file in the format expected 
    for an observed SED from a Vizier generated SED file. Output file
    includes wavelength in microns, flux in janskys, uncertainty in 
    janskys, and a string giving the source of the photometry.
    
    Parameters
    ----------
    vizier_fn: string
        name of object
        alternatively, if from_file is true, this is the
            filename of the votable.
    radius: float
        position matching in arcseconds.
    from_file: boolean
        set to True if using previously generated votable
    refine: boolean
        Set to True to get rid of duplicates, assign missing 
        uncertainties, sorting 
    variable: float
        Minimum percentage value allowed for uncertainty
        e.g. 0.1 would require 10% uncertainties on all measurements.
     
    Returns
    -------
    table: astropy.Table
        VO table returned by the Vizier service.
         
    """

    from astroquery.vizier import Vizier
    import astropy.units as u
    from StringIO import StringIO as BytesIO
    from httplib import HTTPConnection

    from astropy.table import Table

    from astropy.coordinates import SkyCoord
    import numpy as np

    if from_file:
        from astropy.io.votable import parse_single_table
        table = parse_single_table(vizier_fn)

        sed_freq = table.array['sed_freq']  # frequency in Ghz
        sed_flux = table.array['sed_flux']  # flux in Jy
        sed_eflux = table.array['sed_eflux']  # flux error in Jy
        sed_filter = table.array['sed_filter']  # filter for photometry
    else:

        try:
            coords = SkyCoord.from_name(vizier_fn)
        except:
            print 'Object name was not resolved by Simbad. Play again.'
            return False
        pos = np.fromstring(coords.to_string(), dtype=float, sep=' ')
        ra, dec = pos
        target = "{0:f},{1:f}".format(ra, dec)

        # Queue Vizier directly without having to use the web interface
        # Specify the columns:
        #v = Vizier(columns=['_RAJ2000', '_DEJ2000','_sed_freq', '_sed_flux', '_sed_eflux','_sed_filter'])
        #result = v.query_region(vizier_fn, radius=2.0*u.arcsec)
        #print result

        url = "http:///viz-bin/sed?-c={target:s}&-c.rs={radius:f}"
        host = "vizier.u-strasbg.fr"
        port = 80
        path = "/viz-bin/sed?-c={target:s}&-c.rs={radius:f}".format(
            target=target, radius=radius)
        connection = HTTPConnection(host, port)
        connection.request("GET", path)
        response = connection.getresponse()

        table = Table.read(BytesIO(response.read()), format="votable")

        sed_freq = table['sed_freq'].quantity.value
        sed_flux = table['sed_flux'].quantity.value
        sed_eflux = table['sed_eflux'].quantity.value
        sz = sed_flux.shape[0]
        filters = []
        for i in np.arange(sz):
            filters.append(table['sed_filter'][i])
        sed_filter = np.asarray(filters)

    wavelength = 2.99e14 / (sed_freq * 1.0e9)  # wavelength in microns
    flux = sed_flux  # flux in Jy
    uncertainty = sed_eflux  # uncertainty in Jy
    source = sed_filter  # string of source of photometry

    sz = np.shape(wavelength)[0]

    if refine:

        # if uncertainty values don't exist, generate
        # new uncertainty for 3*flux.
        for j in np.arange(sz):
            if np.isnan(uncertainty[j]):
                uncertainty[j] = flux[j] * 3

        # Remove duplicate entries based on filter names.
        # Choose entry with smallest error bar for duplicates.
        filter_set = list(set(source))
        wls = []
        jys = []
        ejys = []
        for filt in filter_set:
            inds = np.where(source == filt)
            filt_eflux = uncertainty[inds]
            picinds = np.where(filt_eflux == np.min(filt_eflux))
            wls.append(wavelength[inds][picinds][0])
            jys.append(flux[inds][picinds][0])
            ejys.append(uncertainty[inds][picinds][0])

        # Sort by increasing wavelength
        sortind = np.argsort(np.asarray(wls))
        wavelength = np.asarray(wls)[sortind]
        flux = np.asarray(jys)[sortind]
        uncertainty = np.asarray(ejys)[sortind]
        source = np.asarray(filter_set)[sortind]

        sz = np.shape(wavelength)[0]

    # Update uncertainties for value of variable
    for j in np.arange(sz):
        if uncertainty[j] < flux[j] * variable:
            uncertainty[j] = flux[j] * variable

    txtfn = 'observed_sed' + vizier_fn.replace(" ", "") + '.txt'
    f = open(txtfn, 'w')
    f.write('#  Wavelength (microns)' + "\t" + 'Flux (Jy)' + "\t" +
            'Uncertainty (Jy)' + "\t" + 'Source' + "\n")

    for j in np.arange(sz):

        f.write(
            str(wavelength[j]) + "\t" + str(flux[j]) + "\t" +
            str(uncertainty[j]) + "\t" + str(source[j]) + "\n")

    f.close()

    return table
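
A usage sketch (the object name is illustrative); with refine=True duplicates are collapsed, and variable=0.1 enforces a 10% error floor.

tab = sed_from_vizier('HD 163296', radius=2.0, refine=True, variable=0.1)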
Example #52
0
def make_pix_models(fname,
                    ra1='ra',
                    dec1='dec',
                    ra2='RAJ2000',
                    dec2='DEJ2000',
                    fitsname=None,
                    plots=False,
                    smooth=300.,
                    sigcol=None,
                    noisecol=None,
                    SNR=10):
    """
    Read a fits file which contains the crossmatching results for two catalogues.
    Catalogue 1 is the source catalogue (positions that need to be corrected)
    Catalogue 2 is the reference catalogue (correct positions)
    return rbf models for the ra/dec corrections
    :param fname: filename for the crossmatched catalogue
    :param ra1: column name for the ra degrees in catalogue 1 (source)
    :param dec1: column name for the dec degrees in catalogue 1 (source)
    :param ra2: column name for the ra degrees in catalogue 2 (reference)
    :param dec2: column name for the dec degrees in catalogue 2 (reference)
    :param fitsname: fitsimage upon which the pixel models will be based
    :param plots: True = Make plots
    :param smooth: smoothing radius (in pixels) for the RBF function
    :return: (dxmodel, dymodel)
    """
    filename, file_extension = os.path.splitext(fname)
    if file_extension == ".fits":
        raw_data = fits.open(fname)[1].data
    elif file_extension == ".vot":
        raw_data = parse_single_table(fname).array
    else:
        raise ValueError("Unsupported catalogue format: " + file_extension)

    # get the wcs
    hdr = fits.getheader(fitsname)
    imwcs = wcs.WCS(hdr, naxis=2)

    # filter the data to only include sources above the SNR threshold
    if sigcol is not None and noisecol is not None:
        flux_mask = np.where(raw_data[sigcol] / raw_data[noisecol] > SNR)
        data = raw_data[flux_mask]
    else:
        data = raw_data

    cat_xy = imwcs.all_world2pix(list(zip(data[ra1], data[dec1])), 1)
    ref_xy = imwcs.all_world2pix(list(zip(data[ra2], data[dec2])), 1)

    diff_xy = ref_xy - cat_xy

    dxmodel = interpolate.Rbf(cat_xy[:, 0],
                              cat_xy[:, 1],
                              diff_xy[:, 0],
                              function='linear',
                              smooth=smooth)
    dymodel = interpolate.Rbf(cat_xy[:, 0],
                              cat_xy[:, 1],
                              diff_xy[:, 1],
                              function='linear',
                              smooth=smooth)

    if plots:
        import matplotlib
        # Super-computer-safe
        matplotlib.use('Agg')
        from matplotlib import pyplot
        from matplotlib import gridspec
        # Perceptually uniform cyclic color schemes
        try:
            import seaborn as sns
            cmap = matplotlib.colors.ListedColormap(
                sns.color_palette("husl", 256))
        except ImportError:
            print("seaborne not detected; using hsv color scheme")
            cmap = 'hsv'
        # Attractive serif fonts
        if which("latex"):
            try:
                from matplotlib import rc
                rc('text', usetex=True)
                rc('font', **{'family': 'serif', 'serif': ['serif']})
            except Exception:
                print("usetex configuration failed; using sans serif fonts")
        else:
            print("latex not detected; using sans serif fonts")
        xmin, xmax = 0, hdr['NAXIS1']
        ymin, ymax = 0, hdr['NAXIS2']

        gx, gy = np.mgrid[xmin:xmax:(xmax - xmin) / 50.,
                          ymin:ymax:(ymax - ymin) / 50.]
        mdx = dxmodel(np.ravel(gx), np.ravel(gy))
        mdy = dymodel(np.ravel(gx), np.ravel(gy))
        x = cat_xy[:, 0]
        y = cat_xy[:, 1]

        # plot w.r.t. centre of image, in degrees
        try:
            delX = abs(hdr['CD1_1'])
        except KeyError:
            delX = abs(hdr['CDELT1'])
        try:
            delY = hdr['CD2_2']
        except KeyError:
            delY = hdr['CDELT2']


        # shift all co-ordinates and put them in degrees
        x -= hdr['NAXIS1'] / 2
        gx -= hdr['NAXIS1'] / 2
        xmin -= hdr['NAXIS1'] / 2
        xmax -= hdr['NAXIS1'] / 2
        x *= delX
        gx *= delX
        xmin *= delX
        xmax *= delX
        y -= hdr['NAXIS2'] / 2
        gy -= hdr['NAXIS2'] / 2
        ymin -= hdr['NAXIS2'] / 2
        ymax -= hdr['NAXIS2'] / 2
        y *= delY
        gy *= delY
        ymin *= delY
        ymax *= delY
        scale = 1

        dx = diff_xy[:, 0]
        dy = diff_xy[:, 1]

        fig = pyplot.figure(figsize=(12, 6))
        gs = gridspec.GridSpec(100, 100)
        gs.update(hspace=0, wspace=0)
        kwargs = {
            'angles': 'xy',
            'scale_units': 'xy',
            'scale': scale,
            'cmap': cmap,
            'clim': [-180, 180]
        }
        angles = np.degrees(np.arctan2(dy, dx))
        ax = fig.add_subplot(gs[0:100, 0:48])
        cax = ax.quiver(x, y, dx, dy, angles, **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.set_ylabel("Distance from pointing centre / degrees")
        ax.set_title("Source position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='horizontal')

        ax = fig.add_subplot(gs[0:100, 49:97])
        cax = ax.quiver(gx, gy, mdx, mdy, np.degrees(np.arctan2(mdy, mdx)),
                        **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.tick_params(axis='y', labelleft=False)
        ax.set_title("Model position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='vertical')
        # Color bar
        ax2 = fig.add_subplot(gs[0:100, 98:100])
        cbar3 = pyplot.colorbar(cax, cax=ax2, use_gridspec=True)
        cbar3.set_label('Angle CCW from West / degrees')  #,labelpad=-75)
        cbar3.ax.yaxis.set_ticks_position('right')

        outname = os.path.splitext(fname)[0] + '.png'
        #        pyplot.show()
        pyplot.savefig(outname, dpi=200)

    return dxmodel, dymodel
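A brief usage sketch (file names and source coordinates are illustrative) of evaluating the returned models to correct catalogue positions, rebuilding the WCS the same way the function does:

import numpy as np
from astropy import wcs
from astropy.io import fits

dxmodel, dymodel = make_pix_models('xmatch.fits', fitsname='image.fits')  # hypothetical paths
imwcs = wcs.WCS(fits.getheader('image.fits'), naxis=2)
xy = imwcs.all_world2pix(list(zip(ra_src, dec_src)), 1)  # ra_src/dec_src in degrees
x_corr = xy[:, 0] + dxmodel(xy[:, 0], xy[:, 1])          # apply the fitted pixel offsets
y_corr = xy[:, 1] + dymodel(xy[:, 0], xy[:, 1])
ra_corr, dec_corr = imwcs.all_pix2world(np.column_stack([x_corr, y_corr]), 1).T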
Example #53
0
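    # Absolute G magnitude via the distance modulus (parallax assumed in mas):
    # M_G = G + 5*log10(parallax/1000) + 5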
    MG = 5 + 5 * np.log10(best['parallax'] / 1000) + best['phot_g_mean_mag']
    bprp = best['bp_rp']

    gaia_id = int(gaia_id)
    G = float(best['phot_g_mean_mag'])
    MG = float(MG)
    bprp = float(bprp)

    # Coordinates for plotting
    radecs = np.vstack([c2000.ra, c2000.dec]).T
    coords = tpf.wcs.all_world2pix(radecs, 0.5)
    sizes = 128.0 / 2**((g_all - best['phot_g_mean_mag']))

# Reference sample

table = parse_single_table("SampleC.vot")
data = table.array

s_MG = 5 + 5 * np.log10(
    table.array['parallax'] / 1000) + table.array['phot_g_mean_mag']
s_bprp = table.array['bp_rp']

################################

#######  2-MINUTE DATA  ########

slow_lc = tul.LCdata(TIC)

slow_lc.read_data(infile)
BJD_or = slow_lc.bjd
flux_or = slow_lc.flux
Example #54
0
            else:
                os.system('stilts tpipe in='+MRCvot+' cmd=\'select NULL_MFLAG\' cmd=\'addcol PA "0.0"\' cmd=\'addcol S_'+freq_str+' "S408*pow(('+str(freq)+'/408000000.0),-0.85)"\' out='+image+'temp1.vot')
                os.system('stilts tpipe in='+vot+' cmd=\'select local_rms<1.0\' out='+image+'temp2.vot')
                os.system('stilts tmatch2 matcher=skyellipse params=30 in1='+image+'temp1.vot in2='+image+'temp2.vot out='+image+'temp.vot values1="_RAJ2000 _DEJ2000 e_RA2000 e_DE2000 PA" values2="ra dec a b pa" ofmt=votable')
                # Exclude extended sources
                # weight is currently S/N
                os.system('stilts tpipe in='+image+'temp.vot cmd=\'select ((int_flux/peak_flux)<2)\' cmd=\'addcol logratio "(ln(S_'+freq_str+'/int_flux))"\' cmd=\'addcol weight "(int_flux/local_rms)"\' cmd=\'addcol delRA "(_RAJ2000-ra)"\' cmd=\'addcol delDec "(_DEJ2000-dec)"\' omode=out ofmt=vot out='+image+'temp3.vot')
                os.system('stilts tpipe in='+image+'temp3.vot cmd=\'select abs(delRA)<1.0\' out='+matchvot)
                os.remove(image+'temp.vot')
                os.remove(image+'temp1.vot')
                os.remove(image+'temp2.vot')
                os.remove(image+'temp3.vot')

# Check the matched table actually has entries
    Imatchvot=Ifits.replace(".fits","_MRC.vot")
    t = parse_single_table(Imatchvot)
    if t.array.shape[0]>0:
    # Now calculate the correction factors for the I, XX and YY snapshots
        for pb in Ifits,Xpb,Ypb:
            image = pb.replace("_pb.fits",".fits")
            matchvot = pb.replace(".fits","_MRC.vot")
            t = parse_single_table(matchvot)
            ratio=np.exp(np.average(a=t.array['logratio'],weights=(t.array['weight']))) #*(distfunc)))
            stdev=np.std(a=t.array['logratio'])
            print "Ratio of "+str(ratio)+" between "+image+" and MRC."
            print "stdev= "+str(stdev)

            if corr:
                # Write new NON-primary-beam-corrected fits files
                hdu_in = fits.open(image)
            # Modify to fix flux scaling
Example #55
0
from astropy.io.votable import parse_single_table, from_table, writeto
from astropy.table import vstack


def append_data(data, template_path, output_path):
    # Parse the template VOTable, stack the new rows onto it, and write the result
    old_table = parse_single_table(template_path).to_table()
    new_table = vstack([old_table, data])
    votable = from_table(new_table)

    writeto(votable, output_path)
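A short usage sketch (file names illustrative): parse new rows as an astropy Table and append them to an existing template.

new_rows = parse_single_table('new_chunk.vot').to_table()  # hypothetical input
append_data(new_rows, 'template.vot', 'combined.vot')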
Example #56
0
            output = catfile.replace('_comp.vot', '_ddmod.vot')
    if options.fitsimage:
        fitsimage = options.fitsimage
    else:
        fitsimage = catfile.replace('_comp.vot', '.fits')
        if not os.path.exists(fitsimage):
            tempvar = catfile.split("_")
            fitsimage = tempvar[0] + "_" + tempvar[1] + ".fits"
        if options.outfits:
            outfits = options.outfits
        else:
            outfits = fitsimage.replace('.fits', '_mod.fits')

# Read the VO table and start processing

table = parse_single_table(catfile)
data = table.array

psf_table = parse_single_table(psfcat)
psf_data = psf_table.array
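# Weighted mean of the integrated-to-peak flux ratio; the weights favour
# high-S/N sources with small fit residuals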
psf_ratio = np.average(
    a=(psf_data['int_flux'] / psf_data['peak_flux']),
    weights=((psf_data['peak_flux'] / psf_data['local_rms']) *
             (psf_data['peak_flux'] / psf_data['residual_std'])))
#    print "Average psf_ratio = "+str(psf_ratio)
#    psf_ratio=wgmean(a=(psf_data['int_flux']/psf_data['peak_flux']),weights=(np.power(psf_data['peak_flux']/psf_data['local_rms'],2)))
#    print "Weighted geometric mean psf_ratio = "+str(psf_ratio)
#    psf_ratio=scipy.stats.gmean(a=(psf_data['int_flux']/psf_data['peak_flux']))
#    print "Unweighted geometric mean psf_ratio = "+str(psf_ratio)
if options.printaverage:
    print(psf_ratio)
Example #57
0
    def __init__(self, band, filter_directory=pkg_resources.resource_filename('ExoCTK', 'data/filters/'), 
                 wl_units=q.um, zp_units=q.erg/q.s/q.cm**2/q.AA, DELETE=False, **kwargs):
        """
        Loads the bandpass data into the Filter object
        
        Parameters
        ----------
        band: str
            The bandpass filename (e.g. 2MASS.J)
        filter_directory: str
            The directory containing the filter files
        wl_units: str, astropy.units.core.PrefixUnit  (optional)
            The wavelength units
        zp_units: str, astropy.units.core.PrefixUnit  (optional)
            The zeropoint flux units
        DELETE: bool
            Delete the given filter
        """
        # If the filter_directory is an array, load it as the filter
        if isinstance(filter_directory, (list,np.ndarray)):
            
            self.raw = np.array([filter_directory[0].data,filter_directory[1]])
            self.WavelengthUnit = str(q.um)
            self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
            x, f = self.raw
            
            # Get a spectrum of Vega
            vega = np.genfromtxt(pkg_resources.resource_filename('ExoCTK', 'data/core/vega.txt'), unpack=True)[:2]
            vega = core.rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
            self.ZeroPoint = (np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)/np.trapz(f, x=x)).to(q.erg/q.s/q.cm**2/q.AA).value
            
            # Calculate the filter's properties
            self.filterID = band
            self.WavelengthPeak = np.max(self.raw[0])
            self.WavelengthMin = np.interp(max(f)/100.,f[:np.where(np.diff(f)>0)[0][-1]],x[:np.where(np.diff(f)>0)[0][-1]])
            self.WavelengthMax = np.interp(max(f)/100.,f[::-1][:np.where(np.diff(f[::-1])>0)[0][-1]],x[::-1][:np.where(np.diff(f[::-1])>0)[0][-1]])
            self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
            self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
            self.WidthEff = np.trapz(f*x, x=x)
            self.WavelengthPivot = np.sqrt(np.trapz(f*x, x=x)/np.trapz(f/x, x=x))
            self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)

            # Fix these two:
            self.WavelengthCen = self.WavelengthMean
            self.FWHM = self.WidthEff
            
            # Add missing attributes
            self.rsr = self.raw.copy()
            self.path = ''
            self.pixels_per_bin = self.rsr.shape[-1]
            self.n_pixels = self.rsr.shape[-1]
            self.n_bins = 1
            self.wl_min = self.WavelengthMin
            self.wl_max = self.WavelengthMax
            
        else:
            
            # Check if TopHat
            if band.lower().replace('-','').replace(' ','')=='tophat':
            
                # check kwargs for limits
                wl_min = kwargs.get('wl_min')
                wl_max = kwargs.get('wl_max')
                filepath = ''
            
                if not wl_min and not wl_max:
                    print("Please provide **{'wl_min','wl_max'} to create top hat filter.")
                    return
                else:
                    # Load the filter
                    self.load_TopHat(wl_min, wl_max, kwargs.get('n_pixels',100))
                    
            else:
                
                # Get list of filters
                files = glob(filter_directory+'*')
                bands = [os.path.basename(b) for b in files]
                filepath = filter_directory+band
                
                # If the filter is missing, ask what to do
                if filepath not in files:
                    
                    print('Current filters:',
                          ', '.join(bands),
                          '\n')
                      
                    print('No filters match',filepath)
                    dl = input('Would you like me to download it? [y/n] ')
                
                    if dl.lower()=='y':
                    
                        # Prompt for new filter
                        print('\nA full list of available filters from the\n'\
                              'SVO Filter Profile Service can be found at\n'\
                              'http://svo2.cab.inta-csic.es/theory/fps3/\n')
                        band = input('Enter the band name to retrieve (e.g. 2MASS/2MASS.J): ')
                    
                        # Download the XML (VOTable) file
                        baseURL = 'http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?ID='
                        filepath = filter_directory+os.path.basename(band)
                        _ = urllib.request.urlretrieve(baseURL+band, filepath)
                    
                        # Print the new filepath
                        print('Band stored as',filepath)
                    
                    else:
                        return
                    
                # Try to read filter info
                try:
                    
                    # Parse the XML file
                    vot = vo.parse_single_table(filepath)
                    self.rsr = np.array([list(i) for i in vot.array]).T
                    
                    # Parse the filter metadata
                    for p in [str(p).split() for p in vot.params]:
                        
                        # Extract the key/value pairs
                        key = p[1].split('"')[1]
                        val = p[-1].split('"')[1]
                        
                        # Do some formatting
                        if p[2].split('"')[1]=='float'\
                        or p[3].split('"')[1]=='float':
                            val = float(val)
                            
                        else:
                            val = val.replace('b&apos;','')\
                                     .replace('&apos','')\
                                     .replace('&amp;','&')\
                                     .strip(';')
                                 
                        # Set the attribute
                        if key!='Description':
                            setattr(self, key, val)
                            
                    # Create some attributes
                    self.path = filepath
                    self.pixels_per_bin = self.rsr.shape[-1]
                    self.n_pixels = self.rsr.shape[-1]
                    self.n_bins = 1
                    self.raw = self.rsr.copy()
                    self.wl_min = self.WavelengthMin
                    self.wl_max = self.WavelengthMax
                    
                # If empty, delete XML file
                except IOError:
                    
                    print('No filter named',band)
                    # if os.path.isfile(filepath):
                    #     os.remove(filepath)
                    
                    return
                    
        # Get the bin centers
        w_cen = np.nanmean(self.rsr[0])
        f_cen = np.nanmean(self.rsr[1])
        self.centers = np.asarray([[w_cen],[f_cen]])
        
        # Set the wavelength units
        if wl_units:
            self.set_wl_units(wl_units)
            
        # Set zeropoint flux units
        if zp_units!=self.ZeroPointUnit:
            self.set_zp_units(zp_units)
            
        # Get references
        try:
            self.refs = [self.CalibrationReference.split('=')[-1]]
        except AttributeError:
            self.refs = []
            
        # Bin
        if kwargs:
            bwargs = {k:v for k,v in kwargs.items() if k in \
                      inspect.signature(self.bin).parameters.keys()}
            self.bin(**bwargs)
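A brief usage sketch for the class above (the band name assumes the matching SVO filter file is present in the filter directory):

filt = Filter('2MASS.J')                   # load the 2MASS J bandpass
print(filt.WavelengthEff, filt.ZeroPoint)  # effective wavelength and zeropoint flux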
Example #58
0
centre_bins = [200, 600, 1000, 1400]
#load planet images
earth = Image.open(indir + '/earth.png')
earth.thumbnail((20, 20), Image.ANTIALIAS)
super_earth = Image.open(indir + '/super_earth.png')
super_earth.thumbnail((30, 30), Image.ANTIALIAS)
neptune = Image.open(indir + '/neptune.png')
neptune.thumbnail((80, 80), Image.ANTIALIAS)
jup = Image.open(indir + '/jupiter.png')
jup.thumbnail((200, 200), Image.ANTIALIAS)

#initialise animated gif frames
frames = []

#arrange the data into arrays for usage
table = parse_single_table(infile)
data = table.array
date1_bytes = data['pl_publ_date']
date1_strings = date1_bytes.astype('U')
date2_bytes = data['rowupdate']
date2_strings = date2_bytes.astype('U')
disc_year = data['pl_disc']
disc_method_bytes = data['pl_discmethod']
disc_method_strings = disc_method_bytes.astype('U')
pl_hostname_bytes = data['pl_hostname']
pl_hostname_strings = pl_hostname_bytes.astype('U')
pl_letter_bytes = data['pl_letter']
pl_letter_strings = pl_letter_bytes.astype('U')
pl_rad = 11.2 * data['pl_radj']
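# pl_bmassj is a masked column; set a sentinel fill value for missing masses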
data['pl_bmassj'].fill_value = -99
pl_mass = 317.8 * data['pl_bmassj']
Example #59
0
    outfile = sys.argv[2]
else:
    outfile = filename

# suppress version mismatch warning
warnings.filterwarnings(action='ignore', category=astropy.io.votable.exceptions.W21)

# suppress invalid unit warnings
warnings.filterwarnings(action='ignore', category=astropy.io.votable.exceptions.W50)

# enforce utf-8 encoding (Python 2 idiom; unnecessary on Python 3)
reload(sys)
sys.setdefaultencoding("utf-8")

# load data
table = parse_single_table(filename)
data = table.array

# Read column names
columns = []
for field in table.fields:
    columns.append(field.name)

print(columns)

df = pd.DataFrame(index=np.arange(len(data[columns[0]])))
for c in columns:
    s = pd.Series(data[c])
    s.name = c
    df[c] = s
del table
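As an aside, the whole conversion can also be done through astropy's Table interface; a one-line sketch:

df = parse_single_table(filename).to_table().to_pandas()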
Example #60
0
    )
    os.system(
        'stilts tmatch2 matcher=skyellipse params=30 in1=' + table1 + ' in2=' +
        table4 +
        ' out=a1_4.vot values1="ra dec a b pa" values2="ra dec a b pa" ofmt=votable find=all fixcols=all join=1and2'
    )

# Grab each xmatch and select the values with the lowest RMSs (for now -- lowest resid_std might be best)
ands = ["a1_2.vot", "a2_3.vot", "a3_4.vot", "a4_1.vot"]
revands = ["a2_1.vot", "a3_2.vot", "a4_3.vot", "a1_4.vot"]

for amatch, rmatch in zip(ands, revands):
    output = amatch.replace(".vot", "_best.vot")
    if not os.path.exists(output):
        print "Generating " + output
        a = parse_single_table(amatch)
        r = parse_single_table(rmatch)
        data_a = a.array
        data_r = r.array
        indices_a = np.where(data_a['local_rms_1'] < data_a['local_rms_2'])
        indices_r = np.where(data_r['local_rms_1'] < data_r['local_rms_2'])
        data_x = np.ma.concatenate([data_a[indices_a], data_r[indices_r]])
        vot = Table(data_x)
        writetoVO(vot, 'temp.vot')
        # Run through tpipe and keep the right columns (i.e. none of the _2 columns)
        #        os.system('stilts tpipe in=temp.vot cmd=\'keepcols "island source background local_rms ra_str dec_str ra err_ra dec err_dec peak_flux err_peak_flux int_flux err_int_flux a err_a b err_b pa err_pa flags residual_mean residual_std uuid"\' out='+output)
        # For the older version of Aegean (no uuid)
        os.system(
            'stilts tpipe in=temp.vot cmd=\'keepcols "island source background local_rms ra_str dec_str ra err_ra dec err_dec peak_flux err_peak_flux int_flux err_int_flux a err_a b err_b pa err_pa flags residual_mean residual_std"\' out='
            + output)