Example #1
def table_from_scratch():
    import io
    from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

    # Create a new VOTable file...
    votable = VOTableFile()

    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)

    # ... with one table
    table = Table(votable)
    resource.tables.append(table)

    # Define some fields
    table.fields.extend([
            Field(votable, ID="filename", datatype="char"),
            Field(votable, ID="matrix", datatype="double", arraysize="2x2")])

    # Now, use those field definitions to create the numpy record arrays, with
    # the given number of rows
    table.create_arrays(2)

    # Now table.array can be filled with data
    table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
    table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])

    # Now write the whole thing to a file.
    # Note, we have to use the top-level votable file object
    out = io.StringIO()
    votable.to_xml(out)
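A quick way to sanity-check the snippet above is to parse the serialized output back. This is only a sketch: it assumes the body of table_from_scratch() has been run so that `votable` is in scope, and it uses an in-memory BytesIO buffer as in Example #6.

import io
from astropy.io.votable import parse

buf = io.BytesIO()
votable.to_xml(buf)
buf.seek(0)
round_trip = parse(buf)
print(round_trip.get_first_table().array["filename"])  # expect: ['test1.xml' 'test2.xml']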
Example #2
    def createTableFromObject(data, path="", names=[], dtypes=[], sizes=[]):

        path_tmp = "/Users/cjimenez/Documents/PHD/data/tmp/"
        # Create a new VOTable file...
        votable = VOTableFile()

        # ...with one resource...
        resource = Resource()
        votable.resources.append(resource)

        # ... with one table
        table = Table(votable)
        resource.tables.append(table)

        # Define some fields

        fields = []
        for idx, val in enumerate(names):
            fields.append(Field(votable, name=val, datatype=dtypes[idx]))

        table.fields.extend(fields)

        # Now, use those field definitions to create the numpy record arrays, with
        # the given number of rows
        table.create_arrays(len(data))

        # Now table.array can be filled with data
        for idx, val in enumerate(data):
            table.array[idx] = val

        # Now write the whole thing to a file.
        # Note, we have to use the top-level votable file object
        votable.to_xml(path_tmp + path)
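A hedged usage sketch for the helper above; the enclosing class, its imports and the unused `sizes` argument are not shown in the snippet, and the values below are made up.

rows = [(10.68, 41.27), (83.82, -5.39)]
createTableFromObject(rows, path="coords.xml",
                      names=["ra", "dec"], dtypes=["double", "double"])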
Example #3
def _run_test_from_scratch_example():
    from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

    # Create a new VOTable file...
    votable = VOTableFile()

    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)

    # ... with one table
    table = Table(votable)
    resource.tables.append(table)

    # Define some fields
    table.fields.extend([
        Field(votable, name="filename", datatype="char", arraysize="*"),
        Field(votable, name="matrix", datatype="double", arraysize="2x2")])

    # Now, use those field definitions to create the numpy record arrays, with
    # the given number of rows
    table.create_arrays(2)

    # Now table.array can be filled with data
    table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
    table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])

    assert table.array[0][0] == 'test1.xml'
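If the high-level astropy Table interface is wanted instead of the raw record array, the tree Table can be converted directly. A small sketch, assuming `table` is the object built inside the example above:

astro_tab = table.to_table(use_names_over_ids=True)
print(astro_tab["matrix"][0])   # the 2x2 double array from the first row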
Example #4
def exportData(dataTuple, name):

    dataK, labelK, data_errorK, dataL, labelL = dataTuple

    # concatenate all arrays from dataTuple and add labels for errors
    data_full = np.concatenate(
        (np.concatenate((dataK, data_errorK), axis=1), dataL), axis=1)
    error_label = []
    for i in labelK:
        error_label += [i + "_error"]

    label_full = labelK + error_label + labelL

    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)

    fields = []
    for i in range(data_full.shape[1]):
        fields += [Field(votable, name=label_full[i], datatype='float')]

    table.fields.extend(fields)
    table.create_arrays(data_full.shape[0])

    for i in range(data_full.shape[0]):
        table.array[i] = tuple(data_full[i, :])

    votable.to_xml(name + ".xml")
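A hedged usage sketch for exportData(); the array shapes below are assumptions inferred from the concatenation logic (the K data, K errors and L data must share the same number of rows).

import numpy as np

dataK = np.random.rand(5, 2)
data_errorK = np.random.rand(5, 2)
dataL = np.random.rand(5, 3)
labelK = ["k1", "k2"]
labelL = ["l1", "l2", "l3"]
exportData((dataK, labelK, data_errorK, dataL, labelL), "example_output")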
Example #5
def _to_votable(data, file_name):
    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    columns = data.columns
    if data.columns[-1] == 'class':
        columns = columns[:-1]
    fields = [
        Field(votable, name="intensities", datatype="double", arraysize='*')]
    table.fields.extend(fields)
    table.create_arrays(1)
    table.array[0] = columns.tolist()
    votable.to_xml(file_name)
Example #6
def test_exec_sync():
    # save results in a file
    # create the VOTable result
    # example from http://docs.astropy.org/en/stable/io/votable/
    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    table.fields.extend([
        Field(votable, name="filename", datatype="char", arraysize="*"),
        Field(votable, name="matrix", datatype="double", arraysize="2x2")])
    table.create_arrays(2)
    table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
    table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
    buffer = BytesIO()
    votable.to_xml(buffer)
    cadc = Cadc(auth_session=requests.Session())
    response = Mock()
    response.to_table.return_value = buffer.getvalue()
    cadc.cadctap.search = Mock(return_value=response)
    output_file = '{}/test_vooutput.xml'.format(tempfile.tempdir)
    cadc.exec_sync('some query', output_file=output_file)

    actual = parse(output_file)
    assert len(votable.resources) == len(actual.resources) == 1
    assert len(votable.resources[0].tables) ==\
        len(actual.resources[0].tables) == 1
    actual_table = actual.resources[0].tables[0]
    try:
        # TODO remove when astropy LTS upgraded
        from astropy.utils.diff import report_diff_values
        assert report_diff_values(table, actual_table, fileobj=sys.stdout)
    except ImportError:
        pass
Example #7
def table_from_scratch():
    import io
    from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

    # Create a new VOTable file...
    votable = VOTableFile()

    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)

    # ... with one table
    table = Table(votable)
    resource.tables.append(table)

    # Define some fields
    table.fields.extend([
            Field(votable, ID="filename", datatype="char"),
            Field(votable, ID="matrix", datatype="double", arraysize="2x2")])

    # Now, use those field definitions to create the numpy record arrays, with
    # the given number of rows
    table.create_arrays(2)

    # Now table.array can be filled with data
    table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
    table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])

    # Now write the whole thing to a file.
    # Note, we have to use the top-level votable file object
    out = io.StringIO()
    votable.to_xml(out)
Example #8
def _run_test_from_scratch_example():
    from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

    # Create a new VOTable file...
    votable = VOTableFile()

    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)

    # ... with one table
    table = Table(votable)
    resource.tables.append(table)

    # Define some fields
    table.fields.extend([
        Field(votable, name="filename", datatype="char", arraysize="*"),
        Field(votable, name="matrix", datatype="double", arraysize="2x2")])

    # Now, use those field definitions to create the numpy record arrays, with
    # the given number of rows
    table.create_arrays(2)

    # Now table.array can be filled with data
    table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
    table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])

    assert table.array[0][0] == 'test1.xml'
Example #9
def sql_to_vo(sql, output_name=None):
    """
    Run SQL query and save output to VO table.
    """
    data = LOCAL_CONN.execute_set(sql, False)
    # Create a new VOTable file...
    votable = VOTableFile()
    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)
    # ... with one table
    table = Table(votable)
    resource.tables.append(table)
    for column_descr in LOCAL_CONN.last_description:
        # Define fields
        xtype = get_vo_type(column_descr.type_code)
        if xtype == 'char':
            table.fields.extend([
                Field(votable,
                      name=column_descr.name,
                      datatype=xtype,
                      arraysize=str(column_descr.internal_size))])
        else:
            table.fields.extend([Field(votable,
                                       name=column_descr.name,
                                       datatype=xtype)])
    table.create_arrays(len(data))
    for irow, row in enumerate(data):
        table.array[irow] = row
    if output_name is not None:
        # Save to file
        votable.to_xml(output_name)
        return True
    else:
        # Return VOTable
        return votable
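get_vo_type() and LOCAL_CONN are project-specific and not shown above; a hypothetical, minimal mapping from DB-API type codes to VOTable datatypes might look like this (the names and codes are illustrative only).

VO_TYPE_MAP = {
    "INTEGER": "int",
    "BIGINT": "long",
    "REAL": "float",
    "DOUBLE": "double",
    "TEXT": "char",
}

def get_vo_type(type_code):
    # Fall back to 'char' so unknown column types are still serialisable.
    return VO_TYPE_MAP.get(str(type_code).upper(), "char")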
Example #10
    def format_votable(self):
        """
        Returns
        -------
        Data in VOTable format.
        """
        names_column = ['obj', 'ra', 'dec', 'mjd', 'mag', 'magerr', 'filter', 'catalog']
        descriptions_column = ['Id of object in the original catalog',
                               'Right ascension of source', 'Declination of source',
                               'Julian Day', 'Magnitude', 'Magnitude Error',
                               'Filter code', 'Original Catalog']
        #dtype_column = [] # dtype=dtype_column
        t = Table(rows=self.data_np, names=names_column,
                  descriptions=descriptions_column)
        votable = VOTableFile.from_table(t)
        buf = io.BytesIO()
        writeto(votable, buf)
        return buf.getvalue().decode("utf-8")
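The VOTableFile.from_table() shortcut used above avoids building the tree by hand. A minimal standalone sketch (the file name and column values are made up):

from astropy.table import Table
from astropy.io.votable.tree import VOTableFile
from astropy.io.votable import writeto

t = Table(rows=[("obj1", 17.2), ("obj2", 18.4)], names=("obj", "mag"))
writeto(VOTableFile.from_table(t), "example.xml")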
Example #11
    def __spectrum_to_votable(self, spectrum):
        # Create a new VOTable file...
        votable = VOTableFile()

        # ...with one resource...
        resource = Resource()
        votable.resources.append(resource)

        # ... with one table
        table = Table(votable)
        resource.tables.append(table)

        # Define some fields
        waveobs = Field(votable,
                        name="waveobs",
                        datatype="double",
                        unit="nm",
                        ucd="em.wl")
        flux = Field(votable,
                     name="flux",
                     datatype="double",
                     unit="Jy",
                     ucd="phot.flux")
        err = Field(votable,
                    name="err",
                    datatype="double",
                    ucd="stat.error;phot.flux")
        table.fields.extend([waveobs, flux, err])
        table.groups.extend([Group([flux, err])])
        #import ipdb
        #ipdb.set_trace()
        # Now, use those field definitions to create the numpy record arrays, with
        # the given number of rows
        table.create_arrays(len(spectrum))

        # Now table.array can be filled with data
        table.array['waveobs'] = spectrum['waveobs']
        table.array['flux'] = spectrum['flux']
        table.array['err'] = spectrum['err']

        #votable.set_all_tables_format('binary') # VOSpec does not understand binary format
        return votable
Example #12
			# Row data
			Row=[]
			for c in range(0,len(StartPos)):
				#print (ColumnName[c],Text[int(StartPos[c])-1:int(FinishPos[c])])
				Result=Text[int(StartPos[c])-1:int(FinishPos[c])]
				Row.append(Result)			
			# Write row to CSV file
			Output.writerow(Row)			
			List.append(Row)
			
votable=VOTableFile()
#coosys=CooSys(ID="J2000", equinox="J2000", system="eq_FK5")
#votable.coordinate_systems.append(coosys)
resource=Resource()
votable.resources.append(resource)
table=Table(votable)
resource.tables.append(table)
table.params.extend([
	Param(votable, name="imageFile", ucd="meta.file;meta.fits", datatype="char", arraysize="255", value="atlas-cdfs.fits"),
	Param(votable, name="Reference frequency", ucd="em.freq;meta.main", datatype="float", unit="Hz", value="1.408e+08")])
table.fields.extend([
	Field(votable, name="island_id",datatype="char",unit="--",arraysize="15"),
	Field(votable, name="component_id",datatype="char",unit="--",arraysize="4"),
	Field(votable, name="component_name",datatype="char",unit="",arraysize="26"),
	Field(votable, name="ra_hms_cont", datatype="char",unit="",arraysize="12",ref="J2000"),
	Field(votable, name="dec_dms_cont", datatype="char", unit="",arraysize="13",ref="J2000"),
	Field(votable, name="ra_deg_cont", datatype="float",precision="6", unit="deg",ucd="pos.eq.ra;meta.main",ref="J2000",width="12"),
	Field(votable, name="dec_deg_cont", datatype="float",precision="6", unit="deg",ucd="pos.eq.dec;meta.main",ref="J2000",width="13"),
	Field(votable, name="ra_err", datatype="float",precision="2",unit="arcsec",ref="J2000",width="11"),
	Field(votable, name="dec_err", datatype="float",precision="2",unit="arcsec",ref="J2000",width="11"),
	Field(votable, name="freq", datatype="float",precision="1",unit="MHz", ucd="em.freq",width="11"),
Example #13
print(np.array(table[sys.argv[2]])[i_r])
print(i_r)


from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

# Create a new VOTable file...
votable = VOTableFile()

# ...with one resource...
resource = Resource()
votable.resources.append(resource)

# ... with one table
table_out = Table(votable)
resource.tables.append(table_out)

table_out.fields.extend(fields)
table_out.create_arrays(len(i_r))

for n in col_names:
    table_out.array[n] = table[n][i_r]

votable.to_xml(sys.argv[4])

#table = vot.get_first_table().to_table(use_names_over_ids=True)
#print table


Example #14
    def test_data_file(self):
        wh().get().RESULTS_PATH = os.path.dirname(os.path.realpath(__file__))

        test_log("Testing the initialization...", self)
        job_id = 1
        fname = "test_data.dat"
        model = results_model.objects.filter(job_id=job_id)
        self.assertFalse(model)
        d = data_file(1)
        model = results_model.objects.filter(job_id=job_id)
        self.assertEqual(len(model), 1)

        test_log("Testing a simple file creation...", self)
        file_name = os.path.join(wh().get().RESULTS_PATH, fname)
        f = d.file(fname)
        f.write("test")
        f.close()
        model = results_model.objects.filter(job_id=job_id)
        self.assertEqual(wh().get().RESULTS_PATH,
                         model[0].resources.all().filter(name=fname)[0].path)
        self.assertEqual(fname,
                         model[0].resources.all().filter(name=fname)[0].name)
        self.assertTrue(os.path.isfile(file_name))
        f = open(file_name)
        fdata = f.read()
        f.close()
        self.assertEqual(fdata, "test")
        os.remove(file_name)

        test_log("Testing a plot addition...", self)
        plot = plot_model.objects.create(name="test_plot",
                                         job_id=job_id,
                                         alg_name="test",
                                         script="",
                                         html="")
        self.assertFalse(model[0].plots.all().filter(name="test_plot"))
        d.add_plot(plot)
        self.assertEqual(len(model[0].plots.all().filter(name="test_plot")), 1)

        test_log("Testing a FITS file storage...", self)
        n = np.arange(100.0)
        hdu = fits.PrimaryHDU(n)
        hdul = fits.HDUList([hdu])
        d.save_fits(fname, hdul)
        self.assertEqual(len(model[0].resources.all().filter(name=fname)), 2)
        self.assertTrue(os.path.isfile(file_name))
        os.remove(file_name)

        test_log("Testing a VOTable file storage...", self)
        votable = VOTableFile()
        resource = Resource()
        votable.resources.append(resource)
        table = Table(votable)
        resource.tables.append(table)
        table.fields.extend([
            Field(votable, name="filename", datatype="char", arraysize="*"),
            Field(votable, name="matrix", datatype="double", arraysize="2x2")
        ])
        table.create_arrays(2)
        table.array[0] = ('test_1', [[1, 0], [0, 1]])
        table.array[1] = ('test_2', [[0.5, 0.3], [0.2, 0.1]])
        d.save_vot(fname, votable)
        self.assertEqual(len(model[0].resources.all().filter(name=fname)), 3)
        self.assertTrue(os.path.isfile(file_name))
        os.remove(file_name)
Example #15
          'can only process FITS files with a single, primary HDU.',
          file=sys.stderr)
    sys.exit(1)

# Process the FITS headers into a <GROUP> in the first <TABLE>.
hdr = hdulist[0].header

# Set up the main structure of the VOTable file:
vt = VOTableFile()

#   Create a single <RESOURCE> in the file
r = Resource()
vt.resources.append(r)

#   Place an empty (for now) <TABLE> in the file
t = Table(vt)
r.tables.append(t)

#   Create the special <GROUP> for the raw FITS headers
g = make_fits_header_group(hdr, t)
t.groups.append(g)

# Access the actual file data.  We can only have a primary HDU - this means it must be an IMAGE HDU.
pixel_data = hdulist[0].data

#   Here is the place for some introspection, if any is possible, into the data format.
#   For now this is hard-coded for one particular EXES file format.

#   Confirm that the data HDU shape is as expected.  This kind of EXES file has four stripes of data:
#     0: wavenumber
#     1: flux per wavenumber bin
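make_fits_header_group() is not part of the snippet above; a hypothetical minimal version could wrap each FITS header card in a <PARAM> gathered inside a single <GROUP>. This sketch leans on the module-level `vt` created above and is illustrative only.

from astropy.io.votable.tree import Group, Param

def make_fits_header_group(header, table):
    group = Group(table, name="FITS_HEADER")
    for card in header.cards:
        # Skip blank, COMMENT and HISTORY cards; everything else becomes a PARAM.
        if not card.keyword or card.keyword in ("COMMENT", "HISTORY"):
            continue
        group.entries.append(
            Param(vt, name=card.keyword, datatype="char",
                  arraysize="*", value=str(card.value)))
    return group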
Example #16
File: votable.py Project: atpy/atpy
def _to_table(self, vo_table):
    '''
    Return the current table as a VOT object
    '''

    table = VOTable(vo_table)

    # Add keywords
    for key in self.keywords:
        if isinstance(self.keywords[key], basestring):
            arraysize = '*'
        else:
            arraysize = None
        param = Param(table, name=key, ID=key, value=self.keywords[key], arraysize=arraysize)
        table.params.append(param)

    # Define some fields

    n_rows = len(self)

    fields = []
    for i, name in enumerate(self.names):

        data = self.data[name]
        unit = self.columns[name].unit
        description = self.columns[name].description
        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        if data.ndim > 1:
            arraysize = str(data.shape[1])
        else:
            arraysize = None

        if column_type in type_dict:
            datatype = type_dict[column_type]
        elif column_type == np.int8:
            warnings.warn("int8 unsupported - converting to int16")
            datatype = type_dict[np.int16]
        elif column_type == np.uint16:
            warnings.warn("uint16 unsupported - converting to int32")
            datatype = type_dict[np.int32]
        elif column_type == np.uint32:
            warnings.warn("uint32 unsupported - converting to int64")
            datatype = type_dict[np.int64]
        elif column_type == np.uint64:
            raise Exception("uint64 unsupported")
        else:
            raise Exception("cannot use numpy type " + str(column_type))

        if column_type == np.float32:
            precision = 'E9'
        elif column_type == np.float64:
            precision = 'E17'
        else:
            precision = None

        if datatype == 'char':
            if arraysize is None:
                arraysize = '*'
            else:
                raise ValueError("Cannot write vector string columns to VO files")

        field = Field(vo_table, ID=name, name=name, \
                datatype=datatype, unit=unit, arraysize=arraysize, \
                precision=precision)

        field.description = description

        fields.append(field)

    table.fields.extend(fields)

    table.create_arrays(n_rows)

    # Character columns are stored as object columns in the vo_table
    # instance. Leaving the type as string should work, but causes
    # a segmentation fault on MacOS X with Python 2.6 64-bit so
    # we force the conversion to object type columns.

    for name in self.names:

        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        # Add data to the table
        # At the moment, null values in VO table are dealt with via a
        # 'mask' record array

        if column_type == np.string_:
            table.array[name] = self.data[name].astype(np.object_)
            if self._masked:
                table.array.mask[name] = self.data[name].mask.astype(np.object_)
            else:
                if self.data[name].dtype.type == np.bytes_ and type(self.columns[name].null) != bytes:
                    table.array.mask[name] = (self.data[name] == \
                                self.columns[name].null.encode('utf-8')).astype(np.object_)
                else:
                    table.array.mask[name] = (self.data[name] == \
                                self.columns[name].null).astype(np.object_)
        else:
            table.array[name] = self.data[name]
            if self._masked:
                table.array.mask[name] = self.data[name].mask
            else:
                table.array.mask[name] = self.data[name] == \
                                        self.columns[name].null

    table.name = self.table_name

    return table
Example #17
def _to_table(self, vo_table):
    '''
    Return the current table as a VOT object
    '''

    table = VOTable(vo_table)

    # Add keywords
    for key in self.keywords:
        if isinstance(self.keywords[key], basestring):
            arraysize = '*'
        else:
            arraysize = None
        param = Param(table,
                      name=key,
                      ID=key,
                      value=self.keywords[key],
                      arraysize=arraysize)
        table.params.append(param)

    # Define some fields

    n_rows = len(self)

    fields = []
    for i, name in enumerate(self.names):

        data = self.data[name]
        unit = self.columns[name].unit
        description = self.columns[name].description
        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        if data.ndim > 1:
            arraysize = str(data.shape[1])
        else:
            arraysize = None

        if column_type in type_dict:
            datatype = type_dict[column_type]
        elif column_type == np.int8:
            warnings.warn("int8 unsupported - converting to int16")
            datatype = type_dict[np.int16]
        elif column_type == np.uint16:
            warnings.warn("uint16 unsupported - converting to int32")
            datatype = type_dict[np.int32]
        elif column_type == np.uint32:
            warnings.warn("uint32 unsupported - converting to int64")
            datatype = type_dict[np.int64]
        elif column_type == np.uint64:
            raise Exception("uint64 unsupported")
        else:
            raise Exception("cannot use numpy type " + str(column_type))

        if column_type == np.float32:
            precision = 'F9'
        elif column_type == np.float64:
            precision = 'F17'
        else:
            precision = None

        if datatype == 'char':
            if arraysize is None:
                arraysize = '*'
            else:
                raise ValueError(
                    "Cannot write vector string columns to VO files")

        field = Field(vo_table, ID=name, name=name, \
                datatype=datatype, unit=unit, arraysize=arraysize, \
                precision=precision)

        field.description = description

        fields.append(field)

    table.fields.extend(fields)

    table.create_arrays(n_rows)

    # Character columns are stored as object columns in the vo_table
    # instance. Leaving the type as string should work, but causes
    # a segmentation fault on MacOS X with Python 2.6 64-bit so
    # we force the conversion to object type columns.

    for name in self.names:

        dtype = self.columns[name].dtype
        column_type = smart_dtype(dtype)

        # Add data to the table
        # At the moment, null values in VO table are dealt with via a
        # 'mask' record array

        if column_type == np.string_:
            table.array[name] = self.data[name].astype(np.object_)
            if self._masked:
                table.mask[name] = self.data[name].mask.astype(np.object_)
            else:
                table.mask[name] = (self.data[name] == \
                            self.columns[name].null).astype(np.object_)
        else:
            table.array[name] = self.data[name]
            if self._masked:
                table.mask[name] = self.data[name].mask
            else:
                table.mask[name] = self.data[name] == \
                            self.columns[name].null

    table.name = self.table_name

    return table
Example #18
def save_sed_to_vo(sed, filename, norm=1.):
    """
    Save a SED object to a VO-Table file

    Parameters
    ----------
    sed: a pcigale.sed.SED object
        The SED to save.
    filename: string
        Name of the file to save the SED to.
    norm: float
        Normalisation factor of the SED

    """
    votable = VOTableFile()

    spectra_resource = Resource(id="Spectra")
    votable.resources.append(spectra_resource)

    # Total F_nu
    fnu_table = Table(votable, name="Fnu", id="Fnu")
    spectra_resource.tables.append(fnu_table)
    fnu_table.fields.extend([
        Field(votable, name="wavelength", datatype="double", unit="nm",
              ucd="em.wl"),
        Field(votable, name="F_nu", datatype="double", unit="mJy",
              ucd="phot.flux")
    ])
    fnu_table.create_arrays(len(sed.wavelength_grid))
    fnu_table.array["wavelength"] = sed.wavelength_grid
    fnu_table.array["F_nu"] = norm * sed.fnu

    # L_lambda contributions and total
    Llambda_table = Table(votable, name="Llambda", id="Llambda")
    spectra_resource.tables.append(Llambda_table)
    Llambda_fields = [
        Field(votable, name="wavelength", datatype="double", unit="nm",
              ucd="em.wl"),
        Field(votable, name="L_lambda_total", datatype="double", unit="W/nm",
              ucd="phot.flux")]
    for name in sed.contribution_names:
        Llambda_fields.append(Field(votable, name=name, datatype="double",
                                    unit="W/nm", ucd="phot.flux"))
    Llambda_table.fields.extend(Llambda_fields)
    Llambda_table.create_arrays(len(sed.wavelength_grid))
    Llambda_table.array["wavelength"] = sed.wavelength_grid
    Llambda_table.array["L_lambda_total"] = norm * sed.luminosity
    for name in sed.contribution_names:
        Llambda_table.array[name] = norm * sed.get_lumin_contribution(name)

    # SFH
    if sed.sfh is not None:
        sfh_resource = Resource(id="Star_Formation_History")
        votable.resources.append(sfh_resource)
        sfh_table = Table(votable, name="SFH", id="SFH")
        sfh_resource.tables.append(sfh_table)
        sfh_table.fields.extend([
            Field(votable, name="time", datatype="double", unit="Myr",
                  ucd="time.age"),
            Field(votable, name="SFR", datatype="double", unit="Msun/yr",
                  ucd="phys.SFR")
        ])
        sfh_table.create_arrays(sed.sfh.size)
        sfh_table.array["time"] = np.arange(sed.sfh.size)
        sfh_table.array["SFR"] = norm * sed.sfh

    # SED information to keywords
    if sed.sfh is not None:
        # If there is a stellar population then the norm factor is the stellar
        # mass.
        votable.infos.append(Info(name="Galaxy mass in Msun", value=norm))

    for name, value in sorted(sed.info.items()):
        if name in sed.mass_proportional_info:
            votable.infos.append(Info(name=name, value=norm * value))
        else:
            votable.infos.append(Info(name=name, value=value))

    votable.set_all_tables_format('binary')
    votable.to_xml(filename)
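Reading one of the tables back out of the file written above is straightforward; a small sketch, where "my_sed.xml" stands in for whatever was passed as `filename`:

from astropy.io.votable import parse

vot = parse("my_sed.xml")
fnu = vot.get_first_table().to_table()   # the "Fnu" table is written first
print(fnu["wavelength"][:5], fnu["F_nu"][:5])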
Example #19
def VOGetCapabilities(filename, outfile):
    votable = VOTableFile()
    parser = ElementTree.XMLParser(recover=True)
    f = codecs.open(filename, 'r', 'utf-8')
    string = f.read()
    string = bytes(bytearray(
        string,
        encoding='utf-8'))  ## force utf-8 encoding even if xml says otherwise
    root = ElementTree.fromstring(string, parser)
    for element in root:
        res = element.tag
        resid = str(res).split('}')[-1:][
            0]  # MM, note that we have to convert res to string first
        #    logger.debug(resid) ## debug
        #    if (res!=ElementTree.Comment):
        if (resid != str(ElementTree.Comment)):  # MM
            #      resid=re.sub(':','_',res)
            resid = re.sub(':', '_', resid)
            resource = Resource(name=res, ID=resid)
            votable.resources.append(resource)
            table = Table(votable)
            resource.tables.append(table)
            for param in element:
                par = param.tag
                parid = str(par).split('}')[-1:][0]  # MM
                value = param.text
                if value is None:
                    listvalue = param.items()
                    value = listvalue
                dt = type(value).__name__
                if (dt == "str"):
                    dt = "char"
                    arraysize = "*"
                    table.params.extend([
                        #            Param(votable, name=par, datatype=dt, value=value)])
                        Param(votable, name=parid, datatype=dt, value=value)
                    ])
                elif (dt == "list"):
                    if (len(value) > 0):
                        for tup in value:
                            table.params.extend([
                                #                Param(votable, name=tup[0], datatype="char", value=tup[1])])
                                Param(votable,
                                      name=tup[0].split('}')[-1:][0],
                                      datatype="char",
                                      value=tup[1])
                            ])


#        if (par=="Layer"):
                if (parid == "Layer"):
                    #          lresource = Resource(name=par,ID=par)
                    lresource = Resource(name=par, ID=parid)  # MM
                    votable.resources.append(lresource)
                    ltable = Table(votable)
                    lresource.tables.append(ltable)
                    j = 0
                    data = []
                    for layer in param:
                        lay = layer.tag
                        layid = str(lay).split('}')[-1:][0]  # MM
                        datalay = []
                        #            if (lay=="Layer"):
                        if (layid == "Layer"):  # MM
                            for field in layer:
                                fieldid = str(field.tag).split('}')[-1:][0]
                                if (j == 0):
                                    ltable.fields.extend([
                                        #                    Field(votable, name=field.tag, datatype="char", arraysize="*")])
                                        Field(votable,
                                              name=fieldid,
                                              datatype="char",
                                              arraysize="*")
                                    ])
                                else:
                                    try:
                                        #                    ltable.get_field_by_id_or_name(field.tag)
                                        ltable.get_field_by_id_or_name(fieldid)
                                    except:
                                        ltable.fields.extend([
                                            #                      Field(votable, name=field.tag, datatype="char", arraysize="*")])
                                            Field(votable,
                                                  name=fieldid,
                                                  datatype="char",
                                                  arraysize="*")
                                        ])
                                if field.text == None:
                                    field.text = "Empty"
                                datalay.append(field.text)
                                try:
                                    i = i + 1
                                except:
                                    i = 0
                            j = 1
                        if datalay != []:
                            data.append(datalay)
                    l = len(ltable.fields)
                    #          nl = int(math.ceil((i)/l)+1) #this will only work if there are no nested layers
                    #          print(nl) # so i'm commenting this out and replacing with simple len(data)
                    nl = len(data)  #MM
                    #          logger.debug(nl) ## debug
                    dim = nl * l - 1
                    for x in range(0, nl):
                        while len(data[x]) < l:
                            data[x].append('Empty')
                    try:
                        ltable.create_arrays(dim)
                        ltable.array = (np.ma.asarray(data, dtype='str'))
                        ltable.array.mask = False
                    except:
                        raise

    votable.to_xml(outfile)
Example #20
def sort_write(sortname, spect, fitsdict, filesort, space=3):
    """
    Write out an xml and ascii file that contains the details of the file sorting.
    By default, the filename is printed first, followed by the filetype.
    After these, all parameters listed in the 'keyword' item in the
    settings file will be printed

    Parameters
    ----------
    sortname : str
      The filename to be used to save the list of sorted files
    spect : dict
      Properties of the spectrograph.
    fitsdict : dict
      Contains relevant information from fits header files
    filesort : dict
      Details of the sorted files
    space : int
      Keyword to set how many blank spaces to place between keywords
    """
    msgs.info("Preparing to write out the data sorting details")
    nfiles = fitsdict['filename'].size
    # Specify which keywords to print after 'filename' and 'filetype'
    prord = ['filename', 'frametype', 'target', 'exptime', 'naxis0', 'naxis1', 'filter1', 'filter2']
    prdtp = ["char",     "char",      "char",   "double",  "int",    "int",    "char",     "char"]
    # Now insert the remaining keywords:
    fkey = spect['keyword'].keys()
    for i in fkey:
        if i not in prord:
            prord.append(i)
            # Append the type of value this keyword holds
            typv = type(fitsdict[i][0])
            if typv is int or typv is np.int_:
                prdtp.append("int")
            elif isinstance(fitsdict[i][0], basestring) or typv is np.string_:
                prdtp.append("char")
            elif typv is float or typv is np.float_:
                prdtp.append("double")
            else:
                msgs.bug("I didn't expect useful headers to contain type {0:s}".format(typv).replace('<type ', '').replace('>', ''))
    # Open a VOTable for writing
    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    # Define VOTable fields
    tabarr=[]
    # Insert the filename and filetype first
    for i in xrange(len(prord)):
        tabarr.append(Field(votable, name=prord[i], datatype=prdtp[i], arraysize="*"))
    table.fields.extend(tabarr)
    table.create_arrays(nfiles)
    filtyp = filesort.keys()
    for i in xrange(nfiles):
        values = ()
        for pr in prord:
            if pr == 'frametype':
                addval = ""
                for ft in filtyp:
                    if i in filesort[ft]:
                        if len(addval) != 0: addval += ","
                        addval += ft
                addval = (addval,)
            else: addval = (fitsdict[pr][i],)
            values = values + addval
        table.array[i] = values
    #osspl = sortname.split('.')
    #if len(osspl) > 1:
    #    fname = sortname
    #else:
    fname = sortname+'.xml'
    votable.to_xml(fname)
    msgs.info("Successfully written sorted data information file:"+msgs.newline() +
              "{0:s}".format(fname))

    # ASCII file (JXP)
    jxpord = ['filename', 'date', 'frametype', 'target', 'exptime', 'binning',
        'dichroic', 'disperser', 'cdangle', 'decker']
    # Generate the columns
    clms = []
    for pr in jxpord:
        try:
            lidx = prord.index(pr)
        except ValueError:
            msgs.warn('{:s} keyword not used'.format(pr))
        else:
            clm = []
            for i in xrange(nfiles):
                clm.append(table.array[i][lidx])
            clms.append(Column(clm, name=pr))
    # Create Table
    jxp_tbl = tTable(clms)
    # Write
    jxp_name = fname.replace('.xml', '.lst')
    jxp_tbl.write(jxp_name, format='ascii.fixed_width')
    return
Example #21
            # Row data
            Row = []
            for c in range(0, len(StartPos)):
                #print (ColumnName[c],Text[int(StartPos[c])-1:int(FinishPos[c])])
                Result = Text[int(StartPos[c]) - 1:int(FinishPos[c])]
                Row.append(Result)
            # Write row to CSV file
            Output.writerow(Row)
            List.append(Row)

votable = VOTableFile()
#coosys=CooSys(ID="J2000", equinox="J2000", system="eq_FK5")
#votable.coordinate_systems.append(coosys)
resource = Resource()
votable.resources.append(resource)
table = Table(votable)
resource.tables.append(table)
table.params.extend([
    Param(votable,
          name="imageFile",
          ucd="meta.file;meta.fits",
          datatype="char",
          arraysize="255",
          value="atlas-cdfs.fits"),
    Param(votable,
          name="Reference frequency",
          ucd="em.freq;meta.main",
          datatype="float",
          unit="Hz",
          value="1.408e+08")
])
Example #22
def visibility(request):
    now = datetime.datetime.now()
    html = "<html><body>It is now %s.</body></html>" % now

    # Create a new VOTable file...
    votable = VOTableFile()
    # ...with one resource...
    resource = Resource()
    votable.resources.append(resource)

    # ... with one table
    table = Table(votable)
    resource.tables.append(table)


    resource.description ="European Space Astronomy Centre. INTEGRAL SOC - " \
                          "Object Visibility Simple Access Protocol (ObjVisSAP)"
    resource.infos.append(Info(name="QUERY_STATUS", value="OK"))

    resource.infos.append(Info(name="SERVICE PROTOCOL", value="1.0"))
    resource.infos.append(Info(name="REQUEST", value="queryData"))
    resource.infos.append(
        Info(name="s_ra", value="%s" % request.GET.get("s_ra")))
    resource.infos.append(
        Info(name="s_dec", value="%s" % request.GET.get("s_dec")))
    resource.infos.append(
        Info(name="t_min", value="%s" % request.GET.get("t_min")))
    resource.infos.append(
        Info(name="t_max", value="%s" % request.GET.get("t_max")))

    # Define some fields
    # table.fields.extend([
    #     Field(votable, name="filename", datatype="char", arraysize="*"),
    #     Field(votable, name="matrix", datatype="double", arraysize="2x2")])

    table.fields.extend([
        Field(votable,
              name="t_start",
              datatype="double",
              ucd="time.start",
              utype="Char.TimeAxis.Coverage.Bounds.Limits.StartTime"),
        Field(votable,
              name="t_stop",
              datatype="double",
              ucd="time.start",
              utype="Char.TimeAxis.Coverage.Bounds.Limits.StartTime"),
        Field(votable,
              name="t_visibility",
              datatype="double",
              ucd="time.start",
              utype="Char.TimeAxis.Coverage.Bounds.Limits.StartTime"),
    ])
    results = VisibilityCalculator.getVisibilityIntervals(
        request.GET.get("s_ra"), request.GET.get("s_dec"),
        request.GET.get("t_min"), request.GET.get("t_max"))

    number_of_intervals = len(results)
    table.create_arrays(number_of_intervals)
    for i in range(0, number_of_intervals):
        table.array[i] = (results[i][0], results[i][1], results[i][2])

    # Now write the whole thing to a file to be streamed
    # Note, we have to use the top-level votable file object

    xml_now = "/tmp/new_votable_%s.xml" % now
    votable.to_xml(xml_now)
    stream = open(xml_now).read()
    os.remove(xml_now)
    return HttpResponse(stream, content_type='text/xml')
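A hedged alternative to the temporary-file round trip at the end of the view: to_xml() also accepts a writable file-like object (as in Example #6), so the response body can be built in memory. Sketch only, keeping the view's variable names:

from io import BytesIO

buffer = BytesIO()
votable.to_xml(buffer)
return HttpResponse(buffer.getvalue(), content_type='text/xml')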
Example #23
def sort_write(sortname, spect, fitsdict, filesort, space=3):
    """
    Write out an xml and ascii file that contains the details of the file sorting.
    By default, the filename is printed first, followed by the filetype.
    After these, all parameters listed in the 'keyword' item in the
    settings file will be printed

    Parameters
    ----------
    sortname : str
      The filename to be used to save the list of sorted files
    spect : dict
      Properties of the spectrograph.
    fitsdict : dict
      Contains relevant information from fits header files
    filesort : dict
      Details of the sorted files
    space : int
      Keyword to set how many blank spaces to place between keywords
    """
    msgs.info("Preparing to write out the data sorting details")
    nfiles = fitsdict['filename'].size
    # Specify which keywords to print after 'filename' and 'filetype'
    prord = ['filename', 'frametype', 'target', 'exptime', 'naxis0', 'naxis1', 'filter1', 'filter2']
    prdtp = ["char",     "char",      "char",   "double",  "int",    "int",    "char",     "char"]
    # Now insert the remaining keywords:
    fkey = spect['keyword'].keys()
    for i in fkey:
        if i not in prord:
            prord.append(i)
            # Append the type of value this keyword holds
            typv = type(fitsdict[i][0])
            if typv is int or typv is np.int_:
                prdtp.append("int")
            elif isinstance(fitsdict[i][0], basestring) or typv is np.string_:
                prdtp.append("char")
            elif typv is float or typv is np.float_:
                prdtp.append("double")
            else:
                msgs.bug("I didn't expect useful headers to contain type {0:s}".format(typv).replace('<type ', '').replace('>', ''))
    # Open a VOTable for writing
    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    # Define VOTable fields
    tabarr=[]
    # Insert the filename and filetype first
    for i in range(len(prord)):
        tabarr.append(Field(votable, name=prord[i], datatype=prdtp[i], arraysize="*"))
    table.fields.extend(tabarr)
    table.create_arrays(nfiles)
    filtyp = filesort.keys()
    for i in range(nfiles):
        values = ()
        for pr in prord:
            if pr == 'frametype':
                addval = ""
                for ft in filtyp:
                    if i in filesort[ft]:
                        if len(addval) != 0: addval += ","
                        addval += ft
                addval = (addval,)
            else: addval = (fitsdict[pr][i],)
            values = values + addval
        table.array[i] = values
    #osspl = sortname.split('.')
    #if len(osspl) > 1:
    #    fname = sortname
    #else:
    fname = sortname+'.xml'
    votable.to_xml(fname)
    msgs.info("Successfully written sorted data information file:"+msgs.newline() +
              "{0:s}".format(fname))

    # ASCII file (JXP)
    jxpord = ['filename', 'date', 'frametype', 'target', 'exptime', 'binning',
        'dichroic', 'disperser', 'cdangle', 'decker']
    # Generate the columns
    clms = []
    for pr in jxpord:
        try:
            lidx = prord.index(pr)
        except ValueError:
            msgs.warn('{:s} keyword not used'.format(pr))
        else:
            clm = []
            for i in range(nfiles):
                clm.append(table.array[i][lidx])
            clms.append(Column(clm, name=pr))
    # Create Table
    jxp_tbl = tTable(clms)
    # Write
    jxp_name = fname.replace('.xml', '.lst')
    jxp_tbl.write(jxp_name, format='ascii.fixed_width')
    return
Example #24
def Generate_VO(name, data1, labels):
    data = Table(rows=data1, names=labels)
    ascii.write(data, 'values.tbl', format='ipac', overwrite=True)
    t = Table.read('values.tbl', format='ipac')
    t.write(name + '.xml', format='votable', overwrite=True)
    os.remove('values.tbl')
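The detour through the intermediate IPAC file appears unnecessary if only the VOTable is wanted; a hedged sketch of the direct route (the function name below is ours, not the project's):

from astropy.table import Table

def generate_vo_direct(name, rows, labels):
    Table(rows=rows, names=labels).write(name + '.xml', format='votable',
                                         overwrite=True)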