Example #1
def update_doc(obj, scaling='density'):
    """Update the docstring of ``obj`` to reference available FFT methods
    """
    header = 'The available methods are:'

    # remove the old format list
    lines = obj.__doc__.splitlines()
    try:
        pos = [i for i, line in enumerate(lines) if header in line][0]
    except IndexError:
        pass
    else:
        lines = lines[:pos]

    # work out the indentation
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    indent = min(match.start() for match in matches if match)

    # build table of methods
    from astropy.table import Table
    rows = []
    for method in METHODS[scaling]:
        func = METHODS[scaling][method]
        rows.append((method, '`%s.%s`' % (func.__module__, func.__name__)))
    format_str = Table(rows=rows, names=['Method name', 'Function']).pformat(
        max_lines=-1, max_width=80, align=('>', '<'))
    format_str[1] = format_str[1].replace('-', '=')
    format_str.insert(0, format_str[1])
    format_str.append(format_str[0])
    format_str.extend(['', 'See :ref:`gwpy-signal-fft` for more details'])

    lines.extend([' ' * indent + line for line in [header, ''] + format_str])

    # and overwrite the docstring
    obj.__doc__ = '\n'.join(lines)
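This helper assumes a module-level ``METHODS`` registry (scaling -> method name -> callable) and a module-level ``import re``, neither of which is shown here. A minimal, hypothetical driver, sketched with two SciPy estimators standing in for the real registry contents:

import re  # update_doc() above expects ``re`` at module scope

from scipy.signal import periodogram, welch

# hypothetical registry: scaling -> method name -> callable
METHODS = {
    'density': {
        'welch': welch,
        'periodogram': periodogram,
    },
}


def psd(data, method='welch'):
    """Compute a power spectral density of ``data``.

    The ``method`` keyword selects the averaging method.

    The available methods are:
    """


update_doc(psd)  # appends a formatted method table to psd.__doc__
print(psd.__doc__)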
Example #2
def update_doc(obj, scaling='density'):
    """Update the docstring of ``obj`` to reference available FFT methods
    """
    header = 'The available methods are:'

    # if __doc__ isn't a string, bail-out now
    if not isinstance(obj.__doc__, string_types):
        return

    # remove the old format list
    lines = obj.__doc__.splitlines()
    try:
        pos = [i for i, line in enumerate(lines) if header in line][0]
    except IndexError:
        pass
    else:
        lines = lines[:pos]

    # work out the indentation
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    indent = min(match.start() for match in matches if match)

    # build table of methods
    from astropy.table import Table
    rows = []
    for method in METHODS[scaling]:
        f = METHODS[scaling][method]
        rows.append((method, '`%s.%s`' % (f.__module__, f.__name__)))
    rows.sort(key=lambda x: x[1])
    format_str = Table(rows=rows, names=['Method name',
                                         'Function']).pformat(max_lines=-1,
                                                              max_width=80,
                                                              align=('>', '<'))
    format_str[1] = format_str[1].replace('-', '=')
    format_str.insert(0, format_str[1])
    format_str.append(format_str[0])
    format_str.extend(['', 'See :ref:`gwpy-signal-fft` for more details'])

    lines.extend([' ' * indent + line for line in [header, ''] + format_str])
    # and overwrite the docstring
    try:
        obj.__doc__ = '\n'.join(lines)
    except AttributeError:
        obj.__func__.__doc__ = '\n'.join(lines)
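Compared with Example #1, this variant adds two guards: the ``string_types`` check (assumed here to come from ``six``) skips objects whose docstring is None, as under ``python -OO``, and the AttributeError fallback handles Python 2 methods, whose ``__doc__`` is writable only through ``__func__``. A minimal sketch of the method case, reusing the hypothetical ``METHODS`` registry and ``import re`` from the sketch above:

from six import string_types  # assumed origin of string_types


class Spectrum(object):
    def psd(self, method='welch'):
        """Compute a power spectral density.

        The ``method`` keyword selects the averaging method.

        The available methods are:
        """


# On Python 2, ``Spectrum.psd.__doc__ = ...`` raises AttributeError,
# so the fallback writes through ``Spectrum.psd.__func__`` instead;
# on Python 3 the direct assignment simply succeeds.
update_doc(Spectrum.psd)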
Example #3
def _update__doc__(data_class):
    header = "The available named formats are:"
    fetch = data_class.fetch

    # if __doc__ isn't a string, bail-out now
    if not isinstance(fetch.__doc__, string_types):
        return

    # remove the old format list
    lines = fetch.__doc__.splitlines()
    try:
        pos = [i for i, line in enumerate(lines) if header in line][0]
    except IndexError:
        pass
    else:
        lines = lines[:pos]

    # work out the indentation
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    indent = min(match.start() for match in matches if match)

    # now re-write the format list
    formats = []
    for fmt, cls in sorted(_FETCHERS, key=lambda x: x[0]):
        if cls is not data_class:
            continue
        usage = _FETCHERS[(fmt, cls)][1]
        formats.append((fmt, '``fetch(%r, %s)``' % (fmt, usage)))
    format_str = Table(rows=formats,
                       names=['Format',
                              'Basic usage']).pformat(max_lines=-1,
                                                      max_width=80,
                                                      align=('>', '<'))
    format_str[1] = format_str[1].replace('-', '=')
    format_str.insert(0, format_str[1])
    format_str.append(format_str[0])

    lines.extend([' ' * indent + line for line in [header, ''] + format_str])
    # and overwrite the docstring
    try:
        fetch.__doc__ = '\n'.join(lines)
    except AttributeError:
        fetch.__func__.__doc__ = '\n'.join(lines)
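This example (and the two variants that follow) iterate a ``_FETCHERS`` or ``_SAMPLERS`` registry keyed by ``(format, class)`` tuples, with a usage string in slot 1 of each value. The registry itself is not shown; a hypothetical registration helper consistent with that layout would be:

_FETCHERS = {}


def register_fetcher(fmt, data_class, func, usage):
    # ``usage`` is the argument list rendered in the docstring table,
    # e.g. "start, end, columns=None"
    _FETCHERS[(fmt, data_class)] = (func, usage)


# hypothetical use:
# register_fetcher('myformat', MyTable, fetch_myformat, 'start, end')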
Example #4
def _update__doc__(data_class):
    header = "The available named formats are:"
    fetch = data_class.fetch

    # if __doc__ isn't a string, bail-out now
    if not isinstance(fetch.__doc__, string_types):
        return

    # remove the old format list
    lines = fetch.__doc__.splitlines()
    try:
        pos = [i for i, line in enumerate(lines) if header in line][0]
    except IndexError:
        pass
    else:
        lines = lines[:pos]

    # work out the indentation
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    indent = min(match.start() for match in matches if match)

    # now re-write the format list
    formats = []
    for fmt, cls in sorted(_FETCHERS, key=lambda x: x[0]):
        if cls is not data_class:
            continue
        usage = _FETCHERS[(fmt, cls)][1]
        formats.append((
            fmt, '``fetch(%r, %s)``' % (fmt, usage)))
    format_str = Table(rows=formats, names=['Format', 'Basic usage']).pformat(
        max_lines=-1, max_width=80, align=('>', '<'))
    format_str[1] = format_str[1].replace('-', '=')
    format_str.insert(0, format_str[1])
    format_str.append(format_str[0])

    lines.extend([' ' * indent + line for line in [header, ''] + format_str])
    # and overwrite the docstring
    try:
        fetch.__doc__ = '\n'.join(lines)
    except AttributeError:
        fetch.__func__.__doc__ = '\n'.join(lines)
Example #5
def _update__doc__(data_class):
    header = "The available named formats are:"
    sampler = data_class.sampler

    # if __doc__ isn't a string, bail-out now
    if not isinstance(sampler.__doc__, string_types):
        return

    # remove the old format list
    lines = sampler.__doc__.splitlines()
    try:
        pos = [i for i, line in enumerate(lines) if header in line][0]
    except IndexError:
        pass
    else:
        lines = lines[:pos]

    # work out the indentation
    matches = [re.search(r"(\S)", line) for line in lines[1:]]
    indent = min(match.start() for match in matches if match)

    # now re-write the format list
    formats = []
    for fmt, cls in sorted(_SAMPLERS, key=lambda x: x[0]):
        if cls is not data_class:
            continue
        usage = _SAMPLERS[(fmt, cls)][1]
        formats.append((fmt, "``sampler(%r, %s)``" % (fmt, usage)))
    format_str = Table(rows=formats, names=["Format", "Basic usage"]).pformat(
        max_lines=-1, max_width=80, align=(">", "<")
    )
    format_str[1] = format_str[1].replace("-", "=")
    format_str.insert(0, format_str[1])
    format_str.append(format_str[0])

    lines.extend([" " * indent + line for line in [header, ""] + format_str])
    # and overwrite the docstring
    try:
        sampler.__doc__ = "\n".join(lines)
    except AttributeError:
        sampler.__func__.__doc__ = "\n".join(lines)
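In all five variants the pformat post-processing (copy the header rule, switch its dashes to '=', and place copies above the header and after the last row) turns astropy's plain-text table into a reStructuredText simple table. For a registry with a single, made-up entry, the block appended to the docstring would look roughly like this (column widths track the longest cell, and the first column is right-aligned):

The available named formats are:

    ========= ==================================
       Format Basic usage
    ========= ==================================
    'nameish' ``sampler('nameish', start, end)``
    ========= ==================================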
Example #6
def gather_nightwatch_qa(night, verbose=False, overwrite=False):
    """Read and stack all the nightwatch QA files for a given night.

    """
    import json
    import astropy.table
    from astropy.table import Table

    qadir = os.path.join(outdir, 'nightwatch')
    if not os.path.isdir(qadir):
        os.makedirs(qadir, exist_ok=True)
    stackwatchfile = os.path.join(qadir,
                                  'qa-nightwatch-{}.fits'.format(str(night)))
    fiberassignmapfile = os.path.join(
        qadir, 'fiberassignmap-{}.fits'.format(str(night)))

    if os.path.isfile(stackwatchfile) and not overwrite:
        print('Reading {}'.format(stackwatchfile))
        data = Table(fitsio.read(stackwatchfile, 'PER_CAMFIBER'))
        fiberassignmap = Table(fitsio.read(fiberassignmapfile))
    else:
        #print('Reading the focal plane model.')
        #fp = desimodel.io.load_focalplane()[0]
        #fp = fp['PETAL', 'FIBER', 'OFFSET_X', 'OFFSET_Y']

        nightdir = os.path.join(nightwatchdir, str(night))
        allexpiddir = glob(os.path.join(nightdir, '????????'))

        data = []
        fiberassignmap = Table(names=('NIGHT', 'EXPID', 'TILEID',
                                      'FIBERASSIGNFILE'),
                               dtype=('U8', 'U8', 'i4', 'U32'))
        for expiddir in allexpiddir:
            expid = os.path.basename(expiddir)
            qafile = os.path.join(expiddir, 'qa-{}.fits'.format(expid))

            qaFITS = fitsio.FITS(qafile)
            if 'PER_CAMFIBER' not in qaFITS:
                # skip this exposure; without the guard ``qa`` could be
                # stale (or undefined) when appended to ``data`` below
                continue
            if verbose:
                print('Reading {}'.format(qafile))
            qa = Table(qaFITS['PER_CAMFIBER'].read())

            # Hack! Figure out the mapping between EXPID and FIBERMAP
            # using the request-EXPID.json file.
            requestfile = os.path.join(rawdata_dir, str(night), expid,
                                       'request-{}.json'.format(expid))
            if not os.path.isfile(requestfile):
                print('Missing {}'.format(requestfile))
                continue
            with open(requestfile) as ff:
                req = json.load(ff)
            if 'PASSTHRU' in req:
                if isinstance(req['PASSTHRU'], dict):
                    tileid = req['PASSTHRU']['TILEID']
                    #tileid = int(req['PASSTHRU'].split(':')[3].split(',')[0])
                else:
                    indx = req['PASSTHRU'].index('TILEID')
                    tileid = req['PASSTHRU'][indx:]
                    tileid = int(tileid[tileid.index(':') +
                                        1:tileid.index(',')])
                # This should use the svn checkout!
                tilefile = glob(
                    os.path.join(rawdata_dir, str(night), '????????',
                                 'fiberassign-{:06d}.fits'.format(tileid)))
                #if len(tilefile) == 0:
                #    print('No fibermap file found for EXPID={}'.format(expid))
                #if len(tilefile) > 0:
                #    print('Multiple fibermap files found for EXPID={}!'.format(expid))
                if len(tilefile) > 0:
                    tsplit = tilefile[0].split('/')
                    fiberassignmap.add_row(
                        (str(night), str(expid), tileid,
                         os.path.join(tsplit[-2], tsplit[-1])))
                    #fiberassignmap[str(expid)] = [tileid]
                    data.append(qa)
                #else:
                #    print('  No tilefile found')
            #else:
            #    print('  No tilefile found')

        if len(data) == 0:
            print('No fiberassign files found for night {}'.format(night))
            return None, None
        data = astropy.table.vstack(data)

        # Need to update the data model to 'f4'.
        print('Updating the data model.')
        for col in data.colnames:
            if data[col].dtype == '>f8':
                data[col] = data[col].astype('f4')

        print('Writing {}'.format(stackwatchfile))
        fitsio.write(stackwatchfile,
                     data.as_array(),
                     clobber=True,
                     extname='PER_CAMFIBER')

        print('Writing {}'.format(fiberassignmapfile))
        # ValueError: unsupported type 'U42'
        #fitsio.write(fiberassignmapfile, fiberassignmap.as_array(), clobber=True)
        fiberassignmap.write(fiberassignmapfile, overwrite=True)

    return data, fiberassignmap
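The string branch of the PASSTHRU handling above slices the tile ID out of a colon/comma-delimited passthrough string. A standalone check of just that slicing logic, with a made-up payload:

passthru = "SURVEY:SV1,TILEID:63075,EXPTIME:300"   # hypothetical payload
indx = passthru.index('TILEID')
tail = passthru[indx:]                  # 'TILEID:63075,EXPTIME:300'
tileid = int(tail[tail.index(':') + 1:tail.index(',')])
assert tileid == 63075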
Example #7
    def write(self,
              filename=None,
              max_rows=None,
              format="ascii.latex",
              column_format="{0:.3f}",
              **kwargs):
        """
		Outputs the points that make up the design in a nicely formatted table

		:param filename: name of the file to which the table will be saved; if None the contents will be printed
		:type filename: str. or file descriptor

		:param max_rows: maximum number of rows in the table, if smaller than the number of points the different chunks are hstacked (useful if there are too many rows for display)
		:type max_rows: int.

		:param format: passed to the Table.write astropy method
		:type format: str.

		:param column_format: format specifier for the numerical values in the Table
		:type column_format: str.

		:param kwargs: the keyword arguments are passed to astropy.Table.write method
		:type kwargs: dict.

		:returns: the Table instance with the design parameters

		"""

        #Check that there is something to save
        assert hasattr(self,
                       "points"), "There are no points in your design yet!"
        names = [self.label[p] for p in self.parameters]

        if (max_rows is None) or (max_rows >= self.npoints):

            #Construct the columns
            columns = self.points

            #Build the table
            design_table = Table(columns, names=names)

            #Add the number column to the left
            design_table.add_column(Column(data=range(1, self.npoints + 1),
                                           name=r"$N$"),
                                    index=0)

        else:

            #Figure out the splitting
            num_chunks = self.npoints // max_rows
            if self.npoints % max_rows != 0:
                num_chunks += 1

            #Construct the list of tables to hstack
            design_table = list()

            #Cycle through the chunks and create the sub-tables
            for n in range(num_chunks - 1):

                columns = self.points[n * max_rows:(n + 1) * max_rows]

                #Build the sub-table
                design_table.append(Table(columns, names=names))

                #Add the number column to the left
                design_table[-1].add_column(Column(data=range(
                    n * max_rows + 1, (n + 1) * max_rows + 1),
                                                   name=r"$N$"),
                                            index=0)

            #Create the last sub-table
            columns = self.points[(num_chunks - 1) * max_rows:]
            design_table.append(Table(columns, names=names))
            design_table[-1].add_column(Column(data=range(
                (num_chunks - 1) * max_rows + 1, self.npoints + 1),
                                               name=r"$N$"),
                                        index=0)

            #hstack in a single table
            design_table = hstack(design_table)

        #Tune the format
        for colname in design_table.colnames:
            # ``np.int`` was removed in NumPy 1.24; test for integer kinds instead
            if not np.issubdtype(design_table.dtype[colname], np.integer):
                design_table[colname].format = column_format

        #Write the table or return it
        if filename is not None:
            design_table.write(filename, format=format, **kwargs)
            return None
        else:
            return design_table
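When max_rows splits the design, the chunks end up side by side rather than stacked: a 6-point design written with max_rows=3 yields two column groups of three rows each. A toy illustration of that hstack layout, assuming nothing about the Design class itself:

import numpy as np
from astropy.table import Column, Table, hstack

points = np.arange(12.0).reshape(6, 2)   # 6 points, 2 parameters
names = ['p1', 'p2']
chunks = []
for n in range(2):                       # max_rows = 3 -> 2 chunks
    t = Table(points[n * 3:(n + 1) * 3], names=names)
    t.add_column(Column(data=range(n * 3 + 1, (n + 1) * 3 + 1), name=r"$N$"),
                 index=0)
    chunks.append(t)
# duplicate column names are uniquified (e.g. 'p1_1', 'p1_2') with a warning
print(hstack(chunks))                    # 3 rows, 6 columns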
Example #8
	def write(self,filename=None,max_rows=None,format="ascii.latex",column_format="{0:.3f}",**kwargs):

		"""
		Outputs the points that make up the design in a nicely formatted table

		:param filename: name of the file to which the table will be saved; if None the contents will be printed
		:type filename: str. or file descriptor

		:param max_rows: maximum number of rows in the table, if smaller than the number of points the different chunks are hstacked (useful if there are too many rows for display)
		:type max_rows: int.

		:param format: passed to the Table.write astropy method
		:type format: str.

		:param column_format: format specifier for the numerical values in the Table
		:type column_format: str.

		:param kwargs: the keyword arguments are passed to astropy.Table.write method
		:type kwargs: dict.

		:returns: the Table instance with the design parameters

		"""

		#Check that there is something to save
		assert hasattr(self,"points"),"There are no points in your design yet!"
		names = [ self.label[p] for p in self.parameters ]
		
		if (max_rows is None) or (max_rows>=self.npoints):
			
			#Construct the columns
			columns = self.points

			#Build the table
			design_table = Table(columns,names=names)

			#Add the number column to the left
			design_table.add_column(Column(data=range(1,self.npoints+1),name=r"$N$"),index=0)

		else:

			#Figure out the splitting
			num_chunks = self.npoints // max_rows
			if self.npoints%max_rows!=0:
				num_chunks+=1

			#Construct the list of tables to hstack
			design_table = list()

			#Cycle through the chunks and create the sub-tables
			for n in range(num_chunks-1):

				columns = self.points[n*max_rows:(n+1)*max_rows]

				#Build the sub-table
				design_table.append(Table(columns,names=names))

				#Add the number column to the left
				design_table[-1].add_column(Column(data=range(n*max_rows+1,(n+1)*max_rows+1),name=r"$N$"),index=0)

			#Create the last sub-table
			columns = self.points[(num_chunks-1)*max_rows:]
			design_table.append(Table(columns,names=names))
			design_table[-1].add_column(Column(data=range((num_chunks-1)*max_rows+1,self.npoints+1),name=r"$N$"),index=0)

			#hstack in a single table
			design_table = hstack(design_table)


		#Tune the format
		for colname in design_table.colnames:
			# ``np.int`` was removed in NumPy 1.24; test for integer kinds instead
			if not np.issubdtype(design_table.dtype[colname], np.integer):
				design_table[colname].format = column_format

		#Write the table or return it
		if filename is not None:
			design_table.write(filename,format=format,**kwargs)
			return None
		else:
			return design_table
Example #9
class FITSdata:
	def __init__(self):
		self.sources = Table()
		
	def appendFromFile(self, filename):
		try:
			hdulist = fits.open(filename)
			header = hdulist[0].header
			tableData = hdulist[1].data
			cols = hdulist[1].columns
			# collect the rows as dicts, then append them in one go:
			# astropy's Table has no per-row append() method
			rows = []
			for d in tableData:
				rowObject = {}
				for c in cols.names:
					rowObject[c] = d[c]
				rows.append(rowObject)
			added = len(rows)
			self.appendRows(Table(rows=rows))
		except IOError:
			print("Could not load: %s" % filename)
			return (-1, -1)
		return (added, len(self.sources))
		
	def appendRows(self, tab):
		# vstack needs at least one column on both sides, so seed the
		# table on the first call
		if len(self.sources.colnames) == 0:
			self.sources = tab
		else:
			self.sources = vstack([self.sources, tab])
		return len(self.sources)
		
		
	def sort(self):
		# sort in place by descending 'mean'; sorted() on a Table would
		# return a plain list of Row objects, not a new Table
		self.sources.sort('mean')
		self.sources.reverse()
		
	def writeToFile(self, filename):
		objects = self.sources
		hdu = fits.PrimaryHDU()
		cols = []
		cols.append(fits.Column(name='id', format='16A', array = [o['id'] for o in objects]))
		cols.append(fits.Column(name='ra', format='E', array = [o['ra'] for o in objects]))
		cols.append(fits.Column(name='dec', format = 'E', array = [o['dec'] for o in objects]))
		cols.append(fits.Column(name='xmax', format = 'E', array = [o['xmax'] for o in objects]))
		cols.append(fits.Column(name='ymax', format = 'E', array = [o['ymax'] for o in objects]))
		cols.append(fits.Column(name='mean', format = 'E', array = [o['mean'] for o in objects]))
		cols.append(fits.Column(name='peak', format = 'E', array = [o['peak'] for o in objects]))
		cols.append(fits.Column(name='variance', format = 'E', array = [o['variance'] for o in objects]))
		cols.append(fits.Column(name='type', format = '8A', array = [o['type'] for o in objects]))
		cols.append(fits.Column(name='CCD', format = '4A', array = [o['CCD'] for o in objects]))
		cols.append(fits.Column(name='Ha_sky', format = 'E', array = [o['Ha_sky'] for o in objects]))
		cols.append(fits.Column(name='sky_mean', format = 'E', array = [o['sky_mean'] for o in objects]))
		cols.append(fits.Column(name='r', format = 'E', array = [o['r'] for o in objects]))
		cols.append(fits.Column(name='r_sky', format = 'E', array = [o['r_sky'] for o in objects]))
		cols.append(fits.Column(name='r_sky_mean', format = 'E', array = [o['r_sky_mean'] for o in objects]))
		cols = fits.ColDefs(cols)
		tbhdu = fits.BinTableHDU.from_columns(cols)
			
		prihdr = fits.Header()
		prihdr['COMMENT'] = "Created by Hagrid (mergeOutput) on %s."%( datetime.datetime.ctime(datetime.datetime.now()))
			
		prihdu = fits.PrimaryHDU(header=prihdr)
		thdulist = fits.HDUList([prihdu, tbhdu])
		thdulist.writeto(filename, overwrite=True)
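The column format codes follow the FITS binary-table convention: 'E' is a 32-bit float and 'nA' an n-character ASCII string, so this table stores all numeric columns in single precision. Assuming ``fits`` above is ``astropy.io.fits``, a quick round-trip check of a written file (filename hypothetical) might be:

from astropy.io import fits
from astropy.table import Table

with fits.open('merged_sources.fits') as hdulist:   # hypothetical file
    roundtrip = Table(hdulist[1].data)
print(roundtrip.colnames)   # ['id', 'ra', 'dec', 'xmax', 'ymax', ...]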
Example #10
class FITSdata:
    def __init__(self):
        self.sources = Table()

    def appendFromFile(self, filename):
        try:
            hdulist = fits.open(filename)
            header = hdulist[0].header
            tableData = hdulist[1].data
            cols = hdulist[1].columns
            # collect the rows as dicts, then append them in one go:
            # astropy's Table has no per-row append() method
            rows = []
            for d in tableData:
                rowObject = {}
                for c in cols.names:
                    rowObject[c] = d[c]
                rows.append(rowObject)
            added = len(rows)
            self.appendRows(Table(rows=rows))
        except IOError:
            print("Could not load: %s" % filename)
            return (-1, -1)
        return (added, len(self.sources))

    def appendRows(self, tab):
        # vstack needs at least one column on both sides, so seed the
        # table on the first call
        if len(self.sources.colnames) == 0:
            self.sources = tab
        else:
            self.sources = vstack([self.sources, tab])
        return len(self.sources)

    def sort(self):
        # sort in place by descending 'mean'; sorted() on a Table would
        # return a plain list of Row objects, not a new Table
        self.sources.sort('mean')
        self.sources.reverse()

    def writeToFile(self, filename):
        objects = self.sources
        hdu = fits.PrimaryHDU()
        cols = []
        cols.append(
            fits.Column(name='id',
                        format='16A',
                        array=[o['id'] for o in objects]))
        cols.append(
            fits.Column(name='ra',
                        format='E',
                        array=[o['ra'] for o in objects]))
        cols.append(
            fits.Column(name='dec',
                        format='E',
                        array=[o['dec'] for o in objects]))
        cols.append(
            fits.Column(name='xmax',
                        format='E',
                        array=[o['xmax'] for o in objects]))
        cols.append(
            fits.Column(name='ymax',
                        format='E',
                        array=[o['ymax'] for o in objects]))
        cols.append(
            fits.Column(name='mean',
                        format='E',
                        array=[o['mean'] for o in objects]))
        cols.append(
            fits.Column(name='peak',
                        format='E',
                        array=[o['peak'] for o in objects]))
        cols.append(
            fits.Column(name='variance',
                        format='E',
                        array=[o['variance'] for o in objects]))
        cols.append(
            fits.Column(name='type',
                        format='8A',
                        array=[o['type'] for o in objects]))
        cols.append(
            fits.Column(name='CCD',
                        format='4A',
                        array=[o['CCD'] for o in objects]))
        cols.append(
            fits.Column(name='Ha_sky',
                        format='E',
                        array=[o['Ha_sky'] for o in objects]))
        cols.append(
            fits.Column(name='sky_mean',
                        format='E',
                        array=[o['sky_mean'] for o in objects]))
        cols.append(
            fits.Column(name='r', format='E', array=[o['r'] for o in objects]))
        cols.append(
            fits.Column(name='r_sky',
                        format='E',
                        array=[o['r_sky'] for o in objects]))
        cols.append(
            fits.Column(name='r_sky_mean',
                        format='E',
                        array=[o['r_sky_mean'] for o in objects]))
        cols = fits.ColDefs(cols)
        tbhdu = fits.BinTableHDU.from_columns(cols)

        prihdr = fits.Header()
        prihdr['COMMENT'] = "Created by Hagrid (mergeOutput) on %s." % (
            datetime.datetime.ctime(datetime.datetime.now()))

        prihdu = fits.PrimaryHDU(header=prihdr)
        thdulist = fits.HDUList([prihdu, tbhdu])
        thdulist.writeto(filename, overwrite=True)
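A hypothetical driver for this class, with made-up input filenames, would merge per-CCD catalogues, rank the rows by their 'mean' column, and write a single output table:

catalogue = FITSdata()
for name in ('sources_ccd1.fits', 'sources_ccd2.fits'):   # hypothetical inputs
    added, total = catalogue.appendFromFile(name)
    print('%s: added %d rows (running total %d)' % (name, added, total))
catalogue.sort()
catalogue.writeToFile('merged_sources.fits')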