Example #1
def main(args=None):
    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(
        description=('Print the header(s) of a FITS file. '
                     'All HDU extensions are shown by default. '
                     'In the case of a compressed image, '
                     'the decompressed header is shown.'))
    parser.add_argument('-e', '--ext', metavar='hdu',
                        help='specify the HDU extension number or name')
    parser.add_argument('-c', '--compressed', action='store_true',
                        help='for compressed image data, '
                             'show the true header which describes '
                             'the compression rather than the data')
    parser.add_argument('filename', nargs='+',
                        help='path to one or more FITS files to display')
    args = parser.parse_args(args)

    try:
        for filename in args.filename:
            print(HeaderFormatter(filename, args.compressed).parse(args.ext))
    except FormattingException as e:
        log.error(e)
    except IOError:
        # A 'Broken pipe' IOError may occur when stdout is closed prematurely,
        # e.g. when using `fitsheader file.fits | head`. We let this pass.
        pass
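
Because main() accepts an optional argument list, the header printer can be driven from Python as well as from the shell. A minimal invocation sketch; the filenames below are placeholders:

main(['--ext', '0', 'image.fits'])    # print only the primary HDU header
main(['a.fits', 'b.fits'])            # print every HDU of several files
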
Example #2
def hogg_iau_name_main():  # pragma: no cover
    from astropy.utils.compat import argparse
    parser = argparse.ArgumentParser(
        description=('Properly format astronomical source names ' +
                     'to the IAU convention.'))
    parser.add_argument('-P',
                        '--precision',
                        dest='precision',
                        action='store',
                        metavar='N',
                        default=1,
                        type=int,
                        help='Digits of precision to add to the declination.')
    parser.add_argument('-p',
                        '--prefix',
                        dest='prefix',
                        action='store',
                        metavar='STR',
                        default='SDSS',
                        help='Add this prefix to the name.')
    parser.add_argument('ra',
                        metavar='RA',
                        type=float,
                        help='Right Ascension.')
    parser.add_argument('dec', metavar='Dec', type=float, help='Declination.')
    options = parser.parse_args()
    print(
        hogg_iau_name(options.ra,
                      options.dec,
                      prefix=options.prefix,
                      precision=options.precision))
    return 0
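
hogg_iau_name itself is defined elsewhere in the module. For orientation, a rough sketch of the same kind of IAU-style formatting using only astropy's public API; the exact truncation and rounding conventions of hogg_iau_name may differ:

import astropy.units as u
from astropy.coordinates import SkyCoord

c = SkyCoord(ra=185.0 * u.deg, dec=15.5 * u.deg)
ra_str = c.ra.to_string(unit=u.hour, sep='', precision=2, pad=True)
dec_str = c.dec.to_string(sep='', precision=1, alwayssign=True, pad=True)
print('SDSS J{0}{1}'.format(ra_str, dec_str))  # SDSS J122000.00+153000.0
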
Example #3
def main(args=None):

    from astropy.utils.compat import argparse
    from time import time

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-c', '--use-cython', dest='cy', action='store_true',
                        help='Use the Cython-based Prime number generator.')
    parser.add_argument('-t', '--timing', dest='time', action='store_true',
                        help='Time the prime number generator.')
    parser.add_argument('-p', '--print', dest='prnt', action='store_true',
                        help='Print all of the Prime numbers.')
    parser.add_argument('n', metavar='N', type=int,
                        help='Get Prime numbers up to this number.')

    res = parser.parse_args(args)

    pre = time()
    primes = do_primes(res.n, res.cy)
    post = time()

    print('Found {0} prime numbers'.format(len(primes)))
    print('Largest prime: {0}'.format(primes[-1]))

    if res.time:
        print('Running time: {0} s'.format(post - pre))

    if res.prnt:
        print('Primes: {0}'.format(primes))
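
Since main() takes an optional argument list, the same run can be reproduced programmatically (do_primes comes from the surrounding module):

main(['100'])              # primes up to 100, pure-Python path
main(['-c', '-t', '100'])  # Cython path with timing, if the extension is built
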
Example #4
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Scan stacked sky fibers for negative fluxes... which should not happen, but in fact does.')
    parser.add_argument(
        '--pattern', type=str, default='stacked*exp??????.*', metavar='PATTERN',
        help='File pattern for stacked sky fibers.'
    )
    parser.add_argument(
        '--path', type=str, default='.', metavar='PATH',
        help="Path to work from, if not '.'"
    )
    parser.add_argument(
        '--rewrite', action='store_true',
        help='Flag to control whether or not negative values are replaced with 0'
    )
    parser.add_argument(
        '--ltzero', type=float, default=0.5, metavar='LTZERO',
        help='Value below zero to consider negative'
    )
    args = parser.parse_args()

    flux_list = []
    exp_list = []
    mask_list = []
    wavelengths = None

    for file in os.listdir(args.path):
        if fnmatch.fnmatch(file, args.pattern):
            if file.endswith('.fits'):
                data = Table.read(os.path.join(args.path, file), format="fits")
            elif file.endswith('.csv'):
                data = Table.read(os.path.join(args.path, file), format="ascii.csv")
            else:
                continue  # data would be undefined for other extensions

            #mask = data['ivar'] == 0

            # TODO: remove this hard-coded filename parsing before general use
            exp = int(file.split("-")[2][3:9])

            neg_mask = data['flux'] < -(args.ltzero)
            set_neg_mask = neg_mask #& ~mask
            if np.any(set_neg_mask):
                print(file, exp, data['wavelength'][set_neg_mask][0],
                      data['flux'][set_neg_mask][0], end=' ')
                if not args.rewrite:
                    print("FOUND")
                else:
                    print("REPAIRING...")
                    data['ivar'][set_neg_mask] = 0
                    data['flux'][set_neg_mask] = 0

                    if file.endswith('.fits'):
                        data.write(os.path.join(args.path, file), format="fits", overwrite=True)
                    elif file.endswith('.csv'):
                        data.write(os.path.join(args.path, file), format="ascii.csv", overwrite=True)
Example #5
def main(args=None):
    from . import wcs
    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(
        description=("Check the WCS keywords in a FITS file for "
                     "compliance against the standards"))
    parser.add_argument('filename', nargs=1, help='Path to FITS file to check')
    args = parser.parse_args(args)

    print(wcs.validate(args.filename[0]))
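
astropy.wcs.validate is part of astropy's public API; it also accepts an already-open HDUList, not just a path. A small sketch with a placeholder filename:

from astropy.io import fits
from astropy import wcs

# validate() returns a results object whose str() lists non-compliant keywords
with fits.open('image.fits') as hdulist:  # 'image.fits' is a placeholder
    print(wcs.validate(hdulist))
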
Example #6
def main(args=None):
    from . import table
    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(
        description=("Check a VOTable file for compliance to the "
                     "VOTable specification"))
    parser.add_argument('filename',
                        nargs=1,
                        help='Path to VOTable file to check')
    args = parser.parse_args(args)

    table.validate(args.filename[0])
Example #7
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Stack sky fibers from a PLATE/MJD into a single spectrum.')
    parser.add_argument(
        '--fibers',
        type=str,
        default=None,
        metavar='FIBERS',
        required=True,
        help=
        'File that contains list of PLATE, MJD, FIBER which are to be stacked (by PLATE/MJD).'
    )
    parser.add_argument(
        '--output',
        type=str,
        default='FITS',
        metavar='OUTPUT',
        help='Output format, either FITS or CSV; defaults to FITS.')
    parser.add_argument(
        '--pins',
        action='store_true',
        help=
        'Whether or not to output "pins": file with wavelengths of prominent peaks between ~4400 and 5600'
    )
    args = parser.parse_args()

    sky_fibers_table = Table.read(args.fibers, format='ascii')
    sky_fibers_table = sky_fibers_table.group_by(["PLATE", "MJD"])

    progress_bar = ProgressBar(widgets=[Percentage(), Bar()],
                               maxval=len(sky_fibers_table)).start()
    counter = 0

    for group in sky_fibers_table.groups:
        exposures, stacks, pin_peaks = stack_exposures(group,
                                                       use_cframe=True,
                                                       find_pins=args.pins)

        save_stacks(stacks, group, exposures, args.output)
        if args.pins:
            save_pins(pin_peaks, group)

        counter += len(group)
        progress_bar.update(counter)
    progress_bar.finish()
Example #8
def main(args=None):

    from astropy.utils.compat import argparse
    from time import time

    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument(
        "-c",
        "--use-cython",
        dest="cy",
        action="store_true",
        help="Use the Cython-based Prime number generator.",
    )
    parser.add_argument(
        "-t",
        "--timing",
        dest="time",
        action="store_true",
        help="Time the Fibonacci generator.",
    )
    parser.add_argument(
        "-p",
        "--print",
        dest="prnt",
        action="store_true",
        help="Print all of the Prime numbers.",
    )
    parser.add_argument(
        "n", metavar="N", type=int, help="Get Prime numbers up to this number."
    )

    res = parser.parse_args(args)

    pre = time()
    primes = do_primes(res.n, res.cy)
    post = time()

    print("Found {0} prime numbers".format(len(primes)))
    print("Largest prime: {0}".format(primes[-1]))

    if res.time:
        print("Running time: {0} s".format(post - pre))

    if res.prnt:
        print("Primes: {0}".format(primes))
Example #9
def main(args=None):
    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-c',
                        '--use-cython',
                        dest='cy',
                        action='store_true',
                        help='Use the Cython-based Fibonacci generator.')
    parser.add_argument('n',
                        metavar='N',
                        type=int,
                        help='Run Fibonacci series up to this number.')

    res = parser.parse_args(args)

    fibs = do_fib(res.n, res.cy)
    print(fibs)
Example #10
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=
        'Build and test models based on dim reductions and provided spectra')
    parser.add_argument('--spectra_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Spectra path to work from, if not '.'")
    parser.add_argument('--method',
                        type=str,
                        default='ICA',
                        metavar='METHOD',
                        help='Dim reduction method to load data for')
    parser.add_argument(
        '--file_path',
        type=str,
        default=None,
        metavar='FILE_PATH',
        help='COMPLETE path from which to load a dim reduction')

    args = parser.parse_args()

    data_model = None
    scaler = None
    if args.file_path is not None:
        data_model, scaler = ize.unpickle_model(filename=args.file_path)
    else:
        data_model, scaler = ize.unpickle_model(path=args.spectra_path,
                                                method=args.method)
    components = ize.get_components(args.method, data_model)

    offset = 0
    for i, comp_i in enumerate(components):
        if i > 0:
            offset += np.max(np.abs(comp_i[comp_i < 0])) * 1.2
        plt.plot(stack.skyexp_wlen_out, comp_i + offset)
        offset += np.max(comp_i[comp_i > 0]) * 1.2
    plt.show()
    plt.close()
Example #11
def main(argv=None):
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n',
                        '--num-repeats',
                        type=int,
                        default=1000,
                        help='number of times to repeat timing loops')
    parser.add_argument('--all',
                        action='store_true',
                        help='run all benchmark suites')
    parser.add_argument('--magnitude',
                        action='store_true',
                        help='benchmark magnitude calculations')
    parser.add_argument(
        '--save',
        type=str,
        default=None,
        help='Name of file to save results to (or print if not set)')
    parser.add_argument('--format',
                        type=str,
                        default='ascii.fixed_width_two_line',
                        help='format to use for results')
    args = parser.parse_args(argv)

    results = astropy.table.Table(names=('Suite', 'Description', 'Time [us]'),
                                  dtype=('S8', 'S40', float))
    if args.magnitude or args.all:
        results = magnitude_calculation(results, args.num_repeats)

    results.write(args.save,
                  format=args.format,
                  delimiter_pad=' ',
                  position_char='=',
                  formats={'Time [us]': '%.1f'})
    return 0
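
One wrinkle: the --save help text promises printing when no file is given, but results.write(args.save) is called unconditionally, and astropy rejects a None destination. Since astropy's ASCII writers accept file objects, a sketch of one way to honour the help text (sys would need importing; this replaces the final write call):

import sys

destination = args.save if args.save is not None else sys.stdout
results.write(destination, format=args.format, delimiter_pad=' ',
              position_char='=', formats={'Time [us]': '%.1f'})
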
Example #12
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=
        'Build and test models based on dim reductions and provided spectra')
    parser.add_argument('--metadata_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Metadata path to work from, if not '.'")
    parser.add_argument('--lunar_metadata',
                        type=str,
                        default=None,
                        metavar='LUNAR_METADATA',
                        required=True,
                        help='File containing lunar ephemeris metadata.')
    parser.add_argument('--solar_metadata',
                        type=str,
                        default=None,
                        metavar='SOLAR_METADATA',
                        required=True,
                        help='File containing solar ephemeris metadata.')
    parser.add_argument('--sunspot_metadata',
                        type=str,
                        default=None,
                        metavar='SUNSPOT_METADATA',
                        required=True,
                        help='File containing sunspot metadata.')
    parser.add_argument('--start_dt',
                        type=ani.valid_date,
                        help='DateTime to plot sky for')
    parser.add_argument('--end_dt',
                        type=ani.valid_date,
                        help='DateTime to plot sky for')
    parser.add_argument('--ra', type=str)
    parser.add_argument('--dec', type=str)
    args = parser.parse_args()

    m = np.zeros(hp.nside2npix(NSIDE))
    lunar_row, solar_row, ss_count, ss_area = ani.get_metadata_for_dt(
        args.start_dt, args.lunar_metadata, args.solar_metadata,
        args.sunspot_metadata)

    fig = plt.figure(1, figsize=(10, 7.5))
    hp.mollview(m, coord=['C'], title="Mollview image RING", fig=1)
    plt.show()
Example #13
def main(args=None):
    """
    This is the main function called by the `runvpfit` script.

    """

    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(
        description='Run VPFIT using a specified f26 file as input.')

    parser.add_argument('f26', help='f26 input filename')
    parser.add_argument('--include', help='path to the f26 file to include')
    parser.add_argument('--fwhm', type=float, default=10,
                        help='FWHM of the LSF of the spectrograph in km/s')
    parser.add_argument('--cos-fuv', help='option to use the HST/COS FUV LSF',
                        action='store_true')
    parser.add_argument('--cos-nuv', help='option to use the HST/COS NUV LSF',
                        action='store_true')

    args = parser.parse_args(args)

    run_vpfit(args.f26, inc=args.include, fwhm=args.fwhm, cos_fuv=args.cos_fuv,
              cos_nuv=args.cos_nuv)
Example #14
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Pull metadata from FITS for PLATE/MJD combos, output.')

    parser.add_argument('--obs_metadata',
                        type=str,
                        default=None,
                        metavar='OBS_METADATA',
                        required=True,
                        help='File containing observation metadata.')
    parser.add_argument('--lunar_metadata',
                        type=str,
                        default=None,
                        metavar='LUNAR_METADATA',
                        required=True,
                        help='File containing lunar ephemeris metadata.')
    parser.add_argument('--solar_metadata',
                        type=str,
                        default=None,
                        metavar='SOLAR_METADATA',
                        required=True,
                        help='File containing solar ephemeris metadata.')
    parser.add_argument('--sunspot_metadata',
                        type=str,
                        default=None,
                        metavar='SUNSPOT_METADATA',
                        required=True,
                        help='File containing sunspot metadata.')
    parser.add_argument(
        '--output',
        type=str,
        default='FITS',
        metavar='OUTPUT',
        help='Output format, either FITS or CSV; defaults to FITS.')
    args = parser.parse_args()

    if args.output.upper() == 'CSV':
        obs_md_table = Table.read(args.obs_metadata, format="ascii.csv")
    elif args.output.upper() == 'FITS':
        obs_md_table = Table.read(args.obs_metadata, format="fits")
    else:
        obs_md_table = Table.read(args.obs_metadata)

    lunar_md_table = Table.read(args.lunar_metadata, format="ascii.csv")
    lunar_md_table.rename_column('UTC', 'EPHEM_DATE')
    solar_md_table = Table.read(args.solar_metadata, format="ascii.csv")
    solar_md_table.rename_column('UTC', 'EPHEM_DATE')
    sunspot_md_table = Table.read(args.sunspot_metadata, format="ascii.csv")

    print "Table has {} entries".format(len(obs_md_table))
    lookup_date, obs_md_table = find_ephemeris_lookup_date(
        obs_md_table['TAI-BEG'], obs_md_table['TAI-END'], obs_md_table)
    print "Successfully got {} ephemeris date entries".format(len(lookup_date))
    ephem_date_col = Column(lookup_date, name="EPHEM_DATE")
    obs_md_table.add_column(ephem_date_col)

    sunspot_count, sunspot_area = find_sunspot_data(ephem_date_col,
                                                    sunspot_md_table)
    sunspot_count_col = Column(sunspot_count, name="SS_COUNT")
    sunspot_area_col = Column(sunspot_area, name="SS_AREA")
    obs_md_table.add_column(sunspot_count_col)
    obs_md_table.add_column(sunspot_area_col)

    galactic_core = ascoord.SkyCoord(l=0.0,
                                     b=0.0,
                                     unit='deg',
                                     frame='galactic')

    #Join lunar data to the table
    obs_md_table = join(
        obs_md_table, lunar_md_table['EPHEM_DATE', 'RA_APP', 'DEC_APP',
                                     'MG_APP', 'ELV_APP'])

    lunar_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA_APP'],
                                    dec=obs_md_table['DEC_APP'],
                                    unit='deg',
                                    frame='icrs')
    boresight_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA'],
                                        dec=obs_md_table['DEC'],
                                        unit='deg',
                                        frame='fk5')
    lunar_seps = boresight_ra_dec.separation(lunar_ra_dec).degree
    obs_md_table.add_column(Column(lunar_seps, dtype=float, name="LUNAR_SEP"))

    obs_md_table.rename_column("MG_APP", "LUNAR_MAGNITUDE")
    obs_md_table.rename_column("ELV_APP", "LUNAR_ELV")
    obs_md_table.remove_columns(['RA_APP', 'DEC_APP'])

    #Join solar data to the table
    obs_md_table = join(
        obs_md_table, solar_md_table['EPHEM_DATE', 'RA_APP', 'DEC_APP',
                                     'ELV_APP'])
    solar_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA_APP'],
                                    dec=obs_md_table['DEC_APP'],
                                    unit='deg',
                                    frame='icrs')
    boresight_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA'],
                                        dec=obs_md_table['DEC'],
                                        unit='deg',
                                        frame='fk5')
    solar_seps = boresight_ra_dec.separation(solar_ra_dec).degree
    obs_md_table.add_column(Column(solar_seps, dtype=float, name="SOLAR_SEP"))

    obs_md_table.rename_column("ELV_APP", "SOLAR_ELV")
    obs_md_table.remove_columns(['RA_APP', 'DEC_APP'])

    #Add in galactic data
    boresight_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA'],
                                        dec=obs_md_table['DEC'],
                                        unit='deg',
                                        frame='fk5')
    obs_md_table.add_column(
        Column(boresight_ra_dec.separation(galactic_core).degree,
               dtype=float,
               name="GALACTIC_CORE_SEP"))
    boresight_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA'],
                                        dec=obs_md_table['DEC'],
                                        unit='deg',
                                        frame='fk5')
    obs_md_table.add_column(
        Column(boresight_ra_dec.transform_to('galactic').b.degree,
               dtype=float,
               name="GALACTIC_PLANE_SEP"))
    if args.output.upper() == 'CSV':
        obs_md_table.write("annotated_metadata.csv", format="ascii.csv")
    elif args.output.upper() == 'FITS':
        obs_md_table.write("annotated_metadata.fits", format="fits")
Example #15
def prepare(args=None):
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='provide verbose output on progress')
    parser.add_argument('--classname',
                        choices=['qso', 'lrg', 'elg', 'elgem', 'star'],
                        default=None,
                        help='Spectral class to prepare.')
    parser.add_argument(
        '-k',
        '--num-kilo-spectra',
        type=int,
        default=1,
        metavar='K',
        help='Number of thousands of spectra to sample for the prior.')
    parser.add_argument('--downsampling',
                        type=int,
                        default=4,
                        metavar='DS',
                        help='Downsampling of 1A simulated pixels to use.')
    parser.add_argument('--include-emission',
                        action='store_true',
                        help='Add emission spectrum to ELG class.')
    parser.add_argument('--seed',
                        type=int,
                        default=None,
                        metavar='S',
                        help='Random seed to use for sampling templates.')
    parser.add_argument('--print-interval',
                        type=int,
                        default=500,
                        metavar='P',
                        help='Print messages for every P sampled spectra.')
    args = parser.parse_args(args)

    if args.classname is None:
        print('You must specify a spectral class.')
        return -1
    sampler = bayez.sampler.Samplers[args.classname]()
    if args.verbose:
        sampler.print_summary()

    if args.seed is None:
        print('You must specify a seed to use.')
        return -1

    simulator = bayez.simulation.Simulator(
        analysis_downsampling=args.downsampling, verbose=args.verbose)

    prior = bayez.prior.build_prior(args.classname,
                                    sampler,
                                    simulator,
                                    1000 * args.num_kilo_spectra,
                                    seed=args.seed,
                                    print_interval=args.print_interval)

    # Save the prior.
    path = os.environ.get('BAYEZ_DATA', '.')
    name = os.path.join(
        path, '{}_{}_{}k.fits'.format(args.classname, args.downsampling,
                                      args.num_kilo_spectra))
    if args.verbose:
        print('Saving prior to {}.'.format(name))
    prior.save(name, clobber=True)
Example #16
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Perform basic arithmetic on two FITS files.')
    parser.add_argument('fits_filename',
                        nargs=2,
                        help='FITS filename (or scalar value)')
    parser.add_argument('-e1',
                        '--exten1',
                        metavar='exten1',
                        type=int,
                        default=0,
                        help='')
    parser.add_argument('-e2',
                        '--exten2',
                        metavar='exten2',
                        type=int,
                        default=0,
                        help='')
    parser.add_argument('operator',
                        metavar='operator',
                        help="Arithmetic operator.  Must be one "
                        "of '+', '-', '*', '/', '//', 'min', or 'max'")
    parser.add_argument('-f',
                        '--fill_value',
                        metavar='fill_value',
                        type=float,
                        default=0.,
                        help='')
    parser.add_argument('-k',
                        '--keywords',
                        metavar='keywords',
                        type=str,
                        default=None,
                        help='')
    parser.add_argument('-o',
                        '--outfilename',
                        metavar='outfilename',
                        type=str,
                        default='imarith.fits',
                        help='')
    parser.add_argument('-c',
                        '--clobber',
                        default=False,
                        action='store_true',
                        help='')

    args = parser.parse_args(args)

    # TODO: better FITS to NDData and NDData to FITS adapters
    try:
        nddata1 = float(args.fits_filename[0])
    except ValueError:
        nddata1 = basic_fits_to_nddata(args.fits_filename[0],
                                       exten=args.exten1)
    try:
        nddata2 = float(args.fits_filename[1])
    except ValueError:
        nddata2 = basic_fits_to_nddata(args.fits_filename[1],
                                       exten=args.exten2)

    if not isinstance(nddata1, NDData) and not isinstance(nddata2, NDData):
        raise ValueError('Both "fits_filenames" cannot be scalars.')

    keywords = None
    if args.keywords is not None:
        keywords = args.keywords.replace(' ', '').split(',')

    nddata = imarith(nddata1,
                     nddata2,
                     args.operator,
                     fill_value=args.fill_value,
                     keywords=keywords)

    basic_nddata_to_fits(nddata, args.outfilename, clobber=args.clobber)
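
A hypothetical invocation sketch, with placeholder filenames; the two positional filenames precede the operator:

main(['numerator.fits', 'denominator.fits', '/', '-o', 'ratio.fits', '-c'])

Scalar operands also work, e.g. main(['image.fits', '2', '*']) doubles the image, because strings that are not valid floats fall back to being read as FITS files.
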
Example #17
def main(args=None):
    """
    This is the main function called by the `velplot` script.

    """

    from astropy.utils.compat import argparse
    from astropy.extern.configobj import configobj, validate

    from pkg_resources import resource_stream

    parser = argparse.ArgumentParser(
        description='Creates a stacked velocity plot.\nTo dump a default '
                    'configuration file: velplot -d\nTo dump an extended '
                    'default configuration file: velplot -dd',
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('config', help='path to the configuration file')

    config = resource_stream(__name__, '/config/velplot.cfg')
    config_extended = resource_stream(__name__, '/config/velplot_extended.cfg')
    spec = resource_stream(__name__, '/config/velplot_specification.cfg')

    if len(sys.argv) > 1:

        if sys.argv[1] == '-d':
            cfg = configobj.ConfigObj(config)
            cfg.filename = '{0}/velplot.cfg'.format(os.getcwd())
            cfg.write()
            return

        elif sys.argv[1] == '-dd':
            cfg = configobj.ConfigObj(config_extended)
            cfg.filename = '{0}/velplot.cfg'.format(os.getcwd())
            cfg.write()
            return

    args = parser.parse_args(args)

    try:
        cfg = configobj.ConfigObj(args.config, configspec=spec)
        validator = validate.Validator()
        cfg.validate(validator)

    except Exception:
        raise IOError('Configuration file could not be read')

    figname = cfg['FIGURE'].pop('filename')

    # Create list of transitions:
    fname = cfg['FIGURE'].pop('transitions')
    print('Reading transitions from ', fname)

    fh = open(fname)
    transitions = list(fh)
    fh.close()

    # Don't include transitions that are commented out:
    transitions = [transition for transition in transitions
                   if not transition.startswith('#')]

    # Initialise figure:
    velplot = VelocityPlot(transitions, **cfg['FIGURE'])
    fname = cfg['DATA'].pop('filename')

    if not fname:
        raise IOError('no data to plot!')

    # Get spectrum information and plot:
    spectrum = (Table.read(fname) if fname.endswith('fits')
                else ascii.read(fname))
    wavelength = spectrum[cfg['DATA'].pop('wavelength_column')]
    flux = spectrum[cfg['DATA'].pop('flux_column')]
    error = spectrum[cfg['DATA'].pop('error_column')]
    continuum = spectrum[cfg['DATA'].pop('continuum_column')]
    velplot.plot_data(wavelength, flux, error, continuum, **cfg['DATA'])

    # Get model information and plot if specified:
    fname = cfg['MODEL'].pop('filename')

    if fname:

        ion = cfg['MODEL'].pop('ion_column')
        redshift = cfg['MODEL'].pop('redshift_column')
        logn = cfg['MODEL'].pop('logn_column')
        b = cfg['MODEL'].pop('b_column')

        if fname.endswith('f26'):
            model = read_f26(fname)
            absorbers = model.absorbers

        else:
            table = (Table.read(fname) if fname.endswith('fits')
                     else ascii.read(fname))
            absorbers = [Absorber(row[ion], row[redshift], row[logn], row[b])
                         for row in table]

        velplot.plot_models(absorbers, **cfg['MODEL'])

    # Save:
    if figname:
        print('Saving to {0}'.format(figname))
        velplot.savefig(figname)

    # Display:
    velplot.display()
Example #18
def main(args=None):
    parser = argparse.ArgumentParser(description='Calculate image statistics.')
    parser.add_argument('fits_filename',
                        metavar='fits_filename',
                        nargs='*',
                        help='FITS filename(s)')
    parser.add_argument('-e',
                        '--exten',
                        metavar='exten',
                        type=int,
                        default=0,
                        help='')
    parser.add_argument('-s',
                        '--sigma',
                        metavar='sigma',
                        type=float,
                        default=3.,
                        help='The number of standard '
                        'deviations to use as the clipping limit')
    parser.add_argument('-i',
                        '--iters',
                        metavar='iters',
                        type=int,
                        default=1,
                        help='')
    parser.add_argument('-c',
                        '--columns',
                        metavar='columns',
                        type=str,
                        default='npixels, mean, std, min, max',
                        help='')
    parser.add_argument('-l',
                        '--lower',
                        metavar='lower',
                        type=float,
                        default=None,
                        help='')
    parser.add_argument('-u',
                        '--upper',
                        metavar='upper',
                        type=float,
                        default=None,
                        help='')
    parser.add_argument('-m',
                        '--mask_value',
                        metavar='mask_value',
                        type=float,
                        default=None,
                        help='')
    args = parser.parse_args(args)

    # TODO: better FITS to NDData object adapters!
    nddata = []
    for fits_fn in args.fits_filename:
        nddata.append(basic_fits_to_nddata(fits_fn, exten=args.exten))

    columns = args.columns.replace(' ', '').split(',')
    tbl = imstats(nddata,
                  sigma=args.sigma,
                  iters=args.iters,
                  cenfunc=np.ma.median,
                  varfunc=np.var,
                  columns=columns,
                  lower_bound=args.lower,
                  upper_bound=args.upper,
                  mask_value=args.mask_value)

    filenames = Column(args.fits_filename, name='filename')
    tbl.add_column(filenames, 0)

    tbl.pprint(max_lines=-1, max_width=-1)
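
A minimal programmatic sketch, assuming basic_fits_to_nddata and imstats resolve in the surrounding module and the files exist:

main(['-s', '2.5', '-i', '3', 'image1.fits', 'image2.fits'])
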
Example #19
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=
        'Build and test models based on dim reductions and provided spectra')
    subparsers = parser.add_subparsers(dest='subparser_name')

    parser.add_argument('--metadata_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Metadata path to work from, if not '.'")
    parser.add_argument('--spectra_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Spectra path to work from, if not '.'")
    parser.add_argument('--method',
                        type=str,
                        default='ICA',
                        metavar='METHOD',
                        help='Dim reduction method to load data for')
    parser.add_argument('--n_jobs',
                        type=int,
                        default=1,
                        metavar='N_JOBS',
                        help='N_JOBS')
    parser.add_argument(
        '--model',
        type=str,
        choices=['ET', 'RF', 'GP', 'KNN', 'SVR'],
        default='ET',
        help=
        'Which model type to use: ET (Extra Trees), RF (Random Forest), GP (Gaussian Process), KNN, or SVR (Support Vector Regression)'
    )
    parser.add_argument(
        '--load_model',
        action='store_true',
        help='Whether or not to load the model from --model_path')
    parser.add_argument('--model_path',
                        type=str,
                        default='model.pkl',
                        metavar='MODEL_PATH',
                        help='COMPLETE path from which to load a model')
    parser.add_argument(
        '--metadata_flags', type=str, default='', metavar='METADATA_FLAGS',
        help='Flags specifying observational metadata pre-processing, e.g. LUNAR_MAG which takes the '\
            'magnitude and linearizes it (ignoring that it is an area magnitude)'
    )
    parser.add_argument(
        '--compacted_path',
        type=str,
        default=None,
        metavar='COMPACTED_PATH',
        help=
        'Path to find compacted/arrayized data; setting this will cause --path, --pattern to be ignored'
    )

    parser_compare = subparsers.add_parser('compare')
    parser_compare.add_argument(
        '--folds',
        type=int,
        default=3,
        metavar='TEST_FOLDS',
        help=
        'Do k-fold cross validation with specified number of folds.  Defaults to 3.'
    )
    parser_compare.add_argument(
        '--iters',
        type=int,
        default=50,
        metavar='HYPER_FIT_ITERS',
        help='Number of iterations when fitting hyper-params')
    parser_compare.add_argument(
        '--outputfbk',
        action='store_true',
        help='If set, outputs \'grid_scores_\' data from RandomizedSearchCV')
    parser_compare.add_argument(
        '--save_best',
        action='store_true',
        help=
        'Whether or not to save the (last/best) model built for e.g. --hyper_fit'
    )
    parser_compare.add_argument(
        '--scorer',
        type=str,
        choices=['R2', 'MAE', 'MSE', 'LL', 'EXP_VAR', 'MAPED', 'MSEMV'],
        default='R2',
        help=
        'Which scoring method to use to determine ranking of model instances.')
    parser_compare.add_argument(
        '--use_spectra',
        action='store_true',
        help=
        'Whether scoring is done against the DM components or the predicted spectra'
    )
    parser_compare.add_argument(
        '--ivar_cutoff',
        type=float,
        default=0.001,
        metavar='IVAR_CUTOFF',
        help='data with inverse variance below cutoff is masked as if ivar==0')
    parser_compare.add_argument(
        '--plot_final_errors', action='store_true',
        help='If set, will plot the errors from the final/best model, for the whole dataset, from ' + \
            'the best model re-trained on CV folds used for testing. ' + \
            'Plots all errors on top of each other with low-ish alpha, to give a kind of visual ' + \
            'density map of errors.'
    )

    args = parser.parse_args()

    obs_metadata = trim_observation_metadata(
        load_observation_metadata(args.metadata_path,
                                  flags=args.metadata_flags))
    sources, components, exposures, wavelengths = ICAize.deserialize_data(
        args.spectra_path, args.method)
    source_model, ss, model_args = ICAize.unpickle_model(
        args.spectra_path, args.method)

    comb_flux_arr, comb_exposure_arr, comb_wavelengths = None, None, None
    if args.use_spectra:
        comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = ICAize.load_data(
            args)

        filter_arr = np.in1d(comb_exposure_arr, exposures)
        comb_flux_arr = comb_flux_arr[filter_arr]
        comb_exposure_arr = comb_exposure_arr[filter_arr]

        sorted_inds = np.argsort(comb_exposure_arr)
        comb_flux_arr = comb_flux_arr[sorted_inds]
        comb_exposure_arr = comb_exposure_arr[sorted_inds]

        del comb_ivar_arr
        del comb_masks

    reduced_obs_metadata = obs_metadata[np.in1d(obs_metadata['EXP_ID'],
                                                exposures)]
    reduced_obs_metadata.sort('EXP_ID')
    sorted_inds = np.argsort(exposures)

    reduced_obs_metadata.remove_column('EXP_ID')
    md_len = len(reduced_obs_metadata)
    var_count = len(reduced_obs_metadata.columns)
    X_arr = np.array(reduced_obs_metadata).view('f8').reshape((md_len, -1))
    Y_arr = sources[sorted_inds]

    if args.load_model:
        predictive_model = load_model(args.model_path)
    else:
        predictive_model = get_model(args.model)

    if args.subparser_name == 'compare':
        pdist = get_param_distribution_for_model(args.model, args.iters)

        scorer = None
        if args.scorer == 'R2':
            scorer = make_scorer(R2)
        elif args.scorer == 'MAE':
            if args.use_spectra:
                p_MAE_ = partial(MAE,
                                 Y_full=Y_arr,
                                 flux_arr=comb_flux_arr,
                                 source_model=source_model,
                                 ss=ss,
                                 source_model_args=model_args,
                                 method=args.method)
                scorer = make_scorer(p_MAE_, greater_is_better=False)
            else:
                scorer = make_scorer(MAE, greater_is_better=False)
        elif args.scorer == 'MSE':
            if args.use_spectra:
                p_MSE_ = partial(MSE,
                                 Y_full=Y_arr,
                                 flux_arr=comb_flux_arr,
                                 source_model=source_model,
                                 ss=ss,
                                 source_model_args=model_args,
                                 method=args.method)
                scorer = make_scorer(p_MSE_, greater_is_better=False)
            else:
                scorer = make_scorer(MSE, greater_is_better=False)
        elif args.scorer == 'MSEMV':
            if args.use_spectra:
                p_MSEMV_ = partial(MSEMV,
                                   Y_full=Y_arr,
                                   flux_arr=comb_flux_arr,
                                   source_model=source_model,
                                   ss=ss,
                                   source_model_args=model_args,
                                   method=args.method)
                scorer = make_scorer(p_MSEMV_, greater_is_better=False)
            else:
                scorer = make_scorer(MSEMV, greater_is_better=False)
        elif args.scorer == 'EXP_VAR':
            if args.use_spectra:
                p_EXP_VAR_ = partial(EXP_VAR,
                                     Y_full=Y_arr,
                                     flux_arr=comb_flux_arr,
                                     source_model=source_model,
                                     ss=ss,
                                     source_model_args=model_args,
                                     method=args.method)
                scorer = make_scorer(p_EXP_VAR_)
            else:
                scorer = make_scorer(EXP_VAR)
        elif args.scorer == 'MAPED':
            if args.use_spectra:
                p_MAPED_ = partial(MAPED,
                                   Y_full=Y_arr,
                                   flux_arr=comb_flux_arr,
                                   source_model=source_model,
                                   ss=ss,
                                   source_model_args=model_args,
                                   method=args.method)
                scorer = make_scorer(p_MAPED_, greater_is_better=False)
            else:
                scorer = make_scorer(MAPED, greater_is_better=False)
        elif args.scorer == 'LL':
            scorer = None

        folder = ShuffleSplit(exposures.shape[0],
                              n_iter=args.folds,
                              test_size=1.0 / args.folds,
                              random_state=12345)

        if args.model == 'GP':
            predictive_model.random_start = args.folds
            rcv = GridSearchCV(predictive_model,
                               param_grid=pdist,
                               error_score=0,
                               cv=3,
                               n_jobs=args.n_jobs,
                               scoring=scorer)
            #random_state=RANDOM_STATE,
            #n_iter=args.iters,
        else:
            rcv = RandomizedSearchCV(predictive_model,
                                     param_distributions=pdist,
                                     n_iter=args.iters,
                                     cv=folder,
                                     n_jobs=args.n_jobs,
                                     scoring=scorer)

        # This is going to fit X (metadata) to Y (DM'ed sources).  But there are
        # really two tests here: how well hyperparams fit/predict the sources
        # and how well they fit/predict the actual source spectra.  Until I know
        # better, I'm going to need to build a way to test both.
        rcv.fit(X_arr, Y_arr)

        print(rcv.best_score_)
        print(rcv.best_params_)
        print(rcv.best_estimator_)
        if args.outputfbk:
            print("=+" * 10 + "=")
            for val in rcv.grid_scores_:
                print(val)
            print("=+" * 10 + "=")

        if args.save_best:
            save_model(rcv.best_estimator_, args.model_path)

        if args.plot_final_errors:
            for train_inds, test_inds in folder:
                rcv.best_estimator_.fit(X_arr[train_inds], Y_arr[train_inds])
                predicted = rcv.best_estimator_.predict(X_arr[test_inds])
                back_trans_flux = ICAize.inverse_transform(
                    predicted, source_model, ss, args.method, model_args)
                diffs = np.abs(comb_flux_arr[test_inds] - back_trans_flux)
                #Is there not 'trick' to getting matplotlib to do this without a loop?
                for i in range(diffs.shape[0]):
                    plt.plot(comb_wavelengths, diffs[i, :], 'b-', alpha=0.01)
            plt.show()
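
The partial + make_scorer pattern above is worth isolating: make_scorer expects a plain score_func(y_true, y_pred), so any extra context (flux arrays, the dim-reduction model) has to be bound in beforehand with functools.partial. A self-contained sketch, with a hypothetical weighted-MSE standing in for the MSE/MAE helpers used above:

import numpy as np
from functools import partial
from sklearn.metrics import make_scorer

def weighted_mse(y_true, y_pred, weights=None):
    # hypothetical stand-in for MSE(..., Y_full=..., flux_arr=...) above
    err = (np.asarray(y_true) - np.asarray(y_pred)) ** 2
    return float(np.average(err, weights=weights))

# lower is better, so greater_is_better=False flips the sign for CV ranking
scorer = make_scorer(partial(weighted_mse, weights=None),
                     greater_is_better=False)
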
Example #20
def convert_fits_to_hdf(args=None):
    """ Convert a FITS file to HDF5 in HDFITS format

    An input and output directory must be specified, and all files with a matching
    extension will be converted. Command line options set the compression algorithm
    and other run-time settings.
    """
    # Parse options and arguments
    parser = argparse.ArgumentParser(description='Convert FITS files to HDF5 files in HDFITS format.')
    parser.add_argument('-c', '--compression', dest='comp', type=str,
                        help='Data compression. Defaults to None; options are lzf, bitshuffle and gzip')
    parser.add_argument('-x', '--extension', dest='ext', type=str, default='fits',
                        help='File extension of FITS files. Defaults to .fits')
    parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, default=4,
                        help='verbosity level (default 4, up to 5)')
    parser.add_argument('-s', '--scaleoffset', dest='scale_offset', default=None,
                        help='Add scale offset')
    parser.add_argument('-S', '--shuffle', dest='shuffle', action='store_true', default=None,
                        help='Apply byte shuffle filter')
    parser.add_argument('-t', '--pytables', dest='table_type', action='store_true', default=None,
                        help='Set output tables to be PyTables TABLE class, instead of HDFITS DATA_GROUP')
    parser.add_argument('-C', '--checksum', dest='checksum', action='store_true', default=None,
                        help='Compute fletcher32 checksum on datasets.')
    parser.add_argument('dir_in', help='input directory')
    parser.add_argument('dir_out', help='output_directory')

    args = parser.parse_args(args)

    dir_in  = args.dir_in
    dir_out = args.dir_out

    if not os.path.exists(dir_out):
        print("Creating directory %s" % dir_out)
        os.mkdir(dir_out)

    # Form a list of keyword arguments to pass to HDF5 export
    kwargs = {}
    if args.comp is not None:
        kwargs['compression'] = args.comp
    if args.scale_offset is not None:
        kwargs['scaleoffset'] = int(args.scale_offset)
    if args.shuffle is not None:
        kwargs['shuffle'] = args.shuffle
    if args.checksum is not None:
        kwargs['fletcher32'] = args.checksum
    if args.table_type is not None:
        kwargs['table_type'] = 'TABLE'
    else:
        kwargs['table_type'] = 'DATA_GROUP'

    pp = PrintLog(verbosity=args.verbosity)
    if args.verbosity == 0:
        warnings.simplefilter("ignore")

    pp.h1("FITS2HDF")
    pp.pa("Input directory:  %s" % dir_in)
    pp.pa("Output directory: %s" % dir_out)
    pp.pa("Dataset creation arguments:")
    for key, val in kwargs.items():
        pp.pa("%16s: %s" % (key, val))

    # Create list of files to process
    filelist = os.listdir(dir_in)
    filelist = [fn for fn in filelist if fn.endswith(args.ext)]

    t_start = time.time()
    file_count = 0
    for filename in filelist:
        file_in = os.path.join(dir_in, filename)
        file_out = os.path.join(dir_out, filename.split('.' + args.ext)[0] + '.h5')

        a = IdiHdulist()
        try:
            pp.pp("\nReading  %s" % file_in)
            a = read_fits(file_in)
            pp.pp("Creating %s" % file_out)
            t1 = time.time()
            export_hdf(a, file_out, **kwargs)
            t2 = time.time()
            pp.pp("Input  filesize: %sB" % os.path.getsize(file_in))
            pp.pp("Output filesize: %sB" % os.path.getsize(file_out))
            compfact = float(os.path.getsize(file_in)) / float(os.path.getsize(file_out))
            pp.pp("Compression:     %2.2fx" % compfact)
            pp.pp("Comp/write time: %2.2fs" % (t2 - t1))

            file_count += 1

        except IOError:
            pp.err("ERROR: Cannot load %s" % file_in)

    pp.h1("\nSUMMARY")
    pp.pa("Files created: %i" % file_count)
    pp.pa("Time taken:    %2.2fs" % (time.time() - t_start))
Example #21
def convert_hdf_to_fits(args=None):
    """ Convert a HDF5 (in HDFITS format) to a FITS file

    An input and output directory must be specified, and all files with a matching
    extension will be converted. Command line options set the run-time settings.
    """

    # Parse options and arguments
    parser = argparse.ArgumentParser(description='Convert HDF5 files in HDFITS format to FITS files.')
    parser.add_argument('-x', '--extension', dest='ext', type=str, default='h5',
                        help='File extension of HDFITS files. Defaults to .h5')
    parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, default=4,
                        help='verbosity level (default 4, up to 5)')
    parser.add_argument('dir_in', help='input directory')
    parser.add_argument('dir_out', help='output_directory')
    args = parser.parse_args(args)

    dir_in  = args.dir_in
    dir_out = args.dir_out

    if not os.path.exists(dir_out):
        print("Creating directory %s" % dir_out)
        os.mkdir(dir_out)

    # Form a list of keyword arguments to pass to HDF5 export
    kwargs = {}

    pp = PrintLog(verbosity=args.verbosity)
    if args.verbosity == 0:
        warnings.simplefilter("ignore")

    pp.h1("HDF2FITS")
    pp.pa("Input directory:  %s" % dir_in)
    pp.pa("Output directory: %s" % dir_out)
    pp.pa("Dataset creation arguments:")
    for key, val in kwargs.items():
        pp.pa("%16s: %s" % (key, val))

    # Create list of files to process
    filelist = os.listdir(dir_in)
    filelist = [fn for fn in filelist if fn.endswith(args.ext)]

    t_start = time.time()
    file_count = 0
    for filename in filelist:
        file_in = os.path.join(dir_in, filename)
        file_out = os.path.join(dir_out, filename.split('.' + args.ext)[0] + '.fits')

        a = IdiHdulist()
        try:
            pp.pp("\nReading  %s" % file_in)
            a = read_hdf(file_in)
            pp.pp("Creating %s" % file_out)
            t1 = time.time()
            export_fits(a, file_out, **kwargs)
            t2 = time.time()
            pp.pp("Input  filesize: %sB" % os.path.getsize(file_in))
            pp.pp("Output filesize: %sB" % os.path.getsize(file_out))
            compfact = float(os.path.getsize(file_in)) / float(os.path.getsize(file_out))
            pp.pp("Compression:     %2.2fx" % compfact)
            pp.pp("Comp/write time: %2.2fs" % (t2 - t1))

            file_count += 1

        except IOError:
            pp.err("ERROR: Cannot load %s" % file_in)

    pp.h1("\nSUMMARY")
    pp.pa("Files created: %i" % file_count)
    pp.pa("Time taken:    %2.2fs" % (time.time() - t_start))
Example #22
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=
        'Compute PCA/ICA/NMF/etc. components over set of stacked spectra, save those out, and pickle model'
    )
    parser.add_argument('--pattern',
                        type=str,
                        default='stacked*exp??????.*',
                        metavar='PATTERN',
                        help='File pattern for stacked sky fibers.')
    parser.add_argument('--path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Path to work from, if not '.'")
    parser.add_argument(
        '--compacted_path',
        type=str,
        default=None,
        metavar='COMPACTED_PATH',
        help=
        'Path to find compacted/arrayized data; setting this will cause --path, --pattern to be ignored'
    )
    parser.add_argument('--n_components',
                        type=int,
                        default=40,
                        metavar='N_COMPONENTS',
                        help='Number of ICA/PCA/etc. components')
    parser.add_argument(
        '--method',
        type=str,
        default='ICA',
        metavar='METHOD',
        choices=['ICA', 'PCA', 'SPCA', 'NMF', 'ISO', 'KPCA', 'FA', 'DL'],
        help='Which dim. reduction method to use')
    parser.add_argument(
        '--scale',
        action='store_true',
        help=
        'Should inputs variance be scaled?  Defaults to mean subtract and value scale, but w/out this does not scale variance.'
    )
    parser.add_argument('--no_scale',
                        action='store_true',
                        help='Suppresses all scaling')
    parser.add_argument(
        '--ivar_cutoff',
        type=float,
        default=0.001,
        metavar='IVAR_CUTOFF',
        help='data with inverse variance below cutoff is masked as if ivar==0')
    parser.add_argument(
        '--n_iter',
        type=int,
        default=1200,
        metavar='MAX_ITER',
        help=
        'Maximum number of iterations to allow for convergence.  For SDSS data 1000 is a safe number for ICA, while SPCA requires larger values, e.g. ~2000 to ~2500'
    )
    parser.add_argument('--n_jobs',
                        type=int,
                        default=None,
                        metavar='N_JOBS',
                        help='N_JOBS')
    args = parser.parse_args()

    comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = iz.load_data(
        args)
    model = iz.get_model(args.method,
                         n=args.n_components,
                         n_neighbors=None,
                         max_iter=args.n_iter,
                         random_state=iz.random_state,
                         n_jobs=args.n_jobs)

    ss = None
    if args.no_scale:
        scaled_flux_arr = comb_flux_arr
    else:
        # Mean-subtract by default; also scale variance when --scale is given.
        ss = skpp.StandardScaler(with_std=args.scale)
        scaled_flux_arr = ss.fit_transform(comb_flux_arr)

    #Heavily copied from J. Vanderplas/astroML bayesian_blocks.py
    N = comb_wavelengths.size
    step = args.n_components * 4

    edges = np.concatenate([
        comb_wavelengths[:1:step],
        0.5 * (comb_wavelengths[1::step] + comb_wavelengths[:-1:step]),
        comb_wavelengths[-1::step]
    ])
    block_length = comb_wavelengths[-1::step] - edges

    # arrays to store the best configuration
    nn_vec = np.ones(N // step) * step
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    for R in range(N // step):
        print("R: " + str(R))

        width = block_length[:R + 1] - block_length[R + 1]
        count_vec = np.cumsum(nn_vec[:R + 1][::-1])[::-1]

        #width = nn_vec[:R + 1] - nn_vec[R + 1]
        #count_vec = np.cumsum(nn_vec[:R + 1][::-1])[::-1]

        #print(width)
        #print(count_vec)
        #raw_input("Pausing... ")

        fit_vec = map(
            lambda n: iz.score_via_CV(['LL'],
                                      scaled_flux_arr[:, :n],
                                      model,
                                      ss,
                                      args.method,
                                      folds=3,
                                      n_jobs=args.n_jobs), count_vec)
        fit_vec = [d["mle"] for d in fit_vec]

        #print(fit_vec)
        fit_vec[1:] += best[:R]
        #print(fit_vec)

        i_max = np.argmax(fit_vec)
        last[R] = i_max
        best[R] = fit_vec[i_max]

        #print(best)

    change_points = np.zeros(N // step, dtype=int)
    i_cp = N // step
    ind = N // step
    while True:
        i_cp -= 1
        change_points[i_cp] = ind
        if ind == 0:
            break
        ind = last[ind - 1]
    change_points = change_points[i_cp:]

    print(edges[change_points])
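
The edge construction above is easier to see on a toy grid; a small standalone sketch of what np.concatenate produces for these Bayesian-blocks-style bin edges (values arbitrary):

import numpy as np

w = np.arange(10.0)   # stand-in for comb_wavelengths
step = 4
edges = np.concatenate([
    w[:1],                               # first sample ([:1:step] == [:1])
    0.5 * (w[1::step] + w[:-1:step]),    # midpoints of every step-th pair
    w[-1:]                               # last sample
])
print(edges)  # [0.  0.5 4.5 8.5 9. ]
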
Example #23
def main(args=None):
    """
    This is the main function called by the `ivelplot` script.

    """

    from astropy.utils.compat import argparse
    from astropy.extern.configobj import configobj, validate

    from pkg_resources import resource_stream

    parser = argparse.ArgumentParser(
        description='An interactive environment for absorption line '
                    'identification and Voigt profile \nfitting with VPFIT.\n'
                    '\nTo dump a default configuration file: ivelplot -d'
                    '\nTo dump an extended default configuration file: '
                    'ivelplot -dd',
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('config', help='path to the configuration file')
    parser.add_argument('-z', '--redshift', help='redshift')
    parser.add_argument('--search', action='store_true',
                        help='display a general search list of ions')
    parser.add_argument('--lyman', action='store_true',
                        help='display the Lyman series transitions')
    parser.add_argument('--galactic', action='store_true',
                        help='display the common Galactic transitions')
    parser.add_argument('--agn', action='store_true',
                        help='display the common AGN associated transitions')

    config = resource_stream(__name__, '/config/ivelplot.cfg')
    config_extended = resource_stream(
        __name__, '/config/ivelplot_extended.cfg')
    spec = resource_stream(__name__, '/config/ivelplot_specification.cfg')

    if len(sys.argv) > 1:

        if sys.argv[1] == '-d':
            cfg = configobj.ConfigObj(config)
            cfg.filename = '{0}/ivelplot.cfg'.format(os.getcwd())
            cfg.write()
            return

        elif sys.argv[1] == '-dd':
            cfg = configobj.ConfigObj(config_extended)
            cfg.filename = '{0}/ivelplot.cfg'.format(os.getcwd())
            cfg.write()
            return

    args = parser.parse_args(args)

    try:
        cfg = configobj.ConfigObj(args.config, configspec=spec)
        validator = validate.Validator()
        cfg.validate(validator)

    except Exception:
        raise IOError('Configuration file could not be read')

    fname = cfg['WINDOW'].pop('transitions')

    if args.search:
        fh = resource_stream(__name__, '/data/search.dat')
        transitions = list(fh)
        fh.close()

    elif args.lyman:
        fh = resource_stream(__name__, '/data/lyman.dat')
        transitions = list(fh)
        fh.close()

    elif args.galactic:
        fh = resource_stream(__name__, '/data/galactic.dat')
        transitions = list(fh)
        fh.close()

    elif args.agn:
        fh = resource_stream(__name__, '/data/agn.dat')
        transitions = list(fh)
        fh.close()

    else:
        print('Reading transitions from ', fname)
        fh = open(fname)
        transitions = list(fh)
        fh.close()

    transitions = [transition for transition in transitions
                   if not transition.startswith('#')]

    fname = cfg['DATA'].pop('filename')
    if not fname:
        raise IOError('no data to plot!')

    spectrum = Table.read(fname) if fname.endswith('fits') else ascii.read(fname)
    wavelength = spectrum[cfg['DATA'].pop('wavelength_column')]
    flux = spectrum[cfg['DATA'].pop('flux_column')]
    error = spectrum[cfg['DATA'].pop('error_column')]
    continuum = spectrum[cfg['DATA'].pop('continuum_column')]
    redshift = float(args.redshift) if args.redshift is not None else 0

    cfg['MODEL']['system_width'] = (cfg['WINDOW']['vmax'] -
                                    cfg['WINDOW']['vmin'])
    cfg['MODEL']['absorbers'] = None

    print(info)

    app = QApplication(sys.argv)
    app.aboutToQuit.connect(app.deleteLater)

    desktop = app.desktop()
    screen = desktop.screenGeometry()
    width = screen.width() / desktop.physicalDpiX() * 0.88

    fontsize = 0.7 * width
    label_fontsize = 0.6 * width

    cfg['WINDOW']['width'] = width
    cfg['WINDOW']['fontsize'] = fontsize
    cfg['WINDOW']['label_fontsize'] = label_fontsize

    velocity_plot = InteractiveVelocityPlot(
        fname, transitions, wavelength, flux, error, continuum, redshift,
        **cfg)
    velocity_plot.window.show()

    output_stream = OutputStream()
    output_stream.text_written.connect(velocity_plot.on_output)

    sys.stdout = output_stream
    sys.exit(app.exec_())
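
The ConfigObj/Validator pair used above checks a configuration file against a specification; the same pattern in isolation (a sketch using the standalone configobj package, with hypothetical file names):

from configobj import ConfigObj
from validate import Validator

# Hypothetical files: a user config plus a spec describing expected keys/types.
cfg = ConfigObj('ivelplot.cfg', configspec='ivelplot_specification.cfg')
result = cfg.validate(Validator())
if result is not True:
    # On failure, validate() returns a mapping with False for each bad key.
    raise IOError('Configuration file could not be read')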
Example #24
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Add in solar and ecliptic data to observational metadata')

    parser.add_argument('--metadata_file_path',
                        type=str,
                        default=None,
                        metavar='PATH',
                        help='Full path to a specific metadata file; if '
                        'omitted, a default file under --metadata_path '
                        'is used')
    parser.add_argument('--metadata_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Metadata path to work from, if not '.'")
    parser.add_argument(
        '--input',
        type=str,
        default='FITS',
        metavar='INPUT',
        help='Format of the annotated metadata file to read and write, '
        'either FITS or CSV; defaults to FITS.')
    parser.add_argument('--solar_metadata',
                        type=str,
                        default='solar_ephemeris.csv',
                        help='Solar metadata file (from parse_ephemeris.py)')
    args = parser.parse_args()

    if args.metadata_file_path is None:
        if args.input == 'CSV':
            obs_md_table = astab.Table.read(os.path.join(
                args.metadata_path, "annotated_metadata.csv"),
                                            format="ascii.csv")
        elif args.input == 'FITS':
            obs_md_table = astab.Table.read(os.path.join(
                args.metadata_path, "annotated_metadata.fits"),
                                            format="fits")
    else:
        obs_md_table = astab.Table.read(args.metadata_file_path)
    solar_md_table = astab.Table.read(args.solar_metadata, format="ascii.csv")
    solar_md_table.rename_column('UTC', 'EPHEM_DATE')

    obs_md_table = astab.join(
        obs_md_table, solar_md_table['EPHEM_DATE', 'RA_ABS', 'DEC_ABS'])

    boresight_ra_dec = ascoord.SkyCoord(
        ra=obs_md_table['RA'],
        dec=obs_md_table['DEC'],
        distance=1.0,
        unit=('deg', 'deg', 'AU'),
        frame='fk5')  #Change distance to... e.g. 1 Mpc
    boresight_ecliptic = boresight_ra_dec.transform_to(
        'heliocentrictrueecliptic')

    solar_ra_dec = ascoord.SkyCoord(ra=obs_md_table['RA_ABS'],
                                    dec=obs_md_table['DEC_ABS'],
                                    distance=1.0,
                                    unit=('deg', 'deg', 'AU'),
                                    frame='icrs')
    solar_ecliptic = solar_ra_dec.transform_to('heliocentrictrueecliptic')

    obs_md_table.add_column(
        astab.Column(boresight_ecliptic.lat,
                     dtype=float,
                     name="ECLIPTIC_PLANE_SEP"))
    belp = np.mod(boresight_ecliptic.lon.value + 360.0, 360.0)
    selp = np.mod(solar_ecliptic.lon.value + 360.0, 360.0)
    lon_diff = np.abs(belp - selp)
    lon_diff[lon_diff > 180] -= 360
    lon_diff = np.abs(lon_diff)
    obs_md_table.add_column(
        astab.Column(lon_diff, dtype=float, name="ECLIPTIC_PLANE_SOLAR_SEP"))

    obs_md_table.remove_columns(['RA_ABS', 'DEC_ABS'])

    if args.metadata_file_path is None:
        if args.input == 'CSV':
            obs_md_table.write(os.path.join(args.metadata_path,
                                            "annotated_metadata.csv"),
                               format="ascii.csv")
        elif args.input == 'FITS':
            obs_md_table.write(os.path.join(args.metadata_path,
                                            "annotated_metadata.fits"),
                               format="fits",
                               overwrite=True)
    else:
        obs_md_table.write(args.metadata_file_path, overwrite=True)
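
The belp/selp arithmetic near the end computes the smallest separation between two ecliptic longitudes, handling the wrap at 360 degrees; the same logic in isolation:

import numpy as np

def lon_separation(lon_a, lon_b):
    """Smallest absolute difference between two longitudes, in [0, 180] degrees."""
    diff = np.abs(np.mod(lon_a, 360.0) - np.mod(lon_b, 360.0))
    return np.where(diff > 180.0, 360.0 - diff, diff)

lon_separation(350.0, 10.0)  # -> 20.0, not 340.0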
Example #25
def main(args=None):
    """This is the main function called by the `fitsheader` script."""
    from astropy.utils.compat import argparse

    parser = argparse.ArgumentParser(
        description=('Print the header(s) of a FITS file. '
                     'Optional arguments allow the desired extension(s), '
                     'keyword(s), and output format to be specified. '
                     'Note that in the case of a compressed image, '
                     'the decompressed header is shown by default.'))
    parser.add_argument('-e',
                        '--extension',
                        metavar='HDU',
                        action='append',
                        dest='extensions',
                        help='specify the extension by name or number; '
                        'this argument can be repeated '
                        'to select multiple extensions')
    parser.add_argument('-k',
                        '--keyword',
                        metavar='KEYWORD',
                        action='append',
                        dest='keywords',
                        help='specify a keyword; this argument can be '
                        'repeated to select multiple keywords; '
                        'also supports wildcards')
    parser.add_argument('-t',
                        '--table',
                        nargs='?',
                        default=False,
                        metavar='FORMAT',
                        help='print the header(s) in machine-readable table '
                        'format; the default format is '
                        '"ascii.fixed_width" (can be "ascii.csv", '
                        '"ascii.html", "ascii.latex", "fits", etc)')
    parser.add_argument('-c',
                        '--compressed',
                        action='store_true',
                        help='for compressed image data, '
                        'show the true header which describes '
                        'the compression rather than the data')
    parser.add_argument('filename',
                        nargs='+',
                        help='path to one or more files; '
                        'wildcards are supported')
    args = parser.parse_args(args)

    # If `--table` was used but no format specified,
    # then use ascii.fixed_width by default
    if args.table is None:
        args.table = 'ascii.fixed_width'

    # Now print the desired headers
    try:
        if args.table:
            print_headers_as_table(args)
        else:
            print_headers_traditional(args)
    except IOError:
        # A 'Broken pipe' IOError may occur when stdout is closed prematurely,
        # e.g. when calling `fitsheader file.fits | head`. We let this pass.
        pass
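
Because main() accepts an explicit argument list, the script's behaviour can be exercised directly from Python (file name hypothetical):

# Print only NAXIS* keywords from extension 0 of a hypothetical file.
main(['--extension', '0', '--keyword', 'NAXIS*', 'image.fits'])

# Machine-readable output in the default ascii.fixed_width table format.
main(['image.fits', '--table'])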
Example #26
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Animate the sky spectra predicted for a given '
        'coordinate and date range using a saved model')
    parser.add_argument('--metadata_path',
                        type=str,
                        default='.',
                        metavar='PATH',
                        help="Metadata path to work from, if not '.'")
    parser.add_argument('--model_path',
                        type=str,
                        default='model.pkl',
                        metavar='MODEL_PATH',
                        help='COMPLETE path from which to load a model')
    parser.add_argument('--start_dt',
                        type=ani.valid_date,
                        help='Start DateTime of the range to plot the sky for')
    parser.add_argument('--end_dt',
                        type=ani.valid_date,
                        help='End DateTime of the range to plot the sky for')
    parser.add_argument('--lunar_metadata',
                        type=str,
                        default=None,
                        metavar='LUNAR_METADATA',
                        required=True,
                        help='File containing lunar ephemeris metadata.')
    parser.add_argument('--solar_metadata',
                        type=str,
                        default=None,
                        metavar='SOLAR_METADATA',
                        required=True,
                        help='File containing solar ephemeris metadata.')
    parser.add_argument('--sunspot_metadata',
                        type=str,
                        default=None,
                        metavar='SUNSPOT_METADATA',
                        required=True,
                        help='File containing sunspot metadata.')
    parser.add_argument('--method', type=str,
                        help='Dim. reduction method used to build the model')
    parser.add_argument('--ra', type=str, help='Right ascension (ICRS)')
    parser.add_argument('--dec', type=str, help='Declination (ICRS)')
    parser.add_argument('--dm_path', type=str,
                        help='Path to the dim. reduction model')
    args = parser.parse_args()

    obs_coord = coord.SkyCoord(args.ra, args.dec, frame='icrs')
    metadata_tups = ani.get_sky_for_coord(args.start_dt, args.end_dt,
                                          obs_coord, args.lunar_metadata,
                                          args.solar_metadata,
                                          args.sunspot_metadata)
    spectra, labels = ani.animate_sky_spectra_for_coord(
        args.start_dt, args.end_dt, obs_coord, args.lunar_metadata,
        args.solar_metadata, args.sunspot_metadata, args.model_path,
        args.dm_path, args.method)

    #print(spectra)
    xscale = np.arange(len(spectra[0]))
    #print(xscale)

    fig = plt.figure()
    ax = plt.axes(xlim=(0, 7200), ylim=(0, 500))
    line, = ax.plot([], [])

    def init_func():
        line.set_data([], [])
        return line,

    def animate_func(i, data, xscale):
        line.set_data(xscale, data[i])
        plt.title(labels[i])
        return line,

    p_animate_func = partial(animate_func, data=spectra, xscale=xscale)

    anim = animation.FuncAnimation(fig,
                                   p_animate_func,
                                   init_func=init_func,
                                   frames=len(spectra),
                                   interval=1000)  #, blit=True)
    anim.save('polaris_sky_animation.mp4',
              fps=1,
              extra_args=['-vcodec', 'libx264'])
    plt.show()
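
functools.partial is used above because FuncAnimation passes only the frame index to its callback; matplotlib's fargs argument is an equivalent way to bind the extra arrays (names as in the code above):

anim = animation.FuncAnimation(fig, animate_func, init_func=init_func,
                               frames=len(spectra), interval=1000,
                               fargs=(spectra, xscale))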
Example #27
def convert_fits_to_fits(args=None):
    """ Read a FITS file into the in-memory IDI format then back out into a FITS file

    An input and output directory must be specified, and all files with a matching
    extension will be converted. Command line options set the compression algorithm
    and other run-time settings.
    """
    # Parse options and arguments
    parser = argparse.ArgumentParser(description='Convert FITS files to FITS files via the in-memory IDI format.')
    parser.add_argument('-x', '--extension', dest='ext', type=str, default='fits',
                      help='File extension of FITS files. Defaults to .fits')
    parser.add_argument('-v', '--verbosity', dest='vb', type=int, default=0,
                      help='verbosity level (default 0, up to 5)')
    parser.add_argument('-w', '--nowarn', dest='warn', action='store_false', default=True,
                      help='Turn off warnings created by FITS parsing')
    parser.add_argument('-o', '--overwrite', dest='overwrite', action='store_true', default=False,
                      help='Automatically overwrite output files if already exist')
    parser.add_argument('dir_in', help='input directory')
    parser.add_argument('dir_out', help='output directory')

    args = parser.parse_args()

    dir_in  = args.dir_in
    dir_out = args.dir_out

    if not os.path.exists(dir_out):
        print("Creating directory %s" % dir_out)
        os.mkdir(dir_out)

    if not args.warn:
        warnings.simplefilter("ignore")

    if dir_in == dir_out:
        print("Input directory cannot be same as output directory.")
        exit()

    # Create list of files to process
    filelist = os.listdir(dir_in)
    filelist = [fn for fn in filelist if fn.endswith(args.ext)]

    t1 = time.time()
    file_count = 0
    for filename in filelist:
        file_in = os.path.join(dir_in, filename)
        file_out = os.path.join(dir_out, filename)

        a = IdiHdulist()
        try:
            a = read_fits(file_in)
            if os.path.exists(file_out):
                if args.overwrite:
                    os.remove(file_out)
                else:
                    qn = input("%s exists. Overwrite (y/n)? " % file_out)
                    if qn.lower() in ("y", "yes"):
                        os.remove(file_out)
                    else:
                        continue  # skip this file instead of clobbering it

            print("\nCreating %s" % file_out)
            export_fits(a, file_out)
            print("Input  filesize: %sB" % os.path.getsize(file_in))
            print("Output filesize: %sB" % os.path.getsize(file_out))
            compfact = float(os.path.getsize(file_in)) / float(os.path.getsize(file_out))
            print("Compression:     %2.2fx" % compfact)

            file_count += 1

        except IOError:
            print("ERROR: Cannot read/write %s" % file_in)

    print("\nSUMMARY")
    print("-------")
    print("Files created: %i" % file_count)
    print("Time taken:    %2.2fs" % (time.time() - t1))
Example #28
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute PCA/ICA/NMF/etc. components over set of stacked spectra, save those out, and pickle model'
    )
    subparsers = parser.add_subparsers(dest='subparser_name')

    parser.add_argument(
        '--pattern', type=str, default='stacked*exp??????.*', metavar='PATTERN',
        help='File pattern for stacked sky fibers.'
    )
    parser.add_argument(
        '--path', type=str, default='.', metavar='PATH',
        help="Path to work from, if not '.'"
    )
    parser.add_argument(
        '--compacted_path', type=str, default=None, metavar='COMPACTED_PATH',
        help='Path to find compacted/arrayized data; setting this will cause --path, --pattern to be ignored'
    )
    parser.add_argument(
        '--method', type=str, default=['ICA'], metavar='METHOD',
        choices=['ICA', 'PCA', 'SPCA', 'NMF', 'ISO', 'KPCA', 'FA', 'DL'], nargs='+',
        help='Which dim. reduction method to use'
    )
    parser.add_argument(
        '--scale', action='store_true',
        help='Scale inputs?  Mean subtracts each feature, but does not scale the variance.'
    )
    parser.add_argument(
        '--ivar_cutoff', type=float, default=0.001, metavar='IVAR_CUTOFF',
        help='data with inverse variance below cutoff is masked as if ivar==0'
    )
    parser.add_argument(
        '--n_iter', type=int, default=1200, metavar='MAX_ITER',
        help='Maximum number of iterations to allow for convergence. For SDSS data, 1000 is a safe number for ICA, while SPCA requires larger values, e.g. ~2000 to ~2500'
    )
    parser.add_argument(
        '--n_jobs', type=int, default=None, metavar='N_JOBS',
        help='Number of parallel jobs to use'
    )

    parser_compare = subparsers.add_parser('compare')
    parser_compare.add_argument(
        '--max_components', type=int, default=50, metavar='COMP_MAX',
        help='Max number of components to use/test'
    )
    parser_compare.add_argument(
        '--min_components', type=int, default=0, metavar='COMP_MIN',
        help='Min number of components to use/test'
    )
    parser_compare.add_argument(
        '--step_size', type=int, default=5, metavar='COMP_STEP',
        help='Step size from comp_min to comp_max'
    )
    parser_compare.add_argument(
        '--comparison', choices=['EXP_VAR', 'R2', 'MSE', 'MAE'], nargs='*', default=['EXP_VAR'],
        help='Comparison methods: Explained variance (score), R2 (score), mean sq. error (loss), MEDIAN absolute error (loss)'
    )
    parser_compare.add_argument(
        '--mle_if_avail', action='store_true',
        help='In addition to --comparison, include MLE if PCA or FA methods specified'
    )
    parser_compare.add_argument(
        '--plot_example_reconstruction', action='store_true',
        help='Pick a random spectrum, plot its actual and reconstructed versions'
    )

    parser_build = subparsers.add_parser('build')
    parser_build.add_argument(
        '--n_components', type=int, default=40, metavar='N_COMPONENTS',
        help='Number of ICA/PCA/etc. components'
    )
    parser_build.add_argument(
        '--n_neighbors', type=int, default=10, metavar='N_NEIGHBORS',
        help='Number of neighbors for e.g. IsoMap'
    )

    args = parser.parse_args()

    comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = iz.load_data(args)

    if 'DL' in args.method:
        flux_arr = comb_flux_arr.astype(dtype=np.float64)
    else:
        flux_arr = comb_flux_arr
    scaled_flux_arr = None
    ss = None
    if args.scale:
        ss = skpp.StandardScaler(with_std=False)
        scaled_flux_arr = ss.fit_transform(flux_arr)
    else:
        scaled_flux_arr = flux_arr

    if args.subparser_name == 'compare':
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()

        for method in args.method:
            model = iz.get_model(method, max_iter=args.n_iter, random_state=iz.random_state, n_jobs=args.n_jobs)
            scores = {}
            mles_and_covs = args.mle_if_avail and (method == 'FA' or method == 'PCA')

            n_components = np.arange(args.min_components, args.max_components+1, args.step_size)
            for n in n_components:
                print("Cross validating for n=" + str(n) + " on method " + method)

                model.n_components = n

                comparisons = iz.score_via_CV(args.comparison,
                                    flux_arr if method == 'NMF' else scaled_flux_arr,
                                    model, method, n_jobs=args.n_jobs, include_mle=mles_and_covs,
                                    modeler=_iter_modeler, scorer=_iter_scorer)
                for key, val in comparisons.items():
                    if key in scores:
                        scores[key].append(val)
                    else:
                        scores[key] = [val]

            if mles_and_covs:
                #ax2.axhline(cov_mcd_score(scaled_flux_arr, args.scale), color='violet', label='MCD Cov', linestyle='--')
                ax2.axhline(cov_lw_score(scaled_flux_arr, args.scale), color='orange', label='LW Cov', linestyle='--')

            for key, score_list in scores.items():
                if key != 'mle':
                    ax1.plot(n_components, score_list, label=method + ':' + key + ' scores')
                else:
                    ax2.plot(n_components, score_list, '-.', label=method + ' mle scores')

        ax1.set_xlabel('nb of components')
        ax1.set_ylabel('CV scores', figure=fig)

        ax1.legend(loc='lower left')
        ax2.legend(loc='lower right')

        plt.show()
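
iz.score_via_CV is project-specific, but the MLE comparison for PCA and FA follows scikit-learn's standard model-selection recipe, where cross_val_score falls back on the estimator's score() method (the average per-sample log-likelihood for both PCA and FactorAnalysis); a rough analogue:

import numpy as np
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.model_selection import cross_val_score

def mle_scores(X, n_components_grid, folds=3):
    """Mean cross-validated log-likelihood per candidate dimensionality."""
    scores = {'PCA': [], 'FA': []}
    for n in n_components_grid:
        scores['PCA'].append(np.mean(cross_val_score(PCA(n_components=n), X, cv=folds)))
        scores['FA'].append(np.mean(cross_val_score(FactorAnalysis(n_components=n), X, cv=folds)))
    return scores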
Example #29
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Pull metadata from FITS for PLATE/MJD combos, output.')

    parser.add_argument('--plate',
                        type=int,
                        default=None,
                        metavar='PLATE',
                        help='Plate number to pull metadata for.')
    parser.add_argument(
        '--mjd',
        type=int,
        default=None,
        metavar='MJD',
        help=
        'MJD of plate observation to use (can be omitted if only one value is possible)'
    )
    parser.add_argument(
        '--fiber',
        type=int,
        default=1,
        metavar='FIBER',
        help=
        'Fiber number identifying the spectrum of the requested PLATE-MJD to plot.'
    )
    parser.add_argument(
        '--list_file',
        type=str,
        default=None,
        metavar='LIST_FILE',
        help=
        'File that contains list of PLATE, MJD, FIBER records to output metadata for; e.g. the output from bossquery'
    )
    parser.add_argument('--output',
                        type=str,
                        default='FITS',
                        metavar='OUTPUT',
                        help="Output format, either 'FITS' or 'CSV'; "
                             'defaults to FITS.')
    parser.add_argument(
        '--dont_gather',
        action='store_true',
        help=
        'Flag to prevent storing output in file; just dumps metadata to stdout.'
    )
    args = parser.parse_args()

    if args.plate is not None and args.mjd is not None:
        file_deets(plate=args.plate, mjd=args.mjd, fiber=args.fiber)
    elif args.list_file is not None:
        plates_table = Table.read(args.list_file, format='ascii')

        exposure_table_list = []
        exposure_table = None

        if not args.dont_gather:
            progress_bar = ProgressBar(widgets=[Percentage(),
                                                Bar()],
                                       maxval=len(plates_table)).start()
            counter = 0
        for row in plates_table:
            try:
                exposure_data = file_deets(row['PLATE'],
                                           row['MJD'],
                                           gather=not args.dont_gather)
                if exposure_data is not None:
                    exposure_table_list.append(Table(exposure_data))
            except RuntimeError as err:
                print("Caught runtime error:")
                print(err)
            if not args.dont_gather:
                counter += 1
                progress_bar.update(counter)
        if not args.dont_gather:
            progress_bar.finish()

        if len(exposure_table_list):
            if len(exposure_table_list) > 1:
                exposure_table = vstack(exposure_table_list)
            else:
                exposure_table = exposure_table_list[0]

            if args.output.upper() == 'CSV':
                exposure_table.write("exposure_metadata.csv",
                                     format="ascii.csv")
            elif args.output.upper() == 'FITS':
                exposure_table.write("exposure_metadata.fits", format="fits")
Example #30
def evaluate(args=None):
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='provide verbose output on progress')
    parser.add_argument('--prior',
                        default=None,
                        help='Name of the prior to use, e.g., qso_4_10k.fits')
    parser.add_argument('-n',
                        '--num-spectra',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Number of spectra to use for evaluation.')
    parser.add_argument('--mag-err',
                        type=float,
                        default=0.1,
                        metavar='dM',
                        help='RMS error on targeting magnitudes to simulate.')
    parser.add_argument(
        '--quad-order',
        type=int,
        default=16,
        metavar='N',
        help='Quadrature order to use for magnitude marginalization.')
    parser.add_argument('--seed',
                        type=int,
                        default=None,
                        metavar='S',
                        help='Random seed to use for sampling templates.')
    args = parser.parse_args(args)

    if args.prior is None:
        print('You must specify a prior to use.')
        return -1
    if args.seed is None:
        print('You must specify a seed to use.')
        return -1

    # Parse the prior name, which is expected to have format xxx_nn_nnnk.fits
    basename, ext = os.path.splitext(args.prior)
    if ext != '.fits':
        print('Unexpected extension for prior filename: {}'.format(ext))
        return -1
    try:
        classname, downsampling, _ = basename.split('_')
        downsampling = int(downsampling)
    except ValueError:
        print('Badly formatted prior filename: {}.'.format(basename))
        return -1
    if classname not in ('qso', 'lrg', 'elg', 'elgem', 'star'):
        print('Invalid prior class name: {}.'.format(classname))
        return -1
    if args.verbose:
        print('Prior uses downsampling {} for class {}.'.format(
            downsampling, classname))

    # Load the prior now.  Prepend $BAYEZ_DATA unless we already have
    # an absolute path.
    path = os.environ.get('BAYEZ_DATA', '.')
    if not os.path.isabs(args.prior):
        args.prior = os.path.join(path, args.prior)
    if args.verbose:
        print('Reading prior from {}'.format(args.prior))
    prior = bayez.prior.load_prior(args.prior)

    # Prepare to simulate spectra for evaluation.
    sampler = bayez.sampler.Samplers[classname]()
    simulator = bayez.simulation.Simulator(analysis_downsampling=downsampling,
                                           verbose=args.verbose)

    # Run the evaluation.
    estimator = bayez.estimator.RedshiftEstimator(
        prior, dz=0.001, quadrature_order=args.quad_order)
    results = bayez.estimator.estimate_batch(
        estimator,
        args.num_spectra,
        sampler,
        simulator,
        mag_err=args.mag_err,
        seed=args.seed,
        print_interval=500 if args.verbose else 0)

    # Save the results.
    name = os.path.join(
        path, '{}_q{:+d}_{}.fits'.format(basename, args.quad_order, args.seed))
    if args.verbose:
        print('Saving results to {}'.format(name))
    results.write(name, overwrite=True)
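
Given the split('_') above, a prior file named e.g. qso_4_10k.fits (hypothetical) parses as:

import os

basename, ext = os.path.splitext('qso_4_10k.fits')  # -> 'qso_4_10k', '.fits'
classname, downsampling, _ = basename.split('_')    # -> 'qso', '4', '10k'
downsampling = int(downsampling)                    # -> 4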