Example #1
def vphas_tile_merge_main(args=None):
    """Command-line interface to merge frame catalogues into a tile."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Merge frame catalogues into a tile.')
    parser.add_argument('-s', '--size', metavar='size',
                        type=float, default=1,
                        help='Width and height of the tile in degrees.')
    parser.add_argument('-c', '--config', metavar='configfile',
                        type=str, default=None,
                        help='Configuration file.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Turn on debug output.')
    parser.add_argument('l', type=float, help='Galactic longitude.')
    parser.add_argument('b', type=float, help='Galactic latitude.')
    args = parser.parse_args(args)

    if args.verbose:
        log.setLevel('DEBUG')
    else:
        log.setLevel('INFO')

    tile = VphasCatalogTile(args.l, args.b, args.size, configfile=args.config)
    tile.create()
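
The `args=None` signature lets the same entry point serve both the command line (where `parse_args(None)` falls back to `sys.argv[1:]`) and tests. A minimal standalone sketch of the pattern (`demo_main` is hypothetical, not part of the VPHAS code):

import argparse

def demo_main(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--size', type=float, default=1)
    parser.add_argument('l', type=float)
    parser.add_argument('b', type=float)
    return parser.parse_args(args)  # None -> read sys.argv[1:]

# In a test, pass the argv list directly:
opts = demo_main(['--size', '0.5', '210.5', '-0.8'])
assert opts.size == 0.5 and opts.l == 210.5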
Example #2
def vphas_tile_merge_main(args=None):
    """Command-line interface to merge frame catalogues into a tile."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Merge frame catalogues into a tile.')
    parser.add_argument('-s',
                        '--size',
                        metavar='size',
                        type=float,
                        default=1,
                        help='Width and height of the tile in degrees.')
    parser.add_argument('-c',
                        '--config',
                        metavar='configfile',
                        type=str,
                        default=None,
                        help='Configuration file.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Turn on debug output.')
    parser.add_argument('l', type=float, help='Galactic longitude.')
    parser.add_argument('b', type=float, help='Galactic latitude.')
    args = parser.parse_args(args)

    if args.verbose:
        log.setLevel('DEBUG')
    else:
        log.setLevel('INFO')

    tile = VphasCatalogTile(args.l, args.b, args.size, configfile=args.config)
    tile.create()
Example #3
def main():
    parser = argparse.ArgumentParser(
        description="Ingest 2MASS PSC into Starplex")
    parser.add_argument('data_dir', action='store',
        help="Directory with psc_*.gz files")
    parser.add_argument('--name', action='store', default='starplex',
        help="Database name")
    parser.add_argument('--user', action='store', default='starplex',
        help="Database user")
    parser.add_argument('--pw', action='store', default=None,
        help="Database password")
    parser.add_argument('--url', action='store', default='localhost',
        help="Database URL")
    parser.add_argument('--port', action='store', default=5432, type=int,
        help="Database port")
    parser.add_argument('--ra', action='store', nargs=2,
        default=[0., 360.], type=float,
        help="Min and max RA range")
    parser.add_argument('--dec', action='store', nargs=2,
        default=[-90., 90.], type=float,
        help="Min and max Dec range")
    args = parser.parse_args()

    log.setLevel('INFO')
    connect(user=args.user, name=args.name)
    session = Session()
    create_all()
    tm_ingester = TwoMassPSCIngest(session, args.data_dir)
    tm_ingester.ingest_region('2MASS_PSC', [7.5, 17], [36, 47])
Example #4
def main():
    log.setLevel("INFO")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "fields",
        nargs='*',
        help="WIRCam field names(s)")
    parser.add_argument(
        '-n',
        type=int, default=8,
        help="Number of segments on each side of a WIRCam field")
    parser.add_argument(
        '--vega',
        action='store_true', default=False,
        help="Present SB in VEGAMAG, rather than ABMAG.")
    parser.add_argument(
        '--no-intercal',
        dest='disable_intercal',
        action='store_true', default=False,
        help="Disable starplex.intercal ZP corrections")

    args = parser.parse_args()

    for field in args.fields:
        process_wircam_field(field, args.n, args.vega, args.disable_intercal)
Example #5
def fit_a_spectrum(inputs):
    """
    Process used for fitting spectra. Returns a best-fit solution and a dud for
    every spectrum.

    Parameters
    ----------
    inputs : list
        list containing inputs to parallel map - contains the spectrum index,
        the scouseobject, SAA, the best-fitting model solution to the SAA, and
        the template spectrum
    """
    idx, scouseobject, SAA, parent_model, template_spectrum = inputs
    key = SAA.indices_flat[idx]
    spec=None

    # Shhh
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        old_log = log.level
        log.setLevel('ERROR')
        # update the template
        spec = get_spec(scouseobject, SAA.indiv_spectra[key], template_spectrum)
        log.setLevel(old_log)

    # begin the fitting process
    bf = fitting_process_parent(scouseobject, SAA, key, spec, parent_model)
    # if the result is a zero component fit, create a dud spectrum
    if bf.ncomps == 0.0:
        dud = bf
    else:
        dud = fitting_process_duds(scouseobject, SAA, key, spec)
    return [bf, dud]
Example #6
    def __init__(self, spec, idx=None, scouse=None, fit_dud=False, noise=None,
                 duddata=None):
        """
        Stores the best-fitting model

        """

        self._index = idx

        if fit_dud:
            spec=None
            #quickly and quietly generate a dud spec
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                old_log = log.level
                log.setLevel('ERROR')

                spec = pyspeckit.Spectrum(data=[0,0], error=[0,0], xarr=[0,0])
                spec.specfit.fittype = scouse.fittype
                spec.specfit.fitter = spec.specfit.Registry.multifitters[scouse.fittype]

                log.setLevel(old_log)

            fit_pars_dud(self, spec, scouse, noise, duddata)
        else:
            fit_pars(self, spec, scouse)
Example #7
    def test_derivative(self):
        log.setLevel("DEBUG")
        testp = tdu.get_derivative_params(self.modelB1953)
        delay = self.modelB1953.delay(self.toasB1953)
        for p in testp.keys():
            log.debug("Running derivative for {}".format("d_delay_d_" + p))
            ndf = self.modelB1953.d_phase_d_param_num(self.toasB1953, p,
                                                      testp[p])
            adf = self.modelB1953.d_phase_d_param(self.toasB1953, delay, p)
            diff = adf - ndf
            if not np.all(diff.value == 0.0):
                mean_der = (adf + ndf) / 2.0
                relative_diff = np.abs(diff) / np.abs(mean_der)
                msg = (
                    "Derivative test failed at d_delay_d_%s with max relative difference %lf"
                    % (p, np.nanmax(relative_diff).value))
                if p in ["ECC", "EDOT"]:
                    tol = 20
                elif p in ["PMDEC"]:
                    tol = 5e-3
                else:
                    tol = 1e-3
                log.debug("derivative relative diff for %s, %lf" %
                          ("d_delay_d_" + p, np.nanmax(relative_diff).value))
                assert np.nanmax(relative_diff) < tol, msg
            else:
                continue
Example #8
def main():
    log.setLevel("INFO")
    args = parse_args()

    segmap_fits = fits.open(args.seg_path)
    segmap = segmap_fits[0].data
    wcs = WCS(segmap_fits[0].header)
    pixel_table = Table.read(args.pix_table_path,
                             format='ascii.commented_header')
    fluxsum_J = np.full(len(pixel_table), 0, dtype=float)
    varsum_J = np.full(len(pixel_table), 0, dtype=float)
    fluxsum_Ks = np.full(len(pixel_table), 0, dtype=float)
    varsum_Ks = np.full(len(pixel_table), 0, dtype=float)
    star_count = np.zeros(len(pixel_table), dtype=int)

    fields = ["M31-{0:d}".format(i) for i in range(1, 28)] + \
             ["M31-{0:d}".format(i) for i in range(47, 72)]
    # fields = ['M31-1']
    for field in fields:
        print "Processing", field
        data = load_photometry(field)
        x, y = wcs.wcs_world2pix(data['ra'], data['dec'], 0)
        # Round down to pixel indices
        x = x.astype(int)
        y = y.astype(int)
        # Keep only stars contained inside the image footprint
        ny, nx = segmap.shape
        s = np.where((x >= 0) & (y >= 0) & (x < nx) & (y < ny)
                     & np.isfinite(data['J']) & np.isfinite(data['Ks'])
                     & np.isfinite(data['J_err']) & np.isfinite(data['Ks_err'])
                     & (data['cfrac'] > 0.))[0]
        data = data[s]
        n_stars = data.shape[0]
        flux_J, flux_var_J = mag_to_mjy(data['J'], data['J_err'])
        flux_Ks, flux_var_Ks = mag_to_mjy(data['Ks'], data['Ks_err'])
        for i in range(n_stars):
            bin_id = segmap[y[i], x[i]]
            if bin_id >= 0:
                # add light to bin
                fluxsum_J[bin_id] += flux_J[i] / data['cfrac'][i]
                fluxsum_Ks[bin_id] += flux_Ks[i] / data['cfrac'][i]
                varsum_J[bin_id] += flux_var_J[i]
                varsum_Ks[bin_id] += flux_var_Ks[i]
                star_count[bin_id] += 1

    empty = np.where(star_count == 0)[0]
    fluxsum_J[empty] = np.nan
    fluxsum_Ks[empty] = np.nan
    varsum_J[empty] = np.nan
    varsum_Ks[empty] = np.nan

    flux_err_J = np.sqrt(varsum_J)
    flux_err_Ks = np.sqrt(varsum_Ks)
    pixel_table['n_stars'] = star_count
    pixel_table['synth_J'] = fluxsum_J
    pixel_table['synth_Ks'] = fluxsum_Ks
    pixel_table['synth_J_err'] = flux_err_J
    pixel_table['synth_Ks_err'] = flux_err_Ks
    pixel_table.write(args.output_path, format='ascii.commented_header')
Example #9
def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir):
    tmpfile = tmpdir.join('temp.fits')
    ccd_data.write(tmpfile.strpath)
    log.setLevel('INFO')
    explicit_unit_name = "photon"
    with log.log_to_list() as log_list:
        ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
        assert explicit_unit_name in log_list[0].message
Example #11
def test_log_to_file(tmpdir, level):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1

    # Check list content

    assert eval(log_entries[0].strip())[-3:] == ('astropy.tests.test_logger',
                                                 'ERROR', 'Error message')

    if len(log_entries) >= 2:
        assert eval(
            log_entries[1].strip())[-3:] == ('astropy.tests.test_logger',
                                             'WARNING', 'Warning message')

    if len(log_entries) >= 3:
        assert eval(
            log_entries[2].strip())[-3:] == ('astropy.tests.test_logger',
                                             'INFO', 'Information message')

    if len(log_entries) >= 4:
        assert eval(
            log_entries[3].strip())[-3:] == ('astropy.tests.test_logger',
                                             'DEBUG', 'Debug message')
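
For reference, `log_to_file` also works outside a test; a minimal sketch using only astropy's `log`:

from astropy import log

# Records emitted inside the block are appended to run.log.
with log.log_to_file('run.log'):
    log.info("this line also goes to run.log")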
Example #12
def make_cube_shh(**kwargs):
    """ Shush! Opens the cube without triggering a wall of warnings. """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        old_log = log.level
        log.setLevel('ERROR')
        spc = make_cube(**kwargs)
        log.setLevel(old_log)

    return spc
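
This hush idiom recurs throughout these examples (see also #5, #30 and #33), so it can be factored into a small context manager. A sketch assuming only astropy's `log`; the `quiet` helper itself is hypothetical:

import warnings
from contextlib import contextmanager

from astropy import log

@contextmanager
def quiet():
    # Suppress warnings and raise the astropy log threshold to ERROR,
    # restoring the previous level even if the body raises.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        old_level = log.level
        log.setLevel('ERROR')
        try:
            yield
        finally:
            log.setLevel(old_level)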
Example #13
def test_log_to_file(tmpdir, level):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1

    # Check list content

    assert eval(log_entries[0].strip())[-3:] == (
        'astropy.tests.test_logger', 'ERROR', 'Error message')

    if len(log_entries) >= 2:
        assert eval(log_entries[1].strip())[-3:] == (
            'astropy.tests.test_logger', 'WARNING', 'Warning message')

    if len(log_entries) >= 3:
        assert eval(log_entries[2].strip())[-3:] == (
            'astropy.tests.test_logger', 'INFO', 'Information message')

    if len(log_entries) >= 4:
        assert eval(log_entries[3].strip())[-3:] == (
            'astropy.tests.test_logger', 'DEBUG', 'Debug message')
Example #14
def process_notebooks(nbfile_or_path, exec_only=False, verbosity=None,
                      **kwargs):
    """
    Execute and optionally convert the specified notebook file or directory of
    notebook files.

    This is a wrapper around the ``NBTutorialsConverter`` class that does file
    handling.

    Parameters
    ----------
    nbfile_or_path : str
        Either a single notebook filename or a path containing notebook files.
    exec_only : bool, optional
        Just execute the notebooks, don't convert them.
    verbosity : int, optional
        A ``logging`` verbosity level, e.g. ``logging.DEBUG``, to specify
        the log level.
    **kwargs
        Any other keyword arguments are passed to the ``NBTutorialsConverter``
        init.

    """
    if verbosity is not None:
        logger.setLevel(verbosity)

    if path.isdir(nbfile_or_path):
        # It's a path, so we need to walk through recursively and find any
        # notebook files
        for root, dirs, files in walk(nbfile_or_path):
            for name in files:
                _,ext = path.splitext(name)
                full_path = path.join(root, name)

                if 'ipynb_checkpoints' in full_path: # skip checkpoint saves
                    continue

                if name.startswith('exec'): # notebook already executed
                    continue

                if ext == '.ipynb':
                    nbc = NBTutorialsConverter(full_path, **kwargs)
                    nbc.execute()

                    if not exec_only:
                        nbc.convert()

    else:
        # It's a single file, so convert it
        nbc = NBTutorialsConverter(nbfile_or_path, **kwargs)
        nbc.execute()

        if not exec_only:
            nbc.convert()
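
A typical invocation of the wrapper (the path is hypothetical):

import logging

# Execute every notebook under tutorials/ without converting, with debug logs.
process_notebooks('tutorials/', exec_only=True, verbosity=logging.DEBUG)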
Example #15
def snaplist(fname):
    from dragons import meraxes
    from astropy.table import Table
    from astropy import units as U, log

    log.setLevel('WARNING')
    meraxes.io.set_little_h(fname)
    snaplist, zlist, lbtime = meraxes.io.read_snaplist(fname)
    tab = Table((snaplist, zlist, lbtime),
                names=('snapshot', 'redshift', 'lookback_time'))
    tab['lookback_time'].unit = U.Myr
    tab.pprint(max_lines=-1, max_width=-1)
Example #16
def xHI_evo(fname, weight):
    from dragons import meraxes
    from astropy.table import Table
    from astropy import units as U, log

    log.setLevel('WARNING')
    meraxes.io.set_little_h(fname)
    snaplist, zlist, lbtime = meraxes.io.read_snaplist(fname)
    xhi = meraxes.io.read_global_xH(fname, snaplist, weight=weight, quiet=True)
    tab = Table((snaplist, zlist, lbtime, xhi),
                names=('snapshot', 'redshift', 'lookback_time', 'xHI'))
    tab['lookback_time'].unit = U.Myr
    tab.pprint(max_lines=-1, max_width=-1)
Example #17
def main():
    log.setLevel("INFO")
    fields = ('halo11', 'stream', 'disk', 'halo21', 'halo35a', 'halo35b')
    cols = defaultdict(list)
    for fieldname in fields:
        result = process_field(fieldname)
        for k, v in result.items():
            cols[k].append(v)
    names = ['name', 'ra', 'dec', 'radius', 'f606w', 'f606w_err',
        'f814w', 'f814w_err']
    collist = [cols[k] for k in names]
    tbl = Table(collist, names=names)
    tbl.write("brown_sb.txt", format='ascii.commented_header')
Example #18
def toy_observation(snr=2,
                    debug=False,
                    seed=0,
                    v1=10,
                    v2=70,
                    nchan=1000,
                    truth_narrow=[4, 40, 1.0],
                    truth_wide=[2, 41, 3.0]):
    np.random.seed(seed)
    if debug:  # <CheatMode>
        log.setLevel('DEBUG')

    # initialize the spectral axis
    xunit, bunit = 'km/s', 'K'
    refX = 120 * u.GHz
    log.debug("Genarating a spactral axis instance from {}"
              " to {} {}".format(v1, v2, xunit))
    xarr = SpectroscopicAxis(np.linspace(v1, v2, nchan) * u.Unit(xunit),
                             refX=refX,
                             velocity_convention='radio')

    # generate a spectrum approximated by a gaussian
    log.debug("Gaussian parameters for the"
              " narrow component: {}".format(truth_narrow))
    log.debug("Gaussian parameters for the"
              " wide component: {}".format(truth_wide))
    true_data_narrow = gaussian(xarr, *truth_narrow)
    true_data_wide = gaussian(xarr, *truth_wide)

    true_total = true_data_narrow + true_data_wide
    signal_peak = true_total.max()
    log.debug("For a signal-to-noise ratio of {} the square root of noise"
              " variance is {:.2f} {}.".format(snr, signal_peak / snr, bunit))
    noise = np.random.normal(loc=0, scale=signal_peak / snr, size=xarr.size)

    observed = true_total + noise

    log.setLevel('INFO')  # </CheatMode>

    # make a spectrum class instance in Tmb units
    xarr._make_header()

    sp = Spectrum(xarr=xarr, data=observed, unit=u.Unit(bunit), header={})
    sp.header['NPEAKS'] = 2
    sp.header['NOISERMS'] = round(signal_peak / snr, 4)
    for comp, name in zip([truth_narrow, truth_wide], ['1', '2']):
        sp.header['AMP_' + name] = comp[0]
        sp.header['XOFF_' + name] = comp[1]
        sp.header['SIG_' + name] = comp[2]
    return sp
Example #19
def test_log_to_list(level):

    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_list() as log_list:
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")
    finally:
        log.setLevel(orig_level)

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_list) == 4
    elif level == 'INFO':
        assert len(log_list) == 3
    elif level == 'WARN':
        assert len(log_list) == 2
    elif level == 'ERROR':
        assert len(log_list) == 1

    # Check list content

    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith('Error message')
    assert log_list[0].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 2:
        assert log_list[1].levelname == 'WARNING'
        assert log_list[1].message.startswith('Warning message')
        assert log_list[1].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 3:
        assert log_list[2].levelname == 'INFO'
        assert log_list[2].message.startswith('Information message')
        assert log_list[2].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 4:
        assert log_list[3].levelname == 'DEBUG'
        assert log_list[3].message.startswith('Debug message')
        assert log_list[3].origin == 'astropy.tests.test_logger'
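
A minimal standalone use of `log_to_list`, the context manager this test exercises; it captures records emitted while the block runs:

from astropy import log

with log.log_to_list() as records:
    log.warning("something happened")

assert records[0].levelname == 'WARNING'
assert records[0].message.startswith('something happened')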
Example #21
def set_logger_verbosity(args):
    # Set logger level based on verbose flags
    if args.verbosity != 0:
        if args.verbosity == 1:
            logger.setLevel(logging.DEBUG)
        else: # anything >= 2
            logger.setLevel(1)
    elif args.quietness != 0:
        if args.quietness == 1:
            logger.setLevel(logging.WARNING)
        else: # anything >= 2
            logger.setLevel(logging.ERROR)
    else: # default
        logger.setLevel(logging.INFO)
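
A sketch of the argparse wiring such a function expects, with repeatable flags tallied via `action='count'` (the flag names are assumptions):

import argparse
import logging

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# -vv yields verbosity=2, -qq yields quietness=2, and so on.
parser.add_argument('-v', '--verbose', dest='verbosity', action='count', default=0)
parser.add_argument('-q', '--quiet', dest='quietness', action='count', default=0)

args = parser.parse_args(['-vv'])
set_logger_verbosity(args)  # logger now at level 1, i.e. everything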
Example #22
    def __init__(
        self,
        parfile,
        timfile=None,
        warnings=False,
        fixprefiterrors=True,
        dofit=False,
        maxobs=None,
        units=False,
    ):
        """
        The same init function as used in libstempo

        :param parfile:
            Name of the parfile

        :param timfile:
            Name of the timfile, if we want to load it

        :param warnings:
            Whether we are showing warnings

        :param fixprefiterrors:
            TODO: check what this should do

        :param maxobs:
            PINT has no need for a maxobs parameter. Included here for
            compatibility

        :param units:
            Whether or not we are using the 'units' interface of libstempo
        """
        if warnings:
            log.setLevel("INFO")
        else:
            log.setLevel("ERROR")

        self.loadparfile(parfile)

        if timfile is not None:
            self.loadtimfile(timfile)
        else:
            self.t = None
            self.deleted = None

        if dofit and self.t is not None:
            self.fit()

        self._units = units
Example #23
def hmf_solve(newflux, newivar, K=4, nonnegative=False, epsilon=None, verbose=False):
    """Drop-in replacement for :func:`~pydl.pydlspec2d.spec1d.pca_solve`.

    Parameters
    ----------
    newflux : array-like
        The input spectral flux, assumed to have a common wavelength and
        redshift system.
    newivar : array-like
        The inverse variance of the spectral flux.
    K : :class:`int`, optional
        The number of dimensions of the factorization (default 4).
    nonnegative : :class:`bool`, optional
        Set this to ``True`` to perform nonnegative HMF.
    epsilon : :class:`float`, optional
        Regularization parameter.  Set to any non-zero float value to turn it on.
    verbose : :class:`bool`, optional
        If ``True``, print extra information.

    Returns
    -------
    :class:`dict`
        The HMF solution.
    """
    import numpy as np
    from astropy import log

    if verbose:
        log.setLevel("DEBUG")
    if len(newflux.shape) == 1:
        nobj = 1
        npix = newflux.shape[0]
    else:
        nobj, npix = newflux.shape
    log.info("Building HMF from {0:d} object spectra.".format(nobj))
    fluxdict = dict()
    #
    # If there is only one object spectrum, then all we can do is return it.
    #
    if nobj == 1:
        fluxdict["flux"] = newflux.astype("f")
        return fluxdict
    a, g = hmf_iterate(newflux, newivar, K=K, nonnegative=nonnegative, epsilon=epsilon)
    fluxdict["acoeff"] = a
    fluxdict["flux"] = g
    return fluxdict
Example #24
def main():
    log.setLevel("INFO")

    parser = argparse.ArgumentParser()
    parser.add_argument("bricks", type=int, nargs='*', help="Brick number(s)")
    parser.add_argument("instrument", help="PHAT instrument",
        choices=['phat_acs', 'phat_ir', 'phat_uv'])
    args = parser.parse_args()

    for brick in args.bricks:
        if args.instrument == 'phat_acs':
            process_acs(brick)
        elif args.instrument == 'phat_ir':
            process_ir(brick)
        elif args.instrument == 'phat_uv':
            process_uv(brick)
Example #25
def test_compare_action_prepare():

    from ..actionangle import _action_prepare, _angle_prepare

    logger.setLevel(logging.ERROR)
    AA = np.random.uniform(0., 100., size=(1000, 6))
    t = np.linspace(0., 100., 1000)

    act_san, n_vectors = solver.solver(AA, N_max=6, symNx=2)
    A2, b2, n = _action_prepare(AA.T, N_max=6, dx=2, dy=2, dz=2)
    act_apw = np.array(solve(A2, b2))

    ang_san = solver.angle_solver(AA, t, N_max=6, symNx=2, sign=1)
    A2, b2, n = _angle_prepare(AA.T, t, N_max=6, dx=2, dy=2, dz=2)
    ang_apw = np.array(solve(A2, b2))

    assert np.allclose(act_apw, act_san)
Example #26
def main():
    log.setLevel("INFO")

    parser = argparse.ArgumentParser()
    parser.add_argument("bricks", type=int, nargs='*', help="Brick number(s)")
    parser.add_argument("instrument",
                        help="PHAT instrument",
                        choices=['phat_acs', 'phat_ir', 'phat_uv'])
    args = parser.parse_args()

    for brick in args.bricks:
        if args.instrument == 'phat_acs':
            process_acs(brick)
        elif args.instrument == 'phat_ir':
            process_ir(brick)
        elif args.instrument == 'phat_uv':
            process_uv(brick)
Example #27
def test_compare_action_prepare():

    from ..actionangle import _action_prepare, _angle_prepare

    logger.setLevel(logging.ERROR)
    AA = np.random.uniform(0., 100., size=(1000,6))
    t = np.linspace(0., 100., 1000)

    act_san,n_vectors = solver.solver(AA, N_max=6, symNx=2)
    A2,b2,n = _action_prepare(AA.T, N_max=6, dx=2, dy=2, dz=2)
    act_apw = np.array(solve(A2,b2))

    ang_san = solver.angle_solver(AA, t, N_max=6, symNx=2, sign=1)
    A2,b2,n = _angle_prepare(AA.T, t, N_max=6, dx=2, dy=2, dz=2)
    ang_apw = np.array(solve(A2,b2))

    assert np.allclose(act_apw, act_san)
Example #28
def testing_K_sort(Kfile='Ks.fits', index=0, debug=False):
    if debug:
        log.setLevel('DEBUG')

    K_vals = fits.getdata(Kfile)[index]
    K_new = np.inf

    tasks = get_tasks(method='Bfactor', npeaks=1, cut=20)
    for job in tasks:
        _, _, npeaks, y, x, _, p = job.split(' ')
        x, y = int(x), int(y)
        K_new, K_old = K_vals[y, x], K_new

        assert K_old > K_new

        log.debug("K = {:7.2f} at (x, y) = ({:2d}, "
                  "{:2d}), {} done".format(K_new, x, y, p))
Example #29
    def _get_logger(self):
        loglevel = logging.INFO
        log = logging.getLogger(self.LOG_FILENAME)
        if not getattr(log, 'handler_set', None):
            log.setLevel(logging.INFO)
            sh = logging.StreamHandler()
            sh.setFormatter(formatter)
            log.addHandler(sh)

            fh = logging.FileHandler(self.LOG_FILENAME)
            fh.setLevel(logging.INFO)
            fh.setFormatter(formatter)
            log.addHandler(fh)

            log.setLevel(loglevel)
            log.handler_set = True
        return log
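
The `handler_set` attribute guards against attaching duplicate handlers when `_get_logger` is called more than once; checking `Logger.handlers` directly is a standard-library alternative. A sketch:

import logging

def get_logger(name):
    log = logging.getLogger(name)
    if not log.handlers:  # same guard, without a custom attribute
        sh = logging.StreamHandler()
        sh.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
        log.addHandler(sh)
        log.setLevel(logging.INFO)
    return log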
Example #30
    def load_cube(self, fitsfile=None, cube=None):
        """
        Load in a cube

        Parameters
        ----------
        fitsfile : fits
            File in fits format to be read in
        cube : spectral cube
            If fits file is not supplied - provide a spectral cube object
            instead

        """

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            old_log = log.level
            log.setLevel('ERROR')

            # Read in the datacube
            if cube is None:
                _cube = SpectralCube.read(fitsfile).with_spectral_unit(
                    u.km / u.s, velocity_convention='radio')
            else:
                _cube = cube

            if _cube.spectral_axis.diff()[0] < 0:
                if np.abs(_cube.spectral_axis[0].value -
                          _cube[::-1].spectral_axis[-1].value) > 1e-5:
                    raise ImportError("Update to a more recent version of "
                                      "spectral-cube or reverse the axes "
                                      "manually.")
                _cube = _cube[::-1]

            # Trim cube if necessary
            if (self.ppv_vol[2] is not None) & (self.ppv_vol[3] is not None):
                _cube = _cube[:, int(self.ppv_vol[2]):int(self.ppv_vol[3]), :]
            if (self.ppv_vol[4] is not None) & (self.ppv_vol[5] is not None):
                _cube = _cube[:, :, int(self.ppv_vol[4]):int(self.ppv_vol[5])]

            self.cube = _cube
            # Generate the x axis common to the fitting process
            self.x, self.xtrim, self.trimids = get_x_axis(self)
            # Compute typical noise within the spectra
            self.rms_approx = compute_noise(self)
Example #31
def testing_snr_sort(snrmap11=None,
                     snrmap22=None,
                     debug=False,
                     cut=5,
                     line='nh311',
                     n_cpu=7,
                     run=False):
    """
    Assures that the S/N ordering is being executed properly.
    Was written for an ammonia cube data, have yet to generalize.
    """
    if debug:
        log.setLevel('DEBUG')

    if snrmap11 is None and snrmap22 is None:
        snrmap11, snrmap22 = get_vla_snr()

    snr, snr_prev = {}, {'nh311': np.inf, 'nh322': np.inf}
    tasks_by_snr = get_tasks(n_cpu=n_cpu,
                             method='snr',
                             cut=cut,
                             line=line,
                             snr11=snrmap11,
                             snr22=snrmap22,
                             testing=True)
    for job in tasks_by_snr:
        _, _, _, npeaks, y, x, _, p = job.split(' ')
        x, y = int(x), int(y)
        snr['nh311'], snr['nh322'] = snrmap11[y, x], snrmap22[y, x]

        # make sure the snr job list progresses downwards
        assert snr[line] <= snr_prev[line]

        log.debug("S/R @ NH3 (1,1) = {:.2f}, "
                  "S/R @ NH3 (2,2) = {:.2f} at (x, y) = "
                  "({:2d}, {:2d}), {} done".format(snr['nh311'], snr['nh322'],
                                                   x, y, p))

        # used later for recurrent relation reasons...
        snr_prev['nh311'], snr_prev['nh322'] = snr['nh311'], snr['nh322']

    if run:
        pool = multiprocessing.Pool(processes=n_cpu)
        pool.map(work, tasks_by_snr)
Example #32
    def __init__(self, parfile, timfile=None, warnings=False,
            fixprefiterrors=True, dofit=False, maxobs=None,
            units=False):
        """
        The same init function as used in libstempo

        :param parfile:
            Name of the parfile

        :param timfile:
            Name of the timfile, if we want to load it

        :param warnings:
            Whether we are showing warnings

        :param fixprefiterrors:
            TODO: check what this should do

        :param maxobs:
            PINT has no need for a maxobs parameter. Included here for
            compatibility

        :param units:
            Whether or not we are using the 'units' interface of libstempo
        """
        if warnings:
            log.setLevel('INFO')
        else:
            log.setLevel('ERROR')

        self.loadparfile(parfile)

        if timfile is not None:
            self.loadtimfile(timfile)
        else:
            self.t = None
            self.deleted = None

        if dofit and self.t is not None:
            self.fit()

        self._units = units
Example #33
def recreate_model(scouseobject, spectrum, bf):
    """
    Recreates model from parameters

    Parameters
    ----------
    scouseobject : Instance of the scousepy class
    spectrum : pyspeckit spectrum
        spectrum from which to recreate the model
    bf : instance of the fit class
        best-fitting model solution to the spectrum

    """

    # Make pyspeckit be quiet
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        old_log = log.level
        log.setLevel('ERROR')
        # generate a spectrum
        spec = get_spec(scouseobject, spectrum)
        spec.specfit.fittype = bf.fittype
        spec.specfit.fitter = spec.specfit.Registry.multifitters[bf.fittype]
        if bf.ncomps != 0.0:
            mod = np.zeros([len(scouseobject.xtrim), int(bf.ncomps)])
            for k in range(int(bf.ncomps)):
                modparams = bf.params[(
                    k * len(bf.parnames)):(k * len(bf.parnames)) +
                                      len(bf.parnames)]
                mod[:, k] = spec.specfit.get_model_frompars(
                    scouseobject.xtrim, modparams)
            totmod = np.nansum(mod, axis=1)
            res = (get_flux(scouseobject, spectrum)).value - totmod
        else:
            mod = np.zeros([len(scouseobject.xtrim), 1])
            res = (get_flux(scouseobject, spectrum)).value
        log.setLevel(old_log)

    return mod, res
Example #34
def pytest_configure(config):
    """ called after command line options have been parsed
        and all plugins and initial conftest files been loaded.
    """

    import logging
    from astropy import log as logger

    if config.getoption('verbose') == 2:
        logger.setLevel(logging.DEBUG)

    elif config.getoption('verbose') == 1:
        logger.setLevel(logging.INFO)

    elif config.getoption('quiet'):
        logger.setLevel(logging.WARN)

    else:
        logger.setLevel(logging.INFO+1)
Example #35
@login_required
def summary(job_name, params):
    output = ResultInfo(params)

    return render_template('summary.html',
                           title='Results for {}'.format(job_name),
                           output=output)


@app.route('/download/<bucket_name>/<filename>/', methods=['GET', 'POST'])
def download(filename, bucket_name):
    # Connect to S3
    s3conn = return_s3_connection({"aws_access_key_id": key,
                                   "aws_secret_access_key": secret})
    try:
        bucket = s3conn.get_bucket(bucket_name)
    except Exception:
        abort(404)

    if bucket.get_key(filename) is None:
        raise ValueError("The filename ({0}) does not exist in the given "
                         "bucket ({1})".format(filename, bucket_name))

    # redirect to the url of the file hosted on S3
    return redirect(urljoin(bucket_name, filename))


if __name__ == '__main__':
    log.setLevel(10)
    app.run(debug=True)
Example #36
def main():
    script = os.path.splitext(os.path.basename(__file__))[0]
    log.info("[SCRIPT] {}".format(script))

    parser = argparse.ArgumentParser(
        description='Display each event in the file')
    parser.add_argument('-f',
                        '--file',
                        dest='input_path',
                        action='store',
                        default=get_path('gamma_test.simtel.gz'),
                        help='path to the input file. '
                        'Default = gamma_test.simtel.gz')
    parser.add_argument('-O',
                        '--origin',
                        dest='origin',
                        action='store',
                        default='hessio',
                        help='origin of the file: {}. Default = hessio'.format(
                            InputFile.origin_list()))
    parser.add_argument('-D',
                        dest='display',
                        action='store_true',
                        default=False,
                        help='display the camera events')
    parser.add_argument('--pdf',
                        dest='output_path',
                        action='store',
                        default=None,
                        help='path to store a pdf output of the plots')
    parser.add_argument('-t',
                        '--telescope',
                        dest='tel',
                        action='store',
                        type=int,
                        default=None,
                        help='telescope to view. '
                        'Default = All')

    calibration_arguments(parser)

    logger_detail = parser.add_mutually_exclusive_group()
    logger_detail.add_argument('-q',
                               '--quiet',
                               dest='quiet',
                               action='store_true',
                               default=False,
                               help='Quiet mode')
    logger_detail.add_argument('-v',
                               '--verbose',
                               dest='verbose',
                               action='store_true',
                               default=False,
                               help='Verbose mode')
    logger_detail.add_argument('-d',
                               '--debug',
                               dest='debug',
                               action='store_true',
                               default=False,
                               help='Debug mode')

    args = parser.parse_args()
    print('DEBUG type(args) {}'.format(type(args)))
    print('DEBUG args {}'.format(args))
    params = calibration_parameters(args)

    if args.quiet:
        log.setLevel(40)
    if args.verbose:
        log.setLevel(20)
    if args.debug:
        log.setLevel(10)

    log.debug("[file] Reading file")
    input_file = InputFile(args.input_path, args.origin)
    source = input_file.read()

    # geom_dict is a dictionary of CameraGeometry, with keys of
    # (num_pixels, focal_length), the parameters that are used to guess the
    # geometry of the telescope. By using these keys, the geometry is
    # calculated only once per telescope type as needed, reducing computation
    # time.
    # Creating a geom_dict at this point is optional, but is recommended, as
    # the same geom_dict can then be shared between the calibration and
    # CameraPlotter, again reducing computation time.
    # The dictionary becomes filled as a result of a dictionary's mutable
    # nature.
    geom_dict = {}

    # Calibrate events and fill geom_dict

    calibrated_source = calibrate_source(source, params, geom_dict)

    fig = plt.figure(figsize=(16, 7))
    if args.display:
        plt.show(block=False)
    pp = PdfPages(args.output_path) if args.output_path is not None else None
    for event in calibrated_source:
        tels = list(event.dl0.tels_with_data)
        if args.tel is None:
            tel_loop = tels
        else:
            if args.tel not in tels:
                continue
            tel_loop = [args.tel]
        log.debug(tels)
        for tel_id in tel_loop:
            display_telescope(event, tel_id, args.display, geom_dict, pp, fig)
    if pp is not None:
        pp.close()

    log.info("[COMPLETE]")
Example #37
def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
    """
    Copy the primary WCS to the header as an alternate WCS
    with wcskey and name WCSNAME. It loops over all extensions in 'ext'

    Parameters
    ----------
    fname :  string or `astropy.io.fits.HDUList`
        file name or a file object
    ext :    int, tuple, str, or list of integers or tuples (e.g.('sci',1))
        fits extensions to work with
        If a string is provided, it should specify the EXTNAME of extensions
        with WCSs to be archived
    wcskey : string "A"-"Z" or " "
        if " ": get next available key if wcsname is also " " or try
        to get a key from WCSNAME value
    wcsname : string
        Name of alternate WCS description
    reusekey : boolean
        if True - overwrites a WCS with the same key

    Examples
    --------
    Copy the primary WCS of an in-memory headerlet object to an
    alternate WCS with key 'T'

    >>> hlet=headerlet.createHeaderlet('junk.fits', 'hdr1.fits')
    >>> altwcs.wcskeys(hlet[1].header)
    ['A']
    >>> altwcs.archiveWCS(hlet, ext=[('SIPWCS',1),('SIPWCS',2)], wcskey='T')
    >>> altwcs.wcskeys(hlet[1].header)
    ['A', 'T']


    See Also
    --------
    wcsutil.restoreWCS: Copy an alternate WCS to the primary WCS

    """

    if isinstance(fname, str):
        f = fits.open(fname, mode='update')
    else:
        f = fname

    if not _parpasscheck(f, ext, wcskey, wcsname):
        closefobj(fname, f)
        raise ValueError("Input parameters problem")

    # Interpret input 'ext' value to get list of extensions to process
    ext = _buildExtlist(f, ext)

    if not wcskey and not wcsname:
        raise KeyError("Either wcskey or wcsname should be specified")

    if wcsname.strip() == "":
        try:
            wcsname = readAltWCS(f, ext[0], wcskey=" ")['WCSNAME']
        except KeyError:
            pass
    wcsext = ext[0]
    if wcskey != " " and wcskey in wcskeys(f[wcsext].header) and not reusekey:
        closefobj(fname, f)
        raise KeyError("Wcskey %s is aready used. \
        Run archiveWCS() with reusekey=True to overwrite this alternate WCS. \
        Alternatively choose another wcskey with altwcs.available_wcskeys()." % wcskey)
    elif wcskey == " ":
        # wcsname exists, overwrite it if reuse is True or get the next key
        if wcsname.strip() in wcsnames(f[wcsext].header).values():
            if reusekey:
                # try getting the key from an existing WCS with WCSNAME
                wkey = getKeyFromName(f[wcsext].header, wcsname)
                wname = wcsname
                if wkey == ' ':
                    wkey = next_wcskey(f[wcsext].header)
                elif wkey is None:
                    closefobj(fname, f)
                    raise KeyError("Could not get a valid wcskey from wcsname %s" % wcsname)
            else:
                closefobj(fname, f)
                raise KeyError("Wcsname %s is aready used. \
                Run archiveWCS() with reusekey=True to overwrite this alternate WCS. \
                Alternatively choose another wcskey with altwcs.available_wcskeys() or\
                choose another wcsname." % wcsname)
        else:
            wkey = next_wcskey(f[wcsext].header)
            if wcsname.strip():
                wname = wcsname
            else:
                # determine which WCSNAME needs to be replicated in archived WCS
                wnames = wcsnames(f[wcsext].header)
                if 'O' in wnames: del wnames['O']  # we don't want OPUS/original
                if len(wnames) > 0:
                    if ' ' in wnames:
                        wname = wnames[' ']
                    else:
                        akeys = string.ascii_uppercase
                        wname = "DEFAULT"
                        for key in akeys[::-1]:
                            if key in wnames:
                                wname = wnames[key]
                                break
                else:
                    wname = "DEFAULT"
    else:
        wkey = wcskey
        wname = wcsname
    log.setLevel('WARNING')
    for e in ext:
        hdr = _getheader(f, e)
        w = pywcs.WCS(hdr, f)
        hwcs = w.to_header()

        if hwcs is None:
            continue

        if w.sip is not None:
            for i in range(1, w.naxis + 1):
                hwcs['CTYPE{0}'.format(i)] = hwcs['CTYPE{0}'.format(i)] + '-SIP'

        if w.wcs.has_cd():
            hwcs = pc2cd(hwcs, key=" ")

        wcsnamekey = 'WCSNAME' + wkey
        f[e].header[wcsnamekey] = wname

        try:
            old_wcsname = hwcs.pop('WCSNAME')
        except KeyError:
            pass

        for k in hwcs.keys():
            key = k[: 7] + wkey
            f[e].header[key] = hwcs[k]
    log.setLevel(default_log_level)
    closefobj(fname, f)
Example #38
from pint import toa, utils, erfautils
import pint.observatories as obsmod
import math, shlex, subprocess, numpy
import astropy.constants as const
import astropy.units as u
from pint.utils import PosVel
from astropy import log
import os

from pinttestdata import testdir, datadir

log.setLevel('ERROR')
# for nice output info, set the following instead
#log.setLevel('INFO')

observatories = obsmod.read_observatories()

ls = u.def_unit('ls', const.c * 1.0 * u.s)

log.info("Reading TOAs into PINT")
ts = toa.get_TOAs(datadir + "/testtimes.tim",usepickle=False)
if log.level < 25:
    ts.print_summary()
ts.table.sort('index')

log.info("Calling TEMPO2")
#cmd = 'tempo2 -output general2 -f tests/testtimes.par tests/testtimes.tim -s "XXX {clock0} {clock1} {clock2} {clock3} {tt} {t2tb} {telSSB} {telVel} {Ttt}\n"'
cmd = 'tempo2 -output general2 -f ' + datadir+'/testtimes.par ' + datadir + \
      '/testtimes.tim -s "XXX {clock0} {clock1} {clock2} {clock3} {tt} {t2tb} {earth_ssb1} {earth_ssb2} {earth_ssb3} {earth_ssb4} {earth_ssb5} {earth_ssb6} {telEpos} {telEVel} {Ttt}\n"'
args = shlex.split(cmd)
Example #39
import os
import glob
import math
import subprocess
import re
import sys
import datetime
import shutil
from decimal import Decimal
from astropy.io import fits
from astropy import wcs

from astropy import log

log.setLevel('ERROR')

from astropy import units as u
import ccdproc

import numpy as np

from collections import defaultdict


def logme(msg):
    # `log` is assumed to be rebound to a writable file-like object before
    # this is called; astropy's log (imported above) has no write().
    log.write(msg + "\n")
    print(msg)
    return

Example #40
"""The main function is eigenvalues_Heassian() which outputs the three eigenvalues for the input orbit.
The action/angles/frequencies are also calculated for this orbit. """

__author__ = "spearson <*****@*****.**>"

# Standard library
import logging
import sys

# Third-party
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as inter
from astropy import log as logger
logger.setLevel(logging.DEBUG)
from astropy.constants import G

# Stream-Team
import streamteam.integrate as si
from streamteam.potential.lm10 import LM10Potential
from streamteam.potential.apw import PW14Potential
from streamteam.dynamics.actionangle import find_actions, fit_isochrone
from streamteam.potential import IsochronePotential
from streamteam.units import galactic

G = G.decompose(galactic).value

cache_path = "/home/spearson/Hessian/stream-team/hessian"

#---------------------- We want to check both LM10() triax & LM10(q1=1,q2=1,q3=1)-------------------------
Example #41
    for lon in np.arange(25, 215+1, constants.STRIPWIDTH):
        for part in ['a', 'b']:
            path = os.path.join(constants.DESTINATION,
                                'concatenated',
                                'light',
                                'iphas-dr2-{0:03d}{1}-light.fits'.format(
                                                                    lon, part))
            instring += 'in={0} '.format(path)

    # Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set
    param = {'stilts': constants.STILTS,
             'in': instring,
             'out': output_filename}

    cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'
    mycmd = cmd.format(**param)
    log.debug(mycmd)
    status = os.system(mycmd)
    log.info('concat: '+str(status))

    return status


################################
# MAIN EXECUTION (FOR DEBUGGING)
################################

if __name__ == "__main__":
    log.setLevel('DEBUG')
    concatenate_one(215)
Example #42
def make_spw_cube(spw='spw{0}',
                  spwnum=0,
                  fntemplate='OrionSourceI',
                  overwrite_existing=False,
                  bmaj_limits=None,
                  fnsuffix="",
                  filesuffix='image.pbcor.fits',
                  first_endchannel='*',
                  cropends=False,
                  minimize=True,
                  debug_mode=False,
                  add_beam_info=True):
    """
    Parameters
    ----------
    spw : str
        String template for the input/output name
    spwnum : int
        The spectral window number
    fntemplate : str
        Filename template (goes into the glob)
    overwrite_existing : bool
        Overwrite data in the output cube?
    cropends: bool or int
        Number of pixels to crop off the ends of an image
    minimize: bool
        Compute the spatial minimal subcube before building the cube?  Slices
        for all subsequent cubes will be computed from the first cube.
    """
    if debug_mode:
        lvl = log.getEffectiveLevel()
        log.setLevel('DEBUG')

    spw = spw.format(spwnum)

    big_filename = '{1}_{0}{2}_lines.fits'.format(spw, fntemplate, fnsuffix)

    header_fn = glob.glob(
        'OrionSourceI.B3.{0}.lines0-{4}.clarkclean1000.{3}'.format(
            spw, fntemplate, fnsuffix, filesuffix, first_endchannel))
    if len(header_fn) != 1:
        raise ValueError(
            "Found too many or too few matches: {0}".format(header_fn))
    else:
        header_fn = header_fn[0]

    # First set up an empty file
    if not os.path.exists(big_filename):
        log.info("Creating large cube based on header {0}".format(header_fn))

        if minimize:
            cube0 = SpectralCube.read(header_fn)
            slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                    spatial_only=True)
            # use the calculated 3rd dimension, plus the difference of the
            # x and y slices
            #header['NAXIS2'] = slices[1].stop-slices[1].start
            #header['NAXIS1'] = slices[2].stop-slices[2].start
            header = cube0[slices].header
        else:
            header = fits.getheader(header_fn)

        # Make an arbitrary, small data before prepping the header
        data = np.zeros((100, 100), dtype=np.float32)
        hdu = fits.PrimaryHDU(data=data, header=header)
        cdelt_sign = np.sign(hdu.header['CDELT3'])
        # Set the appropriate output size (this can be extracted from the LISTOBS)
        naxis3_in = header['NAXIS3']
        header['NAXIS3'] = nchans_total[spwnum]
        header_wcs = wcs.WCS(fits.getheader(header_fn))
        header_specwcs = header_wcs.sub([wcs.WCSSUB_SPECTRAL])
        if cdelt_sign == -1:
            ind0, ind1 = getinds(header_fn)
            #5/20/2017: redoing some of this, and the text below is frightening but no longer relevant
            # a +1 was on the next line before an edit on 4/10/2017
            # it may have been rendered irrelevant when I included +1
            # channel in each cube?  Not clear - the arithmetic no longer
            # makes sense but is empirically necessary.
            assert ind0 == 0

            # these reindex the cube so that it has an increasing cdelt.
            header['CRPIX3'] = 1  #nchans_total[spwnum]
            header['CRVAL3'] = header_specwcs.wcs_pix2world(
                [nchans_total[spwnum]], 1)[0][0]
            header['CDELT3'] = np.abs(header_specwcs.wcs.cdelt[0])

            # ensure that the new CRVAL evaluated at its own position matches
            # the CRVAL3.  This should be impossible to fail unless WCS itself
            # fails
            newheaderspecwcs = wcs.WCS(header).sub([wcs.WCSSUB_SPECTRAL])
            crval3 = newheaderspecwcs.wcs_pix2world([header['CRPIX3']],
                                                    1)[0][0]
            np.testing.assert_array_almost_equal_nulp(crval3, header['CRVAL3'])

        shape = (header['NAXIS3'], header['NAXIS2'], header['NAXIS1'])

        # Write to disk
        header.tofile(big_filename)
        # Using the 'append' io method, update the *header*
        with open(big_filename, 'rb+') as fobj:
            # Seek past the length of the header, plus the length of the
            # data we want to write.
            # The -1 is to account for the final byte that we are about to
            # write:
            # 'seek' works on bytes, so divide #bits / (bytes/bit)
            fobj.seek(
                len(header.tostring()) + (shape[0] * shape[1] * shape[2] *
                                          int(np.abs(header['BITPIX']) / 8)) -
                1)
            fobj.write(b'\0')

        big_cube = SpectralCube.read(big_filename)
        header_cube = SpectralCube.read(header_fn)
        # in both cases, SpectralCube sorts the extrema
        if cdelt_sign == 1:
            np.testing.assert_array_almost_equal_nulp(
                big_cube.spectral_extrema[0].value,
                header_cube.spectral_extrema[0].value)
            np.testing.assert_array_almost_equal_nulp(
                big_cube.wcs.wcs.cdelt, header_cube.wcs.wcs.cdelt)
        elif cdelt_sign == -1:
            np.testing.assert_array_almost_equal_nulp(
                big_cube.spectral_extrema[1].value,
                header_cube.spectral_extrema[1].value)
            np.testing.assert_array_almost_equal_nulp(
                big_cube.wcs.wcs.cdelt[-1] * -1, header_cube.wcs.wcs.cdelt[-1])

        log.info("Cube creation completed.  Now moving on to populating it.")

    # Find the appropriate files (this is NOT a good way to do this!  Better to
    # provide a list.  But wildcards are quick & easy...)
    fileglob = "OrionSourceI.B3.{0}.lines*{3}".format(spw, fntemplate,
                                                      fnsuffix, filesuffix)
    files = glob.glob(fileglob)
    log.info("Files to be merged with glob {0}: ".format(fileglob))
    log.info(str(files))

    # open the file in update mode (it should have the right dims now)
    hdul = fits.open(big_filename, mode='update')
    main_wcs = wcs.WCS(hdul[0].header).sub([wcs.WCSSUB_SPECTRAL])

    if add_beam_info:
        shape = hdul[0].data.shape[0]
        if len(hdul) > 1 and isinstance(hdul[1], fits.BinTableHDU):
            pass
        else:
            hdul.append(
                fits.BinTableHDU(
                    np.recarray(shape,
                                names=['BMAJ', 'BMIN', 'BPA', 'CHAN', 'POL'],
                                formats=['f4', 'f4', 'f4', 'i4', 'i4'])))

    # sorted so that we deal with zero first, since it has potential to be a problem.
    for fn in ProgressBar(sorted(files)):
        log.info("inds={0} fn={1}".format(getinds(fn), fn))
        ind0, ind1 = getinds(fn)

        # this is not correct...?
        # or maybe it only applies if cropends is set....
        # if ind0 == 0:
        #     ind1 = ind1 + 1

        cdelt = fits.getheader(fn)['CDELT3']
        if 'cdelt_sign' not in locals():
            cdelt_sign = np.sign(cdelt)
        log.warning("cdelt_sign was not defined, which implies we are "
                    "overwriting a previously-existing file.  "
                    "This may not be what you want; the data could be going "
                    "opposite the parent cube.  Check that the original "
                    "header is OK. sign(CDELT) is now {0}, "
                    "while for the big header it is {1}".format(
                        cdelt_sign,
                        np.sign(fits.getheader(big_filename)['CDELT3'])))

        if cropends:
            # don't crop 1st or last pixel in full cube
            if ind0 > 0:
                log.debug("ind0 going from {0} to {1}".format(
                    ind0, ind0 + cropends))
                ind0 = ind0 + cropends
                if cdelt_sign == 1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
                else:
                    dataind1 = -cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
            else:
                if cdelt_sign == 1:
                    dataind0 = 0
                    log.debug("dataind0 going to {0}".format(0))
                elif cdelt_sign == -1:
                    log.debug("dataind1 going to {0}".format(None))
                    dataind1 = None

            if (ind1 < nchans_total[spwnum] - 1):
                log.debug("ind1 going from {0} to {1}".format(
                    ind1, ind1 - cropends))
                ind1 = ind1 - cropends
                if cdelt_sign == 1:
                    dataind1 = -cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
                elif cdelt_sign == -1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
            else:
                if cdelt_sign == 1:
                    dataind1 = None
                else:
                    log.debug("dataind0 going to {0}".format(0))
                    dataind0 = 0
        else:
            dataind0 = 0
            dataind1 = None

        if cdelt_sign == -1:
            log.debug("Reversing indices from {0} {1} to ".format(ind0, ind1))
            ind1, ind0 = (nchans_total[spwnum] - ind0,
                          nchans_total[spwnum] - ind1)
            log.debug("{0} {1}".format(ind0, ind1))
            if ind0 < 0:
                ind0 = 0

        log.info("inds have been remapped to {0}, {1}".format(ind0, ind1))

        plane = hdul[0].data[ind0]
        if np.all(plane == 0) or overwrite_existing:
            log.info("Replacing indices {0}->{2} {1}".format(
                getinds(fn), fn, (ind0, ind1)))

            data = fits.getdata(fn)
            dwcs = wcs.WCS(fits.getheader(fn)).sub([wcs.WCSSUB_SPECTRAL])

            dataind1 = data.shape[0] + (dataind1 or 0)

            # handle the case where the indices do NOT match the cube...
            # this is a hack that should become unnecessary once the input
            # cubes are made correct.
            if np.abs(ind1 - ind0) < np.abs(dataind1 - dataind0):
                dataind1 = dataind0 + np.abs(ind1 - ind0)

            if cdelt_sign == -1:
                dataind0, dataind1 = dataind1, dataind0
                dwcs0 = dwcs.wcs_pix2world([dataind0 - 1], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1], 0)[0][0]
            else:
                dwcs0 = dwcs.wcs_pix2world([dataind0], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1 - 1], 0)[0][0]
            hwcs0 = main_wcs.wcs_pix2world([ind0], 0)[0][0]
            hwcs1 = main_wcs.wcs_pix2world([ind1 - 1], 0)[0][0]

            if not np.isclose(hwcs0, dwcs0, atol=0.5 * np.abs(cdelt), rtol=0):
                log.error(
                    "current data, big cube indices: {0},{1} and {2},{3}".
                    format(dataind0, dataind1, ind0, ind1))
                raise ValueError(
                    "World coordinates of first pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                    .format(dwcs0, hwcs0, dwcs0 - hwcs0,
                            (dwcs0 - hwcs0) / cdelt))
            if not np.isclose(hwcs1, dwcs1, atol=0.5 * np.abs(cdelt), rtol=0):
                log.error(
                    "current data, big cube indices: {0},{1} and {2},{3}".
                    format(dataind0, dataind1, ind0, ind1))
                raise ValueError(
                    "World coordinates of last pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                    .format(dwcs1, hwcs1, dwcs1 - hwcs1,
                            (dwcs1 - hwcs1) / cdelt))

            if 'slices' not in locals():
                if minimize:
                    log.info("Determining slices")
                    cube0 = SpectralCube.read(header_fn)
                    slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                            spatial_only=True)
                    log.info("Slices are {0}".format(slices))
                else:
                    slices = (slice(None), ) * 3

            if bmaj_limits is not None:
                log.info("Identifying acceptable beams")
                beamtable = fits.open(fn)[1]
                ok_beam = ((beamtable.data['BMAJ'] > bmaj_limits[0]) &
                           (beamtable.data['BMAJ'] < bmaj_limits[1]))
                data[~ok_beam] = np.nan
                log.info("Found {0} bad beams of {1}".format((~ok_beam).sum(),
                                                             ok_beam.size))

            if cdelt_sign == -1:
                if dataind1 == 0:
                    dataslice = slice(dataind0 - 1, None, -1)
                elif dataind1 >= 1:
                    dataslice = slice(dataind0 - 1, dataind1 - 1, -1)
                else:
                    raise ValueError("Something is wrong with dataind0")
            else:
                dataslice = slice(dataind0, dataind1, 1)
            log.info("Dataslice is {0}".format(dataslice))

            assert hdul[0].data[ind0:ind1].shape == data[dataslice, slices[1],
                                                         slices[2]].shape

            if not debug_mode:
                if add_beam_info:
                    log.info("Adding beam information")
                    beamtable = fits.open(fn)[1]
                    hdul[1].data[ind0:ind1] = beamtable.data[dataslice]

                log.info("Inserting data")
                hdul[0].data[ind0:ind1, :, :] = data[dataslice, slices[1],
                                                     slices[2]]
                log.info("Flushing")
                hdul.flush()
                log.info("Done with iteration for {0}".format(fn))

    if debug_mode:
        log.setLevel(lvl)
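
# For context: the function above pre-allocates the full-size output cube on
# disk without ever holding it in memory, by writing the header and then
# seeking to the last byte of the would-be data segment.  A minimal
# standalone sketch of that trick (filename and dimensions are illustrative,
# following the same approach as astropy's "large file" FAQ):
stub = np.zeros((1, 1, 1), dtype=np.float32)   # float32 gives BITPIX = -32
hdu = fits.PrimaryHDU(data=stub)
header = hdu.header
header['NAXIS1'] = 512
header['NAXIS2'] = 512
header['NAXIS3'] = 4096
header.tofile('empty_cube.fits', overwrite=True)
nbytes = (header['NAXIS1'] * header['NAXIS2'] * header['NAXIS3'] *
          abs(header['BITPIX']) // 8)
with open('empty_cube.fits', 'rb+') as fobj:
    # seek to the final byte of the data segment and write it; the OS
    # extends the file to full size without allocating the data in memory
    fobj.seek(len(header.tostring()) + nbytes - 1)
    fobj.write(b'\0')
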
def argparsing():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--file', dest='input_paths',
                        default=[get_path('gamma_test.simtel.gz')], nargs='*',
                        help='path to the input files to be combined for a '
                             'single charge resolution.')
    parser.add_argument('-O', '--origin', dest='origin', action='store',
                        choices=InputFile.origin_list(),
                        default='hessio', help='origin of the file')
    parser.add_argument('-t', '--telescope', dest='tel', action='store',
                        type=int, default=None, nargs='*',
                        help='list of telescopes to be included. '
                             'Default = All')
    parser.add_argument('-o', '--output', dest='output_path', action='store',
                        default=None,
                        help='path to store a pdf output of the plots. '
                             'default = display on screen instead')
    parser.add_argument('--comparison', dest='comparison', action='store',
                        default=None,
                        help='output path for a True Charge versus Measured '
                             'Charge graph. Default = do not plot graph')
    parser.add_argument('-M', '--maxpe', dest='maxpe', action='store',
                        default=None, type=float,
                        help='maximum pe to calculate the charge resolution'
                             ' up to. Default = maximum pe in file')
    parser.add_argument('--maxpeplot', dest='maxpeplot', action='store',
                        default=None, type=float,
                        help='maximum pe to plot up to. Default = maxpe')
    parser.add_argument('-B', '--binning', dest='binning', action='store',
                        default="log", choices=['none', 'normal', 'log'],
                        help='binning of the charge resolution graph: '
                             '"none" = no binning, "normal" = bin, '
                             '"log" = bin in log space.')
    parser.add_argument('--normalx', dest='normalx', action='store_true',
                        default=False,
                        help='Use a normal x axis instead of the default log '
                             'axis')
    parser.add_argument('--normaly', dest='normaly', action='store_true',
                        default=False,
                        help='Use a normal y axis instead of the default log '
                             'axis')
    parser.add_argument('-E', '--example', dest='example', action='store_true',
                        default=False,
                        help='print an example runcard')
    parser.add_argument('-R', '--runcard', dest='runcard', action='store',
                        default=None,
                        help='path to a runcard text file with arguments that '
                             'override command line arguments. This run card '
                             'can allow complex combinations of files and '
                             'calibrations to compare the charge resolution '
                             'against each other.')
    parser.add_argument('--chargeres-names', dest='chargeres_names',
                        default=['default'], nargs='*',
                        help='chargeres calculations to include in the plot. '
                             'Only used for runcards.')
    parser.add_argument('--calib-help', dest='calib_help', action='store_true',
                        default=False,
                        help='display the arguments used for the camera '
                             'calibration')

    logger_detail = parser.add_mutually_exclusive_group()
    logger_detail.add_argument('-q', '--quiet', dest='quiet',
                               action='store_true', default=False,
                               help='Quiet mode')
    logger_detail.add_argument('-v', '--verbose', dest='verbose',
                               action='store_true', default=False,
                               help='Verbose mode')
    logger_detail.add_argument('-d', '--debug', dest='debug',
                               action='store_true', default=False,
                               help='Debug mode')

    args, excess_args = parser.parse_known_args()

    if args.quiet:
        log.setLevel(40)
    if args.verbose:
        log.setLevel(20)
    if args.debug:
        log.setLevel(10)

    if args.calib_help:
        params, unknown_args = calibration_parameters(excess_args,
                                                      args.origin,
                                                      args.calib_help)

    if args.example:
        print("""
# Each charge resolution block starts with [chargeres] and the names for
# this charge resolution.
# The options in each block are equivalent to the script's help message.
# Options that seem to apply to plotting will only have effect in a
# plotting block.

[chargeres] test_file_local
#-f gamma_test.simtel.gz
-f ~/Software/outputs/sim_telarray/simtel_run4_gcts_hnsb.gz
-O hessio
--integrator 4
--integration-window 7 3
--integration-sigamp 2 4
--integration-lwt 0
--integration-calib_scale 1.05
--comparison ~/Downloads/test_file_local.pdf


# A second charge resolution block to also calculate the resolution with
# a different integrator so the two resolutions can be plotted against
# each other.

[chargeres] test_file_nei
#-f gamma_test.simtel.gz
-f ~/Software/outputs/sim_telarray/simtel_run4_gcts_hnsb.gz
-O hessio
--integrator 5
--integration-window 7 3
--integration-sigamp 2 4
--integration-lwt 0
--integration-calib_scale 1.05
--comparison ~/Downloads/test_file_nei.pdf

# A plotting block configures an output plot

[plot] normal_plot
--chargeres-names test_file_local test_file_nei
-o ~/Downloads/normal_plot.pdf
--binning normal
--normalx
--normaly

[plot] log_plot
--chargeres-names test_file_local test_file_nei
-o ~/Downloads/log_plot.pdf""")
        exit()

    chargeres_cmdlines = {}
    plot_cmdlines = {}

    if args.runcard is None:
        name = args.chargeres_names[0]
        chargeres_cmdlines[name] = sys.argv[1:]
        plot_cmdlines[name] = sys.argv[1:]
        chargeres_args = {name: args}
        plot_args = {name: args}
    else:
        chargeres_args = {}
        plot_args = {}
        current = None
        runcard = open(args.runcard)
        for line in runcard:
            if line.strip() and not line.startswith('#'):
                argument = line.split()[0]
                if argument == '[chargeres]':
                    name = line.split()[1]
                    chargeres_cmdlines[name] = []
                    current = chargeres_cmdlines[name]
                    continue
                elif argument == '[plot]':
                    name = line.split()[1]
                    plot_cmdlines[name] = []
                    current = plot_cmdlines[name]
                    continue
                current.extend(line.split())

        # Temp fill for checks
        for name, cmdline in chargeres_cmdlines.items():
            chargeres_args[name], unknown = parser.parse_known_args(cmdline)
        for name, cmdline in plot_cmdlines.items():
            plot_args[name], unknown = parser.parse_known_args(cmdline)

    # Check all chargeres_names exist
    for plot_name, args in plot_args.items():
        for name in args.chargeres_names:
            if name not in chargeres_args:
                log.error("[chargeres_names] For plot: {}, no chargeres "
                          "has the name: {}".format(plot_name, name))
                raise KeyError(name)

    return parser, chargeres_cmdlines, plot_cmdlines
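
# For reference, a minimal standalone sketch of the runcard block-splitting
# logic used above, outside argparse (block and option names illustrative):
runcard_lines = [
    '[chargeres] demo',
    '-f gamma_test.simtel.gz',
    '[plot] demo_plot',
    '--chargeres-names demo',
]
blocks = {}
current = None
for line in runcard_lines:
    if line.startswith('['):
        current = blocks.setdefault(line.split()[1], [])
    else:
        current.extend(line.split())
# blocks == {'demo': ['-f', 'gamma_test.simtel.gz'],
#            'demo_plot': ['--chargeres-names', 'demo']}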
import os
import sys
import time
import multiprocessing
from collections import OrderedDict
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

from astropy import log
from astropy.io import fits
from astropy.utils.console import ProgressBar


log.setLevel("INFO")

# Configuration constants
# Local directory containing a mirror of MAST TPF files:
DATA_STORE = "/media/gb/kdata/k2/target_pixel_files"
TMPDIR = "/tmp/"   # Location to download temporary files from MAST if needed
MAX_ATTEMPTS = 50  # How many times do we try to obtain & open a file?
SLEEP_BETWEEN_ATTEMPTS = 30  # seconds
IGNORE_SHORT_CADENCE = False
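
# A hedged sketch of the retry loop the constants above imply (the real
# download/open logic lives in the class below; this helper is illustrative
# only):
def _open_with_retries(url, local_fn=TMPDIR + "tpf_download.fits"):
    """Hypothetical helper: download `url` to TMPDIR and open it with
    astropy.io.fits, retrying on any failure."""
    for attempt in range(MAX_ATTEMPTS):
        try:
            with open(local_fn, 'wb') as out:
                out.write(urlopen(url).read())
            return fits.open(local_fn)
        except Exception:
            log.warning("Attempt {0}/{1} failed for {2}".format(
                attempt + 1, MAX_ATTEMPTS, url))
            time.sleep(SLEEP_BETWEEN_ATTEMPTS)
    raise IOError("Could not open {0} after {1} attempts".format(
        url, MAX_ATTEMPTS))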


class TargetPixelFile(object):
    """Represent a Target Pixel File (TPF) as obtained from the MAST archive.

    Parameters
    ----------
Beispiel #45
0
# Standard library
from os import path, walk, remove, makedirs, sep
import re

# Third-party
from astropy import log as logger
logger.setLevel('INFO')

from nbconvert.preprocessors import ExecutePreprocessor, CellExecutionError
from nbconvert.exporters import RSTExporter
from nbconvert.writers import FilesWriter
import nbformat

IPYTHON_VERSION = 4

def clean_keyword(kw):
    """Given a keyword parsed from the header of one of the tutorials, return
    a 'cleaned' keyword that can be used by the filtering machinery.

    - Title-cases each word, so word boundaries survive as capital letters
    - Removes '.', '/', and spaces
    """
    return kw.strip().title().replace('.', '').replace('/', '').replace(' ', '')
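
# Minimal usage sketch (outputs checked against the implementation above;
# .title() capitalizes each word before the separators are stripped):
assert clean_keyword('coordinates') == 'Coordinates'
assert clean_keyword('FITS image / WCS') == 'FitsImageWcs'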

class NBTutorialsConverter(object):

    def __init__(self, nb_path, output_path=None, template_file=None,
                 overwrite=False, kernel_name=None):
        self.nb_path = path.abspath(nb_path)
        fn = path.basename(self.nb_path)
        self.path_only = path.dirname(self.nb_path)
    def run(self, **kwargs):
        args = self._parse_args()

        for k,v in kwargs.items():
            if hasattr(args, k):
                # overwrite with kwarg value
                setattr(args, k, v)

        np.random.seed(args.seed)

        if args.config_filename is None:
            raise ValueError("You must define 'config_filename.'")

        # Set logger level based on verbose flags
        if args.verbose:
            logger.setLevel(logging.DEBUG)
        elif args.quiet:
            logger.setLevel(logging.ERROR)
        else:
            logger.setLevel(logging.INFO)

        # if MPI, use load balancing
        if args.mpi:
            kwargs = dict(loadbalance=True)
        else:
            kwargs = dict()

        # get a pool object for multiprocessing / MPI
        pool = get_pool(mpi=args.mpi, **kwargs)
        if args.mpi:
            logger.info("|----------- Using MPI -----------|")
        else:
            logger.info("|----------- Running in serial -----------|")

        if args.index is None:
            index = None
        else:
            try:
                index = slice(*map(int, args.index.split(":")))
            except (ValueError, TypeError):
                try:
                    index = np.array([int(x) for x in args.index.split(",")])
                except (ValueError, TypeError):
                    index = None

        # Instantiate the experiment class
        with self.ExperimentClass.from_config(cache_path=args.path,
                                              config_filename=args.config_filename,
                                              overwrite=args.overwrite) as experiment:
            experiment._ensure_cache_exists()

            if index is None:
                indices = np.arange(experiment.norbits, dtype=int)
            else:
                indices = np.arange(experiment.norbits, dtype=int)[index]

            try:
                pool.map(experiment, indices, callback=experiment.callback)
            except Exception:
                pool.close()
                logger.error("Unexpected error!")
                raise
            else:
                pool.close()
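
# A standalone sketch of the "--index" parsing used in run() above: the
# flag accepts a slice spec like "10:20" or a comma-separated list like
# "1,5,9" (parse_index is a hypothetical helper, not part of the class):
def parse_index(spec):
    if spec is None:
        return None
    try:
        return slice(*map(int, spec.split(":")))
    except ValueError:
        try:
            return np.array([int(x) for x in spec.split(",")])
        except ValueError:
            return None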
Beispiel #47
0
import timeit
import numpy as np
import textwrap
import warnings
warnings.filterwarnings('ignore')
from astropy import log
log.setLevel(1000)

# Check correctness before doing timing tests
import pyradex
py_pop = [pyradex.pyradex(collider_densities={'oH2':900,'pH2':100},column=n, temperature=20)['pop_up'][0]
          for n in 10**np.arange(12,18)]

R = pyradex.Radex(collider_densities={'oH2':900,'pH2':100}, column=1e15, temperature=20)

R_noreload_pop = []
for n in 10**np.arange(12,18):
    R.column = n
    R.run_radex(reload_molfile=False, validate_colliders=False)
    R_noreload_pop.append(R.level_population[1])

R_pop = []
for n in 10**np.arange(12,18):
    R.column = n
    R.run_radex(reload_molfile=True, validate_colliders=True)
    R_pop.append(R.level_population[1])

R_reuse_pop = []
for n in 10**np.arange(12,18):
    R.column = n
    R.run_radex(reload_molfile=False, validate_colliders=False, reuse_last=True)
    R_reuse_pop.append(R.level_population[1])
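
# The timeit import above suggests a timing comparison; a hedged sketch of
# how the reload vs. no-reload calls might be timed (repeat count arbitrary):
t_reload = timeit.timeit(
    lambda: R.run_radex(reload_molfile=True, validate_colliders=True),
    number=10)
t_noreload = timeit.timeit(
    lambda: R.run_radex(reload_molfile=False, validate_colliders=False),
    number=10)
print("reload: {0:.2f}s   no-reload: {1:.2f}s".format(t_reload, t_noreload))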
Beispiel #48
0

def bandmerge(clusterview):
    """Band-merge all fields."""
    util.setup_dir(MYDESTINATION)

    # Spread the work across the cluster
    field_ids = IPHASQC.field('id')
    results = clusterview.imap(bandmerge_one, field_ids)

    # Print a friendly message once in a while
    i = 0
    for status in results:
        i += 1
        if (i % 1000) == 0:
            log.info('Completed field {0}/{1}'.format(i, len(field_ids)))
    log.info('Bandmerging finished')



################################
# MAIN EXECUTION (FOR DEBUGGING)
################################

if __name__ == '__main__':
    if constants.DEBUGMODE:
        log.setLevel('INFO')
        #run_all(lon1=lon1, lon2=lon2, ncores=4)
        #bandmerge_one('5089o_jun2005')
        #bandmerge_one('3561_nov2003')
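        # For context, a hedged sketch of how a `clusterview` for
        # bandmerge() is typically obtained with ipyparallel (wired up
        # elsewhere in the original pipeline):
        #from ipyparallel import Client
        #clusterview = Client().load_balanced_view()
        #bandmerge(clusterview)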
Beispiel #49
0
def combine1fiber(inloglam, objflux, newloglam, objivar=None, verbose=False,
                  **kwargs):
    """Combine several spectra of the same object, or resample a single spectrum.

    Parameters
    ----------
    inloglam : :class:`numpy.ndarray`
        Vector of log wavelength.
    objflux : :class:`numpy.ndarray`
        Input flux.
    newloglam : :class:`numpy.ndarray`
        Output wavelength pixels, vector of log wavelength.
    objivar : :class:`numpy.ndarray`, optional
        Inverse variance of the flux.
    verbose : :class:`bool`, optional
        If ``True``, set log level to DEBUG.

    Returns
    -------
    :func:`tuple` of :class:`numpy.ndarray`
        The resulting flux and inverse variance.

    Raises
    ------
    :exc:`ValueError`
        If input dimensions don't match.
    """
    #
    # Log
    #
    # log.enable_warnings_logging()
    if verbose:
        log.setLevel('DEBUG')
    #
    # Check that dimensions of inputs are valid.
    #
    npix = inloglam.size
    nfinalpix = len(newloglam)
    if objflux.shape != inloglam.shape:
        raise ValueError('Dimensions of inloglam and objflux do not agree.')
    if objivar is not None:
        if objivar.shape != inloglam.shape:
            raise ValueError('Dimensions of inloglam and objivar do not agree.')
    if 'finalmask' in kwargs:
        if kwargs['finalmask'].shape != inloglam.shape:
            raise ValueError('Dimensions of inloglam and finalmask do not agree.')
    if 'indisp' in kwargs:
        if kwargs['indisp'].shape != inloglam.shape:
            raise ValueError('Dimensions of inloglam and indisp do not agree.')
    #
    # Set defaults
    #
    EPS = np.finfo(np.float32).eps
    if 'binsz' in kwargs:
        binsz = kwargs['binsz']
    else:
        if inloglam.ndim == 2:
            binsz = inloglam[0, 1] - inloglam[0, 0]
        else:
            binsz = inloglam[1] - inloglam[0]
    if 'nord' in kwargs:
        nord = kwargs['nord']
    else:
        nord = 3
    if 'bkptbin' in kwargs:
        bkptbin = kwargs['bkptbin']
    else:
        bkptbin = 1.2 * binsz
    if 'maxsep' in kwargs:
        maxsep = kwargs['maxsep']
    else:
        maxsep = 2.0 * binsz
    if inloglam.ndim == 1:
        #
        # Set specnum = 0 for all elements
        #
        nspec = 1
        specnum = np.zeros(inloglam.shape, dtype=inloglam.dtype)
    else:
        nspec, ncol = inloglam.shape
        specnum = np.tile(np.arange(nspec), ncol).reshape(ncol, nspec).transpose()
    #
    # Use fullcombmask for modifying the pixel masks in the original input files.
    #
    fullcombmask = np.zeros(npix)
    newflux = np.zeros(nfinalpix, dtype=inloglam.dtype)
    newmask = np.zeros(nfinalpix, dtype='i4')
    newivar = np.zeros(nfinalpix, dtype=inloglam.dtype)
    newdisp = np.zeros(nfinalpix, dtype=inloglam.dtype)
    newsky = np.zeros(nfinalpix, dtype=inloglam.dtype)
    newdispweight = np.zeros(nfinalpix, dtype=inloglam.dtype)
    if objivar is None:
        nonzero = np.arange(npix, dtype='i4')
        ngood = npix
    else:
        nonzero = (objivar.ravel() > 0).nonzero()[0]
        ngood = nonzero.size
    #
    # ormask is needed to create andmask
    #
    andmask = np.zeros(nfinalpix, dtype='i4')
    ormask = np.zeros(nfinalpix, dtype='i4')
    if ngood == 0:
        #
        # In this case of no good points, set the nodata bit everywhere.
        # Also if noplug is set in the first input bit-mask, assume it
        # should be set everywhere in the output bit masks.  No other bits
        # are set.
        #
        warn('No good points!', Pydlspec2dUserWarning)
        bitval = sdss_flagval('SPPIXMASK', 'NODATA')
        if 'finalmask' in kwargs:
            bitval |= (sdss_flagval('SPPIXMASK', 'NOPLUG') *
                       (kwargs['finalmask'][0] &
                        sdss_flagval('SPPIXMASK', 'NODATA')))
        andmask = andmask | bitval
        ormask = ormask | bitval
        return (newflux, newivar)
    else:
        #
        # Now let's break sorted wavelengths into groups where pixel
        # separations are larger than maxsep.
        #
        inloglam_r = inloglam.ravel()
        isort = nonzero[inloglam_r[nonzero].argsort()]
        wavesort = inloglam_r[isort]
        padwave = np.insert(wavesort, 0, wavesort.min() - 2.0*maxsep)
        padwave = np.append(padwave, wavesort.max() + 2.0*maxsep)
        ig1 = ((padwave[1:ngood+1]-padwave[0:ngood]) > maxsep).nonzero()[0]
        ig2 = ((padwave[2:ngood+2]-padwave[1:ngood+1]) > maxsep).nonzero()[0]
        if ig1.size != ig2.size:
            raise ValueError('Grouping tricks did not work!')
        #
        # Avoid flux-dependent bias when combining multiple spectra.
        # This call to djs_median contains a width that is both floating-point
        # and even, which is very strange.
        #
        if objivar is not None and objivar.ndim > 1:
            saved_objivar = objivar
            for spec in range(nspec):
                igood = (objivar[spec, :] > 0).nonzero()[0]
                if igood.size > 0:
                    # objivar[spec, igood] = djs_median(saved_objivar[spec, igood], width=100.)
                    objivar[spec, igood] = djs_median(saved_objivar[spec, igood], width=101)
        else:
            saved_objivar = None
        for igrp in range(ig1.size):
            ss = isort[ig1[igrp]:ig2[igrp]+1]
            if ss.size > 2:
                if objivar is None:
                    #
                    # Fit without variance
                    #
                    sset, bmask = iterfit(inloglam_r[ss],
                                          objflux.ravel()[ss],
                                          nord=nord, groupbadpix=True,
                                          requiren=1, bkspace=bkptbin,
                                          silent=True)
                else:
                    #
                    # Fit with variance
                    #
                    sset, bmask = iterfit(inloglam_r[ss],
                                          objflux.ravel()[ss],
                                          invvar=objivar.ravel()[ss],
                                          nord=nord, groupbadpix=True,
                                          requiren=1, bkspace=bkptbin,
                                          silent=True)
                if np.sum(np.absolute(sset.coeff)) == 0:
                    sset = None
                    bmask = np.zeros(len(ss))
                    warn('All B-spline coefficients have been set to zero!',
                         Pydlspec2dUserWarning)
            else:
                bmask = np.zeros(len(ss))
                sset = None
                warn('Not enough data for B-spline fit!', Pydlspec2dUserWarning)
            inside = ((newloglam >= (inloglam_r[ss]).min()-EPS) &
                      (newloglam <= (inloglam_r[ss]).max()+EPS)).nonzero()[0]
            #
            # It is possible for numinside to be zero, if the input data points
            # span an extremely small wavelength range, within which there are
            # no output wavelengths.
            #
            if sset is not None and len(inside) > 0:
                newflux[inside], bvalumask = sset.value(newloglam[inside])
                if bvalumask.any():
                    newmask[inside[bvalumask]] = 1
                log.debug('Masked {0:d} of {1:d} pixels.'.format(
                    (bmask == 0).sum(), bmask.size))
                #
                # Determine which pixels should be masked based upon the spline
                # fit. Set the combinerej bit.
                #
                ireplace = ~bmask
                if ireplace.any():
                    #
                    # The following would replace the original flux values of
                    # masked pixels with b-spline evaluations.
                    #
                    # objflux[ss[ireplace]] = sset.value(inloglam[ss[ireplace]])
                    #
                    # Set the inverse variance of these pixels to zero.
                    #
                    if objivar is not None:
                        objivar.ravel()[ss[ireplace]] = 0.0
                        log.debug('Replaced {0:d} pixels in objivar.'.format(len(ss[ireplace])))
                    if 'finalmask' in kwargs:
                        kwargs['finalmask'][ss[ireplace]] |= sdss_flagval(
                            'SPPIXMASK', 'COMBINEREJ')
            fullcombmask[ss] = bmask
        #
        # Restore objivar
        #
        if saved_objivar is not None:
            objivar = saved_objivar * (objivar > 0)
        #
        # Combine inverse variance and pixel masks.
        #
        # Start with all bits set in andmask
        #
        andmask[:] = -1
        for j in range(int(specnum.max())+1):
            these = (specnum.ravel() == j).nonzero()[0]
            if these.any():
                inbetween = ((newloglam >= inloglam_r[these].min()) &
                             (newloglam <= inloglam_r[these].max()))
                if inbetween.any():
                    jnbetween = inbetween.nonzero()[0]
                    #
                    # Conserve inverse variance by doing a linear interpolation
                    # on that quantity.
                    #
                    result = np.interp(newloglam[jnbetween], inloglam_r[these],
                                       (objivar.ravel()[these] *
                                        fullcombmask[these]))
                    #
                    # Grow the fullcombmask below to reject any new sampling
                    # containing even a partial masked pixel.
                    #
                    smask = np.interp(newloglam[jnbetween], inloglam_r[these],
                                      fullcombmask[these].astype(inloglam.dtype))
                    result *= smask >= (1.0 - EPS)
                    newivar[jnbetween] += result*newmask[jnbetween]
                lowside = np.floor((inloglam_r[these]-newloglam[0])/binsz).astype('i4')
                highside = lowside + 1
                if 'finalmask' in kwargs:
                    andmask[lowside] &= kwargs['finalmask'][these]
                    andmask[highside] &= kwargs['finalmask'][these]
                    ormask[lowside] |= kwargs['finalmask'][these]
                    ormask[highside] |= kwargs['finalmask'][these]
                #
                # Combine the dispersions + skies in the dumbest way possible
                # [sic].
                #
                if 'indisp' in kwargs:
                    newdispweight[jnbetween] += result
                    newdisp[jnbetween] += (result *
                                           np.interp(newloglam[jnbetween],
                                                     inloglam_r[these],
                                                     kwargs['indisp'].ravel()[these]))
                    # NOTE: a 'skyflux' kwarg matching 'indisp' is assumed
                    # here; it is not validated above.
                    newsky[jnbetween] += (result *
                                          np.interp(newloglam[jnbetween],
                                                    inloglam_r[these],
                                                    kwargs['skyflux'].ravel()[these]))
        if 'indisp' in kwargs:
            newdisp /= newdispweight + (newdispweight == 0)
            newsky /= newdispweight + (newdispweight == 0)
    #
    # Grow regions where 3 or more pixels are rejected together ???
    #
    foo = smooth(newivar, 3)
    badregion = np.absolute(foo) < EPS
    # badregion = foo == 0.0
    if badregion.any():
        warn('Growing bad pixel region, {0:d} pixels found.'.format(badregion.sum()),
             Pydlspec2dUserWarning)
        ibad = badregion.nonzero()[0]
        lowerregion = np.where(ibad-2 < 0, 0, ibad-2)
        upperregion = np.where(ibad+2 > nfinalpix-1, nfinalpix-1, ibad+2)
        newivar[lowerregion] = 0.0
        newivar[upperregion] = 0.0
    #
    # Replace NaNs in combined spectra; this should really never happen.
    #
    inff = ((~np.isfinite(newflux)) | (~np.isfinite(newivar)))
    if inff.any():
        warn('{0:d} NaNs in combined spectra.'.format(inff.sum()),
             Pydlspec2dUserWarning)
        newflux[inff] = 0.0
        newivar[inff] = 0.0
    #
    # Interpolate over masked pixels, just for aesthetic purposes.
    #
    goodpts = newivar > 0
    if 'aesthetics' in kwargs:
        amethod = kwargs['aesthetics']
    else:
        amethod = 'traditional'
    newflux = aesthetics(newflux, newivar, method=amethod)
    # if 'interpolate' in kwargs:
    #     newflux = pydlutils.image.djs_maskinterp(newflux,~goodpts,const=True)
    # else:
    #     newflux[~goodpts] = newflux[goodpts].mean()
    if goodpts.any():
        minglam = newloglam[goodpts].min()
        maxglam = newloglam[goodpts].max()
        ibad = ((newloglam < minglam) | (newloglam > maxglam))
        if ibad.any():
            ormask[ibad] |= sdss_flagval('SPPIXMASK', 'NODATA')
            andmask[ibad] |= sdss_flagval('SPPIXMASK', 'NODATA')
    #
    # Replace values of -1 in the andmask with 0.
    #
    andmask *= (andmask != -1)
    return (newflux, newivar)
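
# Minimal usage sketch for combine1fiber: resample a single synthetic
# spectrum onto a new log-wavelength grid (array values are illustrative;
# assumes the module-level imports of the surrounding file, e.g. numpy):
inloglam = np.linspace(3.55, 3.95, 2000)
objflux = np.exp(-0.5 * ((inloglam - 3.75) / 0.01)**2)
objivar = np.ones_like(objflux)
newloglam = np.linspace(3.56, 3.94, 1000)
newflux, newivar = combine1fiber(inloglam, objflux, newloglam,
                                 objivar=objivar)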
from __future__ import print_function
from reproject.reproject import reproject as pyreproject
from FITS_tools.hcongrid import hcongrid_hdu, wcsalign
from montage_wrapper import reproject_hdu as montage

import warnings
warnings.filterwarnings('ignore')

# should be relative imports, but I want to use this with %run...
import headers
import data
import tempfile

from astropy.io import fits
from astropy import log
log.setLevel("WARN")
#log.disable_warnings_logging()
#log.disable_exception_logging()

import numpy as np

import timeit

reproject_cmds = ('pyreproject','montage_hdu','hcongrid_hdu','wcsalign')

def montage_hdu(hdu_in, hdr_out):
    hdr = tempfile.NamedTemporaryFile()
    hdr_out.totextfile(hdr.name)
    mon = montage(hdu_in, header=hdr.name, silence=True)
    hdr.close()
    return mon
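
# A hedged sketch of how the functions named in `reproject_cmds` might be
# timed (assumes an `hdu_in`/`hdr_out` pair prepared from the headers/data
# helper modules imported above; repeat count arbitrary):
def time_reproject(func, hdu_in, hdr_out, number=3):
    return timeit.timeit(lambda: func(hdu_in, hdr_out), number=number)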

def main(mpi=False):
    pool = get_pool(mpi=mpi)

    # regular orbit, so variance should be small, but appears large due to aliasing...
    tasks = []
    for p in [1,2,3,4]:
        for window_width in 2**np.arange(5,8+1,1):
            for nsteps_per_period in 2**np.arange(8, 14+1, 2):
                tasks.append((p,window_width,nsteps_per_period))

    pool.map(worker, tasks)
    pool.close()
    sys.exit(0)


if __name__ == "__main__":
    from argparse import ArgumentParser
    import logging
    parser = ArgumentParser()
    parser.add_argument("--mpi", dest="mpi", default=False, action="store_true",
                        help="Use an MPI pool.")
    args = parser.parse_args()

    logger.setLevel(logging.INFO)
    main(args.mpi)
from astropy.table import Table, Column
from astropy import units as u
from astropy import coordinates
import powerlaw
import pylab as pl
from astropy.io import fits
from astropy import wcs
from matplotlib.patches import Circle
import matplotlib
import os
from astropy import log
import contextlib
import paths  # local helper module (assumed from the original project) providing tpath()

devnull = open(os.devnull, 'w')

log.setLevel('CRITICAL')

pl.style.use('classic')
pl.matplotlib.rc_file('pubfiguresrc')
pl.mpl.rcParams['font.size'] = 14.0
pl.mpl.rcParams['axes.titlesize'] = 16.0
pl.mpl.rcParams['axes.labelsize'] = 15.0
pl.mpl.rcParams['axes.prop_cycle'] = matplotlib.cycler(
    'color', ('338ADD', '9A44B6', 'A60628', '467821', 'CF4457', '188487',
              'E24A33', 'b', 'r', 'g', 'm', 'k'))

core_velo_tbl = Table.read(paths.tpath("core_velocities.ipac"),
                           format="ascii.ipac")
core_phot_tbl = Table.read(paths.tpath("continuum_photometry.ipac"),
                           format='ascii.ipac')
cores_merge = Table.read(paths.tpath('core_continuum_and_line.ipac'),
                         format='ascii.ipac')
Beispiel #53
0
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--procs", dest="n_procs", default=1,
                       type=int, help="Number of processes.")
    group.add_argument("--mpi", dest="mpi", default=False,
                       action="store_true", help="Run with MPI.")

    parser.add_argument("--id", dest="apogee_id", default=None, required=True,
                        type=str, help="APOGEE ID")
    parser.add_argument("-n", "--num-samples", dest="n_samples", default=2**20,
                        type=str, help="Number of prior samples.")

    args = parser.parse_args()

    # Set logger level based on verbose flags
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.INFO)

    np.random.seed(args.seed)
    if args.mpi:
        logger.info("Running with MPI")
        _kwargs = {'pool': 'MPIPool'}
    elif args.n_procs != 0:
        logger.info("Running with multiprocessing on {} cores".format(args.n_procs))
        _kwargs = {'pool': 'MultiPool', 'processes': args.n_procs}
    else:
        logger.info("Running serial")
        _kwargs = {'pool': 'SerialPool'}
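
# Presumably these _kwargs select a pool implementation downstream; a hedged
# sketch of such a dispatch using schwimmbad (an assumption -- the original
# script may construct its pools differently):
def get_pool_from_kwargs(_kwargs):
    from schwimmbad import MPIPool, MultiPool, SerialPool
    pools = {'MPIPool': MPIPool, 'MultiPool': MultiPool,
             'SerialPool': SerialPool}
    pool_cls = pools[_kwargs['pool']]
    extra = {k: v for k, v in _kwargs.items() if k != 'pool'}
    return pool_cls(**extra)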
Beispiel #54
0
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                        default=False, help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("-n", "--nameregex", default=None,
                        help="A regular expression to select the names of the "
                             "notebooks to be processed.  If not given, all "
                             "notebooks will be used.")

    parser.add_argument('action', nargs='+', choices=['run', 'convert'],
                        help='The action(s) to take when running the script. '
                             '"run" means to just run the notebooks, while '
                             '"convert" will use nbconvert to convert them '
                             'to HTML.')

    args = parser.parse_args()

    # Set logger level based on verbose flags
    if args.verbose:
        logger.setLevel('DEBUG')
    elif args.quiet:
        logger.setLevel('ERROR')
    else:
        logger.setLevel('INFO')

    for action in args.action:
        if action == 'run':
            run_notebooks(args.nameregex)
        elif action == 'convert':
            convert_notebooks(args.nameregex)
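
# For context, a hedged sketch of the name filtering the --nameregex flag
# implies (run_notebooks/convert_notebooks are defined elsewhere in the
# original script; this helper is illustrative):
import re

def filter_notebooks(paths, nameregex=None):
    if nameregex is None:
        return list(paths)
    return [p for p in paths if re.search(nameregex, p)]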
Beispiel #55
0
#! /usr/bin/env python
import argparse
from ccdproc import CCDData
from joblib import Parallel, delayed
import os.path
from astropy.time import Time
from astropy import log
log.setLevel('WARNING') # CCDData throws too much info at you
try:
    from astroscrappy import detect_cosmics
    from ccdproc import cosmicray_lacosmic
    CLNMTHD = 'ccdproc.cosmicray_lacosmic'
except ImportError:
    from cosmics_wrapper import cosmicray_lacosmic
    CLNMTHD = 'cosmics.cosmicray_lacosmic'
    

def main():
    parser = argparse.ArgumentParser(description='Perform LACosmic cleaning of images')
    parser.add_argument('filenames',nargs='+',help='List of files to clean.')
    parser.add_argument('-odir',metavar='outdir',required=True,type=str,help='Output directory for files.')
    #parser.add_argument('-mode',choices=['lacosmic','median'],default='lacosmic',help='Specify mode of operation (default=lacosmic)')
    parser.add_argument('-sclip',metavar='sigclip',type=float,default=5,help='Laplacian-to-noise limit for cosmic ray detection. Lower values will flag more pixels as cosmic rays (default=5).')
    parser.add_argument('-sfrac',metavar='sigfrac',type=float,default=0.3,help='Fractional detection limit for neighboring pixels. For cosmic ray neighbor pixels, a Laplacian-to-noise detection limit of sigfrac * sigclip will be used. (default=0.3).')
    parser.add_argument('-objlim',type=float,default=5,help='Minimum contrast between Laplacian image and the fine structure image. Increase this value if cores of bright stars are flagged as cosmic rays (default=5).')
    parser.add_argument('-satlevel',type=float,default=65535,help='Saturation level of the image (electrons). This value is used to detect saturated stars and pixels at or above this level are added to the mask (default=65535)')
    parser.add_argument('-niter',type=int,default=5,help='Number of iterations of the LA Cosmic algorithm to perform (default=5).')
    #parser.add_argument('-thresh',metavar='threshold',type=float,default=5,help='Threshold for detecting cosmic rays [median] (default=5).')
    #parser.add_argument('-mbox',type=float,default=11,help='Median box for detecting cosmic rays [mbox] (default=11).')
    parser.add_argument('-njobs',type=int,default=1,help='Process images in parallel. "-1" is all CPUs (default=1).')
    parser.add_argument('--c',action='store_true',help='Clobber (overwrite) on output')
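
# A hedged sketch of the per-file worker this argument set implies (keyword
# names mirror ccdproc.cosmicray_lacosmic; the original script's actual
# worker may differ):
def clean_one(filename, args):
    ccd = CCDData.read(filename, unit='adu')
    cleaned = cosmicray_lacosmic(ccd, sigclip=args.sclip, sigfrac=args.sfrac,
                                 objlim=args.objlim, satlevel=args.satlevel,
                                 niter=args.niter)
    outname = os.path.join(args.odir, os.path.basename(filename))
    cleaned.write(outname, overwrite=args.c)
    return outname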
Beispiel #56
0
#print(c.ND_params)
##[[-3.32707525e-01 -3.17529953e-01]
## [ 1.23915169e+03  1.40014752e+03]]
#

#oc = CorObsData(fname, default_ND_params=RUN_LEVEL_DEFAULT_ND_PARAMS)
#print(c.ND_params - oc.ND_params)
#print(c.ND_angle - oc.ND_angle)
#print(c.desired_center - oc.desired_center)
#print(c.obj_center - oc.obj_center)
#print(c.obj_center, c.desired_center)
#c.write('/tmp/test.fits', overwrite=True)

if __name__ == '__main__':
    from IoIO_working_corobsdata.IoIO import CorObsData
    log.setLevel('DEBUG')
    old_default_ND_params \
        = [[  3.63686271e-01,   3.68675375e-01],
           [  1.28303305e+03,   1.39479846e+03]]
    fname = '/data/io/IoIO/raw/2021-04_Astrometry/Jupiter_ND_centered.fit'
    #fname = '/data/io/IoIO/raw/2021-04-25/Sky_Flat-0001_R.fit'
    c = CorData.read(fname)  #, plot_ND_edges=True)
    oc = CorObsData(fname, default_ND_params=RUN_LEVEL_DEFAULT_ND_PARAMS)
    print(c.ND_params - oc.ND_params)
    print(c.ND_angle - oc.ND_angle)
    print(c.desired_center - oc.desired_center)
    print(c.obj_center - oc.obj_center)
    print(c.obj_center, c.desired_center)
    c.write('/tmp/test.fits', overwrite=True)

    #log.setLevel('DEBUG')
Beispiel #57
0
import logging

import numpy as np
from astropy import log as logger
from scipy.linalg import solve
import pytest

# Project
from ...integrate import DOPRI853Integrator
from ...potential import (IsochronePotential, HarmonicOscillatorPotential,
                          LeeSutoTriaxialNFWPotential)
from ...units import galactic
from ..actionangle import *
from ..core import *
from ..plot import *
from .helpers import *

logger.setLevel(logging.ERROR)

def test_generate_n_vectors():
    # test against Sanders' method
    nvecs = generate_n_vectors(N_max=6, dx=2, dy=2, dz=2)
    nvecs_sanders = sanders_nvecs(N_max=6, dx=2, dy=2, dz=2)
    assert np.all(nvecs == nvecs_sanders)

    nvecs = generate_n_vectors(N_max=6, dx=1, dy=1, dz=1)
    nvecs_sanders = sanders_nvecs(N_max=6, dx=1, dy=1, dz=1)
    assert np.all(nvecs == nvecs_sanders)

def test_fit_isochrone():
    # integrate orbit in Isochrone potential, then try to recover it
    true_m = 2.81E11
    true_b = 11.