Example #1
def test_row_tuple_column_slice():
    """
    Test getting and setting a row using a tuple or list of column names
    """
    t = table.QTable([[1, 2, 3] * u.m,
                      [10., 20., 30.],
                      [100., 200., 300.],
                      ['x', 'y', 'z']], names=['a', 'b', 'c', 'd'])
    # Get a row for index=1
    r1 = t[1]
    # Column slice with tuple of col names
    r1_abc = r1['a', 'b', 'c']  # Row object for these cols
    r1_abc_repr = ['<Row index=1>',
                   '   a       b       c   ',
                   '   m                   ',
                   'float64 float64 float64',
                   '------- ------- -------',
                   '    2.0    20.0   200.0']
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Column slice with list of col names
    r1_abc = r1[['a', 'b', 'c']]
    assert repr(r1_abc).splitlines() == r1_abc_repr

    # Make sure setting on a tuple or slice updates parent table and row
    r1['c'] = 1000
    r1['a', 'b'] = 1000 * u.cm, 100.
    assert r1['a'] == 10 * u.m
    assert r1['b'] == 100
    assert t['a'][1] == 10 * u.m
    assert t['b'][1] == 100.
    assert t['c'][1] == 1000

    # Same but using a list of column names instead of tuple
    r1[['a', 'b']] = 2000 * u.cm, 200.
    assert r1['a'] == 20 * u.m
    assert r1['b'] == 200
    assert t['a'][1] == 20 * u.m
    assert t['b'][1] == 200.

    # Set column slice of column slice
    r1_abc['a', 'c'] = -1 * u.m, -10
    assert t['a'][1] == -1 * u.m
    assert t['b'][1] == 200.
    assert t['c'][1] == -10.

    # Bad column name
    with pytest.raises(KeyError) as err:
        t[1]['a', 'not_there']
    assert "'not_there'" in str(err.value)

    # Too many values
    with pytest.raises(ValueError) as err:
        t[1]['a', 'b'] = 1 * u.m, 2, 3
    assert 'right hand side must be a sequence' in str(err.value)

    # Something without a length
    with pytest.raises(ValueError) as err:
        t[1]['a', 'b'] = 1
    assert 'right hand side must be a sequence' in str(err.value)
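
A minimal standalone sketch of the same Row API outside the test harness (hypothetical values; it only assumes astropy and astropy.units are importable):

# Minimal sketch (assumption: not part of the original test suite).
from astropy import table, units as u

t = table.QTable([[1, 2, 3] * u.m, [10., 20., 30.]], names=['a', 'b'])
row = t[1]
sub = row['a', 'b']               # Row restricted to columns 'a' and 'b'
row[['a', 'b']] = 5 * u.m, 50.    # writes through to the parent table
print(t['a'][1], t['b'][1])       # 5.0 m 50.0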
Example #2
def test_validate_write_kwargs():
    out = StringIO()
    t = table.QTable([[1, 2], [1, 2]], names=['a', 'b'])

    with pytest.raises(TypeError, match=r"write\(\) argument 'fast_writer' must be a "
                       r"\(<class 'bool'>, <class 'str'>\) object, "
                       r"got <class 'int'> instead"):
        ascii.write(t, out, fast_writer=12)
Example #3
    def photometry_to_table(
            self,
            output: str = None,
            best: bool = False,
            fmts: List[str] = ("ascii.ecsv", "ascii.csv")
    ):
        """
        Converts the photometry information, which is stored internally as a dictionary, into an astropy QTable.
        :param output: Where to write table.
        :return:
        """

        if output is None:
            output = self.build_photometry_table_path()

        # if self.photometry_tbl is None:

        tbls = []
        for instrument_name in self.photometry:
            instrument = inst.Instrument.from_params(instrument_name)
            for filter_name in self.photometry[instrument_name]:
                fil = instrument.filters[filter_name]

                if best:
                    phot_dict, _ = self.select_photometry_sep(fil=filter_name, instrument=instrument_name)
                    phot_dict["band"] = filter_name
                    phot_dict["instrument"] = instrument_name
                    phot_dict["lambda_eff"] = u.check_quantity(
                        number=fil.lambda_eff,
                        unit=units.Angstrom
                    )
                    # tbl = table.QTable([phot_dict])
                    tbls.append(phot_dict)
                    print("phot_dict:")
                    print(phot_dict)

                else:
                    for epoch in self.photometry[instrument_name][filter_name]:
                        phot_dict = self.photometry[instrument_name][filter_name][epoch].copy()
                        phot_dict["band"] = filter_name
                        phot_dict["instrument"] = instrument_name
                        phot_dict["lambda_eff"] = u.check_quantity(
                            number=fil.lambda_eff,
                            unit=units.Angstrom
                        )
                        # tbl = table.QTable([phot_dict])
                        tbls.append(phot_dict)

        print(tbls)
        if best:
            self.photometry_tbl = table.vstack(tbls)
        else:
            self.photometry_tbl = table.QTable(tbls)

        if output is not False:
            for fmt in fmts:
                self.photometry_tbl.write(output.replace(".ecsv", fmt[fmt.find("."):]), format=fmt, overwrite=True)
        return self.photometry_tbl
Example #4
def test_write_formatted_mixin(fast_writer):
    """
    Test fix for #8680 where writing a QTable with a quantity mixin generates
    an exception if a format is specified.
    """
    out = StringIO()
    t = table.QTable([[1, 2], [1, 2] * u.m], names=['a', 'b'])
    ascii.write(t, out, fast_writer=fast_writer, formats={'a': '%02d', 'b': '%.2f'})
    assert out.getvalue().splitlines() == ['a b',
                                           '01 1.00',
                                           '02 2.00']
Example #5
def test_quantity_mixin(tmpdir):

    t = table.QTable()
    t['a'] = [1, 2, 3]
    t['b'] = ['x', 'y', 'z']
    t['c'] = [2.0, 5.0, 8.2] * u.m

    def check(ff):
        assert isinstance(ff['table']['c'], u.Quantity)

    helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
Example #6
File: catalog.py  Project: jleagle94/sedkit
    def __init__(self,
                 name='SED Catalog',
                 marker='circle',
                 color='blue',
                 verbose=True,
                 **kwargs):
        """Initialize the Catalog object"""
        # Metadata
        self.verbose = verbose
        self.name = name
        self.marker = marker
        self.color = color
        self.wave_units = q.um
        self.flux_units = q.erg / q.s / q.cm**2 / q.AA

        # List all the results columns
        self.cols = [
            'name', 'age', 'age_unc', 'distance', 'distance_unc', 'parallax',
            'parallax_unc', 'radius', 'radius_unc', 'spectral_type',
            'spectral_type_unc', 'SpT', 'SpT_fit', 'membership', 'reddening',
            'fbol', 'fbol_unc', 'mbol', 'mbol_unc', 'Lbol', 'Lbol_unc',
            'Lbol_sun', 'Lbol_sun_unc', 'Mbol', 'Mbol_unc', 'logg', 'logg_unc',
            'mass', 'mass_unc', 'Teff', 'Teff_unc', 'Teff_evo', 'Teff_evo_unc',
            'Teff_bb', 'SED'
        ]

        # A master table of all SED results
        self.results = at.QTable(names=self.cols, dtype=['O'] * len(self.cols))
        self.results.add_index('name')

        # Set the units
        self.results['age'].unit = q.Gyr
        self.results['age_unc'].unit = q.Gyr
        self.results['distance'].unit = q.pc
        self.results['distance_unc'].unit = q.pc
        self.results['parallax'].unit = q.mas
        self.results['parallax_unc'].unit = q.mas
        self.results['radius'].unit = q.Rsun
        self.results['radius_unc'].unit = q.Rsun
        self.results['fbol'].unit = q.erg / q.s / q.cm**2
        self.results['fbol_unc'].unit = q.erg / q.s / q.cm**2
        self.results['Lbol'].unit = q.erg / q.s
        self.results['Lbol_unc'].unit = q.erg / q.s
        self.results['mass'].unit = q.Msun
        self.results['mass_unc'].unit = q.Msun
        self.results['Teff'].unit = q.K
        self.results['Teff_unc'].unit = q.K
        self.results['Teff_bb'].unit = q.K
        self.results['Teff_evo'].unit = q.K
        self.results['Teff_evo_unc'].unit = q.K

        # Try to set attributes from kwargs
        for k, v in kwargs.items():
            setattr(self, k, v)
Example #7
def test_column_value_access():
    """Can a column's underlying data consistently be accessed via `.value`,
    whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
    data = np.array([1, 2, 3])
    tbl = table.QTable({'a': table.Column(data),
                        'b': table.MaskedColumn(data),
                        'c': u.Quantity(data),
                        'd': time.Time(data, format='mjd')})
    assert type(tbl['a'].value) == np.ndarray
    assert type(tbl['b'].value) == np.ma.MaskedArray
    assert type(tbl['c'].value) == np.ndarray
    assert type(tbl['d'].value) == np.ndarray
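
As a companion, a compact hypothetical sketch of the same uniform .value access (assumes numpy and astropy are importable; values are made up):

# Minimal sketch (assumption: not part of the original test suite).
import numpy as np
from astropy import table, units as u, time

data = np.array([1, 2, 3])
tbl = table.QTable({'a': table.Column(data),
                    'c': u.Quantity(data, u.m),
                    'd': time.Time(data, format='mjd')})
for name in tbl.colnames:
    print(name, type(tbl[name].value))   # a plain ndarray-like array in every case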
Example #8
def test_row_tuple_column_slice_transaction():
    """
    Test that setting a row that fails part way through does not
    change the table at all.
    """
    t = table.QTable([[10., 20., 30.], [1, 2, 3] * u.m], names=['a', 'b'])
    tc = t.copy()

    # First one succeeds but second fails.
    with pytest.raises(ValueError) as err:
        t[1]['a', 'b'] = (-1, -1 * u.s)  # Bad unit
    assert "'s' (time) and 'm' (length) are not convertible" in str(err.value)
    assert t[1] == tc[1]
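
A minimal sketch of the same transactional behaviour in plain code (hypothetical values; assumes astropy is importable):

# Minimal sketch (assumption: not part of the original test suite).
from astropy import table, units as u

t = table.QTable([[10., 20.], [1, 2] * u.m], names=['a', 'b'])
try:
    t[0]['a', 'b'] = (-1, -1 * u.s)   # seconds cannot be converted to metres
except ValueError:
    pass
print(t[0])                            # unchanged: a=10.0, b=1.0 m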
Example #9
def aggregate_one(res_fname, mlband):
    with read_results.PCAOutput.from_fname(res_fname) as res:
        with res.get_drp_logcube(mpl_v) as drp, res.get_dap_maps(mpl_v, daptype) as dap:
        
            plateifu = res[0].header['PLATEIFU']
            plate, ifu = plateifu.split('-')

            stellarmass = totalmass.StellarMass(
                res, pca_system, drp, dap, drpall.loc[plateifu],
                cosmo, mlband=mlband)

            mstar_map = stellarmass.mstar[stellarmass.bands_ixs[mlband], ...]
            tauVmu_med = res.param_dist_med('tau_V mu')
            tauV1mmu_med = res.param_dist_med('tau_V (1 - mu)')
            tauV_med = tauVmu_med + tauV1mmu_med

            mean_atten_mwtd = np.average(
                tauV_med, weights=(mstar_map * ~res.mask))
            std_atten_mwtd = np.sqrt(np.average(
                (tauV_med - mean_atten_mwtd)**2., weights=(mstar_map * ~res.mask)))

            mass_in_ifu = stellarmass.mstar_in_ifu[stellarmass.bands_ixs[stellarmass.mlband]]
            sollum_in_ifu = stellarmass.sollum_bands.to(m.bandpass_sol_l_unit).sum(axis=(1, 2))
            sollum_nsa = stellarmass.nsa_absmags_cosmocorr.to(
                m.bandpass_sol_l_unit,
                totalmass.bandpass_flux_to_solarunits(stellarmass.absmag_sun))
            ml_fluxwt = stellarmass.logml_fnuwt
            outerml_ring = stellarmass.ml_ring()

            sollum_nsa_names = list(map(
                lambda n: 'sollum_nsa_{}'.format(n),
                stellarmass.bands))
            sollum_in_ifu_names = list(map(
                lambda n: 'sollum_in_ifu_{}'.format(n),
                stellarmass.bands))

    data = [plateifu, mean_atten_mwtd, std_atten_mwtd,
            mass_in_ifu, *sollum_in_ifu, *sollum_nsa,
            ml_fluxwt.to(m.m_to_l_unit), outerml_ring.to(m.m_to_l_unit)]
    names = ['plateifu', 'mean_atten_mwtd', 'std_atten_mwtd',
             'mass_in_ifu', *sollum_in_ifu_names, *sollum_nsa_names,
             'ml_fluxwt', 'outerml_ring']

    qt = t.QTable()
    for d, n in zip(data, names):
        qt[n] = np.atleast_1d(d)

    return qt
Example #10
    def from_tree(cls, node, ctx):

        # This is getting meta, guys
        meta = node.get('meta', {})

        # This enables us to support files that use the table definition from
        # the ASDF Standard, rather than the custom one that Astropy defines.
        if cls._compat:
            return table.Table(node['columns'], meta=meta)

        if node.get('qtable', False):
            t = table.QTable(meta=node.get('meta', {}))
        else:
            t = table.Table(meta=node.get('meta', {}))

        for name, col in zip(node['colnames'], node['columns']):
            t[name] = col

        return t
Example #11
    def get_nearest_calib_rows(self, mjd: float, n: int = 7):
        # self.retrieve_calibration_table()
        row_prime = self.get_nearest_calib_row(mjd=mjd)
        rows = [row_prime]
        mjd_low = mjd - 1
        mjd_high = mjd + 1
        while len(rows) < n:
            row = self.get_nearest_calib_row(mjd=mjd_high)
            if row not in rows:
                rows.append(row)
            row = self.get_nearest_calib_row(mjd=mjd_low)
            if row not in rows:
                rows.append(row)
            # print(mjd_low, mjd_high)
            mjd_low -= 1
            mjd_high += 1
        tbl = table.QTable(rows=rows, names=rows[0].colnames)
        tbl.sort("mjd_obs")
        return tbl
Example #12
    def to_table(self):
        '''
        make table of stellar-mass results
        '''

        tab = t.QTable()
        tab['plateifu'] = [self.drpall_row['plateifu']]

        # tabulate mass in IFU
        tab['mass_in_ifu'] = self.mstar_in_ifu[
            None, ...][:, self.bands_ixs[self.mlband]]
        nsa_absmag = self.nsa_absmags_cosmocorr
        #tab['nsa_absmag'].meta['bands'] = self.bands
        ifu_absmag = (self.ifu_flux_bands.to(u.ABmag) - self.distmod)
        #tab['ifu_absmag'].meta['bands'] = self.bands
        missing_flux = ((nsa_absmag + self.distmod).to(m.Mgy) -
                        (ifu_absmag + self.distmod).to(m.Mgy)).clip(
                            a_min=0. * m.Mgy, a_max=np.inf * m.Mgy)

        for i, b in enumerate(self.bands):
            outer_flux = missing_flux[i]
            if outer_flux <= 0. * m.Mgy:
                tab['outer_absmag_{}'.format(b)] = np.inf * u.ABmag
                tab['outer_lum_{}'.format(b)] = -np.inf * u.dex(
                    m.bandpass_sol_l_unit)
            else:
                tab['outer_absmag_{}'.format(b)] = outer_flux.to(
                    u.ABmag) - self.distmod
                tab['outer_lum_{}'.format(b)] = tab['outer_absmag_{}'.format(
                    b)].to(
                        u.dex(m.bandpass_sol_l_unit),
                        bandpass_flux_to_solarunits(StellarMass.absmag_sun[i]))

        tab['outer_ml_ring'] = self.ml_ring()
        #tab['outer_ml_ring'].meta['band'] = self.mlband

        tab['ml_fluxwt'] = self.logml_fnuwt
        #tab['ml_fluxwt'].meta['band'] = self.mlband

        tab['distmod'] = self.distmod[None, ...]

        return tab
Example #13
def test_qtable_column_conversion():
    """
    Ensures that a QTable that gets assigned a unit switches to be Quantity-y
    """
    qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])

    assert isinstance(qtab['i'], table.column.Column)
    assert isinstance(qtab['f'], table.column.Column)

    qtab['i'].unit = 'km/s'
    assert isinstance(qtab['i'], u.Quantity)
    assert isinstance(qtab['f'], table.column.Column)

    # should follow from the above, but good to make sure as a #4497 regression test
    assert isinstance(qtab['i'][0], u.Quantity)
    assert isinstance(qtab[0]['i'], u.Quantity)
    assert not isinstance(qtab['f'][0], u.Quantity)
    assert not isinstance(qtab[0]['f'], u.Quantity)

    # Regression test for #5342: if a function unit is assigned, the column
    # should become the appropriate FunctionQuantity subclass.
    qtab['f'].unit = u.dex(u.cm / u.s**2)
    assert isinstance(qtab['f'], u.Dex)
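
A compact sketch of the same column promotion outside the test (hypothetical values; assumes astropy is importable):

# Minimal sketch (assumption): assigning a unit to a QTable column
# promotes it from Column to Quantity in place.
from astropy import table, units as u

qtab = table.QTable([[1., 2.]], names=['v'])
print(type(qtab['v']))        # astropy.table.column.Column
qtab['v'].unit = u.km / u.s
print(type(qtab['v']))        # astropy.units.quantity.Quantity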
Example #14
def load_master_table(
    tbl: Union[None, table.Table],
    tbl_columns: dict,
    tbl_path: str,
    force: bool = False,
):
    if force or tbl is None:
        colnames, dtypes, un = _construct_column_lists(columns=tbl_columns)

        if not os.path.isfile(tbl_path):
            tbl = table.QTable(data=[[-999]] * len(colnames),
                               names=colnames,
                               units=un,
                               dtype=dtypes)
            for i, colname in enumerate(colnames):
                print(i, colname)
                if isinstance(dtypes[i], str):
                    tbl[colname][0] = "0" * 32
            tbl.write(tbl_path, format="ascii.ecsv")
        tbl = table.QTable.read(tbl_path, format="ascii.ecsv")
        for i, colname in enumerate(colnames):
            add_column(tbl=tbl, colname=colname, dtype=dtypes[i], unit=un[i])

    return tbl
Example #15
def main(gammafile, protonfile, electronfile, outputfile):
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    particles = {
        "gamma": {
            "file": gammafile,
            "target_spectrum": CRAB_HEGRA,
        },
        "proton": {
            "file": protonfile,
            "target_spectrum": IRFDOC_PROTON_SPECTRUM,
        },
        "electron": {
            "file": electronfile,
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
        },
    }

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_file(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    # calculate theta / distance between reco and assumed source position
    gammas["theta"] = calculate_theta(
        gammas,
        assumed_source_az=gammas["true_az"],
        assumed_source_alt=gammas["true_alt"],
    )

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=25))
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas['selected_gh']]["theta"],
        gammas[gammas['selected_gh']]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        min_value=0.05 * u.deg,
    )

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts_opt, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(gammas[gammas["selected"]],
                                         bins=sensitivity_bins)
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist,
                                        background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = (s["relative_sensitivity"] *
                                 spectrum(s['reco_energy_center']))

    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    # calculate sensitivity using unoptimised cuts
    gammas["theta_unop"] = gammas["theta"].to_value(u.deg) <= np.sqrt(0.03)
    gammas["gh_unop"] = gammas["gh_score"] > 0.85

    theta_cut_unop = table.QTable()
    theta_cut_unop['low'] = theta_cuts_opt['low']
    theta_cut_unop['high'] = theta_cuts_opt['high']
    theta_cut_unop['center'] = theta_cuts_opt['center']
    theta_cut_unop['cut'] = np.sqrt(0.03) * u.deg

    signal_hist_unop = create_histogram_table(gammas[gammas["theta_unop"]
                                                     & gammas["gh_unop"]],
                                              bins=sensitivity_bins)
    background_hist_unop = estimate_background(
        background[background["gh_score"] > 0.85],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cut_unop,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity_unop = calculate_sensitivity(signal_hist_unop,
                                             background_hist_unop,
                                             alpha=ALPHA)
    sensitivity_unop["flux_sensitivity"] = (
        sensitivity_unop["relative_sensitivity"] *
        spectrum(sensitivity_unop['reco_energy_center']))
    hdus.append(fits.BinTableHDU(sensitivity_unop, name="SENSITIVITY_UNOP"))

    log.info('Calculating IRFs')
    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[...,
                               np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )
    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts_opt["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing outputfile')
    fits.HDUList(hdus).writeto(outputfile, overwrite=True)
Example #16
def generate_data(path=resource_filename('locals', 'data/fake/'),
                  mag_range=(11.13, 18)):
    """Generate a fake JWST pipeline catalog replete with photometry and spectra
    using a range of model atmospheres
    
    Parameters
    ----------
    path: str
        The path to the target directory
    mag_range: tuple
        The (min, max) range of F200W magnitudes to which the spectra are renormalized
    """
    # Get some random spectra
    try:
        files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]
    except:
        files = glob.glob(
            '/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits'
        )[::20]

    # Make a fake source catalog (with only essential columns for now)
    catpath = os.path.join(path, 'fake_source_catalog.ecsv')
    ids = list(range(len(files)))
    coords = SkyCoord([89.7455] * len(ids), [-29.05744] * len(ids),
                      unit='deg',
                      frame='icrs')
    cat = at.QTable([ids, coords], names=('id', 'icrs_centroid'))
    cat.write(catpath)

    # Open the x1d file
    header = fits.getheader(
        resource_filename('locals', 'data/template_x1d.fits'))

    # Make Spectrum objects from models at R=150
    wavelength = np.arange(0.05, 2.6, 0.0001)[::66] * q.um

    # Normalize the spectra to a random F200W magnitude
    spectra = []
    f200w = Bandpass('NIRISS.F200W')
    f200w.wave_units = q.um
    for file in files:

        # Create Spectrum
        flux = fits.getdata(file)[-1][::66] * q.erg / q.s / q.cm**2 / q.AA
        unc = flux / 50.
        spec = Spectrum(wavelength, flux, unc)

        # Normalize to F200W
        mag = np.random.uniform(*mag_range)
        norm_spec = spec.renormalize(mag, f200w)
        spectra.append(norm_spec)

    # Make a separate x1d file and photometry file for each bandpass
    # containing data for each source
    for band in NIRISS_bands:

        try:

            # Get the Bandpass object
            bp = Bandpass(band)
            bp.wave_units = q.um

            # Make x1d file for spectra
            x1d_file = os.path.join(path, '{}_x1d.fits'.format(band))
            x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))

            # Make csv file for photometry
            phot_file = os.path.join(path, '{}_phot.csv'.format(band))
            phot_data = at.Table(names=('id', 'band', 'magnitude',
                                        'magnitude_unc'),
                                 dtype=(int, 'S20', float, float))

            # Iterate over spectra
            for id, (f, spec) in enumerate(zip(files, spectra)):

                # Trim spectrum to bandpass for x1d file
                spec = Spectrum(*spec.spectrum,
                                trim=[
                                    (0 * q.um, bp.WavelengthMin * 1E-4 * q.um),
                                    (bp.WavelengthMax * 1E-4 * q.um, 10 * q.um)
                                ])

                # Calculate magnitude and add to photometry table
                mag, mag_unc = spec.synthetic_magnitude(bp, force=True)
                phot_data.add_row([id, band, mag, mag_unc])

                # Add source spectrum params for verification
                params = f.split('/')[-1].split('-')
                header['TEFF'] = int(params[0].replace('lte', ''))
                header['LOGG'] = float(params[1][:4])
                header['FEH'] = float(params[-6][:-8].split('+')[-1])
                header['FILEPATH'] = f
                header['PUPIL'] = band

                # Put spectrum in x1d fits file
                data = fits.BinTableHDU(data=np.rec.array(
                    list(zip(*spec.data)),
                    formats='float32,float32,float32',
                    names='WAVELENGTH,FLUX,ERROR'),
                                        header=header)
                data.name = 'EXTRACT1D'

                x1d_hdu.append(data)

            # Write the photometry file
            phot_data.write(phot_file, format='ascii.csv')
            del phot_data

            # Write the x1d file
            x1d_hdu.writeto(x1d_file, overwrite=True)
            del x1d_hdu

        except IOError:
            pass
Example #17
ax.set_ylabel("temperature")
ax.set_title(f"ch #{ch}")
ax.legend()

fig.tight_layout()
fig.savefig(output_dir / f"temp_vs_subrefz.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()

# 4th step: Gauss-fit
print("#4: Gauss-fit")

alldata = table.QTable(names=("scan_speed", "peak", "z_mean", "z_stddev",
                              "slope", "intercept"))

if maxid[0] < minid[0]:
    minid.insert(0, np.nan)
if minid[-1] < maxid[-1]:
    minid.append(np.nan)

amp0 = params["fitting"]["amplitude"]
z0 = params["fitting"]["z_mean"]
s0 = params["fitting"]["z_stddev"]
sl = params["fitting"]["slope"]
ic = params["fitting"]["intercept"]

g_init = models.Gaussian1D(amplitude=amp0, mean=z0,
                           stddev=s0) + models.Linear1D(sl, ic)
fit_g = fitting.LevMarLSQFitter()
Example #18
mask[exchs] = False
mask[np.where(scanarray_cal.kidtp != 1)] = False
masked_cube_array = cube_array[:, :, mask]

weight = dc.ones_like(masked_cube_array)
cont_array = fc.makecontinuum(masked_cube_array, weight=weight)
dc.io.savefits(cont_array, cont_obs_fits, dropdeg=True, overwrite=True)

# 5th step: 2D-Gauss fit on the continuum map
print("#5: 2D-Gauss fit on the continuum map")

alldata = table.QTable(names=(
    "subref_x",
    "subref_y",
    "peak",
    "x_mean",
    "y_mean",
    "x_stddev",
    "y_stddev",
    "theta",
))

amplitude = float(cont_array.max().values)
x_mean = float(
    cont_array.where(cont_array == cont_array.max(), drop=True).x.values)
y_mean = float(
    cont_array.where(cont_array == cont_array.max(), drop=True).y.values)
x_stddev = params["fitting"]["x_stddev"]
y_stddev = params["fitting"]["y_stddev"]
theta = params["fitting"]["theta"]

f = fc.gauss_fit(
Example #19
def into_pixels(xdata,
                ydata,
                nx=None,
                ny=None,
                xscale=None,
                yscale=None,
                xlim=None,
                ylim=None,
                x="x",
                y="y",
                id="id",
                n="N",
                quiet=False):
    """
    Put a 2D dataset into pixels. The code returns two objects:
    1) an astropy QTable for the pixels with the pixel ID number, x-centre, 
       y-centre, and number of objects -- the properties of the pixel grid 
       are also output in the QTable metadata;
    2) an array containing the pixel ID number of all input objects -- 
       objects outside the limits of the pixel grid are given a pixel ID of -1.
    
    INPUTS
      xdata : first coordinate of data (referred to as "x")
      ydata : second coordinate of data (referred to as "y")
    
    OPTIONS
      nx : number of pixels in x [default None] (*)
      ny : number of pixels in y [default None] (*)
      xscale : scale of x pixels [default None] (*)
      yscale : scale of y pixels [default None] (*)
      xlim : limits of pixelised area in x [default None] (*)
      ylim : limits of pixelised area in y [default None] (*)
      x : name for x-coordinate column of output table [default "x"]
      y : name for y-coordinate column of output table [default "y"]
      id : name for pixel ID column of output table [default "id"]
      n : name for number of datapoints column of output table [default "N"]
      quiet : suppress text outputs? [default False]
    
    NOTES
      (*) Some pixel settings must be provided, but it is not necessary to 
        provide all options. The behaviour of the code for different 
        combinations of inputs is outlined here:
        1) No settings are provided: the code will fail.
        2) Limits only: the code will fail.
        3) Number of pixels only: the data limits are used for the pixel
           limits. Then the behaviour follows from case 5.
        4) Scale only: the data limits are used but with the lower limit
           rounded down to the nearest scale factor and the upper limit
           rounded up to the nearest scale factor. Then the behaviour follows 
           from case 7.
        5) Limits and number of pixels: scale is calculated. Then the 
           behaviour follows from case 8.
        6) Scale and number of pixels: limits are chosen so that the centre 
           of the pixel grid coincides with the centre of the data. Then the 
           behaviour follows from case 8.
        7) Limits and scale: number of pixels is calculated. Then the 
           behaviour proceeds as case 8.
        8) Number of pixels, pixel scale and limits are all given: the code 
           checks that they are all consistent, and fails if not.
    """

    # throw an error if no settings are given for the x pixels
    if not xlim and not xscale and not nx:
        print("ERROR: Please provide pixel settings for the x-coordinate.")
        return

    # throw an error if no settings are given for the y pixels
    if not ylim and not yscale and not ny:
        print("ERROR: Please provide pixel settings for the y-coordinate.")
        return

    # do not proceed if only limits are given for x-coordinate
    if xlim and not xscale and not nx:
        print("ERROR: Please provide required number of pixels or pixel "\
            +"scale for x-coordinate as well as limits of pixelised region.")
        return

    # do not proceed if only limits are given for y-coordinate
    if ylim and not yscale and not ny:
        print("ERROR: Please provide required number of pixels or pixel "\
            +"scale for y-coordinate as well as limits of pixelised region.")
        return

    # calculate limits, if needed
    if not xlim:
        if nx and xscale:
            xmid = xdata.min() + xdata.ptp() / 2.
            xlim = (xmid - nx / 2. * xscale, xmid + nx / 2. * xscale)
        elif not xscale:
            xlim = (xdata.min(), xdata.max())
        else:
            xlim = (np.floor(xdata.min() / xscale) * xscale,
                    np.ceil(xdata.max() / xscale) * xscale)
    if not ylim:
        if ny and yscale:
            ymid = ydata.min() + ydata.ptp() / 2.
            ylim = (ymid - ny / 2. * yscale, ymid + ny / 2. * yscale)
        elif not yscale:
            ylim = (ydata.min(), ydata.max())
        else:
            ylim = (np.floor(ydata.min() / yscale) * yscale,
                    np.ceil(ydata.max() / yscale) * yscale)

    # calculate pixel scale, if needed
    if not xscale: xscale = (xlim[1] - xlim[0]) / nx
    if not yscale: yscale = (ylim[1] - ylim[0]) / ny

    # calculate number of pixels, if needed
    if not nx: nx = int(np.round((xlim[1] - xlim[0]) / xscale))
    if not ny: ny = int(np.round((ylim[1] - ylim[0]) / yscale))

    # make sure pixel numbers are integers
    if nx != int(nx):
        print("You have a non-integer number of x pixels.")
        return
    if ny != int(ny):
        print("You have a non-integer number of y pixels.")
        return

    # total number of pixels
    npix = nx * ny

    # check that everything is consistent
    dx = 1 - (xlim[1] - xlim[0]) / nx / xscale
    dy = 1 - (ylim[1] - ylim[0]) / ny / yscale
    if np.abs(dx) > 1e-3 or np.abs(dy) > 1e-3:
        if np.abs(dx) > 1e-3:
            print("ERROR: Your x-coordinate scales, limits, "
                  "and pixel numbers are inconsistent.")
        if np.abs(dy) > 1e-3:
            print("ERROR: Your y-coordinate scales, limits, "
                  "and pixel numbers are inconsistent.")
        return

    if not quiet:
        print("\nbin 2D data into pixels")
        print("")
        print("  x coordinate: {:}".format(x))
        print("  y coordinate: {:}".format(y))
        print("")
        print("  x scale: {:} /pixel".format(xscale))
        print("  y scale: {:} /pixel".format(yscale))
        print("")
        print("  x limits: {:} to {:}".format(*xlim))
        print("  y limits: {:} to {:}".format(*ylim))
        print("")
        print("  x pixels: {:}".format(nx))
        print("  y pixels: {:}".format(ny))
        print("  total pixels: {:}".format(npix))

    # make QTable for pixels
    pix = table.QTable()
    pix[id] = range(npix)

    # pixel centres
    xx = np.linspace(xlim[0] / xscale + 0.5, xlim[1] / xscale - 0.5,
                     nx) * xscale
    yy = np.linspace(ylim[0] / yscale + 0.5, ylim[1] / yscale - 0.5,
                     ny) * yscale
    pix[x], pix[y] = [p.reshape(npix) for p in np.meshgrid(xx, yy)]

    # pixel number for each datapoint
    data_pix = (np.round((xdata - xx.min()) / xscale)
                + np.round((ydata - yy.min()) / yscale) * nx).astype(int)
    data_pix[(xdata < xlim[0]) | (xdata > xlim[1])
             | (ydata < ylim[0]) | (ydata > ylim[1])] = -1

    # number of datapoints in each pixel
    pix[n] = np.histogram(data_pix, range=(-0.5, npix - 0.5), bins=npix)[0]

    # put grid properties into metadata
    pix.meta = {
        "npix": npix,
        "nx": nx,
        "ny": ny,
        "xmin": min(xlim),
        "xmax": max(xlim),
        "ymin": min(ylim),
        "ymax": max(ylim),
        "xscale": xscale,
        "yscale": yscale,
    }

    return pix, data_pix
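
A hypothetical usage sketch for into_pixels, illustrating case 5 from the NOTES above (limits plus number of pixels given, scale derived); the random data and parameter values are illustrative only:

# Hypothetical usage sketch (not from the original project).
import numpy as np

rng = np.random.default_rng(0)
xdata = rng.uniform(-5., 5., 1000)
ydata = rng.uniform(-5., 5., 1000)

# limits and pixel counts given, so the pixel scale is derived (case 5)
pix, data_pix = into_pixels(xdata, ydata, nx=10, ny=10,
                            xlim=(-5., 5.), ylim=(-5., 5.))
print(pix.meta["xscale"], pix.meta["yscale"])   # 1.0 1.0
print((data_pix == -1).sum())                   # datapoints outside the grid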
Example #20
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**(-1.9) * u.TeV, 10**2.3005 * u.TeV, 50))
    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    # the cut is calculated in the same bins as the sensitivity,
    # but then interpolated to 10x the resolution.
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts_coarse = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=sensitivity_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # interpolate to 50 bins per decade
    theta_center = bin_center(theta_bins)
    inter_center = bin_center(sensitivity_bins)
    theta_cuts = table.QTable({
        "low": theta_bins[:-1],
        "high": theta_bins[1:],
        "center": theta_center,
        "cut": np.interp(np.log10(theta_center / u.TeV),
                         np.log10(inter_center / u.TeV),
                         theta_cuts_coarse['cut']),
    })

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    sensitivity["flux_sensitivity"] = (
        sensitivity["relative_sensitivity"] *
        spectrum(sensitivity['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[...,
                               np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(gammas[gammas["selected"]],
                                             reco_energy_bins,
                                             energy_type="reco")
    ang_res = angular_resolution(gammas[gammas["selected_gh"]],
                                 reco_energy_bins,
                                 energy_type="reco")
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing outputfile')
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
Example #21
ax.set_title("Residual")
ax.tick_labels.hide_y()
ax.axis_labels.hide_y()

plt.tight_layout(pad=4.0, w_pad=0.5)
plt.savefig(output_dir / f"continuum_model.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()

# 5th step: 2D-Gauss fit on the cube map
print("#5: 2D-Gauss fit on the cube map")

alldata = table.QTable()

h = fc.gauss_fit(
    cube_array,
    mode="deg",
    amplitude=f.peak,
    x_mean=f.x_mean,
    y_mean=f.y_mean,
    x_stddev=f.x_stddev,
    y_stddev=f.y_stddev,
    theta=f.theta,
    fixed={"x_mean": True, "y_mean": True, "theta": True},
)

for exch in exchs:
    h.peak[exch] = np.nan
Example #22
fig.tight_layout()
fig.savefig(output_dir / f"continuum_image.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()

# 4th step: 2D-Gauss fit on the continuum map
print("#4: 2D-Gauss fit on the continuum map")

alldata = table.QTable(
    names=(
        "peak",
        "x_mean",
        "y_mean",
        "x_stddev",
        "y_stddev",
        "theta",
    )
)

amplitude = float(cont_array.max().values)
x_mean = float(cont_array.where(cont_array == cont_array.max(), drop=True).x.values)
y_mean = float(cont_array.where(cont_array == cont_array.max(), drop=True).y.values)
x_stddev = params["fitting"]["x_stddev"]
y_stddev = params["fitting"]["y_stddev"]
theta = params["fitting"]["theta"]

f = fc.gauss_fit(
    cont_array,
    mode="deg",
Example #23
def into_vorbins(data_pix,
                 pix,
                 targetSN,
                 x="x",
                 y="y",
                 id="id",
                 n="N",
                 npix="Npix",
                 sn="SN",
                 signal=None,
                 noise=None,
                 quiet=False,
                 vquiet=True):
    """
    Put pixels into Voronoi bins. This is a wrapper for voronoi.bin2d (see 
    https://github.com/lauralwatkins/voronoi) that does a lot of the tedious 
    housekeeping. There are two outputs:
    1) an astropy QTable for the bins with the bin ID number, x-centre, 
       y-centre, number of objects, number of pixels, signal-to-noise, signal 
       and noise;
    2) an array containing the bin ID number of all datapoints.
    The code also adds two columns to the input pixel table (pix): "bin" 
    records the bin ID number of the pixels, and "Nbin" records the number of 
    stars in the bin to which the pixel belongs.
    
    INPUTS
      data_pix : pixel number of each datapoint
      pix : pixel grid for the data
      targetSN : target signal-to-noise required for binning
    
    OPTIONS
      x : name for x-coordinate column of input/output table [default "x"]
      y : name for y-coordinate column of input/output table [default "y"]
      id : name for bin ID column of output table [default "id"]
      n : name for number of datapoints column of input/output table
        [default "N"]
      npix : name for number of pixels column of output table [default "Npix"]
      sn : name for signal-to-noise column of output table [default "SN"]
      signal : name for signal column of input/output table [default None](*)
      noise : name for noise column of input/output table [default None](**)
      quiet : suppress text outputs for this code? [default False]
      vquiet : suppress text outputs for Voronoi call? [default True]
    
    (*) The code uses the number of objects in the pixel for the "signal" in
    the pixel, unless a column name is passed for the signal data.
    (**) The code also assumes that the noise in the pixel is the square-root
    of the signal in the pixel (useful if the signal is the number of objects
    in the pixel), unless a column name is passed for the noise data.
    """

    import voronoi

    # fail if there are no columns called n, x or y
    for key in (n, x, y):
        if key not in pix.colnames:
            print("ERROR: Could not find column '{:}' in pixel table."\
                .format(key))
            return

    # settings for Voronoi binning
    good = pix[n] > 0
    if not signal: signal = n
    pix_signal = pix[good][signal]
    if not noise: pix_noise = np.sqrt(pix[good][signal])
    else: pix_noise = pix[good][noise]

    # need to have pixels on same scale for Voronoi
    xp = ((pix[x] - pix[x].min()) / pix.meta["xscale"])[good]
    yp = ((pix[y] - pix[y].min()) / pix.meta["yscale"])[good]

    # do the Voronoi binning
    bin = table.QTable()
    pix["bin"] = -np.ones(len(pix), dtype="int")
    pix["bin"][good], bin[x], bin[y], bin[sn], bin[npix], vscale \
        = voronoi.bin2d(xp, yp, pix_signal, pix_noise, targetSN, graphs=False,
        quiet=vquiet)
    bin["id"] = range(len(bin))

    # adjust bins back to real scale
    bin[x] = bin[x] * pix.meta["xscale"] + pix[x].min()
    bin[y] = bin[y] * pix.meta["yscale"] + pix[y].min()

    # bin number for each datapoint
    data_bin = pix["bin"][data_pix]

    # number of datapoints in each bin
    bin[n] = np.array([sum(data_bin == i) for i in range(len(bin))])

    # reorder columns
    bin = bin[id, x, y, n, npix, sn]

    # make columns to record the signal and noise in each bin
    if signal == n: signal = "signal"
    if not noise: noise = "noise"
    bin[signal] = [sum(pix_signal[pix[good]["bin"] == b[id]]) for b in bin]
    bin[noise] = [np.sqrt(sum(pix_noise[pix[good]["bin"] == b[id]]**2))
                  for b in bin]

    # number of datapoints in bin to which pixel belongs
    pix["Nbin"] = -np.ones(len(pix), dtype="int")
    pix["Nbin"][good] = bin[pix[good]["bin"]][n]

    # signal and noise in bin to which pixel belongs
    pix[signal + "_bin"] = [np.nan] * len(pix)
    pix[noise + "_bin"] = [np.nan] * len(pix)
    pix[signal + "_bin"][good] = bin[pix[good]["bin"]][signal]
    pix[noise + "_bin"][good] = bin[pix[good]["bin"]][noise]

    if not quiet:
        print("\nVoronoi binning of pixels\n")
        print("  bins: {:}".format(len(bin)))
        print("  min S/N per bin: {:}".format(bin[sn].min()))
        print("  max S/N per bin: {:}".format(bin[sn].max()))
        print("  avg S/N per bin: {:}".format(bin[sn].mean()))

    return bin, data_bin
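
And a matching hypothetical sketch chaining into_pixels and into_vorbins (it assumes the voronoi package linked in the docstring is installed; the data and target S/N are illustrative):

# Hypothetical usage sketch (not from the original project).
import numpy as np

rng = np.random.default_rng(1)
xdata = rng.normal(0., 2., 5000)
ydata = rng.normal(0., 2., 5000)

pix, data_pix = into_pixels(xdata, ydata, nx=20, ny=20,
                            xlim=(-6., 6.), ylim=(-6., 6.))
bins, data_bin = into_vorbins(data_pix, pix, targetSN=10.)
print(len(bins), bins["SN"].mean())   # number of bins and their mean S/N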