Example #1
    def get_molecules(self, cache=True):
        """
        Scrape the list of valid molecules
        """
        if cache and hasattr(self, '_molecule_dict'):
            return self._molecule_dict
        elif cache and os.path.isfile(self.moldict_path):
            with open(self.moldict_path, 'r') as f:
                md = json.load(f)
            return md

        main_url = 'http://home.strw.leidenuniv.nl/~moldata/'
        response = self._request('GET', main_url, cache=cache)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')  # explicit parser avoids bs4's warning

        links = soup.find_all('a', href=True)
        datfile_urls = [
            url for link in ProgressBar(links)
            for url in self._find_datfiles(link['href'], base_url=main_url)
        ]

        molecule_re = re.compile(
            r'http://[a-zA-Z0-9.]*/~moldata/datafiles/([A-Z0-9a-z_+@-]*).dat')
        molecule_dict = {
            molecule_re.search(url).groups()[0]: url
            for url in datfile_urls
        }

        with open(self.moldict_path, 'w') as f:
            s = json.dumps(molecule_dict)
            f.write(s)

        return molecule_dict
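A minimal sketch of the iteration idiom used in the comprehension above: wrapping any sized iterable in ProgressBar yields its items while drawing a console bar (the hrefs below are placeholders, not the real Leiden listing).

from astropy.utils.console import ProgressBar

main_url = 'http://home.strw.leidenuniv.nl/~moldata/'
links = ['datafiles/co.dat', 'datafiles/hcn.dat']   # hypothetical hrefs
datfile_urls = []
for href in ProgressBar(links):       # the bar advances once per item
    datfile_urls.append(main_url + href)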
Example #2
def build_hdf5_dbase(ascii_dbase_root, hdf5_dbase_root, ask_before=True):
    """
    Assemble HDF5 file from raw ASCII CHIANTI database
    """
    # Check and ask
    if os.path.isfile(hdf5_dbase_root):
        return None
    if not os.path.isfile(os.path.join(ascii_dbase_root, 'VERSION')):
        raise FileNotFoundError(
            f'No CHIANTI database found at {ascii_dbase_root}')
    if ask_before:
        question = f"No HDF5 database found at {hdf5_dbase_root}. Build it now?"
        answer = query_yes_no(question, default='yes')
        if not answer:
            return None

    # Build database
    all_files = []
    tmp = get_masterlist(ascii_dbase_root)
    for k in tmp:
        all_files += tmp[k]
    with ProgressBar(len(all_files)) as progress:
        with h5py.File(hdf5_dbase_root, 'a') as hf:
            for af in all_files:
                parser = fiasco.io.Parser(af,
                                          ascii_dbase_root=ascii_dbase_root)
                df = parser.parse()
                if df is None:
                    warnings.warn(f'Not including {af} in {hdf5_dbase_root}')
                else:
                    parser.to_hdf5(hf, df)
                progress.update()
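The pattern above, reduced to its core: construct ProgressBar with the number of work items, then call update() once per item inside the with block. A minimal sketch with a dummy file list:

from astropy.utils.console import ProgressBar

all_files = ['h_1.elvlc', 'h_1.wgfa', 'he_2.elvlc']   # hypothetical CHIANTI filenames
with ProgressBar(len(all_files)) as progress:
    for af in all_files:
        # parse the file and write it to the HDF5 store here
        progress.update()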
Example #3
    def getRankedTiles(self, verbose=True, fitsfilename=None,
                       skymapdata=False, res=256):
        if fitsfilename:
            skymap = getSkymapData(fitsfilename, res=res)
        elif skymapdata:
            skymap = [skymapdata[0], skymapdata[1],
                      skymapdata[2], skymapdata[3]]

        self.pixel_id_all = skymap[0]
        self.point_ra_all = skymap[1]
        self.point_dec_all = skymap[2]
        self.point_pVal_all = skymap[3]

        pvalTile = []
        TileProbSum = 0.0
        if verbose:
            print('Computing Ranked-Tiles...')

        x = list(self.tileData.keys())
        z = itemgetter(*x)(self.tileData)
        if verbose:
            with ProgressBar(len(z)) as bar:
                for ii in z:
                    pvalTile.append(np.sum(skymap[-1][ii[0]]))
                    bar.update()
        else:
            for ii in z:
                pvalTile.append(np.sum(skymap[-1][ii[0]]))

        pvalTile = np.array(pvalTile)
        sorted_indices = np.argsort(-1*pvalTile)
        output = np.vstack((self.IDs[sorted_indices],
                            pvalTile[sorted_indices])).T
        df = pd.DataFrame(output, columns=['tile_index', 'tile_prob'])
        return df
Example #4
def diffplot_t_of_c(density=4.5, columns=np.linspace(12,15), temperatures=[25,50,75,100,125,150]):
    grid_exp = np.empty([len(columns), len(temperatures)])
    grid_ML = np.empty([len(columns), len(temperatures)])
    for icol, column in enumerate(ProgressBar(columns)):
        for item, temperature in enumerate(temperatures):
            constraints,mf,density,column,temperature = make_model(density=density, temperature=temperature, column=column)
            grid_exp[icol,item] = constraints['expected_temperature']
            grid_ML[icol,item] = constraints['temperature_chi2']

    pl.figure(1).clf()

    for ii,(tem,color) in enumerate(zip(temperatures,('r','g','b','c','m','orange'))):
        pl.plot(columns, grid_exp[:,ii], color=color)
        pl.plot(columns, grid_ML[:,ii], '--', color=color)
        pl.hlines(tem, columns.min(), columns.max(), label='T={0}K'.format(tem), color=color)
    pl.plot([], 'k', label='Expectation Value')
    pl.plot([], 'k--', label='Maximum Likelihood')
    pl.xlabel("log N(H$_2$CO) [cm$^{-2}$]")
    pl.ylabel("Temperature (K)")
    pl.legend(loc='best', fontsize=14)

    pl.figure(2).clf()

    for ii,(tem,color) in enumerate(zip(temperatures,('r','g','b','c','m','orange'))):
        pl.plot(columns, (grid_exp[:,ii]-tem)/tem, color=color, label='T={0}K'.format(tem))
        pl.plot(columns, (grid_ML[:,ii]-tem)/tem, '--', color=color)
    pl.plot([], 'k', label='Expectation Value')
    pl.plot([], 'k--', label='Maximum Likelihood')
    pl.xlabel("log N(H$_2$CO) [cm$^{-2}$]")
    pl.ylabel("Fractional Difference\n(recovered-input)/input")
    pl.legend(loc='best', fontsize=14)
    pl.ylim(-0.5,0.5)
    pl.grid()

    return columns, grid_exp, grid_ML
Example #5
def download_hitran(m, i, numin, numax):
    """
    Download HITRAN data for a particular molecule. Based on fetch function from
    hapi.py.

    Parameters
    ----------
    m : int
        HITRAN molecule number
    i : int
        HITRAN isotopologue number
    numin : real
        lower wavenumber bound
    numax : real
        upper wavenumber bound
    """
    iso_id = str(ISO[(m, i)][ISO_INDEX['id']])
    mol_name = ISO[(m, i)][ISO_INDEX['mol_name']]
    filename = os.path.join(cache_location, '{0}.data'.format(mol_name))
    CHUNK = 64 * 1024
    data = dict(iso_ids_list=iso_id, numin=numin, numax=numax)
    with open(filename, 'w') as fp:
        response = commons.send_request(HITRAN_URL,
                                        data,
                                        10,
                                        request_type='GET')
        if 'Content-Length' in response.headers:
            total_length = response.headers.get('Content-Length')
            pb = ProgressBar(int(total_length))
        downloaded = 0
        for chunk in response.iter_content(chunk_size=CHUNK):
            fp.write(chunk.decode('utf-8'))
            downloaded += len(chunk)
            try:
                # update() expects the absolute completed amount, not an increment
                pb.update(downloaded)
            except NameError:
                # no Content-Length header, so no progress bar was created
                pass
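ProgressBar.update(value) sets the bar to an absolute completed amount rather than adding value to it, which is why the loop above accumulates the byte count before each call. A minimal sketch with a fake download split into fixed-size chunks:

from astropy.utils.console import ProgressBar

total_length = 1024                    # pretend Content-Length in bytes
chunks = [b'x' * 256] * 4              # fake response chunks
pb = ProgressBar(total_length)
downloaded = 0
for chunk in chunks:
    downloaded += len(chunk)
    pb.update(downloaded)              # absolute value, not an increment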
Example #6
def combine_masks(maskdir, outputdir='./', outputfile='mastermask.fits',
                  write_fits=True):
    """
    Will take all masks contained within a directory and combine them into a
    single mask

    Parameters
    ----------
    maskdir : string
        directory containing the masks you would like to combine. Masks need to
        be in fits format
    outputdir : string (optional)
        output directory (default will be current directory)
    outputfile : string (optional)
        output file name (default is mastermask.fits)
    write_fits : bool
        whether or not to write the output to a fits file (default is True)
    """
    input_files = glob.glob(maskdir + '*.fits')
    data, header = fits.getdata(input_files[0], header=True)
    mastermask = np.zeros_like(data)
    for file in ProgressBar(input_files):
        mask = fits.getdata(file, header=False)
        mastermask += mask

    if write_fits:
        hdu = fits.PrimaryHDU(mastermask, header=header)
        hdu.writeto(outputdir+outputfile, overwrite=True)

    return mastermask
Example #7
def _get_barycorr_bvcs_withvels(coos, loc, injupyter=False):
    """
    Gets the barycentric correction of the test data from the
    http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
    Requires the https://github.com/tronsgaard/barycorr python interface to that
    site.

    Provided to reproduce the test data above, but not required to actually run
    the tests.
    """
    import barycorr
    from astropy.utils.console import ProgressBar

    bvcs = []
    for coo in ProgressBar(coos, ipython_widget=injupyter):
        res = barycorr.bvc(test_input_time.utc.jd,
                           coo.ra.deg,
                           coo.dec.deg,
                           lat=loc.geodetic[1].deg,
                           lon=loc.geodetic[0].deg,
                           pmra=coo.pm_ra_cosdec.to_value(u.mas / u.yr),
                           pmdec=coo.pm_dec.to_value(u.mas / u.yr),
                           parallax=coo.distance.to_value(
                               u.mas, equivalencies=u.parallax()),
                           rv=coo.radial_velocity.to_value(u.m / u.s),
                           epoch=test_input_time.utc.jd,
                           elevation=loc.geodetic[2].to(u.m).value)
        bvcs.append(res)
    return bvcs * u.m / u.s
Example #8
    def init_progressbar(self):
        """
        Initialise the progress bar.

        This only happens if run command is called with ``progressbar=True``.
        """
        self.progressbar = ProgressBar(self.command_count())
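For reference, the two ways ProgressBar is driven throughout these examples: give it a total count and call update() yourself, or hand it a sized iterable and loop over it (passing ipython_widget=True renders a notebook widget instead of a console bar). A minimal sketch:

from astropy.utils.console import ProgressBar

# Mode 1: known total, manual updates
with ProgressBar(3) as bar:
    for _ in range(3):
        bar.update()

# Mode 2: wrap a sized iterable and iterate over it
for item in ProgressBar(['a', 'b', 'c']):
    pass   # process item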
Example #9
    def run(self, observations, which='all'):
        """
        Run IACT basic image estimation for a list of observations.

        Parameters
        ----------
        observations : `~gammapy.data.ObservationList`
            List of observations
        which : str or list of str, optional
            Which images to compute; 'all' (default) selects counts,
            exposure, background, excess, flux and psf.

        Returns
        -------
        sky_images : `~gammapy.image.SkyImageList`
            List of sky images
        """
        from astropy.utils.console import ProgressBar
        result = SkyImageList()

        if 'all' in which:
            which = ['counts', 'exposure', 'background', 'excess', 'flux', 'psf']

        for name in which:
            result[name] = self._get_empty_skyimage(name)

        for observation in ProgressBar(observations):
            if 'exposure' in which:
                exposure = self._exposure(observation)
                result['exposure'].paste(exposure)
                # TODO: improve SkyImage.paste() so that it enforces compatibility
                # of units when doing the sum. The fix below can then be removed.
                result['exposure'].unit = exposure.unit

            if 'counts' in which:
                counts = self._counts(observation)
                # TODO: on the left side of the field of view there is one extra
                # row of pixels in the counts image compared to the exposure and
                # background image. Check why this happens and remove the fix below
                not_has_exposure = ~(exposure.data > 0)
                counts.data[not_has_exposure] = 0
                result['counts'].paste(counts)

            if 'background' in which:
                background = self._background(counts, exposure, observation)['background']

                # TODO: include stacked alpha and on/off exposure images
                result['background'].paste(background)

            if 'excess' in which:
                excess = self.excess(SkyImageList([counts, background]))
                result['excess'].paste(excess)

            if 'flux' in which:
                flux = self.flux(SkyImageList([counts, background, exposure]))
                result['flux'].paste(flux)
                # TODO: improve SkyImage.paste() so that it enforces compatibility
                # of units when doing the sum. The fix below can then be removed.
                result['flux'].unit = flux.unit

        if 'psf' in which:
            result['psf'] = self.psf(observations)
        return result
Example #10
def create_modelcube(self, njobs=1, verbose=True):
    """
    Generates a "clean" datacube from the scousepy decomposition. Returns a
    clean cube

    Parameters
    ----------
    self : instance of the scousepy class
    njobs : int
        number of cpus to use
    verbose : bool
        verbose output

    """

    # Time it
    starttime = time.time()

    cube = self.cube
    x = np.array(cube.world[:, 0, 0][0])
    if (self.ppv_vol[0] is not None) & (self.ppv_vol[1] is not None):
        trimids = np.where((x > self.ppv_vol[0]) & (x < self.ppv_vol[1]))[0]

    _cube = cube[min(trimids):max(trimids) + 1, :, :]
    _modelcube = np.full_like(_cube, np.nan)

    if verbose:
        print("")
        print("Generating models:")
        print("")

    args = [self]
    inputs = [[key] + args for key in self.indiv_dict.keys()]
    if njobs == 1:
        mods = ProgressBar.map(genmodel, inputs)
    else:
        mods = parallel_map(genmodel, inputs, numcores=njobs)
    mergedmods = [mod for mod in mods]
    mergedmods = np.asarray(mergedmods)

    if verbose:
        print("")
        print("Creating model cube:")
        print("")
        progress_bar = ProgressBar(self.indiv_dict.keys())

    for i, key in enumerate(self.indiv_dict.keys()):
        _modelcube[:, self.indiv_dict[key].coordinates[0],
                   self.indiv_dict[key].coordinates[1]] = mergedmods[i]
        if verbose:
            progress_bar.update()

    endtime = time.time()
    if verbose:
        print("")
        print('Process completed in: {0} minutes'.format(
            (endtime - starttime) / 60.))
        print("")

    return SpectralCube(data=_modelcube, wcs=_cube.wcs)
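ProgressBar.map, used in the njobs == 1 branch above, applies a function to every item while advancing the bar and returns the list of results; with multiprocess=True it distributes the work over a process pool instead. A minimal sketch with a toy function:

from astropy.utils.console import ProgressBar

def square(x):
    return x * x

results = ProgressBar.map(square, [1, 2, 3, 4])   # serial, with a progress bar
# results == [1, 4, 9, 16]
# ProgressBar.map(square, [1, 2, 3, 4], multiprocess=True) would use a pool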
Example #11
def create_hdf5_archive(hdf5_path,
                        hologram_paths,
                        n_z,
                        metadata={},
                        compression='lzf',
                        overwrite=False):
    """
    Create HDF5 file structure for holograms and phase/intensity
    reconstructions.

    Parameters
    ----------
    hdf5_path : string
        Name of new HDF5 archive
    hologram_paths : list of str
        Paths to all hologram files
    n_z : int
        Number of z-stacks to allocate space for
    metadata : dict
        Metadata to store at the top level of the HDF5 archive

    Returns
    -------
    f : `~h5py.File`
        Opened HDF5 file
    """
    if os.path.exists(hdf5_path) and not overwrite:
        raise ValueError("File {0} already exists. To overwrite it, "
                         "use `overwrite=True`.".format(hdf5_path))

    f = h5py.File(hdf5_path, 'w')

    first_image = tiff_to_ndarray(hologram_paths[0])

    # Create datasets for holograms, fill it in with holograms, metadata
    f.create_dataset('holograms',
                     dtype=first_image.dtype,
                     shape=(len(hologram_paths), first_image.shape[0],
                            first_image.shape[1]),
                     compression=compression)

    # Update attributes on `holograms` with metadata
    f['holograms'].attrs.update(metadata)

    holograms_dset = f['holograms']
    print('Loading holograms into file {0}...'.format(hdf5_path))
    with ProgressBar(len(hologram_paths)) as bar:
        for i, path in enumerate(hologram_paths):
            holograms_dset[i, :, :] = tiff_to_ndarray(path)
            bar.update()

    # Create empty datasets for reconstructions
    reconstruction_dtype = np.complex128
    f.create_dataset('reconstructed_wavefields',
                     dtype=reconstruction_dtype,
                     shape=(len(hologram_paths), n_z, first_image.shape[0],
                            first_image.shape[1]),
                     compression=compression)
    return f
Example #12
    def fit_bins(
        self,
        min_bands_per_bin: float = None,
        neccessary_bands: list = None,
        bins_from_df: bool = False,
        **kwargs,
    ):
        """" """

        print(f"Fitting {self.nbins} time bins.\n")

        if "bands" in kwargs:
            if min_bands_per_bin is None:
                min_bands_per_bin = len(kwargs["bands"])
            print(f"Bands which are fitted: {kwargs['bands']}")
            bands = kwargs["bands"]
        else:
            if min_bands_per_bin is None:
                min_bands_per_bin = 2
            print(f"Fitting all bands")
            bands = binned_lc_df.telescope_band.unique()

        if not neccessary_bands:
            neccessary_bands = []
            print("No band MUST be present in each bin to be fit")
        else:
            print(f"{neccessary_bands} MUST be present in each bin to be fit")

        print(
            f"At least {min_bands_per_bin} bands must be present in each bin to be fit"
        )

        if bins_from_df:
            print("Using predefined bins in dataframe")

        binned_lc_df = self.get_mean_magnitudes(
            bands=bands,
            min_bands_per_bin=min_bands_per_bin,
            neccessary_bands=neccessary_bands,
            bins_from_df=bins_from_df,
        )

        fitparams = {}

        progress_bar = ProgressBar(len(binned_lc_df.mean_obsmjd.unique()))

        for index, mjd in enumerate(binned_lc_df.mean_obsmjd.unique()):
            _df = binned_lc_df.query(f"mean_obsmjd == {mjd}")
            result = self.fit_one_bin(binned_lc_df=_df, **kwargs)
            fitparams.update({index: result})
            progress_bar.update(index)

        progress_bar.update(len(binned_lc_df.mean_obsmjd.unique()))

        with open(os.path.join(self.fit_dir, f"{self.fittype}.json"),
                  "w") as outfile:
            json.dump(fitparams, outfile)
Example #13
def download_url(to_download_urls,
                 download_location,
                 show_progress=True,
                 notebook=False,
                 verbose=True,
                 overwrite=False,
                 nprocess=None,
                 cookies=None,
                 **kwargs):
    """ """
    if nprocess is None:
        nprocess = 1
    elif nprocess < 1:
        raise ValueError("nprocess must 1 or higher (None means 1)")

    if nprocess == 1:
        # Single processing
        if verbose:
            warnings.warn("No parallel downloading")
        for url, fileout in zip(to_download_urls, download_location):
            download_single_url(url,
                                fileout=fileout,
                                show_progress=show_progress,
                                notebook=notebook,
                                overwrite=overwrite,
                                verbose=verbose,
                                cookies=cookies,
                                **kwargs)
    else:
        # Multi processing
        import multiprocessing
        if show_progress:
            from astropy.utils.console import ProgressBar
            bar = ProgressBar(len(to_download_urls), ipython_widget=notebook)
        else:
            bar = None

        if verbose:
            warnings.warn("parallel downloading ; asking for %d processes" %
                          nprocess)

        # Passing arguments
        overwrite_ = [overwrite] * len(to_download_urls)
        verbose_ = [verbose] * len(to_download_urls)
        with multiprocessing.Pool(nprocess) as p:
            # Da Loop
            for j, result in enumerate(
                    p.imap_unordered(
                        _download_,
                        zip(to_download_urls, download_location, overwrite_,
                            verbose_))):
                if bar is not None:
                    bar.update(j)

            if bar is not None:
                bar.update(len(to_download_urls))
Example #14
def _convolve_model_dir_2(model_dir, filters, overwrite=False, memmap=True):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_cube = SEDCube.read(os.path.join(model_dir, 'flux.fits'), order='nu',
                            memmap=memmap)

    par_table = load_parameter_table(model_dir)

    if not np.all(par_table['MODEL_NAME'] == sed_cube.names):
        raise ValueError("Model names in SED cube and parameter file do not match")

    log.info("{0} SEDs found in {1}".format(sed_cube.n_models, model_dir))

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=sed_cube.names,
                              apertures=sed_cube.apertures,
                              initialize_arrays=True) for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = [f.rebin(sed_cube.nu) for f in filters]

    # We do the unit conversion - if needed - at the last minute
    val_factor = sed_cube.val.unit.to(u.mJy)
    unc_factor = sed_cube.unc.unit.to(u.mJy)

    # Loop over apertures
    for i_ap in ProgressBar(range(sed_cube.n_ap)):

        sed_val = sed_cube.val[:, i_ap, :]
        sed_unc = sed_cube.unc[:, i_ap, :]

        for i, f in enumerate(binned_filters):

            response = f.response.astype(sed_val.dtype)

            fluxes[i].flux[:, i_ap] = np.sum(sed_val * response, axis=1) * val_factor
            fluxes[i].error[:, i_ap] = np.sqrt(np.sum((sed_unc * response) ** 2, axis=1)) * unc_factor

    for i, f in enumerate(binned_filters):

        fluxes[i].central_wavelength = f.central_wavelength
        fluxes[i].apertures = sed_cube.apertures
        fluxes[i].model_names = sed_cube.names

        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
Example #15
    def animate_evolve(self, Evolver, System, Plotter, time, chunks=int(1e3), chunksize=int(1e3), progress=True):
        """Animate an evolver"""
        from matplotlib import animation
        import matplotlib
        matplotlib.use('TkAgg')
        import matplotlib.pyplot as plt
        Q = self.evolve_queue(Evolver, System, time, chunks, chunksize)
        with ProgressBar(chunks) as PBar:
            anim = animation.FuncAnimation(Plotter.figure, self._animate_callback, self._packet_callback(System, Q), interval=1, fargs=(System, Plotter, PBar))
            plt.show()
Example #16
def get_singv_tau11(singv_para):
    '''
    Take a GAS DR1 parameter map cube and return the optical depth of the 1-1 line.

    Parameters
    ----------
    singv_para : str or ndarray
        The GAS DR1 parameter cube (i.e., maps), either as a file name or as a 3D ndarray cube.

    Returns
    -------
    tau11 : ndarray
        A map of model optical depths for ammonia (1-1)
    '''

    # Note: the efficiency could benefit from multi-core processing

    if type(singv_para) == str:
        parcube = fits.getdata(singv_para)

    else:
        parcube = singv_para

    # Create a dummy spectral-axis in km/s as a place holder to acquire tau
    xarr = np.linspace(0.0, 10.0, 10, endpoint=True)
    xarr = SpectroscopicAxis(xarr * u.km / u.s,
                             velocity_convention='radio',
                             refX=freq_dict['oneone'] * u.Hz).as_unit(u.GHz)

    # set ntot elements with zero values to NaN
    parcube[:, parcube[2] == 0.0] = np.nan

    yy, xx = np.indices(parcube.shape[1:])
    nanvals = np.any(~np.isfinite(parcube), axis=0)
    isvalid = np.any(parcube, axis=0) & ~nanvals
    valid_pixels = zip(xx[isvalid], yy[isvalid])

    tau11 = np.zeros(parcube.shape[1:])

    def model_a_pixel(xy):
        x, y = int(xy[0]), int(xy[1])
        kwargs = {
            'tkin': parcube[0, y, x],
            'tex': parcube[1, y, x],
            'ntot': parcube[2, y, x],
            'width': parcube[3, y, x],
            'xoff_v': parcube[4, y, x],
            'fortho': parcube[5, y, x],
            'return_tau': True,
        }
        tau = ammonia.cold_ammonia(xarr, **kwargs)
        tau11[y, x] = tau['oneone']

    for xy in ProgressBar(list(valid_pixels)):
        model_a_pixel(xy)

    return tau11
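ProgressBar needs an argument with a length, which is why the zip of valid pixel coordinates is materialised with list() before being passed in above. A minimal sketch of the same idiom on a tiny mask:

import numpy as np
from astropy.utils.console import ProgressBar

isvalid = np.array([[True, False], [True, True]])
yy, xx = np.indices(isvalid.shape)
valid_pixels = list(zip(xx[isvalid], yy[isvalid]))   # list() so len() is defined
for x, y in ProgressBar(valid_pixels):
    pass   # model the pixel at (x, y) here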
Example #17
def cache_light_curves():
    """
    Run this after running `choose_targets.ipynb` in order to cache light curves
    into a local HDF5 archive.

    Examples
    --------
    >>> from salter import cache_light_curves; cache_light_curves()
    """
    if os.path.exists(light_curves_path):
        raise ValueError('Light curves file already exists, at {0}'.format(
            light_curves_path))

    if not os.path.exists(kic_numbers_path):
        raise ValueError("You must first run the `choose_targets.ipynb` "
                         "notebook before running `cache_light_curves`")

    kics = ascii.read(kic_numbers_path, format='no_header')['col1']

    client = kplr.API()

    # Create archive
    f = h5py.File(light_curves_path, 'w')

    with ProgressBar(len(kics)) as bar:
        for kic in kics:
            if str(kic) not in f.keys():
                # Find a KIC
                star = client.star(kic)

                # Download the lightcurves for this KOI.
                lightcurves = star.get_light_curves(short_cadence=False)

                # Loop over the datasets and read in the data.
                time, flux, ferr, quality, quarter = [], [], [], [], []

                for i, lc in enumerate(lightcurves):
                    with lc.open() as lc_file:
                        # The lightcurve data are in the first FITS HDU.
                        hdu_data = lc_file[1].data

                        time.append(hdu_data["time"])
                        flux.append(hdu_data["sap_flux"])
                        ferr.append(hdu_data["sap_flux_err"])
                        quality.append(hdu_data["sap_quality"])
                        quarter.append(i * np.ones_like(hdu_data["time"]))

                data = np.vstack(
                    list(
                        map(np.concatenate,
                            [time, flux, ferr, quality, quarter]))).T
                f.create_dataset(str(kic), data=data)
                f.flush()
                bar.update()
    f.close()
Example #18
    def resolve(self):
        # Ensure the output dir exists
        destination_dir = os.path.join(self.cfg['vphas']['resolved_cat_dir'],
                                       self.name)
        if not os.path.exists(destination_dir):
            os.makedirs(destination_dir)
        # Start resolving, with fields ordered by seeing
        order = np.argsort(self.catalogset.table['psffwhm_i'])
        log.info('Resolving duplicate detections.')
        for idx in ProgressBar(order):
            self._resolve_one(idx)
Example #19
    def scatter_energies(self, events, prng=np.random):
        """
        Scatter photon energies with the RMF and produce the 
        corresponding channel values.

        Parameters
        ----------
        events : dict of np.ndarrays
            The energies and positions of the photons. 
        prng : :class:`~numpy.random.RandomState` object or :mod:`~numpy.random`, optional
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is the :mod:`~numpy.random` module.
        """
        eidxs = np.argsort(events["energy"])
        sorted_e = events["energy"][eidxs]

        detectedChannels = []

        # run through all photon energies and find which bin they go in
        fcurr = 0
        last = sorted_e.shape[0]

        with ProgressBar(last) as pbar:
            for (k, low), high in zip(enumerate(self.elo), self.ehi):
                # weight function for probabilities from RMF
                weights = np.nan_to_num(np.float64(self.data["MATRIX"][k]))
                weights /= weights.sum()
                # build channel number list associated to array value,
                # there are groups of channels in rmfs with nonzero probabilities
                trueChannel = []
                f_chan = ensure_numpy_array(
                    np.nan_to_num(self.data["F_CHAN"][k]))
                n_chan = ensure_numpy_array(
                    np.nan_to_num(self.data["N_CHAN"][k]))
                for start, nchan in zip(f_chan, n_chan):
                    if nchan == 0:
                        trueChannel.append(start)
                    else:
                        trueChannel += list(range(start, start + nchan))
                trueChannel = np.array(trueChannel)
                if len(trueChannel) > 0:
                    e = sorted_e[fcurr:last]
                    nn = np.logical_and(low <= e, e < high).sum()
                    channelInd = prng.choice(len(weights), size=nn, p=weights)
                    detectedChannels.append(trueChannel[channelInd])
                    fcurr += nn
                    pbar.update(fcurr)

        for key in events:
            events[key] = events[key][eidxs]
        events[self.header["CHANTYPE"]] = np.concatenate(detectedChannels)

        return events
Example #20
    def configure_loop_simulations(self, interface, **kwargs):
        """
        Configure hydrodynamic simulations for each loop object
        """
        self.simulation_type = interface.name
        with ProgressBar(len(self.loops),
                         ipython_widget=kwargs.get('notebook', True)) as progress:
            for loop in self.loops:
                interface.configure_input(loop)
                progress.update()
Example #21
def spectral_regrid(cube, outgrid):
    """
    Spectrally regrid a cube onto a new spectral output grid

    (this is apparently redundant with regrid_cube_hdu)
    """

    assert isinstance(cube, SpectralCube)

    inaxis = cube.spectral_axis.to(outgrid.unit)

    indiff = np.mean(np.diff(inaxis))
    outdiff = np.mean(np.diff(outgrid))
    if outdiff < 0:
        outgrid = outgrid[::-1]
        outdiff = np.mean(np.diff(outgrid))
    if indiff < 0:
        cubedata = cube.filled_data[::-1]
        inaxis = cube.spectral_axis.to(outgrid.unit)[::-1]
        indiff = np.mean(np.diff(inaxis))
    else:
        cubedata = cube.filled_data[:]
    if indiff < 0 or outdiff < 0:
        raise ValueError("impossible.")

    assert np.all(np.diff(outgrid) > 0)
    assert np.all(np.diff(inaxis) > 0)

    np.testing.assert_allclose(np.diff(outgrid),
                               outdiff,
                               err_msg="Output grid must be linear")

    if outdiff > 2 * indiff:
        raise ValueError("Input grid has too small a spacing.  It needs to be "
                         "smoothed prior to resampling.")

    newcube = np.empty([outgrid.size, cube.shape[1], cube.shape[2]])

    yy, xx = np.indices(cube.shape[1:])

    pb = ProgressBar(xx.size)
    for ix, iy in (zip(xx.flat, yy.flat)):
        newcube[:, iy, ix] = np.interp(outgrid.value, inaxis.value,
                                       cubedata[:, iy, ix].value)
        pb.update()

    newheader = cube.header
    newheader['CRPIX3'] = 1
    newheader['CRVAL3'] = outgrid[0].value
    newheader['CDELT3'] = outdiff.value
    newheader['CUNIT3'] = outgrid.unit.to_string('FITS')

    return fits.PrimaryHDU(data=newcube, header=newheader)
Example #22
    def evolve_system(self, system, total_time, chunksize=int(1e3), chunks=1000, quiet=False, callback=None):
        """Evolve over many iterations with a given total time."""
        log.info("Evolving {}".format(system))

        self.read_packet(system.create_packet())
        end_time = self.Time + system.nondimensionalize(total_time).value
        log.debug("Total evolution time: {}".format(end_time))

        file = None if not quiet else io.StringIO()
        callback = callback if callback is not None else lambda i, p: system.read_packet(p)
        with ProgressBar(chunks, file=file) as pbar:
            iters = self.evolve_cb(end_time, chunksize=chunksize, chunks=chunks, callback=callback_progressbar_wrapper(callback, pbar))
        return iters
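The file= argument seen above is how these classes silence the bar: when quiet, output is redirected to an in-memory StringIO instead of the terminal. A minimal sketch:

import io
from astropy.utils.console import ProgressBar

quiet = True
out = io.StringIO() if quiet else None    # None writes to stdout as usual
with ProgressBar(10, file=out) as pbar:
    for _ in range(10):
        pbar.update()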
Example #23
def mass_photometry(fname, outfile):
    filenames = glob('/lustre/aoc/students/bmcclell/w51/' + fname)

    objs = []
    print('Loading files')
    pb = ProgressBar(len(filenames))
    for f in filenames:
        rs = dendrocat.RadioSource(fits.open(f))
        objs.append(rs)
        pb.update()

    #n = np.shape(objs[0].data)[0]
    #center = regions.PixCoord(n/2, n/2)
    #reg = regions.CirclePixelRegion(center, 3200)
    #mask = reg.to_mask()
    #img = mask.to_image((n, n)).astype(bool)
    #objs[0].data = np.where(img==True, objs[0].data, np.nan)

    # Debugging
    #plt.figure()
    #plt.imshow(objs[0].data)
    #plt.show()
    #objs[0].threshold = 4.5
    #objs[0].min_value = 1.1e-4
    #objs[0].min_delta = 1.2*objs[0].min_value
    #objs[0].to_catalog()
    #objs[0].autoreject()
    #objs[0].reject([44001, 44032])
    #dendrocat.utils.save_regions(objs[0].catalog, '/users/bmcclell/nrao/reg/test_mass_photometry.reg')
    #print('Autorejection complete')

    t = Table.read('/users/bmcclell/nrao/cat/w51IRS2_photometered.dat',
                   format='ascii')
    #for col in t.colnames:
    #    if 'GHz' in col:
    #        t.remove_column(col)

    mc = dendrocat.MasterCatalog(*objs, catalog=t)
    print('\nMaster Catalog made')
    start = time.time()
    mc.photometer(dendrocat.ellipse)
    stop = time.time()
    print('Ellipse apertures photometered. Time: {} s'.format(stop - start))
    start = time.time()
    mc.photometer(dendrocat.annulus)
    stop = time.time()
    print('Annulus apertures photometered. Time: {} s'.format(stop - start))
    start = time.time()
    mc.catalog.write(outfile, format='ascii', overwrite=True)
    stop = time.time()
    print('Catalog written. Time: {} s'.format(stop - start))
Example #24
def build_hdf5_dbase(ascii_dbase_root, hdf5_dbase_root, files=None):
    """
    Assemble HDF5 file from raw ASCII CHIANTI database.

    Parameters
    ----------
    ascii_dbase_root : `str`
        Path to top of CHIANTI database tree
    hdf5_dbase_root : `str`
        Path to HDF5 file
    files : `list` or `dict`, optional
        A list of files to update in the HDF5 database. By default,
        this is all of the files in `ascii_dbase_root`. If a `dict`, the
        dictionary keys must be the filenames and the items the corresponding
        expected md5 hash of each file. Building the database will fail if any
        of the md5 hashes is not as expected.
    """
    if files is None:
        files = []
        tmp = get_masterlist(ascii_dbase_root)
        for k in tmp:
            files += tmp[k]
    with ProgressBar(len(files)) as progress:
        with h5py.File(hdf5_dbase_root, 'a') as hf:
            for f in files:
                parser = fiasco.io.Parser(f, ascii_dbase_root=ascii_dbase_root)
                if isinstance(files, dict):
                    expected = files[f]
                    actual = md5hash(parser.full_path)
                    if expected != actual:
                        raise RuntimeError(
                            f'Hash of {parser.full_path} ({actual}) did not match expected hash ({expected})'
                        )
                try:
                    df = parser.parse()
                except MissingASCIIFileError as e:
                    # FIXME: use the logger here
                    warnings.warn(
                        f'{e}. Not including {f} in {hdf5_dbase_root}')
                else:
                    parser.to_hdf5(hf, df)
                progress.update()
            # Build an index for quick lookup of all ions in database
            from fiasco import list_ions  # import here to avoid circular imports
            # Delete it if it already exists to ensure the index is rebuilt
            if 'ion_index' in hf:
                del hf['ion_index']
            ion_list = list_ions(hdf5_dbase_root)
            ds = hf.create_dataset('ion_index',
                                   data=np.array(ion_list).astype(np.string_))
            ds.attrs['unit'] = 'SKIP'
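The md5 check above assumes a helper such as md5hash that digests a file on disk; a hedged sketch of what that helper could look like using only the standard library (the name and behaviour are assumptions, not necessarily the fiasco implementation):

import hashlib

def md5hash(path):
    """Hypothetical helper: return the hex MD5 digest of the file at `path`."""
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(65536), b''):
            h.update(block)
    return h.hexdigest()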
Example #25
def create_archive():
    paths = sorted(glob('tmp/??.fits'))

    image_shape = fits.getdata(paths[0]).shape

    f = h5py.File('tmp/archive.hdf5', 'w')

    if 'images' not in f:
        dset = f.create_dataset("images",
                                shape=(image_shape[0], image_shape[1],
                                       len(paths)))
    else:
        dset = f['images']

    brightest_star_coords_init = np.array([2, 2])

    master_flat_path = 'tmp/masterflat.fits'
    master_dark_path = 'tmp/masterdark.fits'

    flat = fits.getdata(master_flat_path)
    dark = fits.getdata(master_dark_path)

    from skimage.feature import peak_local_max

    mid = image_shape[0] // 2

    times = []
    airmass = []
    with ProgressBar(len(paths)) as bar:
        for i, path in enumerate(paths):

            raw_image = fits.getdata(path) / flat
            times.append(fits.getheader(path)['JD'])
            airmass.append(fits.getheader(path)['AIRMASS'])

            coordinates = peak_local_max(raw_image,
                                         min_distance=5,
                                         num_peaks=1,
                                         exclude_border=0)
            y_mean = int(coordinates[:, 1].mean())
            x_mean = int(coordinates[:, 0].mean())

            firstroll = np.roll(raw_image, mid - y_mean, axis=1)
            rolled_image = np.roll(firstroll, mid - x_mean, axis=0)

            dset[:, :, i] = rolled_image

            bar.update()
    np.savetxt('tmp/times.txt', times)
    np.savetxt('tmp/airmass.txt', airmass)
    f.close()
Example #26
    def flatten_serial(self, loops, interpolated_loop_coordinates, hf, emission_model=None):
        if emission_model is None:
            raise ValueError('Emission model is required')
        with ProgressBar(len(self.channels) * len(loops), ipython_widget=True) as progress:
            for channel in self.channels:
                start_index = 0
                dset = hf[channel['name']]
                flattened_emissivities = self.flatten_emissivities(channel, emission_model)
                for loop, interp_s in zip(loops, interpolated_loop_coordinates):
                    c = self.calculate_counts(channel, loop, emission_model, flattened_emissivities)
                    y = self.interpolate_and_store(c, loop, interp_s)
                    self.commit(y, dset, start_index)
                    start_index += interp_s.shape[0]
                    progress.update()
Example #27
    def movie(self, filename, system, queue=None, progress=True, save_kwargs=dict(), **kwargs):
        """Write a movie."""
        import matplotlib
        import matplotlib.pyplot as plt

        # Disable LaTeX while animating!
        matplotlib.rcParams['text.usetex'] = False

        out = None if progress else io.StringIO()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with ProgressBar(system.engine.iterations, file=out) as PBar:
                anim = self.get_animation(system, queue, PBar, buffer=0, **kwargs)
                anim.save(filename, **save_kwargs)
Example #28
def main():
    parser = argparse.ArgumentParser(description='Make finding charts')
    parser.add_argument('table', type=str, help='Input table')
    parser.add_argument('-pdf',
                        type=str,
                        help='If specified, output to directory')
    parser.add_argument(
        '-fmt',
        type=str,
        default='ascii.tab',
        help='Specify table format for astropy (default = ascii.tab)')
    parser.add_argument('-survey',
                        type=str,
                        default='2MASS-J',
                        help='Pull images from this survey (default = 2MASS-J)')
    parser.add_argument('-radius',
                        type=float,
                        default=120,
                        help='FOV radius in arcsec (default = 120)')

    args = parser.parse_args()

    table = Table.read(args.table, format=args.fmt)

    if args.pdf:
        pp = PdfPages(args.pdf, keep_empty=False)
    else:
        pp = None

    for row in ProgressBar(table):
        coord = SkyCoord(ra=row['ra'],
                         dec=row['dec'],
                         unit=(u.hourangle, u.deg))
        if row['ID'] in ['S72', 'S52', 'S49', 'S09']:
            log = True
        else:
            log = False
        fc = FindingChart(coord,
                          name=row['ID'],
                          survey=args.survey,
                          radius=args.radius,
                          log=log)
        if pp:
            fc.save(pp)
        else:
            fc.show()

    if pp:
        print('Writing finding charts to %s' % args.pdf)
        pp.close()
Example #29
    def animate(self, system, queue=None, progress=True, **kwargs):
        """Animation."""
        import matplotlib
        import matplotlib.pyplot as plt

        # Disable LaTeX while animating!
        matplotlib.rcParams['text.usetex'] = False

        out = None if progress else io.StringIO()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with ProgressBar(system.engine.iterations, file=out) as progressbar:
                anim = self.get_animation(system, queue, progressbar, **kwargs)
                plt.show()
Example #30
def make_spw_cube(spw='spw0'):

    # First set up an empty file
    if not os.path.exists('SgrB2_12m_{0}_lines.fits'.format(spw)):
        header = fits.getheader(
            'calibrated.ms.line.{0}.channels0to384.image.fits'.format(spw))
        # Make an arbitrary, small data before prepping the header
        data = np.zeros((100, 100), dtype=np.float32)
        hdu = fits.PrimaryHDU(data=data, header=header)
        # Set the appropriate output size (this can be extracted from the LISTOBS)
        header['NAXIS3'] = 7680
        # Write to disk
        header.tofile('SgrB2_12m_{0}_lines.fits'.format(spw))
        # Using the 'append' io method, update the *header*
        with open('SgrB2_12m_{0}_lines.fits'.format(spw), 'rb+') as fobj:
            # Seek past the length of the header, plus the length of the
            # data we want to write.
            # The -1 is to account for the final byte that we are about to
            # write:
            # 'seek' works on bytes, so divide #bits by 8 (bits per byte)
            fobj.seek(
                len(header.tostring()) +
                (header['NAXIS1'] * header['NAXIS2'] * header['NAXIS3'] *
                 np.abs(header['BITPIX']) // 8) - 1)
            fobj.write(b'\0')  # binary mode requires a bytes literal

    # Find the appropriate files (this is NOT a good way to do this!  Better to
    # provide a list.  But wildcards are quick & easy...)
    files = glob.glob("*{0}.chan*fits".format(spw))
    log.info(str(files))

    # Extract the appropriate pixel indices from the file name.
    # A more sophisticated approach is probably better, in which the individual
    # cubes are inspected for their start/end frequencies.
    # But, on the other hand, for this process to make any sense at all, you
    # have to have done the original cube imaging right
    def getinds(fn):
        inds = re.search('channels([0-9]*)to([0-9]*)', fn).groups()
        return [int(ii) for ii in inds]

    # open the file in update mode (it should have the right dims now)
    hdul = fits.open('SgrB2_12m_{0}_lines.fits'.format(spw), mode='update')
    for fn in ProgressBar(files):
        log.info("{0} {1}".format(getinds(fn), fn))
        ind0, ind1 = getinds(fn)
        plane = hdul[0].data[ind0]
        if np.all(plane == 0):
            log.info("Replacing indices {0} {1}".format(getinds(fn), fn))
            hdul[0].data[ind0:ind1, :, :] = fits.getdata(fn)
            hdul.flush()