Example No. 1
def download_hitran(m, i, numin, numax):
    """
    Download HITRAN data for a particular molecule. Based on the fetch
    function from hapi.py.

    Parameters
    ----------
    m : int
        HITRAN molecule number
    i : int
        HITRAN isotopologue number
    numin : real
        lower wavenumber bound
    numax : real
        upper wavenumber bound
    """
    iso_id = str(ISO[(m, i)][ISO_INDEX["id"]])
    mol_name = ISO[(m, i)][ISO_INDEX["mol_name"]]
    filename = os.path.join(cache_location, "{0}.data".format(mol_name))
    CHUNK = 64 * 1024
    data = dict(iso_ids_list=iso_id, numin=numin, numax=numax)
    with open(filename, "w") as fp:
        response = commons.send_request(HITRAN_URL, data, 10, request_type="GET")
        if "Content-Length" in response.headers:
            total_length = response.headers.get("Content-Length")
            pb = ProgressBar(int(total_length))
        for chunk in response.iter_content(chunk_size=CHUNK):
            fp.write(chunk.decode("utf-8"))
            try:
                pb.update(CHUNK)
            except NameError:
                pass
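A minimal sketch (not part of the example above) of the cumulative-update pattern: astropy's ProgressBar.update(value) sets the bar's current position to value, so a streamed download should pass the running byte total rather than the chunk size. The helper name below is hypothetical and error handling is omitted.

import requests
from astropy.utils.console import ProgressBar

def stream_to_file(url, filename, chunk_size=64 * 1024):
    # Hypothetical helper: stream a URL to disk, driving the bar with the
    # running byte count so it reaches 100% exactly at the end of the file.
    response = requests.get(url, stream=True)
    total = int(response.headers.get('Content-Length', 0))
    pb = ProgressBar(total) if total else None
    bytes_read = 0
    with open(filename, 'wb') as fp:
        for block in response.iter_content(chunk_size=chunk_size):
            fp.write(block)
            bytes_read += len(block)
            if pb is not None:
                pb.update(min(bytes_read, total))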
Example No. 2
    def init_progressbar(self):
        """
        Initialise the progress bar.

        This only happens if run command is called with ``progressbar=True``.
        """
        self.progressbar = ProgressBar(self.command_count())
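A minimal sketch (with a made-up commands list) of how a bar initialised this way is typically driven afterwards: each update() call with no argument advances it by one step.

from astropy.utils.console import ProgressBar

commands = ['build', 'test', 'deploy']  # hypothetical work items
progressbar = ProgressBar(len(commands))
for command in commands:
    # ... run the command here ...
    progressbar.update()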
Example No. 3
    def _download_file(self, url, local_filepath, timeout=None, auth=None):
        """
        Download a file.  Resembles `astropy.utils.data.download_file` but uses
        the local ``_session``
        """
        response = self._session.get(url, timeout=timeout, stream=True,
                                      auth=auth)
        if 'content-length' in response.headers:
            length = int(response.headers['content-length'])
        else:
            length = 1

        pb = ProgressBar(length)

        blocksize = astropy.utils.data.conf.download_block_size

        bytes_read = 0

        with open(local_filepath, 'wb') as f:
            for block in response.iter_content(blocksize):
                f.write(block)
                bytes_read += blocksize
                pb.update(bytes_read if bytes_read <= length else length)

        response.close()
    def ratio_to_dens_slow(ratio, c11, c22):
        """
        Shape:
            ratio [z,y,x]
            c11 [y,x]
            c22 [y,x]
        """

        assert c11.size == c22.size == ratio[0,:,:].size

        fshape = [ratio.shape[0], ratio.shape[1]*ratio.shape[2]]
        rrs = ratio.reshape(fshape).T

        outc = (ratio*0).reshape(fshape) + np.nan

        # set up a grid...

        pb = ProgressBar((c11.size))
        for ii,(r,c1,c2) in enumerate(zip(rrs, c11.flat, c22.flat)):
            #print r.shape,c1,c2
            if np.isfinite(c1) and np.isfinite(c2) and np.any(np.isfinite(r)):
                tauratio, ok = get_tau_ratio(c1,c2)

                inds = np.argsort(tauratio[ok])
                outc[:,ii] = np.interp(r, tauratio[ok][inds], dens[ok][inds], np.nan, np.nan)
            pb.update()
        #pb.finish()

        return outc.reshape(ratio.shape)
Example No. 5
    def compute_transform(self, show_progress=True, scale_normalization=True,
                          use_pyfftw=False, threads=1, pyfftw_kwargs={}):
        '''
        Compute the wavelet transform at each scale.

        Parameters
        ----------
        show_progress : bool, optional
            Show a progress bar while the transform is computed at each scale.
        scale_normalization: bool, optional
            Compute the transform with the correct scale-invariant
            normalization.
        use_pyfftw : bool, optional
            Enable to use pyfftw, if it is installed.
        threads : int, optional
            Number of threads to use in FFT when using pyfftw.
        pyfftw_kwargs : dict, optional
            Passed to the pyfftw FFT builders. See
            `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
            for a list of accepted kwargs.
        '''

        if use_pyfftw:
            if PYFFTW_FLAG:
                use_fftn = fftn
                use_ifftn = ifftn
            else:
                warn("pyfftw not installed. Using numpy.fft functions.")
                use_fftn = np.fft.fftn
                use_ifftn = np.fft.ifftn
        else:
            use_fftn = np.fft.fftn
            use_ifftn = np.fft.ifftn

        n0, m0 = self.data.shape
        A = len(self.scales)

        self._Wf = np.zeros((A, n0, m0), dtype=float)

        factor = 2
        if not scale_normalization:
            factor = 4
            warn("Transform values are only reliable with the proper scale"
                 " normalization. When disabled, the slope of the transform"
                 " CANNOT be used for physical interpretation.")

        pix_scales = self._to_pixel(self.scales).value

        if show_progress:
            bar = ProgressBar(len(pix_scales))

        for i, an in enumerate(pix_scales):
            psi = MexicanHat2DKernel(an)

            self._Wf[i] = \
                convolve_fft(self.data, psi, normalize_kernel=False,
                             fftn=use_fftn, ifftn=use_ifftn).real * \
                an**factor

            if show_progress:
                bar.update(i + 1)
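The PYFFTW_FLAG / fftn fallback used above is typically set up once at import time; a self-contained sketch of that pattern is below (the pyfftw import path is the standard drop-in interface and is an assumption about how the original module arranges this).

from warnings import warn
import numpy as np

try:
    # pyfftw's numpy_fft interface provides drop-in fftn/ifftn replacements.
    from pyfftw.interfaces.numpy_fft import fftn, ifftn
    PYFFTW_FLAG = True
except ImportError:
    PYFFTW_FLAG = False

if PYFFTW_FLAG:
    use_fftn, use_ifftn = fftn, ifftn
else:
    warn("pyfftw not installed. Using numpy.fft functions.")
    use_fftn, use_ifftn = np.fft.fftn, np.fft.ifftn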
Example No. 6
    def simulate(self, max_its=np.inf, max_time=np.inf * u.s):
        """Simulates the plasma as set up, either for the given number of
        iterations or until the simulation reaches the given time.

        Parameters
        ----------
        max_its : int
            Tells the simulation to run for a set number of iterations.
        max_time : astropy.units.Quantity
            Maximum total (in-simulation) time to allow the simulation to run.
            Must have units of time.

        Examples
        --------
        # >>> # Run a simulation for exactly one thousand iterations.
        # >>> myplasma.simulate(max_its=1000)
        # >>> # Run a simulation for up to half an hour of simulation time.
        # >>> myplasma.simulate(max_time=30*u.minute)
        """
        if np.isinf(max_its) and np.isinf(max_time.value):
            raise ValueError("Either max_time or max_its must be set.")

        physics = self.simulation_physics
        dt = physics.dt

        if np.isinf(max_time):
            pb = ProgressBar(max_its)
        else:
            pb = ProgressBar(int(max_time / dt))

        with pb as bar:
            while (physics.current_iteration < max_its
                   and physics.current_time < max_time):
                physics.time_stepper()
                bar.update()
Example No. 7
def extract_poly_slice(cube, polygons, width=1.0):

    nx = len(polygons)
    nz = cube.shape[0]

    slice = np.zeros((nz, nx))

    p = ProgressBar(len(polygons))

    for i, polygon in enumerate(polygons):

        p.update()

        # Find bounding box
        bbxmin = int(round(np.min(polygon.x))-1)
        bbxmax = int(round(np.max(polygon.x))+2)
        bbymin = int(round(np.min(polygon.y))-1)
        bbymax = int(round(np.max(polygon.y))+2)

        # Loop through pixels that might overlap
        for xmin in np.arange(bbxmin, bbxmax):
            for ymin in np.arange(bbymin, bbymax):

                area = square_polygon_overlap_area(xmin-0.5, xmin+0.5,
                                                   ymin-0.5, ymin+0.5,
                                                   polygon.x, polygon.y)

                if area > 0:
                    slice[:, i] += cube[:, ymin, xmin] * area

    print("")

    return slice
Example No. 8
def flat_cubes(date, lbda_min=7000, lbda_max=9000, ref="dome"):
    """ """
    baseroot = io.CUBE_PROD_ROOTS["cube"]["root"]
    newroot = io.CUBE_PROD_ROOTS["flat"]["root"]

    # -------------- #
    # The Reference  #
    # -------------- #
    reffile = io.get_night_cubes(date, kind="cube", target=ref)

    if len(reffile) == 0:
        raise ValueError("No cube reference for target %s in night %s" %
                         (ref, date))

    refcube = get_sedmcube(reffile[0])
    flatfied = refcube.get_slice(lbda_min, lbda_max, usemean=True)
    print(flatfied.mean())

    # ----------------- #
    # Build flat cubes  #
    # ----------------- #
    def build_flat_cube(cubefile):
        cube_ = get_sedmcube(cubefile)
        print(cubefile)
        cube_.scale_by(flatfied)
        cube_.writeto(cube_.filename.replace(baseroot, newroot))

    from astropy.utils.console import ProgressBar
    cubefiles = io.get_night_cubes(date, kind="cube")
    print(cubefiles)
    ProgressBar.map(build_flat_cube, cubefiles)
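A minimal sketch of the ProgressBar.map pattern used above, with a made-up worker function; passing multiprocess=True fans the calls out over multiple cores.

from astropy.utils.console import ProgressBar

def square(value):
    # Made-up worker: any picklable single-argument callable works here.
    return value * value

results = ProgressBar.map(square, list(range(10)), multiprocess=False)
print(results)  # [0, 1, 4, ..., 81]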
Example No. 9
def create_modelcube(self, njobs=1, verbose=True):
    """
    Generate a "clean" data cube from the scousepy decomposition and return it.

    Parameters
    ----------
    self : instance of the scousepy class
    njobs : int
        Number of CPUs to use.
    verbose : bool
        Verbose output.

    """

    # Time it
    starttime = time.time()

    cube = self.cube
    x = np.array(cube.world[:, 0, 0][0])
    if (self.ppv_vol[0] is not None) & (self.ppv_vol[1] is not None):
        trimids = np.where((x > self.ppv_vol[0]) & (x < self.ppv_vol[1]))[0]

    _cube = cube[min(trimids):max(trimids) + 1, :, :]
    _modelcube = np.full_like(_cube, np.nan)

    if verbose:
        print("")
        print("Generating models:")
        print("")

    args = [self]
    inputs = [[key] + args for key in self.indiv_dict.keys()]
    if njobs == 1:
        mods = ProgressBar.map(genmodel, inputs)
    else:
        mods = parallel_map(genmodel, inputs, numcores=njobs)
    mergedmods = [mod for mod in mods]
    mergedmods = np.asarray(mergedmods)

    if verbose:
        print("")
        print("Creating model cube:")
        print("")
        progress_bar = ProgressBar(self.indiv_dict.keys())

    for i, key in enumerate(self.indiv_dict.keys()):
        _modelcube[:, self.indiv_dict[key].coordinates[0],
                   self.indiv_dict[key].coordinates[1]] = mergedmods[i]
        if verbose:
            progress_bar.update()

    endtime = time.time()
    if verbose:
        print("")
        print('Process completed in: {0} minutes'.format(
            (endtime - starttime) / 60.))
        print("")

    return SpectralCube(data=_modelcube, wcs=_cube.wcs)
Example No. 10
def download_hitran(m, i, numin, numax):
    """
    Download HITRAN data for a particular molecule. Based on the fetch
    function from hapi.py.

    Parameters
    ----------
    m : int
        HITRAN molecule number
    i : int
        HITRAN isotopologue number
    numin : real
        lower wavenumber bound
    numax : real
        upper wavenumber bound
    """
    iso_id = str(ISO[(m, i)][ISO_INDEX['id']])
    mol_name = ISO[(m, i)][ISO_INDEX['mol_name']]
    filename = os.path.join(cache_location, '{0}.data'.format(mol_name))
    CHUNK = 64 * 1024
    data = dict(iso_ids_list=iso_id, numin=numin, numax=numax)
    with open(filename, 'w') as fp:
        response = commons.send_request(HITRAN_URL,
                                        data,
                                        10,
                                        request_type='GET')
        if 'Content-Length' in response.headers:
            total_length = response.headers.get('Content-Length')
            pb = ProgressBar(int(total_length))
        for chunk in response.iter_content(chunk_size=CHUNK):
            fp.write(chunk.decode('utf-8'))
            try:
                pb.update(CHUNK)
            except NameError:
                pass
Example No. 11
def generate_2d_parametermap(scouseobject, spectrum_parameter, verbose=False):
    """
    Create a 2D map of a given spectral parameter

    Parameters
    ----------
    scouseobject : Instance of the scousepy class

    """
    blankmap = np.zeros(scouseobject.cube.shape[1:])
    blankmap[:] = np.nan

    if verbose:
        print("")
        progress_bar = ProgressBar(len(scouseobject.indiv_dict.items()))

    for ind, spec in scouseobject.indiv_dict.items():
        cy, cx = spec.coordinates
        if getattr(spec.model, 'ncomps') != 0:
            blankmap[cy, cx] = getattr(spec.model, spectrum_parameter)
        else:
            blankmap[cy, cx] = np.nan
        if verbose:
            progress_bar.update()

    return blankmap
Example No. 12
def surface_solution_numerical():  # inward extension of J=-J_p at r=R.
    global n, Jp, kx, L, delta, radius, s, sigmai, phix_full
    global Js, Jscheck, Hs

    Js = -Jp
    amps = np.zeros(n, dtype=np.cdouble)
    Jprefac = np.sqrt(6.0) / (16.0 * np.pi**3) * kx**2 * L / delta
    amps = -np.pi * Jprefac / (kx * radius) * np.exp(-kx * radius * np.abs(s) -
                                                     1j * s * sigmai)
    Jscheck = np.zeros(n, dtype=np.cdouble)
    Hs = np.zeros(n, dtype=np.cdouble)
    print('Solving surface solution...\n')
    pb = ProgressBar(n * n)
    for i in range(n):
        for j in range(n):
            z = np.abs(kx * radius * s[j])
            i0 = spherical_in(0, z, derivative=False)
            di0 = spherical_in(0, z, derivative=True)
            rat = di0 / i0
            Hs[i] = Hs[i] + (ds / 2.0 / np.pi) * np.exp(
                1j * s[j] * sigma[i]) * amps[j] * (-np.abs(s[j]) * rat / 3.0 /
                                                   phix_full[i])
            Jscheck[i] = Jscheck[i] + (ds / 2.0 / np.pi) * np.exp(
                1j * s[j] * sigma[i]) * amps[j]
            pb.update()
def fit_ch3cn_lines(spectra, save_prefix, velo=56*u.km/u.s, ampguess=-0.01):
    all_ch3cn = table.vstack([ch3cn, ch3cn_v])

    all_ch3cn.add_column(table.Column(name='FittedAmplitude', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedCenter', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedWidth', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedAmplitudeError', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedCenterError', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedWidthError', data=np.zeros(len(all_ch3cn))))

    vkms = velo.to(u.km/u.s).value
    pl.figure(1).clf()
    ax = pl.gca()

    pb = ProgressBar(len(spectra) * len(all_ch3cn))

    ii = 0
    for sp in spectra:
        sp.xarr.convert_to_unit(u.GHz)
        mid = np.median(sp.data)
        for line in all_ch3cn:
            frq = line['Freq-GHz']*u.GHz
            if sp.xarr.in_range(frq*(1-velo/constants.c)):
                offset = ii*0.000 + mid
                ii += 1
                sp.xarr.convert_to_unit(u.km/u.s, refX=frq)
                sp.plotter(axis=ax, clear=False, offset=offset)
                sp.specfit(fittype='vheightgaussian',
                           guesses=[mid, ampguess, vkms, 2],)
                line['FittedAmplitude'] = sp.specfit.parinfo['AMPLITUDE0'].value
                line['FittedCenter'] = sp.specfit.parinfo['SHIFT0'].value
                line['FittedWidth'] = sp.specfit.parinfo['WIDTH0'].value
                line['FittedAmplitudeError'] = sp.specfit.parinfo['AMPLITUDE0'].error
                line['FittedCenterError'] = sp.specfit.parinfo['SHIFT0'].error
                line['FittedWidthError'] = sp.specfit.parinfo['WIDTH0'].error
                sp.xarr.convert_to_unit(u.GHz)
            pb.update()

    pl.xlim(vkms-14, vkms+14)
    #pl.ylim(0, offset)
    pl.draw()
    pl.show()
    pl.savefig(save_prefix+"_spectra_overlay.png")

    pl.figure(2).clf()
    pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedWidth'], 'o')
    pl.xlabel("E$_U$ (K)")
    pl.ylabel(r"$\sigma$ (km/s)")
    pl.ylim(0,3.5)
    pl.savefig(save_prefix+"_sigma_vs_eupper.png")


    pl.figure(3).clf()
    pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedCenter'], 'o')
    pl.xlabel("E$_U$ (K)")
    pl.ylabel("$v_{lsr}$ (km/s)")
    pl.ylim(vkms-3, vkms+3)
    pl.savefig(save_prefix+"_vcen_vs_eupper.png")
Example No. 14
 def _map(self, func, items):
     # FIXME: ProgressBar.map(..., multiprocess=True) uses imap_unordered,
     # but we want the result to come back in order. This should be fixed,
     # or at least correctly documented, in Astropy.
     if self.multiprocess:
         _, result = zip(*sorted(ProgressBar.map(_mapfunc(func),
                                                 list(enumerate(items)),
                                                 multiprocess=True)))
         return list(result)
     else:
         return ProgressBar.map(func, items, multiprocess=False)
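A minimal sketch of the reordering trick used above, with a made-up worker function: tag each item with its index before the (possibly unordered) parallel map, then sort the results back by that index.

from astropy.utils.console import ProgressBar

def double(pair):
    # Made-up worker: carries the original index through the map call.
    index, value = pair
    return index, value * 2

# With multiprocess=True the results may come back in arbitrary order, so
# sort on the carried index before discarding it.
pairs = ProgressBar.map(double, list(enumerate([3, 1, 4, 1, 5])),
                        multiprocess=False)
_, results = zip(*sorted(pairs))
print(list(results))  # [6, 2, 8, 2, 10], in the original order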
def spectral_regrid(cube, outgrid):
    """
    Spectrally regrid a cube onto a new spectral output grid

    (this is apparently redundant with regrid_cube_hdu)
    """

    assert isinstance(cube, SpectralCube)

    inaxis = cube.spectral_axis.to(outgrid.unit)

    indiff = np.mean(np.diff(inaxis))
    outdiff = np.mean(np.diff(outgrid))
    if outdiff < 0:
        outgrid = outgrid[::-1]
        outdiff = np.mean(np.diff(outgrid))
    if indiff < 0:
        cubedata = cube.filled_data[::-1]
        inaxis = cube.spectral_axis.to(outgrid.unit)[::-1]
        indiff = np.mean(np.diff(inaxis))
    else:
        cubedata = cube.filled_data[:]
    if indiff < 0 or outdiff < 0:
        raise ValueError("impossible.")

    assert np.all(np.diff(outgrid) > 0)
    assert np.all(np.diff(inaxis) > 0)

    np.testing.assert_allclose(np.diff(outgrid),
                               outdiff,
                               err_msg="Output grid must be linear")

    if outdiff > 2 * indiff:
        raise ValueError("Input grid has too small a spacing.  It needs to be "
                         "smoothed prior to resampling.")

    newcube = np.empty([outgrid.size, cube.shape[1], cube.shape[2]])

    yy, xx = np.indices(cube.shape[1:])

    pb = ProgressBar(xx.size)
    for ix, iy in (zip(xx.flat, yy.flat)):
        newcube[:, iy, ix] = np.interp(outgrid.value, inaxis.value,
                                       cubedata[:, iy, ix].value)
        pb.update()

    newheader = cube.header
    newheader['CRPIX3'] = 1
    newheader['CRVAL3'] = outgrid[0].value
    newheader['CDELT3'] = outdiff.value
    newheader['CUNIT3'] = outgrid.unit.to_string('FITS')

    return fits.PrimaryHDU(data=newcube, header=newheader)
def spectral_regrid(cube, outgrid):
    """
    Spectrally regrid a cube onto a new spectral output grid

    (this is apparently redundant with regrid_cube_hdu)
    """

    assert isinstance(cube, SpectralCube)

    inaxis = cube.spectral_axis.to(outgrid.unit)

    indiff = np.mean(np.diff(inaxis))
    outdiff = np.mean(np.diff(outgrid))
    if outdiff < 0:
        outgrid=outgrid[::-1]
        outdiff = np.mean(np.diff(outgrid))
    if indiff < 0:
        cubedata = cube.filled_data[::-1]
        inaxis = cube.spectral_axis.to(outgrid.unit)[::-1]
        indiff = np.mean(np.diff(inaxis))
    else:
        cubedata = cube.filled_data[:]
    if indiff < 0 or outdiff < 0:
        raise ValueError("impossible.")

    assert np.all(np.diff(outgrid) > 0)
    assert np.all(np.diff(inaxis) > 0)

    np.testing.assert_allclose(np.diff(outgrid), outdiff,
                               err_msg="Output grid must be linear")

    if outdiff > 2 * indiff:
        raise ValueError("Input grid has too small a spacing.  It needs to be "
                         "smoothed prior to resampling.")

    newcube = np.empty([outgrid.size, cube.shape[1], cube.shape[2]])

    yy,xx = np.indices(cube.shape[1:])

    pb = ProgressBar(xx.size)
    for ix, iy in (zip(xx.flat, yy.flat)):
        newcube[:,iy,ix] = np.interp(outgrid.value, inaxis.value,
                                     cubedata[:,iy,ix].value)
        pb.update()

    newheader = cube.header
    newheader['CRPIX3'] = 1
    newheader['CRVAL3'] = outgrid[0].value
    newheader['CDELT3'] = outdiff.value
    newheader['CUNIT3'] = outgrid.unit.to_string('FITS')

    return fits.PrimaryHDU(data=newcube, header=newheader)
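A hypothetical usage sketch for the function above: the input file name and channel count are made up, and the output spacing must stay within twice the cube's native channel width for the coarseness check to pass.

import numpy as np
from spectral_cube import SpectralCube

cube = SpectralCube.read('input_cube.fits')  # hypothetical input file
# Linear output grid spanning the cube's own spectral range.
outgrid = np.linspace(cube.spectral_axis[0].value,
                      cube.spectral_axis[-1].value,
                      200) * cube.spectral_axis.unit
regridded_hdu = spectral_regrid(cube, outgrid)
regridded_hdu.writeto('regridded_cube.fits', overwrite=True)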
Example No. 17
def mass_photometry(fname, outfile):
    filenames = glob('/lustre/aoc/students/bmcclell/w51/' + fname)

    objs = []
    print('Loading files')
    pb = ProgressBar(len(filenames))
    for f in filenames:
        rs = dendrocat.RadioSource(fits.open(f))
        objs.append(rs)
        pb.update()

    #n = np.shape(objs[0].data)[0]
    #center = regions.PixCoord(n/2, n/2)
    #reg = regions.CirclePixelRegion(center, 3200)
    #mask = reg.to_mask()
    #img = mask.to_image((n, n)).astype(bool)
    #objs[0].data = np.where(img==True, objs[0].data, np.nan)

    # Debugging
    #plt.figure()
    #plt.imshow(objs[0].data)
    #plt.show()
    #objs[0].threshold = 4.5
    #objs[0].min_value = 1.1e-4
    #objs[0].min_delta = 1.2*objs[0].min_value
    #objs[0].to_catalog()
    #objs[0].autoreject()
    #objs[0].reject([44001, 44032])
    #dendrocat.utils.save_regions(objs[0].catalog, '/users/bmcclell/nrao/reg/test_mass_photometry.reg')
    #print('Autorejection complete')

    t = Table.read('/users/bmcclell/nrao/cat/w51IRS2_photometered.dat',
                   format='ascii')
    #for col in t.colnames:
    #    if 'GHz' in col:
    #        t.remove_column(col)

    mc = dendrocat.MasterCatalog(*objs, catalog=t)
    print('\nMaster Catalog made')
    start = time.time()
    mc.photometer(dendrocat.ellipse)
    stop = time.time()
    print('Ellipse apertures photometered. Time: {} s'.format(stop - start))
    start = time.time()
    mc.photometer(dendrocat.annulus)
    stop = time.time()
    print('Annulus apertures photometered. Time: {} s'.format(stop - start))
    start = time.time()
    mc.catalog.write(outfile, format='ascii', overwrite=True)
    stop = time.time()
    print('Catalog written. Time: {} s'.format(stop - start))
Example No. 18
def download_file(url, outdir=rawpath):
    r = requests.get(url, verify=False, stream=True)
    _, cdisp = cgi.parse_header(r.headers['content-disposition'])
    outfilename = cdisp['filename']
    fullname = os.path.join(outdir, outfilename)

    pb = ProgressBar(int(r.headers['content-length']))

    with open(fullname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
            f.flush()
            pb.update(pb._current_value + 1024)

    return fullname
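A sketch of the same loop written without touching the private pb._current_value attribute: keep a local byte counter and pass the running total to update(). The helper name is hypothetical and the content-disposition handling is dropped for brevity.

import os
import requests
from astropy.utils.console import ProgressBar

def download_file_tracked(url, outdir='.'):
    r = requests.get(url, stream=True)
    total = int(r.headers.get('content-length', 0)) or 1
    fullname = os.path.join(outdir, url.rsplit('/', 1)[-1])
    bytes_read = 0
    pb = ProgressBar(total)
    with open(fullname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
            bytes_read += len(chunk)
            pb.update(min(bytes_read, total))
    return fullname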
Example No. 19
def download_file(url, outdir=rawpath):
    r = requests.get(url, verify=False, stream=True)
    _, cdisp = cgi.parse_header(r.headers['content-disposition'])
    outfilename = cdisp['filename']
    fullname = os.path.join(outdir, outfilename)

    pb = ProgressBar(int(r.headers['content-length']))

    with open(fullname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
            f.flush()
            pb.update(pb._current_value + 1024)

    return fullname
Example No. 20
def get_progressbar(gen, notebook=False):
    """
    """
    from astropy.utils.console import ProgressBar

    if not notebook:
        gen = ProgressBar(gen)
    else:
        try:
            gen = ProgressBar(gen, ipython_widget=True)
        except ImportError as e:
            warnings.warn('ProgressBar in notebook not working. Is ipywidgets installed?')
            raise e

    return gen
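Hypothetical usage of the helper above: wrapping a sized iterable returns an object that can be looped over directly, advancing the bar as items are consumed.

for item in get_progressbar(range(500), notebook=False):
    pass  # replace with per-item work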
def fit_all_tex(xaxis, cube, cubefrequencies, indices, degeneracies,
                ecube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy,xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count=0

    for ii,jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:,ii,jj])):
            tmap[ii,jj] = np.nan
        else:
            if replace_bad:
                uplims = nupper_of_kkms(replace_bad, cubefrequencies,
                                        einsteinAij[indices], degeneracies,).value
            else:
                uplims = None

            nuppers = nupper_of_kkms(cube[:,ii,jj], cubefrequencies,
                                     einsteinAij[indices], degeneracies,
                                    )
            if ecube is not None:
                nupper_error = nupper_of_kkms(ecube[:,ii,jj], cubefrequencies,
                                              einsteinAij[indices], degeneracies,).value
                uplims = 3 * nupper_error
                if replace_bad:
                    raise ValueError("replace_bad is ignored now...")
            else:
                nupper_error = None

            fit_result = fit_tex(xaxis, nuppers.value,
                                 errors=nupper_error,
                                 uplims=uplims)
            tmap[ii,jj] = fit_result[1].value
            Nmap[ii,jj] = fit_result[0].value
        pb.update(count)
        count+=1

    return tmap,Nmap
Example No. 22
def extract_poly_slice(cube, polygons):

    nx = len(polygons)
    nz = cube.shape[0]

    total_slice = np.zeros((nz, nx))
    total_area = np.zeros((nz, nx))

    p = ProgressBar(len(polygons))

    for i, polygon in enumerate(polygons):

        p.update()

        # Find bounding box
        bbxmin = int(round(np.min(polygon.x)) - 1)
        bbxmax = int(round(np.max(polygon.x)) + 2)
        bbymin = int(round(np.min(polygon.y)) - 1)
        bbymax = int(round(np.max(polygon.y)) + 2)

        # Clip to cube box
        bbxmin = max(bbxmin, 0)
        bbxmax = min(bbxmax, cube.shape[2])
        bbymin = max(bbymin, 0)
        bbymax = min(bbymax, cube.shape[1])

        # Loop through pixels that might overlap
        for xmin in np.arange(bbxmin, bbxmax):
            for ymin in np.arange(bbymin, bbymax):

                area = square_polygon_overlap_area(xmin - 0.5, xmin + 0.5,
                                                   ymin - 0.5, ymin + 0.5,
                                                   polygon.x, polygon.y)

                if area > 0:
                    dataslice = cube[:, ymin, xmin]
                    good_values = np.isfinite(dataslice)
                    if np.any(good_values):
                        total_slice[good_values,
                                    i] += dataslice[good_values] * area
                        total_area[good_values, i] += area

    total_slice[total_area == 0.] = np.nan
    total_slice[total_area > 0.] /= total_area[total_area > 0.]

    print("")

    return total_slice
def fit_all_tex(xaxis, cube, cubefrequencies, indices, degeneracies,
                ecube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy,xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count=0

    for ii,jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:,ii,jj])):
            tmap[ii,jj] = np.nan
        else:
            if replace_bad:
                uplims = nupper_of_kkms(replace_bad, cubefrequencies,
                                        einsteinAij[indices], degeneracies,).value
            else:
                uplims = None

            nuppers = nupper_of_kkms(cube[:,ii,jj], cubefrequencies,
                                     einsteinAij[indices], degeneracies,
                                    )
            if ecube is not None:
                nupper_error = nupper_of_kkms(ecube[:,ii,jj], cubefrequencies,
                                              einsteinAij[indices], degeneracies,).value
                uplims = 3 * nupper_error
                if replace_bad:
                    raise ValueError("replace_bad is ignored now...")
            else:
                nupper_error = None

            fit_result = fit_tex(xaxis, nuppers.value,
                                 errors=nupper_error,
                                 uplims=uplims)
            tmap[ii,jj] = fit_result[1].value
            Nmap[ii,jj] = fit_result[0].value
        pb.update(count)
        count+=1

    return tmap,Nmap
Example No. 24
def _convolve_model_dir_2(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_cube = SEDCube.read(os.path.join(model_dir, 'flux.fits'), order='nu')

    par_table = load_parameter_table(model_dir)

    if not np.all(par_table['MODEL_NAME'] == sed_cube.names):
        raise ValueError(
            "Model names in SED cube and parameter file do not match")

    log.info("{0} SEDs found in {1}".format(sed_cube.n_models, model_dir))

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(model_names=sed_cube.names,
                        apertures=sed_cube.apertures,
                        initialize_arrays=True) for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = [f.rebin(sed_cube.nu) for f in filters]

    # We do the unit conversion - if needed - at the last minute
    val_factor = sed_cube.val.unit.to(u.mJy)
    unc_factor = sed_cube.unc.unit.to(u.mJy)

    # Loop over apertures
    for i_ap in ProgressBar(range(sed_cube.n_ap)):

        sed_val = sed_cube.val[i_ap].transpose()
        sed_unc = sed_cube.unc[i_ap].transpose()

        for i, f in enumerate(binned_filters):

            response = f.response.astype(sed_val.dtype)

            fluxes[i].flux[:, i_ap] = np.sum(sed_val * response,
                                             axis=1) * val_factor
            fluxes[i].error[:, i_ap] = np.sqrt(
                np.sum((sed_unc * response)**2, axis=1)) * unc_factor

    for i, f in enumerate(binned_filters):

        fluxes[i].central_wavelength = f.central_wavelength
        fluxes[i].apertures = sed_cube.apertures
        fluxes[i].model_names = sed_cube.names

        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
Example No. 25
 def calculate_emissivity(self, savefile, **kwargs):
     """
     Calculate and store emissivity for every ion in the model
     """
     notebook = kwargs.get('notebook', True)
     self.emissivity_savefile = savefile
     with h5py.File(savefile, 'w') as hf:
         with ProgressBar(len(self._ion_list),
                          ipython_widget=notebook) as progress:
             for ion in self:
                 pop = ion.level_populations(self.density)
                 # NOTE: populations not available for every ion
                 if pop is None:
                     warnings.warn(
                         f'Cannot compute level populations for {ion.ion_name}'
                     )
                     continue
                 upper_level = ion.transitions.upper_level[~ion.transitions.
                                                           is_twophoton]
                 wavelength = ion.transitions.wavelength[~ion.transitions.
                                                         is_twophoton]
                 A = ion.transitions.A[~ion.transitions.is_twophoton]
                 i_upper = fiasco.util.vectorize_where(
                     ion._elvlc['level'], upper_level)
                 emissivity = pop[:, :, i_upper] * A * u.photon
                 emissivity = emissivity[:, :, np.argsort(wavelength)]
                 wavelength = np.sort(wavelength)
                 grp = hf.create_group(ion.ion_name)
                 ds = grp.create_dataset('wavelength',
                                         data=wavelength.value)
                 ds.attrs['units'] = wavelength.unit.to_string()
                 ds = grp.create_dataset('emissivity', data=emissivity.data)
                 ds.attrs['units'] = emissivity.unit.to_string()
                 progress.update()
Example No. 26
    def run(self):

        SERVER = os.environ["PY4SCI_SERVER"]
        USER = os.environ["PY4SCI_USER"]

        import getpass
        from ftplib import FTP
        from astropy.utils.console import ProgressBar

        ftp = FTP(SERVER)
        ftp.login(user=USER, passwd=getpass.getpass())

        ftp.cwd('/public_html/PY4SCI_WS_2013_14')

        for slides in ProgressBar.iterate(
                glob.glob('lectures/*.html') + ['lectures/custom.css'] +
                glob.glob('problems/data/*') + glob.glob('problems/*.html') +
                glob.glob('practice/data/*') + glob.glob('practice/*.html')):
            try:
                remote_size = ftp.size(slides)
            except:
                remote_size = None
            local_size = os.path.getsize(slides)
            if local_size != remote_size:
                ftp.storbinary('STOR ' + slides, open(slides, 'rb'))

        ftp.storbinary('STOR index.html', open('index.html', 'rb'))

        ftp.quit()
Example No. 27
    def run(self):

        SERVER = os.environ["PY4SCI_SERVER"]
        USER = os.environ["PY4SCI_USER"]

        import getpass
        from ftplib import FTP
        from astropy.utils.console import ProgressBar

        ftp = FTP(SERVER)
        ftp.login(user=USER, passwd=getpass.getpass())

        ftp.cwd('/public_html/PY4SCI_WS_2013_14')

        for slides in ProgressBar.iterate(glob.glob('lectures/data/*')
                                          + glob.glob('lectures/*.html')
                                          + ['lectures/custom.css']
                                          + glob.glob('problems/data/*')
                                          + glob.glob('problems/*.html')
                                          + glob.glob('practice/data/*')
                                          + glob.glob('practice/*.html')):
            try:
                remote_size = ftp.size(slides)
            except:
                remote_size = None
            local_size = os.path.getsize(slides)
            if local_size != remote_size:
                ftp.storbinary('STOR ' + slides, open(slides, 'rb'))

        ftp.storbinary('STOR index.html', open('index.html', 'rb'))

        ftp.quit()
Example No. 28
    def run(self):

        SERVER = os.environ["PY4SCI_SERVER"]
        USER = os.environ["PY4SCI_USER"]

        import getpass
        from ftplib import FTP
        from astropy.utils.console import ProgressBar

        ftp = FTP(SERVER)
        ftp.login(user=USER, passwd=getpass.getpass())

        ftp.cwd('/public_html/astropy4herts')

        for slides in ProgressBar.iterate(glob.glob('notebooks/data/*')
                                          + glob.glob('notebooks/*.html')):
            try:
                remote_size = ftp.size(slides)
            except:
                remote_size = None
            local_size = os.path.getsize(slides)
            if local_size != remote_size:
                ftp.storbinary('STOR ' + slides, open(slides, 'rb'))

        ftp.storbinary('STOR notebooks.html', open('notebooks.html', 'rb'))

        ftp.quit()
Example No. 29
    def run(self, maxiter = 4, verbose = False):
        """
        Full artillery :-)
        - Find saturated stars
        - Run maxiter L.A.Cosmic iterations (stops if no more cosmics are found)

        Stops if no cosmics are found or if maxiter is reached.
        """
                        
        if self.satlevel > 0 and self.satstars is None:
            self.findsatstars(verbose=verbose)

        if verbose:
            print("Starting %i L.A.Cosmic iterations ..." % maxiter)
        for i in ProgressBar(range(1, maxiter+1)):
            if verbose:
                print("Iteration %i" % i)
                        
            iterres = self.lacosmiciteration(verbose=verbose)
            if verbose:
                print("%i cosmic pixels (%i new)" % (iterres["niter"], iterres["nnew"]))
                        
            #self.clean(mask=iterres["mask"]) # No, we want clean to operate on really clean pixels only!
            # Thus we always apply it on the full mask, as lacosmic does:
            self.clean(verbose=verbose)
            # But note that for huge cosmics, one might want to revise this.
            # That's why I added a feature to skip saturated stars!
            
            if iterres["niter"] == 0:
                break
Example No. 30
def _slow_reader(index_map, data):
    """
    Loop over each valid pixel in the index_map and add its coordinates and
    data to the flux_by_structure and indices_by_structure dicts

    This is slower than _fast_reader but faster than that implementation would
    be without find_objects.  The bottleneck is doing `index_map == idx` N
    times.
    """
    flux_by_structure, indices_by_structure = {}, {}
    # Do a fast iteration through d.data, adding the indices and data values
    # to the two dictionaries declared above:
    indices = np.array(np.where(index_map > -1)).transpose()

    log.debug('Creating index maps for {0} coordinates...'.format(
        len(indices)))
    for coord in ProgressBar(indices):
        coord = tuple(coord)
        idx = index_map[coord]
        if idx in flux_by_structure:
            flux_by_structure[idx].append(data[coord])
            indices_by_structure[idx].append(coord)
        else:
            flux_by_structure[idx] = [data[coord]]
            indices_by_structure[idx] = [coord]

    return flux_by_structure, indices_by_structure
Example No. 31
def write_data_to_csv(data, filename, mode="new"):
    """
    Write a dataset to a CSV-formatted file with a data header.

    Parameters
    ----------
    data : `dict`
        The data to be written as a dictionary of NumPy arrays
    filename : `str`
        The filename to write the data to.
    mode : {'new', 'append'}, optional
        Write a "new" file or "append" to an existing file?
    """
    if mode == "new":
        fmode = "w"
    elif mode == "append":
        fmode = "a+"
    if sys.version_info >= (3, 0, 0):
        f = open(filename, fmode, newline='')
    else:
        f = open(filename, fmode + 'b')
    w = csv.DictWriter(f, list(data.keys()))
    if mode == "new":
        w.writeheader()
    num_points = len(list(data.values())[0])
    for i in ProgressBar(list(range(num_points))):
        row = dict([(k, v[i]) for k, v in list(data.items())])
        w.writerow(row)
    f.close()
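A hypothetical round trip with the function above: write one batch of columns with a header, then append a second batch with the same keys (the header is only emitted for mode='new').

import numpy as np

first = {'time': np.arange(5), 'flux': np.random.random(5)}
second = {'time': np.arange(5, 10), 'flux': np.random.random(5)}
write_data_to_csv(first, 'lightcurve.csv', mode='new')
write_data_to_csv(second, 'lightcurve.csv', mode='append')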
Example No. 32
    def init_progressbar(self):
        """
        Initialise the progress bar.

        This only happens if run command is called with ``progressbar=True``.
        """
        self.progressbar = ProgressBar(self.command_count())
Example No. 33
    def get_molecules(self, cache=True):
        """
        Scrape the list of valid molecules
        """
        if cache and hasattr(self, '_molecule_dict'):
            return self._molecule_dict
        elif cache and os.path.isfile(self.moldict_path):
            with open(self.moldict_path, 'r') as f:
                md = json.load(f)
            return md

        main_url = 'http://home.strw.leidenuniv.nl/~moldata/'
        response = self._request('GET', main_url, cache=cache)
        response.raise_for_status()

        soup = BeautifulSoup(response.content)

        links = soup.find_all('a', href=True)
        datfile_urls = [
            url for link in ProgressBar(links)
            for url in self._find_datfiles(link['href'], base_url=main_url)
        ]

        molecule_re = re.compile(
            r'http://[a-zA-Z0-9.]*/~moldata/datafiles/([A-Z0-9a-z_+@-]*).dat')
        molecule_dict = {
            molecule_re.search(url).groups()[0]: url
            for url in datfile_urls
        }

        with open(self.moldict_path, 'w') as f:
            s = json.dumps(molecule_dict)
            f.write(s)

        return molecule_dict
Example No. 34
def kpub_import(args=None):
    """Import publications from a csv file.

    The csv file must contain entries of the form "bibcode,mission,science".
    The actual metadata of each publication will be grabbed using the ADS API,
    hence this routine may take 10-20 minutes to complete.
    """
    parser = argparse.ArgumentParser(
        description="Batch-import papers into the Kepler/K2 publication list "
        "from a CSV file. The CSV file must have three columns "
        "(bibcode,mission,science) separated by commas. "
        "For example: '2004ApJ...610.1199G,kepler,astrophysics'.")
    parser.add_argument('-f',
                        metavar='dbfile',
                        type=str,
                        default=DEFAULT_DB,
                        help="Location of the Kepler/K2 publication list db. "
                        "Defaults to ~/.kpub.db.")
    parser.add_argument('csvfile', help="Filename of the csv file to ingest.")
    args = parser.parse_args(args)

    db = PublicationDB(args.f)
    for line in ProgressBar(open(args.csvfile, 'r').readlines()):
        col = line.split(',')  # Naive csv parsing
        db.add_by_bibcode(col[0], mission=col[1], science=col[2].strip())
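A hypothetical invocation of the importer above (it assumes the surrounding kpub package and a reachable ADS API; the file paths are made up). The CSV row matches the format quoted in the docstring.

with open('publications.csv', 'w') as f:
    f.write('2004ApJ...610.1199G,kepler,astrophysics\n')

kpub_import(['-f', '/tmp/kpub.db', 'publications.csv'])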
Example No. 35
    def pymedian(self, header=None):
        try:
            import fitsio
        except ImportError:
            self._logger.error('fitsio is required !')
            raise

        data = [fitsio.FITS(f)[1] for f in self.files]
        # shape = data[0].get_dims()
        cube = np.empty(self.shape, dtype=np.float64)
        expmap = np.empty(self.shape, dtype=np.int32)
        valid_pix = np.zeros(self.nfiles, dtype=np.int32)
        nl = self.shape[0]

        self._logger.info('Looping on the %d planes of the cube', nl)
        for l in ProgressBar(range(nl)):
            arr = np.array([c[l, :, :][0] for c in data])
            cube[l, :, :] = np.nanmedian(arr, axis=0)
            expmap[l, :, :] = (~np.isnan(arr)).astype(int).sum(axis=0)
            valid_pix += (~np.isnan(arr)).astype(int).sum(axis=1).sum(axis=1)

        # no valid pixels
        npixels = np.prod(self.shape)
        no_valid_pix = npixels - valid_pix
        stat_pix = Table([self.files, no_valid_pix],
                         names=['FILENAME', 'NPIX_NAN'])

        kwargs = dict(expnb=_compute_expnb(expmap),
                      header=header,
                      method='obj.cubelist.pymedian')
        expmap = self.save_combined_cube(expmap,
                                         unit=u.dimensionless_unscaled,
                                         **kwargs)
        cube = self.save_combined_cube(cube, **kwargs)
        return cube, expmap, stat_pix
Example No. 36
def _get_barycorr_bvcs_withvels(coos, loc, injupyter=False):
    """
    Gets the barycentric correction of the test data from the
    http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
    Requires the https://github.com/tronsgaard/barycorr python interface to that
    site.

    Provided to reproduce the test data above, but not required to actually run
    the tests.
    """
    import barycorr
    from astropy.utils.console import ProgressBar

    bvcs = []
    for coo in ProgressBar(coos, ipython_widget=injupyter):
        res = barycorr.bvc(test_input_time.utc.jd,
                           coo.ra.deg,
                           coo.dec.deg,
                           lat=loc.geodetic[1].deg,
                           lon=loc.geodetic[0].deg,
                           pmra=coo.pm_ra_cosdec.to_value(u.mas / u.yr),
                           pmdec=coo.pm_dec.to_value(u.mas / u.yr),
                           parallax=coo.distance.to_value(
                               u.mas, equivalencies=u.parallax()),
                           rv=coo.radial_velocity.to_value(u.m / u.s),
                           epoch=test_input_time.utc.jd,
                           elevation=loc.geodetic[2].to(u.m).value)
        bvcs.append(res)
    return bvcs * u.m / u.s
Example No. 37
    def run(self, obs_list, selection=None):
        """
        Run MapMaker for a list of observations to create
        stacked counts, exposure and background maps

        Parameters
        ----------
        obs_list : `~gammapy.data.ObservationList`
            List of observations
        selection : list
            List of str, selecting which maps to make.
            Available: 'counts', 'exposure', 'background'
            By default, all maps are made.

        Returns
        -------
        maps : dict
            Stacked counts, background, and exposure maps.
        """
        selection = _check_selection(selection)

        # Initialise zero-filled maps
        for name in selection:
            unit = 'm2 s' if name == 'exposure' else ''
            self.maps[name] = Map.from_geom(self.geom, unit=unit)

        for obs in ProgressBar(obs_list):
            try:
                self._process_obs(obs, selection)
            except NoOverlapError:
                log.info(
                    'Skipping observation {}, not contained in map.'.format(
                        obs.obs_id))
                continue

        return self.maps
Example No. 38
 def _find_and_fit_peaks(self):
     """Find and fit peaks in each PSD. This can be done in parallel if requested.
     
     Configuration Items:
     
     - ``FMTS.fitting`` The dictionary of parameters used for :func:`find_and_fit_peaks`.
     - ``FMTS.multiprocess`` `(bool)` whether to parallelize.
     
     Peaks are stored in an object array, and the number of peaks at each mode is stored in a separate, parallel array.
     
     """
     from astropy.utils.console import ProgressBar
     
     kwargs = dict(self.config["FMTS.fitting"])
     psd = self.psd
     template = self.template_ft
     omega = self.omega
     
     args = [ ((k,l),psd[:,k,l],template,omega,kwargs) for k,l in itertools.product(range(self.psd.shape[1]),range(self.psd.shape[2])) ]
     peaks = ProgressBar.map(pool_find_and_fit_peaks_in_modes,args,multiprocess=self.config["FMTS.multiprocess"])
     for peak_mode,ident in peaks:
         k,l = ident
         self.peaks[k,l] = peak_mode
         self.npeaks[k,l] = len(peak_mode)
     self.log.info("Found %d peaks",np.sum(self.npeaks))
Example No. 39
    def generate_calibrated_images(self,
                                   output_dir,
                                   output_suffix='corrected'):
        """
        Dark subtract and flat field all data images, save them to `output_dir`.

        This is useful for producing images that can be passed to astrometry.net
        """
        n_images = len(self.data_image_paths)
        print("Generating calibrated images...")
        with ProgressBar(n_images) as bar:
            for i, image_path in enumerate(self.data_image_paths):
                bar.update(i)
                image_header = fits.getheader(image_path)
                image_data = ((fits.getdata(image_path) - self.master_dark) /
                              self.master_flat)

                # TODO: Should this line be in our out?
                #image_data[image_data < 0] = np.median(image_data)

                file_name = image_path.split(os.sep)[-1].split('.fits')[0]
                fits.writeto(os.path.join(output_dir,
                                          file_name + output_suffix + '.fits'),
                             image_data,
                             clobber=True,
                             header=image_header)
Example No. 40
    def get_lightcurves(self, progress_bar=False, notebook=False):
        """
        """
        if not self.is_set():
            raise AttributeError("plan, generator or instrument not set")

        lcs = LightcurveCollection(empty=True)
        gen = zip(self.generator.get_lightcurve_full_param(),
                  self._get_observations_())
        if progress_bar:
            self._assign_obs_fields_(progress_bar=True, notebook=notebook)
            self._assign_non_field_obs_(progress_bar=True, notebook=notebook)

            print('Generating lightcurves')
            with ProgressBar(self.generator.ntransient,
                             ipython_widget=notebook) as bar:
                for k, (p, obs) in enumerate(gen):
                    if obs is not None:
                        lcs.add(self._get_lightcurve_(p, obs, k))
                    bar.update()
        else:
            for k, (p, obs) in enumerate(gen):
                if obs is not None:
                    lcs.add(self._get_lightcurve_(p, obs, k))

        return lcs
Example No. 41
def get_all_fluxes(weblog_list, mapping=None):

    data_dict = {}
    for weblog in ProgressBar(weblog_list):
        try:
            data = get_calibrator_fluxes(weblog)
            name, _ = get_human_readable_name(weblog, mapping=mapping)
            data_dict[name] = data
        except ValueError:
            continue

    flux_data = {
        name: {
            ii: {
                'date': key[4],
                'ms': key[1],
                'calibrator': key[0],
                'spw': key[2],
                'freq': key[3],
                'measurement': value
            }
            for ii, (key, value) in enumerate(data_.items())
        }
        for name, data_ in data_dict.items()
    }

    return flux_data
Example No. 42
    def run(self, obs_list):
        """
        Run MapMaker for a list of observations to create
        stacked counts, exposure and background maps

        Parameters
        ----------
        obs_list : `~gammapy.data.ObservationList`
            List of observations

        Returns
        -------
        maps : dict
            Stacked counts, background, and exposure maps.
        """

        from astropy.utils.console import ProgressBar

        for obs in ProgressBar(obs_list):
            self.process_obs(obs)
        self.maps = {
            'counts_map': self.counts_map,
            'background_map': self.background_map,
            'exposure_map': self.exposure_map
                }
        return self.maps
Example No. 43
def extract_poly_slice(cube, polygons):

    nx = len(polygons)
    nz = cube.shape[0]

    total_slice = np.zeros((nz, nx))
    total_area = np.zeros((nz, nx))

    p = ProgressBar(len(polygons))

    for i, polygon in enumerate(polygons):

        p.update()

        # Find bounding box
        bbxmin = int(round(np.min(polygon.x)) - 1)
        bbxmax = int(round(np.max(polygon.x)) + 2)
        bbymin = int(round(np.min(polygon.y)) - 1)
        bbymax = int(round(np.max(polygon.y)) + 2)

        # Clip to cube box
        bbxmin = max(bbxmin, 0)
        bbxmax = min(bbxmax, cube.shape[2])
        bbymin = max(bbymin, 0)
        bbymax = min(bbymax, cube.shape[1])

        # Loop through pixels that might overlap
        for xmin in np.arange(bbxmin, bbxmax):
            for ymin in np.arange(bbymin, bbymax):

                area = square_polygon_overlap_area(xmin - 0.5, xmin + 0.5, ymin - 0.5, ymin + 0.5, polygon.x, polygon.y)

                if area > 0:
                    total_slice[:, i] += cube[:, ymin, xmin] * area
                    total_area[:, i] += area

    total_slice[total_area == 0.0] = np.nan
    total_slice[total_area > 0.0] /= total_area[total_area > 0.0]

    print("")

    return total_slice
Example No. 44
    def _HEADER_data_size(self, files):
        """
        Given a list of file URLs, return the data size.  This is useful for
        assessing how much data you might be downloading!
        (This is discouraged by the ALMA archive, as it puts unnecessary load
        on their system)
        """
        totalsize = 0 * u.B
        data_sizes = {}
        pb = ProgressBar(len(files))
        for ii, fileLink in enumerate(files):
            response = self._request('HEAD', fileLink, stream=False,
                                     cache=False, timeout=self.TIMEOUT)
            filesize = (int(response.headers['content-length']) * u.B).to(u.GB)
            totalsize += filesize
            data_sizes[fileLink] = filesize
            log.debug("File {0}: size {1}".format(fileLink, filesize))
            pb.update(ii + 1)
            response.raise_for_status()

        return data_sizes, totalsize.to(u.GB)
Example No. 45
def _fast_reader(index_map, data):
    """
    Use scipy.ndimage.find_objects to quickly identify subsets of the data
    to increase speed of dendrogram loading
    """

    flux_by_structure, indices_by_structure = {}, {}

    from scipy import ndimage

    idxs = np.unique(index_map[index_map > -1])

    # ndimage ignores 0 and -1, but we want index 0
    object_slices = ndimage.find_objects(index_map + 1)

    # find_objects returns a tuple that includes many None values that we
    # need to get rid of.
    object_slices = [x for x in object_slices if x is not None]

    index_cube = np.indices(index_map.shape)

    # Need to have same length, otherwise assumptions above are wrong
    assert len(idxs) == len(object_slices)
    log.debug("Creating index maps for {0} indices...".format(len(idxs)))

    p = ProgressBar(len(object_slices))
    for idx, sl in zip(idxs, object_slices):
        match = index_map[sl] == idx
        sl2 = (slice(None),) + sl
        match_inds = index_cube[sl2][:, match]
        coords = list(zip(*match_inds))
        dd = data[sl][match].tolist()
        flux_by_structure[idx] = dd
        indices_by_structure[idx] = coords
        p.update()

    return flux_by_structure, indices_by_structure
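A self-contained toy demonstration of the find_objects offset trick used above: shifting the index map by one turns the -1 background into 0 (which find_objects ignores) while keeping structure 0 visible.

import numpy as np
from scipy import ndimage

index_map = np.array([[-1,  0,  0],
                      [ 1,  1, -1]])
# After the +1 shift, labels 1 and 2 correspond to structures 0 and 1.
object_slices = [sl for sl in ndimage.find_objects(index_map + 1)
                 if sl is not None]
print(object_slices)  # one bounding-box slice per structure, in label order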
Example No. 46
def fit_all_tex(xaxis, cube, cubefrequencies, degeneracies,
                einsteinAij,
                errorcube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy,xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count=0

    for ii,jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:,ii,jj])):
            tmap[ii,jj] = np.nan
        else:
            if replace_bad:
                neg = cube[:,ii,jj] <= 0
                cube[neg,ii,jj] = replace_bad
            nuppers = nupper_of_kkms(cube[:,ii,jj], cubefrequencies,
                                     einsteinAij, degeneracies)
            if errorcube is not None:
                enuppers = nupper_of_kkms(errorcube[:,ii,jj], cubefrequencies,
                                          einsteinAij, degeneracies)
                errors = enuppers.value
            else:
                errors = None
            fit_result = fit_tex(xaxis, nuppers.value, errors=errors)
            tmap[ii,jj] = fit_result[1].value
            Nmap[ii,jj] = fit_result[0].value
        pb.update(count)
        count+=1

    return tmap,Nmap
Example No. 47
def update_mass_table(drpall, mass_table_old=None, limit=None, mlband='i'):
    '''
    '''
    
    # what galaxies are available to aggregate?
    res_fnames = glob(os.path.join(basedir, 'results/*-*/*-*_res.fits'))[:limit]

    # filter out those that have already been aggregated
    if mass_table_old is None:
        already_aggregated = [False for _ in range(len(res_fnames))]
    else:
        already_aggregated = [os.path.split(fn)[1].split('_')[0] in mass_table_old['plateifu']
                              for fn in res_fnames]
    res_fnames = [fn for done, fn in zip(already_aggregated, res_fnames) if not done]

    # aggregate individual galaxies, and stack them 
    mass_tables_new = list(ProgressBar.map(
        partial(mass_agg_onegal, mlband=mlband), res_fnames, multiprocess=False, step=5))
    mass_table_new = t.vstack(mass_tables_new)

    # if there was an old mass table, stack it with the new one
    if mass_table_old is None:
        mass_table = mass_table_new
    else:
        mass_table = t.vstack([mass_table_old, mass_table_new], join_type='inner')

    cmlr = totalmass.cmlr_kwargs
    missing_flux =  (mass_table['nsa_absmag'].to(m.Mgy) - \
                     mass_table['ifu_absmag'].to(m.Mgy)).clip(
                        a_min=0.*m.Mgy, a_max=np.inf*m.Mgy)
    mag_missing_flux = missing_flux.to(u.ABmag)
    cb1, cb2 = cmlr['cb1'], cmlr['cb2']
    color_missing_flux = mag_missing_flux[:, totalmass.StellarMass.bands_ixs[cb1]] - \
                         mag_missing_flux[:, totalmass.StellarMass.bands_ixs[cb2]]
    color_missing_flux[~np.isfinite(color_missing_flux)] = np.inf
    mass_table['outer_ml_cmlr'] = np.polyval(cmlr['cmlr_poly'], color_missing_flux.value) * \
                                  u.dex(m.m_to_l_unit)
    mass_table['outer_lum'] = mag_missing_flux.to(
        u.dex(m.bandpass_sol_l_unit),
        totalmass.bandpass_flux_to_solarunits(totalmass.StellarMass.absmag_sun))

    mass_table['outer_mass_ring'] = \
        (mass_table['outer_lum'][:, totalmass.StellarMass.bands_ixs['i']] + \
         mass_table['outer_ml_ring']).to(u.Msun)
    mass_table['outer_mass_cmlr'] = \
        (mass_table['outer_lum'][:, totalmass.StellarMass.bands_ixs['i']] + \
         mass_table['outer_ml_cmlr']).to(u.Msun)

    return mass_table
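
A hypothetical call pattern for the aggregation routine above. The DRPall filename is a placeholder, and it assumes the module-level ``basedir``, ``totalmass`` helpers, and ``mass_agg_onegal`` used inside the function are already configured.

from astropy import table as t

drpall = t.Table.read('drpall-v2_4_3.fits')   # placeholder DRPall file
mass_table = update_mass_table(drpall, mass_table_old=None, limit=10, mlband='i')
mass_table.write('masstable.fits', overwrite=True)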
Exemplo n.º 48
0


def convert_to_dmc(input_fn, output_fn):
    """Convert from csv to pipe-delimited.
    """
    syscall("""sed "s/,/|/g" {} > {}""".format(input_fn, output_fn))


if __name__ == "__main__":
    input_filenames = glob.glob(os.path.join(CROPPED_CATS_DIR, "*-cropped"))
    log.info("Converting cropped catalogues into csv format")
    ProgressBar.map(convert_to_csv,
                    input_filenames,
                    multiprocess=True, step=1)

    log.info("Removing objects appearing twice in the cropped tiles")
    ProgressBar.map(remove_duplicates_from_tiles,
                    input_filenames,
                    multiprocess=True, step=1)

    log.info("Removing objects already in EPIC with stilts "
             "using a {} arcsec matching radius".format(MATCHING_RADIUS))
    ProgressBar.map(remove_duplicates,
                    input_filenames,
                    multiprocess=True, step=1)

    merge_fn = os.path.join(FINAL_CAT_DIR, "merged.dmc.csv")
    log.info("Merging the tiles into {}".format(merge_fn))
Exemplo n.º 49
0
def make_validation_report(
    urls=None, destdir='astropy.io.votable.validator.results',
    multiprocess=True, stilts=None):
    """
    Validates a large collection of web-accessible VOTable files.

    Generates a report as a directory tree of HTML files.

    Parameters
    ----------
    urls : list of strings, optional
        If provided, is a list of HTTP urls to download VOTable files
        from.  If not provided, a built-in set of ~22,000 urls
        compiled by HEASARC will be used.

    destdir : path, optional
        The directory to write the report to.  By default, this is a
        directory called ``'results'`` in the current directory. If the
        directory does not exist, it will be created.

    multiprocess : bool, optional
        If `True` (default), perform validations in parallel using all
        of the cores on this machine.

    stilts : path, optional
        To perform validation with ``votlint`` from the Java-based
        `STILTS <http://www.star.bris.ac.uk/~mbt/stilts/>`_ VOTable
        parser, in addition to `astropy.io.votable`, set this to the
        path of the ``'stilts.jar'`` file.  ``java`` on the system shell
        path will be used to run it.

    Notes
    -----
    Downloads of each given URL will be performed only once and cached
    locally in *destdir*.  To refresh the cache, remove *destdir*
    first.
    """
    from astropy.utils.console import (color_print, ProgressBar, Spinner)

    if stilts is not None:
        if not os.path.exists(stilts):
            raise ValueError(
                '{0} does not exist.'.format(stilts))

    destdir = os.path.abspath(destdir)

    if urls is None:
        with Spinner('Loading URLs', 'green') as s:
            urls = get_urls(destdir, s)
    else:
        color_print('Marking URLs', 'green')
        for url in ProgressBar.iterate(urls):
            with result.Result(url, root=destdir) as r:
                r['expected'] = type

    args = [(url, destdir) for url in urls]

    color_print('Downloading VO files', 'green')
    ProgressBar.map(
        download, args, multiprocess=multiprocess)

    color_print('Validating VO files', 'green')
    ProgressBar.map(
        validate_vo, args, multiprocess=multiprocess)

    if stilts is not None:
        color_print('Validating with votlint', 'green')
        votlint_args = [(stilts, x, destdir) for x in urls]
        ProgressBar.map(
            votlint_validate, votlint_args, multiprocess=multiprocess)

    color_print('Generating HTML files', 'green')
    ProgressBar.map(
        write_html_result, args, multiprocess=multiprocess)

    with Spinner('Grouping results', 'green') as s:
        subsets = result.get_result_subsets(urls, destdir, s)

    color_print('Generating index', 'green')
    html.write_index(subsets, urls, destdir)

    color_print('Generating subindices', 'green')
    subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
    ProgressBar.map(
        write_subindex, subindex_args, multiprocess=multiprocess)
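
A minimal invocation sketch, assuming ``astropy.io.votable.validator`` exposes ``make_validation_report`` in this environment; the URL is a placeholder, and a short list keeps the run small.

from astropy.io.votable.validator import make_validation_report

urls = ['http://example.com/votable.xml']   # placeholder URL
make_validation_report(urls=urls, destdir='validation_results', multiprocess=False)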
Exemplo n.º 50
0
def measure_dendrogram_properties(dend=None, cube303=cube303,
                                  cube321=cube321, cube13co=cube13co,
                                  cube18co=cube18co, noise_cube=noise_cube,
                                  sncube=sncube,
                                  suffix="",
                                  last_index=None,
                                  plot_some=True,
                                  line='303',
                                  write=True):

    assert (cube321.shape == cube303.shape == noise_cube.shape ==
            cube13co.shape == cube18co.shape == sncube.shape)
    assert sncube.wcs is cube303.wcs is sncube.mask._wcs

    metadata = {}
    metadata['data_unit'] = u.K
    metadata['spatial_scale'] =  7.2 * u.arcsec
    metadata['beam_major'] =  30 * u.arcsec
    metadata['beam_minor'] =  30 * u.arcsec
    metadata['wavelength'] =  218.22219*u.GHz
    metadata['velocity_scale'] = u.km/u.s
    metadata['wcs'] = cube303.wcs

    keys = [
            'density_chi2',
            'expected_density',
            'dmin1sig_chi2',
            'dmax1sig_chi2',
            'column_chi2',
            'expected_column',
            'cmin1sig_chi2',
            'cmax1sig_chi2',
            'temperature_chi2',
            'expected_temperature',
            'tmin1sig_chi2',
            'tmax1sig_chi2',
            'eratio321303',
            'ratio321303',
            'logh2column',
            'elogh2column',
            'logabundance',
            'elogabundance',
           ]
    obs_keys = [
            'Stot303',
            'Smin303',
            'Smax303',
            'Stot321',
            'Smean303',
            'Smean321',
            'npix',
            'e303',
            'e321',
            'r321303',
            'er321303',
            '13cosum',
            'c18osum',
            '13comean',
            'c18omean',
            's_ntotal',
            'index',
            'is_leaf',
            'parent',
            'root',
            'lon',
            'lat',
            'vcen',
            'higaldusttem',
            'reff',
            'dustmass',
            'dustmindens',
            'bad',
            #'tkin_turb',
    ]
    columns = {k:[] for k in (keys+obs_keys)}

    log.debug("Initializing dendrogram temperature fitting loop")

    # FORCE wcs to match
    # (technically should reproject here)
    cube13co._wcs = cube18co._wcs = cube303.wcs
    cube13co.mask._wcs = cube18co.mask._wcs = cube303.wcs

    if line == '303':
        maincube = cube303
    elif line == '321':
        maincube = cube321
    else:
        raise ValueError("Unrecognized line: {0}".format(line))

    # Prepare an array to hold the fitted temperatures
    tcubedata = np.empty(maincube.shape, dtype='float32')
    tcubedata[:] = np.nan
    tcubeleafdata = np.empty(maincube.shape, dtype='float32')
    tcubeleafdata[:] = np.nan


    nbad = 0

    catalog = ppv_catalog(dend, metadata)
    pb = ProgressBar(len(catalog))
    for ii,row in enumerate(catalog):
        structure = dend[row['_idx']]
        assert structure.idx == row['_idx'] == ii
        dend_obj_mask = BooleanArrayMask(structure.get_mask(), wcs=cube303.wcs)
        dend_inds = structure.indices()

        view = (slice(dend_inds[0].min(), dend_inds[0].max()+1),
                slice(dend_inds[1].min(), dend_inds[1].max()+1),
                slice(dend_inds[2].min(), dend_inds[2].max()+1),)
        #view2 = cube303.subcube_slices_from_mask(dend_obj_mask)
        submask = dend_obj_mask[view]
        #assert np.count_nonzero(submask.include()) == np.count_nonzero(dend_obj_mask.include())

        sn = sncube[view].with_mask(submask)
        sntot = sn.sum().value
        #np.testing.assert_almost_equal(sntot, structure.values().sum(), decimal=0)

        c303 = cube303[view].with_mask(submask)
        c321 = cube321[view].with_mask(submask)
        co13sum = cube13co[view].with_mask(submask).sum().value
        co18sum = cube18co[view].with_mask(submask).sum().value
        if hasattr(co13sum,'__len__'):
            raise TypeError(".sum() applied to an array has yielded a non scalar.")

        npix = submask.include().sum()
        assert npix == structure.get_npix()
        Stot303 = c303.sum().value
        if np.isnan(Stot303):
            raise ValueError("NaN in cube.  This can't happen: the data from "
                             "which the dendrogram was derived can't have "
                             "NaN pixels.")
        Smax303 = c303.max().value
        Smin303 = c303.min().value

        Stot321 = c321.sum().value
        if npix == 0:
            raise ValueError("npix=0. This is impossible.")
        Smean303 = Stot303/npix
        if Stot303 <= 0 and line=='303':
            raise ValueError("The 303 flux is <=0.  This isn't possible because "
                             "the dendrogram was derived from the 303 data with a "
                             "non-zero threshold.")
        elif Stot303 <= 0 and line=='321':
            Stot303 = 0
            Smean303 = 0
        elif Stot321 <= 0 and line=='321':
            raise ValueError("The 321 flux is <=0.  This isn't possible because "
                             "the dendrogram was derived from the 321 data with a "
                             "non-zero threshold.")
        if np.isnan(Stot321):
            raise ValueError("NaN in 321 line")
        Smean321 = Stot321/npix

        #error = (noise_cube[view][submask.include()]).sum() / submask.include().sum()**0.5
        var = ((noise_cube[dend_obj_mask.include()]**2).sum() / npix**2)
        error = var**0.5
        if np.isnan(error):
            raise ValueError("error is nan: this is impossible by definition.")

        if line == '321' and Stot303 == 0:
            r321303 = np.nan
            er321303 = np.nan
        elif Stot321 < 0:
            r321303 = error / Smean303
            er321303 = (r321303**2 * (var/Smean303**2 + 1))**0.5
        else:
            r321303 = Stot321 / Stot303
            er321303 = (r321303**2 * (var/Smean303**2 + var/Smean321**2))**0.5

        for c in columns:
            assert len(columns[c]) == ii

        columns['index'].append(row['_idx'])
        columns['s_ntotal'].append(sntot)
        columns['Stot303'].append(Stot303)
        columns['Smax303'].append(Smax303)
        columns['Smin303'].append(Smin303)
        columns['Stot321'].append(Stot321)
        columns['Smean303'].append(Smean303)
        columns['Smean321'].append(Smean321)
        columns['npix'].append(npix)
        columns['e303'].append(error)
        columns['e321'].append(error)
        columns['r321303'].append(r321303)
        columns['er321303'].append(er321303)
        columns['13cosum'].append(co13sum)
        columns['c18osum'].append(co18sum)
        columns['13comean'].append(co13sum/npix)
        columns['c18omean'].append(co18sum/npix)
        columns['is_leaf'].append(structure.is_leaf)
        columns['parent'].append(structure.parent.idx if structure.parent else -1)
        columns['root'].append(get_root(structure).idx)
        s_main = maincube._data[dend_inds]
        x,y,z = maincube.world[dend_inds]
        lon = ((z.value-(360*(z.value>180)))*s_main).sum()/s_main.sum()
        lat = (y*s_main).sum()/s_main.sum()
        vel = (x*s_main).sum()/s_main.sum()
        columns['lon'].append(lon)
        columns['lat'].append(lat.value)
        columns['vcen'].append(vel.value)

        mask2d = dend_obj_mask.include().max(axis=0)[view[1:]]
        logh2column = np.log10(np.nanmean(column_regridded.data[view[1:]][mask2d]) * 1e22)
        if np.isnan(logh2column):
            log.info("Source #{0} has NaNs".format(ii))
            logh2column = 24
        elogh2column = elogabundance
        columns['higaldusttem'].append(np.nanmean(dusttem_regridded.data[view[1:]][mask2d]))

        r_arcsec = row['radius']*u.arcsec
        reff = (r_arcsec*(8.5*u.kpc)).to(u.pc, u.dimensionless_angles())
        mass = ((10**logh2column*u.cm**-2)*np.pi*reff**2*2.8*constants.m_p).to(u.M_sun)
        density = (mass/(4/3.*np.pi*reff**3)/constants.m_p/2.8).to(u.cm**-3)

        columns['reff'].append(reff.value)
        columns['dustmass'].append(mass.value)
        columns['dustmindens'].append(density.value)
        mindens = np.log10(density.value)
        if mindens < 3:
            mindens = 3

        if (r321303 < 0 or np.isnan(r321303)) and line != '321':
            raise ValueError("Ratio <0: This can't happen any more because "
                             "if either num/denom is <0, an exception is "
                             "raised earlier")
            #for k in columns:
            #    if k not in obs_keys:
            #        columns[k].append(np.nan)
        elif (r321303 < 0 or np.isnan(r321303)) and line == '321':
            for k in keys:
                columns[k].append(np.nan)
        else:
            # Replace negatives for fitting
            if Smean321 <= 0:
                Smean321 = error
            mf.set_constraints(ratio321303=r321303, eratio321303=er321303,
                               #ratio321322=ratio2, eratio321322=eratio2,
                               logh2column=logh2column, elogh2column=elogh2column,
                               logabundance=logabundance, elogabundance=elogabundance,
                               taline303=Smean303, etaline303=error,
                               taline321=Smean321, etaline321=error,
                               mindens=mindens,
                               linewidth=10)
            row_data = mf.get_parconstraints()
            row_data['ratio321303'] = r321303
            row_data['eratio321303'] = er321303

            for k in row_data:
                columns[k].append(row_data[k])

            # Exclude bad velocities from cubes
            if row['v_cen'] < -80e3 or row['v_cen'] > 180e3:
                # Skip: there is no real structure down here
                nbad += 1
                is_bad = True
            else:
                is_bad = False
                tcubedata[dend_obj_mask.include()] = row_data['expected_temperature']
                if structure.is_leaf:
                    tcubeleafdata[dend_obj_mask.include()] = row_data['expected_temperature']

            columns['bad'].append(is_bad)

            width = row['v_rms']*u.km/u.s
            lengthscale = reff

            #REMOVED in favor of despotic version done in dendrograms.py
            # we use the analytic version here; the despotic version is
            # computed elsewhere (with appropriate gcor factors)
            #columns['tkin_turb'].append(heating.tkin_all(10**row_data['density_chi2']*u.cm**-3,
            #                                             width,
            #                                             lengthscale,
            #                                             width/lengthscale,
            #                                             columns['higaldusttem'][-1]*u.K,
            #                                             crir=0./u.s))

        if len(set(len(c) for k,c in columns.items())) != 1:
            print("Columns are different lengths.  This is not allowed.")
            import ipdb; ipdb.set_trace()

        for c in columns:
            assert len(columns[c]) == ii+1

        if plot_some and not is_bad and ((ii-nbad) % 100 == 0 or ii-nbad < 50):
            try:
                log.info("T: [{tmin1sig_chi2:7.2f},{expected_temperature:7.2f},{tmax1sig_chi2:7.2f}]"
                         "  R={ratio321303:8.4f}+/-{eratio321303:8.4f}"
                         "  Smean303={Smean303:8.4f} +/- {e303:8.4f}"
                         "  Stot303={Stot303:8.2e}  npix={npix:6d}"
                         .format(Smean303=Smean303, Stot303=Stot303,
                                 npix=npix, e303=error, **row_data))

                pl.figure(1)
                pl.clf()
                mf.denstemplot()
                pl.savefig(fpath("dendrotem/diagnostics/{0}_{1}.png".format(suffix,ii)))
                pl.figure(2).clf()
                mf.parplot1d_all(levels=[0.68268949213708585])
                pl.savefig(fpath("dendrotem/diagnostics/1dplot{0}_{1}.png".format(suffix,ii)))
                pl.draw()
                pl.show()
            except Exception as ex:
                print(ex)
                pass
        else:
            pb.update(ii+1)

        if last_index is not None and ii >= last_index:
            break

    if last_index is not None:
        catalog = catalog[:last_index+1]

    for k in columns:
        if k not in catalog.keys():
            catalog.add_column(table.Column(name=k, data=columns[k]))

    for mid,lo,hi,letter in (('expected_temperature','tmin1sig_chi2','tmax1sig_chi2','t'),
                             ('expected_density','dmin1sig_chi2','dmax1sig_chi2','d'),
                             ('expected_column','cmin1sig_chi2','cmax1sig_chi2','c')):
        catalog.add_column(table.Column(name='elo_'+letter,
                                        data=catalog[mid]-catalog[lo]))
        catalog.add_column(table.Column(name='ehi_'+letter,
                                        data=catalog[hi]-catalog[mid]))

    if write:
        catalog.write(tpath('PPV_H2CO_Temperature{0}.ipac'.format(suffix)), format='ascii.ipac')

    # Note that there are overlaps in the catalog, which means that ORDER MATTERS
    # in the above loop.  I haven't yet checked whether large scale overwrites
    # small or vice-versa; it may be that both views of the data are interesting.
    tcube = SpectralCube(data=tcubedata, wcs=cube303.wcs,
                         mask=cube303.mask, meta={'unit':'K'},
                         header=cube303.header,
                        )
    tcubeleaf = SpectralCube(data=tcubeleafdata, wcs=cube303.wcs,
                         mask=cube303.mask, meta={'unit':'K'},
                         header=cube303.header,
                        )

    if write:
        log.info("Writing TemperatureCube")
        outpath = 'TemperatureCube_DendrogramObjects{0}.fits'
        tcube.write(hpath(outpath.format(suffix)),
                    overwrite=True)

        outpath_leaf = 'TemperatureCube_DendrogramObjects{0}_leaves.fits'
        tcubeleaf.write(hpath(outpath_leaf.format(suffix)),
                    overwrite=True)


    return catalog, tcube
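
A toy illustration of the ordering caveat noted above: where two structure masks overlap, whichever temperature is assigned last overwrites the earlier value in the output cube.

import numpy as np

tcubedata = np.full((1, 4, 4), np.nan)
parent_mask = np.zeros_like(tcubedata, dtype=bool)
parent_mask[0, :3, :3] = True
leaf_mask = np.zeros_like(tcubedata, dtype=bool)
leaf_mask[0, 1:3, 1:3] = True

tcubedata[parent_mask] = 50.0   # parent structure assigned first
tcubedata[leaf_mask] = 80.0     # leaf assigned later wins in the overlap region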
Exemplo n.º 51
0
def add_data_to_cube(cubefilename, data=None, filename=None, fileheader=None,
                     flatheader='header.txt',
                     cubeheader='cubeheader.txt', nhits=None,
                     smoothto=1, baselineorder=5, velocityrange=None,
                     excludefitrange=None, noisecut=np.inf, do_runscript=False,
                     linefreq=None, allow_smooth=True,
                     data_iterator=data_iterator,
                     coord_iterator=coord_iterator,
                     velo_iterator=velo_iterator,
                     progressbar=False, coordsys='galactic',
                     datalength=None,
                     velocity_offset=0.0, negative_mean_cut=None,
                     add_with_kernel=False, kernel_fwhm=None, fsw=False,
                     kernel_function=Gaussian2DKernel,
                     diagnostic_plot_name=None, chmod=False,
                     continuum_prefix=None,
                     debug_breakpoint=False,
                     default_unit=u.km/u.s,
                     make_continuum=True,
                     weightspec=None,
                     varweight=False):
    """
    Given a .fits file that contains a binary table of spectra (e.g., as
    you would get from the GBT mapping "pipeline" or the reduce_map.pro aoidl
    file provided by Adam Ginsburg), adds each spectrum into the cubefile.

    velocity_offset : 0.0
        Amount to add to the velocity vector before adding it to the cube
        (useful for FSW observations)
    weightspec : np.ndarray
        A spectrum with the same size as the input arrays but containing the relative
        weights of the data
    """

    #if not default_unit.is_equivalent(u.km/u.s):
    #    raise TypeError("Default unit is not a velocity equivalent.")

    if type(nhits) is str:
        log.debug("Loading nhits from %s" % nhits)
        nhits = pyfits.getdata(nhits)
    elif type(nhits) is not np.ndarray:
        raise TypeError("nhits must be a .fits file or an ndarray, "
                        "but it is {0}".format(type(nhits)))
    naxis2,naxis1 = nhits.shape

    if velocity_offset and not fsw:
        raise ValueError("Using a velocity offset, but obs type is not "
                         "frequency switched; this is almost certainly wrong, "
                         "but if there's a case for it I'll remove this.")
    if not hasattr(velocity_offset,'unit'):
        velocity_offset = velocity_offset*default_unit


    contimage = np.zeros_like(nhits)
    nhits_once = np.zeros_like(nhits)

    log.debug("Loading data cube {0}".format(cubefilename))
    t0 = time.time()
    # rescale image to weight by number of observations
    image = pyfits.getdata(cubefilename)*nhits
    log.debug(" ".join(("nhits statistics: mean, std, nzeros, size",str(nhits.mean()),str(nhits.std()),str(np.sum(nhits==0)), str(nhits.size))))
    log.debug(" ".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size), str(np.sum(np.isnan(image))))))
    log.debug(" ".join(("nhits shape: ",str(nhits.shape))))
    # default is to set empty pixels to NAN; have to set them
    # back to zero
    image[image!=image] = 0.0
    header = pyfits.getheader(cubefilename)
    # debug print "Cube shape: ",image.shape," naxis3: ",header.get('NAXIS3')," nhits shape: ",nhits.shape

    log.debug("".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size))))

    flathead = get_header(flatheader)
    naxis3 = image.shape[0]
    wcs = pywcs.WCS(flathead)
    cwcs = pywcs.WCS(header)
    vwcs = cwcs.sub([pywcs.WCSSUB_SPECTRAL])
    vunit = u.Unit(vwcs.wcs.cunit[vwcs.wcs.spec])
    cubevelo = vwcs.wcs_pix2world(np.arange(naxis3),0)[0] * vunit
    cd3 = vwcs.wcs.cdelt[vwcs.wcs.spec] * vunit

    if not vunit.is_equivalent(default_unit):
        raise ValueError("The units of the cube and the velocity axis are "
                         "possibly not equivalent.  Change default_unit to "
                         "the appropriate unit (probably {0})".format(vunit))

    if add_with_kernel:
        if wcs.wcs.has_cd():
            cd = np.abs(wcs.wcs.cd[1,1])
        else:
            cd = np.abs(wcs.wcs.cdelt[1])
        # Alternative implementation; may not work for .cd?
        #cd = np.abs(np.prod((wcs.wcs.get_cdelt() * wcs.wcs.get_pc().diagonal())))**0.5

    if velocityrange is not None:
        if hasattr(velocityrange, 'unit'):
            v1,v4 = velocityrange
        else:
            v1,v4 = velocityrange * default_unit
        ind1 = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2 = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # stupid hack.  REALLY stupid hack.  Don't crop.
        if np.abs(ind2-image.shape[0]) < 5:
            ind2 = image.shape[0]
        if np.abs(ind1) < 5:
            ind1 = 0

        #print "Velo match for v1,v4 = %f,%f: %f,%f" % (v1,v4,cubevelo[ind1],cubevelo[ind2])
        # print "Updating CRPIX3 from %i to %i. Cropping to indices %i,%i" % (header.get('CRPIX3'),header.get('CRPIX3')-ind1,ind1,ind2)
        # I think this could be disastrous: cubevelo is already set, but now we're changing how it's set in the header!
        # I don't think there's any reason to have this in the first place
        # header.set('CRPIX3',header.get('CRPIX3')-ind1)

        # reset v1,v4 to the points we just selected
        v1 = cubevelo[ind1]
        v4 = cubevelo[ind2-1]
    else:
        ind1=0
        ind2 = image.shape[0]
        v1,v4 = min(cubevelo),max(cubevelo)

    # debug print "Cube has %i v-axis pixels from %f to %f.  Crop range is %f to %f" % (naxis3,cubevelo.min(),cubevelo.max(),v1,v4)

    #if abs(cdelt) < abs(cd3):
    #    print "Spectra have CD=%0.2f, cube has CD=%0.2f.  Will smooth & interpolate." % (cdelt,cd3)

    # Disable progressbar if debug-logging is enabled (they clash)
    if progressbar and 'ProgressBar' in globals() and log.level > 10:
        if datalength is None:
            pb = ProgressBar(len(data))
        else:
            pb = ProgressBar(datalength)
    else:
        progressbar = False

    skipped = []

    for spectrum,pos,velo in zip(data_iterator(data,fsw=fsw),
                                 coord_iterator(data,coordsys_out=coordsys),
                                 velo_iterator(data,linefreq=linefreq)):

        if log.level <= 10:
            t1 = time.time()

        if not hasattr(velo,'unit'):
            velo = velo * default_unit

        glon,glat = pos
        cdelt = velo[1]-velo[0]
        if cdelt < 0:
            # for interpolation, require increasing X axis
            spectrum = spectrum[::-1]
            velo = velo[::-1]
            if log.level < 5:
                log.debug("Reversed spectral axis... ")

        if (velo.max() < cubevelo.min() or velo.min() > cubevelo.max()):
            raise ValueError("Data out of range.")

        if progressbar and log.level > 10:
            pb.update()

        velo += velocity_offset

        if glon != 0 and glat != 0:
            x,y = wcs.wcs_world2pix(glon,glat,0)
            if np.isnan(x) or np.isnan(y):
                log.warn("".join(("Skipping NaN point {0}, {1} ...".format(glon,glat))))
                continue
            if log.level < 10:
                log.debug("".join(("At point {0},{1} ...".format(glon,glat),)))
            if abs(cdelt) < abs(cd3) and allow_smooth:
                # need to smooth before interpolating to preserve signal
                kernwidth = abs(cd3/cdelt/2.35).decompose().value
                if kernwidth > 2 and kernwidth < 10:
                    xr = kernwidth*5
                    npx = int(np.ceil(xr*2 + 1))
                elif kernwidth > 10:
                    raise ValueError('Too much smoothing')
                else:
                    xr = 5
                    npx = 11
                #kernel = np.exp(-(np.linspace(-xr,xr,npx)**2)/(2.0*kernwidth**2))
                #kernel /= kernel.sum()
                kernel = Gaussian1DKernel(stddev=kernwidth, x_size=npx)
                smspec = np.convolve(spectrum,kernel,mode='same')
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     smspec)
            else:
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     spectrum)
            OK = (datavect[ind1:ind2] == datavect[ind1:ind2])

            if excludefitrange is None:
                include = OK
            else:
                # Exclude certain regions (e.g., the spectral lines) when computing the noise
                include = OK.copy()

                if not hasattr(excludefitrange,'unit'):
                    excludefitrange = excludefitrange * default_unit

                # Convert velocities to indices
                exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

                # Loop through exclude_inds pairwise
                for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
                    # Do not include the excluded regions
                    include[i1:i2] = False

                if include.sum() == 0:
                    raise ValueError("All data excluded.")

            noiseestimate = datavect[ind1:ind2][include].std()
            contestimate = datavect[ind1:ind2][include].mean()

            if noiseestimate > noisecut:
                log.info("Skipped a data point at %f,%f in file %s because it had excessive noise %f" % (x,y,filename,noiseestimate))
                skipped.append(True)
                continue
            elif negative_mean_cut is not None and contestimate < negative_mean_cut:
                log.info("Skipped a data point at %f,%f in file %s because it had negative continuum %f" % (x,y,filename,contestimate))
                skipped.append(True)
                continue
            elif OK.sum() == 0:
                log.info("Skipped a data point at %f,%f in file %s because it had NANs" % (x,y,filename))
                skipped.append(True)
                continue
            elif OK.sum()/float(abs(ind2-ind1)) < 0.5:
                log.info("Skipped a data point at %f,%f in file %s because it had %i NANs" % (x,y,filename,np.isnan(datavect[ind1:ind2]).sum()))
                skipped.append(True)
                continue
            if log.level < 10:
                log.debug("did not skip...")

            if varweight:
                weight = 1./noiseestimate**2
            else:
                weight = 1.

            if weightspec is None:
                wspec = weight
            else:
                wspec = weight * weightspec


            if 0 < int(np.round(x)) < naxis1 and 0 < int(np.round(y)) < naxis2:
                if add_with_kernel:
                    fwhm = np.sqrt(8*np.log(2))
                    kernel_size = kd = int(np.ceil(kernel_fwhm/fwhm/cd * 5))
                    if kernel_size < 5:
                        kernel_size = kd = 5
                    if kernel_size % 2 == 0:
                        kernel_size = kd = kernel_size+1
                    if kernel_size > 100:
                        raise ValueError("Huge kernel - are you sure?")
                    kernel_middle = mid = (kd-1)/2.
                    xinds,yinds = (np.mgrid[:kd,:kd]-mid+np.array([np.round(x),np.round(y)])[:,None,None]).astype('int')
                    # This kernel is NOT centered, and that's the bloody point.
                    # (I made a very stupid error and used Gaussian2DKernel,
                    # which is strictly centered, in a previous version)
                    kernel2d = np.exp(-((xinds-x)**2+(yinds-y)**2)/(2*(kernel_fwhm/fwhm/cd)**2))

                    dim1 = ind2-ind1
                    vect_to_add = np.outer(datavect[ind1:ind2],kernel2d).reshape([dim1,kd,kd])
                    vect_to_add[~OK] = 0

                    # need to slice out edges
                    if yinds.max() >= naxis2 or yinds.min() < 0:
                        yok = (yinds[0,:] < naxis2) & (yinds[0,:] >= 0)
                        xinds,yinds = xinds[:,yok],yinds[:,yok]
                        vect_to_add = vect_to_add[:,:,yok]
                        kernel2d = kernel2d[:,yok]
                    if xinds.max() >= naxis1 or xinds.min() < 0:
                        xok = (xinds[:,0] < naxis1) & (xinds[:,0] >= 0)
                        xinds,yinds = xinds[xok,:],yinds[xok,:]
                        vect_to_add = vect_to_add[:,xok,:]
                        kernel2d = kernel2d[xok,:]

                    image[ind1:ind2,yinds,xinds] += vect_to_add*wspec
                    # NaN spectral bins are not appropriately downweighted... but they shouldn't exist anyway...
                    nhits[yinds,xinds] += kernel2d*weight
                    contimage[yinds,xinds] += kernel2d * contestimate*weight
                    nhits_once[yinds,xinds] += kernel2d*weight

                else:
                    image[ind1:ind2,int(np.round(y)),int(np.round(x))][OK] += datavect[ind1:ind2][OK]*weight
                    nhits[int(np.round(y)),int(np.round(x))] += weight
                    contimage[int(np.round(y)),int(np.round(x))] += contestimate*weight
                    nhits_once[int(np.round(y)),int(np.round(x))] += weight

                if log.level < 10:
                    log.debug("Z-axis indices are %i,%i..." % (ind1,ind2,))
                    log.debug("Added a data point at %i,%i" % (int(np.round(x)),int(np.round(y))))
                skipped.append(False)
            else:
                skipped.append(True)
                log.info("Skipped a data point at x,y=%f,%f "
                         "lon,lat=%f,%f in file %s because "
                         "it's out of the grid" % (x,y,glon,glat,filename))

            if debug_breakpoint:
                import ipdb
                ipdb.set_trace()

        if log.level <= 10:
            dt = time.time() - t1
            log.debug("Completed x,y={x:4.0f},{y:4.0f}"
                      " ({x:6.2f},{y:6.2f}) in {dt:6.2g}s".format(x=float(x),
                                                                  y=float(y),
                                                                  dt=dt))

    log.info("Completed 'add_data' loop for"
             " {0} in {1}s".format(cubefilename, time.time()-t0))

    if excludefitrange is not None:
        # this block redefining "include" is used for diagnostics (optional)
        ind1a = np.argmin(np.abs(np.floor(v1-velo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-velo)))+1
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        OK = (data[dname][0,:]==data[dname][0,:])
        OK[:ind1a] = False
        OK[ind2a:] = False

        include = OK

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-velo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")
    else:
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        include = slice(None)


    if diagnostic_plot_name:
        from mpl_plot_templates import imdiagnostics

        pylab.clf()

        dd = data[dname][:,include]
        imdiagnostics(dd,axis=pylab.gca())
        pylab.savefig(diagnostic_plot_name, bbox_inches='tight')

        # Save a copy with the bad stuff flagged out; this should tell whether flagging worked
        skipped = np.array(skipped,dtype='bool')
        dd[skipped,:] = -999
        maskdata = np.ma.masked_equal(dd,-999)
        pylab.clf()
        imdiagnostics(maskdata, axis=pylab.gca())
        dpn_pre,dpn_suf = os.path.splitext(diagnostic_plot_name)
        dpn_flagged = dpn_pre+"_flagged"+dpn_suf
        pylab.savefig(dpn_flagged, bbox_inches='tight')

        log.info("Saved diagnostic plot %s and %s" % (diagnostic_plot_name,dpn_flagged))

    log.debug("nhits statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(nhits.mean(),nhits.std(),np.sum(nhits==0), nhits.size))
    log.debug("Image statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(image.mean(),image.std(),np.sum(image==0), image.size))
    
    imav = image/nhits

    if log.level <= 10:
        nnan = np.count_nonzero(np.isnan(imav))
        log.debug("imav statistics: mean, std, nzeros, size, nnan, ngood: {0} {1} {2} {3} {4} {5}".format(imav.mean(),imav.std(),np.sum(imav==0), imav.size, nnan, imav.size-nnan))
        log.debug("imav shape: {0}".format(imav.shape))

    subcube = imav[ind1:ind2,:,:]

    if log.level <= 10:
        nnan = np.sum(np.isnan(subcube))
        print("subcube statistics: mean, std, nzeros, size, nnan, ngood:",np.nansum(subcube)/subcube.size,np.std(subcube[subcube==subcube]),np.sum(subcube==0), subcube.size, nnan, subcube.size-nnan)
        print("subcube shape: ",subcube.shape)

    H = header.copy()
    if fileheader is not None:
        for k,v in fileheader.items():
            if 'RESTFRQ' in k or 'RESTFREQ' in k:
                header.set(k,v)
            #if k[0] == 'C' and '1' in k and k[-1] != '1':
            #    header.set(k.replace('1','3'), v)
    moreH = get_header(cubeheader)
    for k,v in H.items():
        header.set(k,v)
    for k,v in moreH.items():
        header.set(k,v)
    HDU = pyfits.PrimaryHDU(data=subcube,header=header)
    HDU.writeto(cubefilename,clobber=True,output_verify='fix')

    outpre = cubefilename.replace(".fits","")

    include = np.ones(imav.shape[0],dtype='bool')

    if excludefitrange is not None:
        # this block redefining "include" is used for the continuum
        ind1a = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")

    HDU2 = pyfits.PrimaryHDU(data=nhits,header=flathead)
    HDU2.writeto(outpre+"_nhits.fits",clobber=True,output_verify='fix')

    #OKCube = (imav==imav)
    #contmap = np.nansum(imav[naxis3*0.1:naxis3*0.9,:,:],axis=0) / OKCube.sum(axis=0)
    if make_continuum:
        contmap = np.nansum(imav[include,:,:],axis=0) / include.sum()
        HDU2 = pyfits.PrimaryHDU(data=contmap,header=flathead)
        HDU2.writeto(outpre+"_continuum.fits",clobber=True,output_verify='fix')

        if continuum_prefix is not None:
            # Solo continuum image (just this obs set)
            HDU2.data = contimage / nhits_once
            HDU2.writeto(continuum_prefix+"_continuum.fits",clobber=True,output_verify='fix')
            HDU2.data = nhits_once
            HDU2.writeto(continuum_prefix+"_nhits.fits",clobber=True,output_verify='fix')

    log.info("Writing script file {0}".format(outpre+"_starlink.sh"))
    scriptfile = open(outpre+"_starlink.sh",'w')
    outpath,outfn = os.path.split(cubefilename)
    outpath,pre = os.path.split(outpre)
    print(("#!/bin/bash"), file=scriptfile)
    if outpath != '':
        print(('cd %s' % outpath), file=scriptfile)
    print(('. /star/etc/profile'), file=scriptfile)
    print(('kappa > /dev/null'), file=scriptfile)
    print(('convert > /dev/null'), file=scriptfile)
    print(('fits2ndf %s %s' % (outfn,outfn.replace(".fits",".sdf"))), file=scriptfile)
    if excludefitrange is not None:
        v2v3 = ""
        for v2,v3 in zip(excludefitrange[::2],excludefitrange[1::2]):
            v2v3 += "%0.2f %0.2f " % (v2.to(default_unit).value,v3.to(default_unit).value)
        print(('mfittrend %s  ranges=\\\"%0.2f %s %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v2v3,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    else:
        print(('mfittrend %s  ranges=\\\"%0.2f %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    print(('sub %s %s %s' % (outfn.replace(".fits",".sdf"),outfn.replace(".fits","_baseline.sdf"),outfn.replace(".fits","_sub.sdf"))), file=scriptfile)
    print(('sqorst %s_sub mode=pixelscale  axis=3 pixscale=%i out=%s_vrebin' % (pre,smoothto,pre)), file=scriptfile)
    print(('gausmooth %s_vrebin fwhm=1.0 axes=[1,2] out=%s_smooth' % (pre,pre)), file=scriptfile)
    print(('#collapse %s estimator=mean axis="VRAD" low=-400 high=500 out=%s_continuum' % (pre,pre)), file=scriptfile)
    print(('rm %s_sub.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_sub %s_sub.fits' % (pre,pre)), file=scriptfile)
    print(('rm %s_smooth.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_smooth %s_smooth.fits' % (pre,pre)), file=scriptfile)
    print(("# Fix STARLINK's failure to respect header keywords."), file=scriptfile)
    print(('sethead %s_smooth.fits RESTFRQ=`gethead RESTFRQ %s.fits`' % (pre,pre)), file=scriptfile)
    print(('rm %s_baseline.sdf' % (pre)), file=scriptfile)
    print(('rm %s_smooth.sdf' % (pre)), file=scriptfile)
    print(('rm %s_sub.sdf' % (pre)), file=scriptfile)
    print(('rm %s_vrebin.sdf' % (pre)), file=scriptfile)
    print(('rm %s.sdf' % (pre)), file=scriptfile)
    scriptfile.close()

    if chmod:
        scriptfilename = (outpre+"_starlink.sh").replace(" ","")
        #subprocess.call("chmod +x {0}".format(scriptfilename), shell=True)
        st = os.stat(scriptfilename)
        os.chmod(scriptfilename, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR)

    if do_runscript:
        runscript(outpre)

    _fix_ms_kms_file(outpre+"_sub.fits")
    _fix_ms_kms_file(outpre+"_smooth.fits")

    if log.level <= 20:
        log.info("Completed {0} in {1}s".format(pre, time.time()-t0))
Exemplo n.º 52
0
def fourier_combine_cubes(cube1, cube2, highresextnum=0,
                          highresscalefactor=1.0,
                          lowresscalefactor=1.0, lowresfwhm=1*u.arcmin,
                          return_regridded_cube2=False,
                          return_hdu=False,
                         ):
    """
    Fourier combine two data cubes

    Parameters
    ----------
    cube1 : SpectralCube or str
        The high-resolution cube, or the path to its FITS file
    cube2 : SpectralCube or str
        The low-resolution (single-dish) cube, or the path to its FITS file
    highresextnum : int
        The extension number to use from the high-res FITS file
    highresscalefactor : float
    lowresscalefactor : float
        A factor to multiply the high- or low-resolution data by to match the
        low- or high-resolution data
    lowresfwhm : `astropy.units.Quantity`
        The full-width-half-max of the single-dish (low-resolution) beam;
        or the scale at which you want to try to match the low/high resolution
        data
    return_hdu : bool
        Return an HDU instead of just a cube.  It will contain two image
        planes, one for the real and one for the imaginary data.
    return_regridded_cube2 : bool
        Return the 2nd cube regridded into the pixel space of the first?
    """
    if isinstance(cube1, str):
        cube1 = SpectralCube.read(cube1)
    if isinstance(cube2, str):
        cube2 = SpectralCube.read(cube2)
    #cube1 = spectral_cube.io.fits.load_fits_cube(highresfitsfile,
    #                                             hdu=highresextnum)
    im1 = cube1._data # want the raw data for this
    hd1 = cube1.header
    assert hd1['NAXIS'] == im1.ndim == 3
    w1 = cube1.wcs
    pixscale = np.abs(w1.wcs.get_cdelt()[0]) # REPLACE EVENTUALLY...

    cube2 = cube2.to(cube1.unit)

    assert cube1.unit == cube2.unit, 'Cubes must have same or equivalent unit'
    assert cube1.unit.is_equivalent(u.Jy/u.beam) or cube1.unit.is_equivalent(u.K), "Cubes must have brightness units."

    #f2 = regrid_fits_cube(lowresfitsfile, hd1)
    f2 = regrid_cube_hdu(cube2.hdu, hd1)
    w2 = wcs.WCS(f2.header)

    nax1,nax2,nax3 = (hd1['NAXIS1'],
                      hd1['NAXIS2'],
                      hd1['NAXIS3'])

    dcube1 = im1 * highresscalefactor
    dcube2 = f2.data * lowresscalefactor
    outcube = np.empty_like(dcube1)

    xgrid,ygrid = (np.indices([nax2,nax1])-np.array([(nax2-1.)/2,(nax1-1.)/2.])[:,None,None])
    fwhm = np.sqrt(8*np.log(2))
    # sigma in pixels
    sigma = ((lowresfwhm/fwhm/(pixscale*u.deg)).decompose().value)
    #sigma_fftspace = (1/(4*np.pi**2*sigma**2))**0.5
    sigma_fftspace = (2*np.pi*sigma)**-1
    log.debug('sigma = {0}, sigma_fftspace={1}'.format(sigma, sigma_fftspace))

    kernel = np.fft.fftshift(np.exp(-(xgrid**2+ygrid**2)/(2*sigma**2)))
    # convert the kernel, which is just a gaussian in image space,
    # to its corresponding kernel in fourier space
    kfft = np.abs(np.fft.fft2(kernel)) # should be mostly real
    # normalize the kernel
    kfft/=kfft.max()
    ikfft = 1-kfft

    pb = ProgressBar(dcube1.shape[0])

    for ii,(im1,im2) in enumerate(zip(dcube1, dcube2)):

        fft1 = np.fft.fft2(np.nan_to_num(im1))
        fft2 = np.fft.fft2(np.nan_to_num(im2))

        fftsum = kfft*fft2 + ikfft*fft1

        combo = np.fft.ifft2(fftsum)
        outcube[ii,:,:] = combo.real

        pb.update(ii+1)

    if return_regridded_cube2:
        return outcube, f2
    elif return_hdu:
        return fits.PrimaryHDU(data=outcube, header=w1.to_header())
    else:
        return outcube
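
A single-plane sketch of the Fourier combination step above, with synthetic images and an illustrative kernel width: the low-resolution image supplies the low spatial frequencies (weight ``kfft``) and the high-resolution image supplies the remainder (weight ``1 - kfft``).

import numpy as np

ny, nx = 64, 64
im_hi = np.random.randn(ny, nx)   # high-resolution plane
im_lo = np.random.randn(ny, nx)   # low-resolution plane, already on the same grid

ygrid, xgrid = np.indices((ny, nx)) - np.array([(ny - 1) / 2., (nx - 1) / 2.])[:, None, None]
sigma = 5.0   # low-resolution beam sigma in pixels (illustrative)
kernel = np.fft.fftshift(np.exp(-(xgrid**2 + ygrid**2) / (2 * sigma**2)))
kfft = np.abs(np.fft.fft2(kernel))
kfft /= kfft.max()

combined = np.fft.ifft2(kfft * np.fft.fft2(im_lo) +
                        (1 - kfft) * np.fft.fft2(im_hi)).real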
Exemplo n.º 53
0
def convolve_model_dir(model_dir, filters, overwrite=False):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    filters : list
        A list of :class:`~sedfitter.filter.Filter` objects to use for the
        convolution
    overwrite : bool, optional
        Whether to overwrite the output files
    """

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=np.zeros(len(sed_files),
                                                   dtype='U30' if six.PY3 else 'S30'),
                              apertures=apertures, initialize_arrays=True)
              for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug('Convolving {0}'.format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info('Rebinning filters')
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table['MODEL_NAME'])
        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
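
A hypothetical invocation, assuming sedfitter is installed and that ``Filter`` objects can be built by assigning the ``name``, ``central_wavelength``, ``nu``, and ``response`` attributes this function relies on; the model directory name and the top-hat response are placeholders.

import numpy as np
from astropy import units as u
from sedfitter.filter import Filter

f = Filter()
f.name = 'TOPHAT_10UM'
f.central_wavelength = 10. * u.micron
f.nu = np.linspace(2.8e13, 3.2e13, 100) * u.Hz   # crude band around 10 microns
f.response = np.ones(100)

convolve_model_dir('models_r06', filters=[f], overwrite=True)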
Exemplo n.º 54
0
    def compute_bispectrum(self, show_progress=True, use_pyfftw=False,
                           threads=1, nsamples=100, seed=1000,
                           mean_subtract=False, **pyfftw_kwargs):
        '''
        Do the computation.

        Parameters
        ----------
        show_progress : optional, bool
            Show progress bar while sampling the bispectrum.
        use_pyfftw : bool, optional
            Enable to use pyfftw, if it is installed.
        threads : int, optional
            Number of threads to use in FFT when using pyfftw.
        nsamples : int, optional
            Sets the number of samples to take at each vector
            magnitude.
        seed : int, optional
            Sets the seed for the distribution draws.
        mean_subtract : bool, optional
            Subtract the mean from the data before computing. This removes the
            "zero frequency" (i.e., constant) portion of the power, resulting
            in a loss of phase coherence along the k_1=k_2 line.
        pyfftw_kwargs : Passed to
            `~turbustat.statistics.rfft_to_fft.rfft_to_fft`. See
            `here <https://hgomersall.github.io/pyFFTW/pyfftw/interfaces/interfaces.html#interfaces-additional-args>`_
            for a list of accepted kwargs.
        '''

        if mean_subtract:
            norm_data = self.data - self.data.mean()
        else:
            norm_data = self.data

        if use_pyfftw:
            if PYFFTW_FLAG:
                if pyfftw_kwargs.get('threads') is not None:
                    pyfftw_kwargs.pop('threads')

                fftarr = fft2(norm_data,
                              threads=threads,
                              **pyfftw_kwargs)
            else:
                warn("pyfftw not installed. Reverting to using numpy.")
                use_pyfftw = False

        if not use_pyfftw:
            fftarr = np.fft.fft2(norm_data)

        conjfft = np.conj(fftarr)

        bispec_shape = (int(self.shape[0] / 2.), int(self.shape[1] / 2.))

        self._bispectrum = np.zeros(bispec_shape, dtype=complex)
        self._bicoherence = np.zeros(bispec_shape, dtype=float)
        self._tracker = np.zeros(self.shape, dtype=np.int16)

        biconorm = np.ones_like(self.bispectrum, dtype=float)

        if show_progress:
            bar = ProgressBar(np.prod(fftarr.shape) / 4.)

        prod = product(range(int(fftarr.shape[0] / 2.)),
                       range(int(fftarr.shape[1] / 2.)))

        with NumpyRNGContext(seed):
            for n, (k1mag, k2mag) in enumerate(prod):
                phi1 = ra.uniform(0, 2 * np.pi, nsamples)
                phi2 = ra.uniform(0, 2 * np.pi, nsamples)

                k1x = np.asarray([int(k1mag * np.cos(angle))
                                  for angle in phi1])
                k2x = np.asarray([int(k2mag * np.cos(angle))
                                  for angle in phi2])
                k1y = np.asarray([int(k1mag * np.sin(angle))
                                  for angle in phi1])
                k2y = np.asarray([int(k2mag * np.sin(angle))
                                  for angle in phi2])

                k3x = np.asarray([int(k1mag * np.cos(ang1) +
                                      k2mag * np.cos(ang2))
                                  for ang1, ang2 in zip(phi1, phi2)])
                k3y = np.asarray([int(k1mag * np.sin(ang1) +
                                      k2mag * np.sin(ang2))
                                  for ang1, ang2 in zip(phi1, phi2)])

                samps = fftarr[k1x, k1y] * fftarr[k2x, k2y] * conjfft[k3x, k3y]

                self._bispectrum[k1mag, k2mag] = np.sum(samps)

                biconorm[k1mag, k2mag] = np.sum(np.abs(samps))

                # Track where we're sampling from in fourier space
                self._tracker[k1x, k1y] += 1
                self._tracker[k2x, k2y] += 1
                self._tracker[k3x, k3y] += 1

                if show_progress:
                    bar.update(n + 1)

        self._bicoherence = (np.abs(self.bispectrum) / biconorm)
        self._bispectrum_amp = np.log10(np.abs(self.bispectrum))
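
A stand-alone sketch of the Monte Carlo sampling above for a single (k1, k2) magnitude pair, on synthetic data: random phases pick wavevectors of fixed magnitude, and the samples F(k1) F(k2) F*(k1 + k2) are accumulated just as in the loop.

import numpy as np

rng = np.random.RandomState(1000)
data = rng.randn(64, 64)
fftarr = np.fft.fft2(data)
conjfft = np.conj(fftarr)

k1mag, k2mag, nsamples = 5, 7, 100
phi1 = rng.uniform(0, 2 * np.pi, nsamples)
phi2 = rng.uniform(0, 2 * np.pi, nsamples)
k1x, k1y = (k1mag * np.cos(phi1)).astype(int), (k1mag * np.sin(phi1)).astype(int)
k2x, k2y = (k2mag * np.cos(phi2)).astype(int), (k2mag * np.sin(phi2)).astype(int)
k3x, k3y = k1x + k2x, k1y + k2y   # negative indices wrap, matching FFT frequency ordering

samps = fftarr[k1x, k1y] * fftarr[k2x, k2y] * conjfft[k3x, k3y]
bispectrum_value = samps.sum()
bicoherence_value = np.abs(samps.sum()) / np.abs(samps).sum()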
Exemplo n.º 55
0
catalog_k = np.sort([f for f in catalog if "k" in f])
catalog_j = np.sort([f for f in catalog if "j" in f])

# Get the lowest seeing
color_print('-Finding lowest seeing...','cyan')
ks,ksi = lowest_seeing(fits_k)
js,jsi = lowest_seeing(fits_j)
cmd_fits = fits_j[jsi], fits_k[ksi]
cmd_cata = catalog_j[jsi], catalog_k[ksi]
print('\tLowest seeing in K: %s (%f)' % (cmd_fits[1],ks))
print('\tLowest seeing in J: %s (%f)' % (cmd_fits[0],js))

# Get RA and DEC using WCS
color_print('-Getting RADEC...','cyan')
if get_RADEC: 
	ProgressBar.map(XYtoRADEC,np.transpose([fits_k,catalog_k]),multiprocess=True)
	ProgressBar.map(XYtoRADEC,np.transpose([fits_j,catalog_j]),multiprocess=True)
cmd_dat = cmd_fits[0].replace('fits','dat'), cmd_fits[1].replace('fits','dat')

# CMD (matching and creation)
color_print('-CMD match...','cyan')
execute  = 'sh stilts tmatch2 ifmt1=ascii ifmt2=ascii matcher=sky ofmt=ascii values1="RA DEC" values2="RA DEC" '
if match_CMD:
	exec_CMD = 'in1=%s in2=%s out=%s params=%.1f progress=none join=all1' % (folder+cmd_dat[1],folder+cmd_dat[0],results+'CMD.dat',match_tolerance)

	os.system(execute+exec_CMD)

# Match every epoch against the reference epoch (lowest seeing)
color_print('-Matching epochs...','cyan')
execute = execute.replace('values1="RA DEC"','values1="RA_1 DEC_1"')
if match_epo:
Exemplo n.º 56
0
    def quick_render_movie(self, outdir, size=256, nframes=30,
                           camera_angle=(0,0,1), north_vector=(0,0,1),
                           rot_vector=(1,0,0),
                           colormap='doom',
                           cmap_range='auto',
                           transfer_function='auto',
                           start_index=0,
                           image_prefix="",
                           output_filename='out.mp4',
                           log_scale=False,
                           rescale=True):
        """
        Create a movie rotating the cube 360 degrees from
        PP -> PV -> PP -> PV -> PP

        Parameters
        ----------
        outdir: str
            The output directory in which the individual image frames and the
            resulting output mp4 file should be stored
        size: int
            The size of the individual output frame in pixels (i.e., size=256
            will result in a 256x256 image)
        nframes: int
            The number of frames in the resulting movie
        camera_angle: 3-tuple
            The initial angle of the camera
        north_vector: 3-tuple
            The vector of 'north' in the data cube.  Default is coincident with
            the spectral axis
        rot_vector: 3-tuple
            The vector around which the camera will be rotated
        colormap: str
            A valid colormap.  See `yt.show_colormaps`
        transfer_function: 'auto' or `yt.visualization.volume_rendering.TransferFunction`
            Either 'auto' to use the colormap specified, or a valid
            TransferFunction instance
        log_scale: bool
            Should the colormap be log scaled?
        rescale: bool
            If True, the images will be rescaled to have a common 95th
            percentile brightness, which can help reduce flickering from having
            a single bright pixel in some projections
        start_index : int
            The number of the first image to save
        image_prefix : str
            A string to prepend to the image name for each image that is output
        output_filename : str
            The movie file name to output.  The suffix may affect the file type
            created.  Defaults to 'out.mp4'.  Will be placed in ``outdir``

        Returns
        -------
        images : list
            The list of image frames rendered during the rotation
        """
        if not ytOK:
            raise IOError("yt could not be imported.  Cube renderings are not possible.")

        scale = np.max(self.cube.shape)

        if not os.path.exists(outdir):
            os.makedirs(outdir)
        elif not os.path.isdir(outdir):
            raise OSError("Output directory {0} exists and is not a directory.".format(outdir))

        if cmap_range == 'auto':
            upper = self.cube.max().value
            lower = self.cube.std().value * 3
            cmap_range = [lower,upper]

        if transfer_function == 'auto':
            tfh = self.auto_transfer_function(cmap_range, log=log_scale)
            tfh.tf.map_to_colormap(cmap_range[0], cmap_range[1], colormap=colormap)
            tf = tfh.tf
        else:
            tf = transfer_function

        center = self.dataset.domain_center
        cam = self.dataset.h.camera(center, camera_angle, scale, size, tf,
                                    north_vector=north_vector, fields='flux')

        im  = cam.snapshot()
        images = [im]

        pb = ProgressBar(nframes)
        for ii,im in enumerate(cam.rotation(2 * np.pi, nframes,
                                            rot_vector=rot_vector)):
            images.append(im)
            im.write_png(os.path.join(outdir,"%s%04i.png" % (image_prefix,
                                                             ii+start_index)),
                         rescale=False)
            pb.update(ii+1)
        log.info("Rendering complete in {0}s".format(time.time() - pb._start_time))

        if rescale:
            _rescale_images(images, os.path.join(outdir, image_prefix))

        pipe = _make_movie(outdir, prefix=image_prefix,
                           filename=output_filename)
        
        return images
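A usage illustration (not part of the original example): assuming this method is the one exposed by the yt-cube wrapper returned by spectral_cube's ``SpectralCube.to_yt()``, a call might look like the sketch below; the FITS file name, frame count, and output names are placeholders.

from spectral_cube import SpectralCube

cube = SpectralCube.read('my_cube.fits')   # placeholder input cube
ytc = cube.to_yt()                         # wrap the cube for yt volume rendering

# Render 60 frames of a full rotation and assemble them into an mp4.
frames = ytc.quick_render_movie('movie_frames',
                                size=256,
                                nframes=60,
                                colormap='doom',
                                output_filename='cube_rotation.mp4')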
Exemplo n.º 57
def convolve_model_dir_monochromatic(model_dir, overwrite=False, max_ram=8,
                                     wav_min=-np.inf * u.micron, wav_max=np.inf * u.micron):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    overwrite : bool, optional
        Whether to overwrite the output files
    max_ram : float, optional
        The maximum amount of RAM that can be used (in GB)
    wav_min : float, optional
        The minimum wavelength to consider. Only wavelengths above this value
        will be output.
    wav_max : float, optional
        The maximum wavelength to consider. Only wavelengths below this value
        will be output.
    """

    modpar = parfile.read(os.path.join(model_dir, 'models.conf'), 'conf')
    if modpar.get('version', 1) > 1:
        raise ValueError("monochromatic filters are no longer used for new-style model directories")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    # Find number of models
    n_models = len(sed_files)

    if n_models == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(n_models, model_dir))

    # Find out apertures and wavelengths
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures
    n_wav = first_sed.n_wav
    wavelengths = first_sed.wav

    # For model grids that are very large, it is not possible to compute all
    # fluxes in one go, so we need to process in chunks in wavelength space.
    chunk_size = min(n_wav, int(np.floor(max_ram * 1024. ** 3 / (4. * 2. * n_models * n_ap))))

    if chunk_size == n_wav:
        log.info("Producing all monochromatic files in one go")
    else:
        log.info("Producing monochromatic files in chunks of {0}".format(chunk_size))

    filters = Table()
    filters['wav'] = wavelengths
    filters['filter'] = np.zeros(wavelengths.shape, dtype='S10')

    # Figure out range of wavelength indices to use
    # (wavelengths array is sorted in reverse order)
    jlo = n_wav - 1 - (wavelengths[::-1].searchsorted(wav_max) - 1)
    jhi = n_wav - 1 - wavelengths[::-1].searchsorted(wav_min)
    chunk_size = min(chunk_size, jhi - jlo + 1)

    # Loop over wavelength chunks
    for jmin in range(jlo, jhi, chunk_size):

        # Find upper wavelength to compute
        jmax = min(jmin + chunk_size - 1, jhi)

        log.info('Processing wavelengths {0} to {1}'.format(jmin, jmax))

        # Set up convolved fluxes
        fluxes = [ConvolvedFluxes(model_names=np.zeros(n_models,
                                                       dtype='U30' if six.PY3 else 'S30'),
                                  apertures=apertures,
                                  initialize_arrays=True)
                  for i in range(chunk_size)]

        b = ProgressBar(len(sed_files))

        # Loop over SEDs
        for im, sed_file in enumerate(sed_files):

            b.update()

            log.debug('Processing {0}'.format(os.path.basename(sed_file)))

            # Read in SED
            s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

            # Convolve
            for j in range(chunk_size):

                fluxes[j].central_wavelength = wavelengths[j + jmin]
                fluxes[j].apertures = apertures
                fluxes[j].model_names[im] = s.name

                if n_ap == 1:
                    fluxes[j].flux[im] = s.flux[0, j + jmin]
                    fluxes[j].error[im] = s.error[0, j + jmin]
                else:
                    fluxes[j].flux[im, :] = s.flux[:, j + jmin]
                    fluxes[j].error[im, :] = s.error[:, j + jmin]

        for j in range(chunk_size):
            fluxes[j].sort_to_match(par_table['MODEL_NAME'])
            fluxes[j].write('{0:s}/convolved/MO{1:03d}.fits'.format(model_dir, j + jmin + 1),
                            overwrite=overwrite)
            filters['filter'][j + jmin] = "MO{0:03d}".format(j + jmin + 1)

    return filters
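To make the chunking arithmetic above concrete, here is a small standalone sketch of the same memory estimate (the factor 4 * 2 is read here as 4-byte floats times two arrays, flux and error; that interpretation and the numbers below are assumptions, not from the source).

import numpy as np

def monochromatic_chunk_size(n_wav, n_models, n_ap, max_ram_gb=8):
    # Wavelengths that fit in memory: available bytes divided by the bytes
    # needed per wavelength (4-byte floats, two arrays, per model and aperture).
    per_wavelength = 4. * 2. * n_models * n_ap
    return min(n_wav, int(np.floor(max_ram_gb * 1024. ** 3 / per_wavelength)))

# Example: 500000 models with 50 apertures and 250 wavelengths in 8 GB of RAM
# gives chunks of 42 wavelengths, so the wavelength loop above runs 6 times.
print(monochromatic_chunk_size(250, 500000, 50, 8))  # -> 42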
Exemplo n.º 58
    def compute_surface(self, boundary='continuous', show_progress=True):
        '''
        Computes the SCF up to the given lag value. This is an
        expensive operation and could take a long time to calculate.

        Parameters
        ----------
        boundary : {"continuous", "cut"}
            Treat the boundary as continuous (wrap-around) or cut values
            beyond the edge (i.e., for most observational data).
        show_progress : bool, optional
            Show a progress bar when computing the surface.
        '''

        if boundary not in ["continuous", "cut"]:
            raise ValueError("boundary must be 'continuous' or 'cut'.")

        self._scf_surface = np.zeros((self.size, self.size))

        # Convert the lags into pixel units.
        pix_lags = self._to_pixel(self.roll_lags).value

        dx = pix_lags.copy()
        dy = pix_lags.copy()

        if show_progress:
            bar = ProgressBar(len(dx) * len(dy))

        for n, (x_shift, y_shift) in enumerate(product(dx, dy)):

            i, j = np.unravel_index(n, (len(dx), len(dy)))

            if x_shift == 0 and y_shift == 0:
                self._scf_surface[j, i] = 1.
                continue

            if x_shift == 0:
                tmp = self.data
            else:
                if float(x_shift).is_integer():
                    shift_func = pixel_shift
                else:
                    shift_func = fourier_shift
                tmp = shift_func(self.data, x_shift, axis=1)

            if y_shift != 0:
                if float(y_shift).is_integer():
                    shift_func = pixel_shift
                else:
                    shift_func = fourier_shift
                tmp = shift_func(tmp, y_shift, axis=2)

            if boundary is "cut":
                # Always round up to the nearest integer.
                x_shift = np.ceil(x_shift).astype(int)
                y_shift = np.ceil(y_shift).astype(int)
                if x_shift < 0:
                    x_slice_data = slice(None, tmp.shape[1] + x_shift)
                    x_slice_tmp = slice(-x_shift, None)
                else:
                    x_slice_data = slice(x_shift, None)
                    x_slice_tmp = slice(None, tmp.shape[1] - x_shift)

                if y_shift < 0:
                    y_slice_data = slice(None, tmp.shape[2] + y_shift)
                    y_slice_tmp = slice(-y_shift, None)
                else:
                    y_slice_data = slice(y_shift, None)
                    y_slice_tmp = slice(None, tmp.shape[2] - y_shift)

                data_slice = (slice(None), x_slice_data, y_slice_data)
                tmp_slice = (slice(None), x_slice_tmp, y_slice_tmp)
            elif boundary is "continuous":
                data_slice = (slice(None),) * 3
                tmp_slice = (slice(None),) * 3

            values = \
                np.nansum(((self.data[data_slice] - tmp[tmp_slice]) ** 2),
                          axis=0) / \
                (np.nansum(self.data[data_slice] ** 2, axis=0) +
                 np.nansum(tmp[tmp_slice] ** 2, axis=0))

            scf_value = 1. - \
                np.sqrt(np.nansum(values) / np.sum(np.isfinite(values)))

            if scf_value > 1:
                raise ValueError("Cannot have a correlation above 1. Check "
                                 "your input data. Contact the TurbuStat "
                                 "authors if the problem persists.")

            self._scf_surface[j, i] = scf_value

            if show_progress:
                bar.update(n + 1)
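The per-shift statistic computed in the loop above reduces to a small standalone function; a sketch (the names here are illustrative, not from TurbuStat's API), assuming ``data`` and ``shifted`` are the original and shifted (spectral, y, x) arrays:

import numpy as np

def scf_value(data, shifted):
    # 1 - sqrt( mean_over_pixels[ sum_v (D - D_s)^2 / (sum_v D^2 + sum_v D_s^2) ] ),
    # with the inner sums taken along the spectral axis and the mean over the
    # spatial pixels where the ratio is finite.
    values = np.nansum((data - shifted) ** 2, axis=0) / \
        (np.nansum(data ** 2, axis=0) + np.nansum(shifted ** 2, axis=0))
    return 1. - np.sqrt(np.nansum(values) / np.sum(np.isfinite(values)))

# A zero shift compares the cube with itself and gives exactly 1:
cube = np.random.rand(16, 8, 8)
print(scf_value(cube, cube))  # -> 1.0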
Exemplo n.º 59
name     = np.genfromtxt(folder+'zinfo_img',unpack=True,usecols=(0,),dtype='string')
k_mask   = np.array(['k' in f for f in name])
sek,elk,yrk = np.transpose([se,el,yr])[k_mask].T
yrk = (yrk-yrk[0])/365.242199

color_print('Collecting epoch files...','cyan')
epochs = glob.glob('./%s/*.*' % match_folder)

color_print('Matching the MF against the epochs','cyan')
ejecuta = 'java -jar %s/stilts.jar tmatch2 in1=./%s values1="ID" ifmt1=ascii ' % (stilts_folder, master)

def mf_match(ep):
    ej2 = 'in2=%s values2="ID_1" ifmt2=ascii icmd2=\'keepcols "ID_1 X Y"\' matcher=exact find=best join=1and2 out=./%s/%s ofmt=ascii progress=none ocmd="delcols ID_1"' % (ep, match_master, ep.split('/')[-1].replace('.match','.mfma'))
    os.system(ejecuta + ej2)

ProgressBar.map(mf_match,epochs,multiprocess=True)

color_print('Performing linear transformations','cyan')
matches = glob.glob('./%s/*.*' % match_master)
bid     = np.genfromtxt(locales,unpack=True,usecols=(0,))

def shift(ep):
    ids,x1,y1,mk,mj,x2,y2 = np.genfromtxt(ep,unpack=True)

    local_mask = np.in1d(ids,bid)
    lid,lx1,ly1,lx2,ly2  = np.transpose([ids,x1,y1,x2,y2])[local_mask].T

    loc_xy = np.transpose([lx2,ly2])
    nbrs   = NN(n_neighbors=vecinos, algorithm='auto').fit(loc_xy)

    coo_xy = np.transpose([x2,y2])