Example #1
def download_hitran(m, i, numin, numax):
    """
    Download HITRAN data for a particular molecule. Based on fetch function from
    hapi.py.

    Parameters
    ----------
    m : int
        HITRAN molecule number
    i : int
        HITRAN isotopologue number
    numin : real
        lower wavenumber bound
    numax : real
        upper wavenumber bound
    """
    iso_id = str(ISO[(m, i)][ISO_INDEX["id"]])
    mol_name = ISO[(m, i)][ISO_INDEX["mol_name"]]
    filename = os.path.join(cache_location, "{0}.data".format(mol_name))
    CHUNK = 64 * 1024
    data = dict(iso_ids_list=iso_id, numin=numin, numax=numax)
    with open(filename, "w") as fp:
        response = commons.send_request(HITRAN_URL, data, 10, request_type="GET")
        if "Content-Length" in response.headers:
            total_length = response.headers.get("Content-Length")
            pb = ProgressBar(int(total_length))
        for chunk in response.iter_content(chunk_size=CHUNK):
            fp.write(chunk.decode("utf-8"))
            try:
                pb.update(CHUNK)
            except NameError:
                pass
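A condensed sketch of the same download-with-progress pattern, assuming the requests library in place of astroquery's session helpers; the URL, file name, and function name below are placeholders. The Content-Length header sizes the bar, and ProgressBar.update() is given the absolute number of bytes written so far.

import requests
from astropy.utils.console import ProgressBar

def download_with_progress(url, filename, chunk=64 * 1024):
    response = requests.get(url, stream=True)
    # Fall back to 1 so the bar still renders when the server omits the header.
    total = int(response.headers.get("Content-Length", 0)) or 1
    pb = ProgressBar(total)
    bytes_read = 0
    with open(filename, "wb") as fp:
        for block in response.iter_content(chunk_size=chunk):
            fp.write(block)
            bytes_read += len(block)
            # update() takes the absolute progress value, capped at the total.
            pb.update(min(bytes_read, total))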
Example #2
    def _download_file(self, url, local_filepath, timeout=None, auth=None):
        """
        Download a file.  Resembles `astropy.utils.data.download_file` but uses
        the local ``_session``
        """
        response = self._session.get(url, timeout=timeout, stream=True,
                                      auth=auth)
        if 'content-length' in response.headers:
            length = int(response.headers['content-length'])
        else:
            length = 1

        pb = ProgressBar(length)

        blocksize = astropy.utils.data.conf.download_block_size

        bytes_read = 0

        with open(local_filepath, 'wb') as f:
            for block in response.iter_content(blocksize):
                f.write(block)
                bytes_read += blocksize
                pb.update(bytes_read if bytes_read <= length else length)

        response.close()
Example #3
def extract_poly_slice(cube, polygons, width=1.0):

    nx = len(polygons)
    nz = cube.shape[0]

    slice = np.zeros((nz, nx))

    p = ProgressBar(len(polygons))

    for i, polygon in enumerate(polygons):

        p.update()

        # Find bounding box
        bbxmin = int(round(np.min(polygon.x))-1)
        bbxmax = int(round(np.max(polygon.x))+2)
        bbymin = int(round(np.min(polygon.y))-1)
        bbymax = int(round(np.max(polygon.y))+2)

        # Loop through pixels that might overlap
        for xmin in np.arange(bbxmin, bbxmax):
            for ymin in np.arange(bbymin, bbymax):

                area = square_polygon_overlap_area(xmin-0.5, xmin+0.5,
                                                   ymin-0.5, ymin+0.5,
                                                   polygon.x, polygon.y)

                if area > 0:
                    slice[:, i] += cube[:, ymin, xmin] * area

    print("")

    return slice
Example #4
    def ratio_to_dens_slow(ratio, c11, c22):
        """
        Shape:
            ratio [z,y,x]
            c11 [y,x]
            c22 [y,x]
        """

        assert c11.size == c22.size == ratio[0,:,:].size

        fshape = [ratio.shape[0], ratio.shape[1]*ratio.shape[2]]
        rrs = ratio.reshape(fshape).T

        outc = (ratio*0).reshape(fshape) + np.nan

        # set up a grid...

        pb = ProgressBar((c11.size))
        for ii,(r,c1,c2) in enumerate(zip(rrs, c11.flat, c22.flat)):
            #print r.shape,c1,c2
            if np.isfinite(c1) and np.isfinite(c2) and np.any(np.isfinite(r)):
                tauratio, ok = get_tau_ratio(c1,c2)

                inds = np.argsort(tauratio[ok])
                outc[:,ii] = np.interp(r, tauratio[ok][inds], dens[ok][inds], np.nan, np.nan)
            pb.update()
        #pb.finish()

        return outc.reshape(ratio.shape)
Example #5
def fit_ch3cn_lines(spectra, save_prefix, velo=56*u.km/u.s, ampguess=-0.01):
    all_ch3cn = table.vstack([ch3cn, ch3cn_v])

    all_ch3cn.add_column(table.Column(name='FittedAmplitude', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedCenter', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedWidth', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedAmplitudeError', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedCenterError', data=np.zeros(len(all_ch3cn))))
    all_ch3cn.add_column(table.Column(name='FittedWidthError', data=np.zeros(len(all_ch3cn))))

    vkms = velo.to(u.km/u.s).value
    pl.figure(1).clf()
    ax = pl.gca()

    pb = ProgressBar(len(spectra) * len(all_ch3cn))

    ii = 0
    for sp in spectra:
        sp.xarr.convert_to_unit(u.GHz)
        mid = np.median(sp.data)
        for line in all_ch3cn:
            frq = line['Freq-GHz']*u.GHz
            if sp.xarr.in_range(frq*(1-velo/constants.c)):
                offset = ii*0.000 + mid
                ii += 1
                sp.xarr.convert_to_unit(u.km/u.s, refX=frq)
                sp.plotter(axis=ax, clear=False, offset=offset)
                sp.specfit(fittype='vheightgaussian',
                           guesses=[mid, ampguess, vkms, 2],)
                line['FittedAmplitude'] = sp.specfit.parinfo['AMPLITUDE0'].value
                line['FittedCenter'] = sp.specfit.parinfo['SHIFT0'].value
                line['FittedWidth'] = sp.specfit.parinfo['WIDTH0'].value
                line['FittedAmplitudeError'] = sp.specfit.parinfo['AMPLITUDE0'].error
                line['FittedCenterError'] = sp.specfit.parinfo['SHIFT0'].error
                line['FittedWidthError'] = sp.specfit.parinfo['WIDTH0'].error
                sp.xarr.convert_to_unit(u.GHz)
            pb.update()

    pl.xlim(vkms-14, vkms+14)
    #pl.ylim(0, offset)
    pl.draw()
    pl.show()
    pl.savefig(save_prefix+"_spectra_overlay.png")

    pl.figure(2).clf()
    pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedWidth'], 'o')
    pl.xlabel("E$_U$ (K)")
    pl.ylabel("$\sigma$ (km/s)")
    pl.ylim(0,3.5)
    pl.savefig(save_prefix+"_sigma_vs_eupper.png")


    pl.figure(3).clf()
    pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedCenter'], 'o')
    pl.xlabel("E$_U$ (K)")
    pl.ylabel("$v_{lsr}$ (km/s)")
    pl.ylim(vkms-3, vkms+3)
    pl.savefig(save_prefix+"_vcen_vs_eupper.png")
Example #6
def spectral_regrid(cube, outgrid):
    """
    Spectrally regrid a cube onto a new spectral output grid

    (this is apparently redundant with regrid_cube_hdu)
    """

    assert isinstance(cube, SpectralCube)

    inaxis = cube.spectral_axis.to(outgrid.unit)

    indiff = np.mean(np.diff(inaxis))
    outdiff = np.mean(np.diff(outgrid))
    if outdiff < 0:
        outgrid=outgrid[::-1]
        outdiff = np.mean(np.diff(outgrid))
    if indiff < 0:
        cubedata = cube.filled_data[::-1]
        inaxis = cube.spectral_axis.to(outgrid.unit)[::-1]
        indiff = np.mean(np.diff(inaxis))
    else:
        cubedata = cube.filled_data[:]
    if indiff < 0 or outdiff < 0:
        raise ValueError("impossible.")

    assert np.all(np.diff(outgrid) > 0)
    assert np.all(np.diff(inaxis) > 0)

    np.testing.assert_allclose(np.diff(outgrid), outdiff,
                               err_msg="Output grid must be linear")

    if outdiff > 2 * indiff:
        raise ValueError("Input grid has too small a spacing.  It needs to be "
                         "smoothed prior to resampling.")

    newcube = np.empty([outgrid.size, cube.shape[1], cube.shape[2]])

    yy,xx = np.indices(cube.shape[1:])

    pb = ProgressBar(xx.size)
    for ix, iy in (zip(xx.flat, yy.flat)):
        newcube[:,iy,ix] = np.interp(outgrid.value, inaxis.value,
                                     cubedata[:,iy,ix].value)
        pb.update()

    newheader = cube.header
    newheader['CRPIX3'] = 1
    newheader['CRVAL3'] = outgrid[0].value
    newheader['CDELT3'] = outdiff.value
    newheader['CUNIT3'] = outgrid.unit.to_string('FITS')

    return fits.PrimaryHDU(data=newcube, header=newheader)
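The per-pixel loop above is the most common ProgressBar pattern in this listing: size the bar with the number of pixels and advance it once per iteration. A minimal sketch of that loop with plain arrays, assuming astropy's ProgressBar, which can also iterate over a sequence and update itself (the function name is a placeholder).

import numpy as np
from astropy.utils.console import ProgressBar

def interp_each_pixel(outgrid, inaxis, cubedata):
    """Interpolate every spectrum of ``cubedata`` onto ``outgrid`` (plain arrays)."""
    newcube = np.empty((outgrid.size,) + cubedata.shape[1:])
    yy, xx = np.indices(cubedata.shape[1:])
    # Iterating the ProgressBar advances it automatically, so no explicit
    # update() call is needed inside the loop.
    for iy, ix in ProgressBar(list(zip(yy.flat, xx.flat))):
        newcube[:, iy, ix] = np.interp(outgrid, inaxis, cubedata[:, iy, ix])
    return newcube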
Example #7
def download_file(url, outdir=rawpath):
    r = requests.get(url, verify=False, stream=True)
    _, cdisp = cgi.parse_header(r.headers['content-disposition'])
    outfilename = cdisp['filename']
    fullname = os.path.join(outdir, outfilename)

    pb = ProgressBar(int(r.headers['content-length']))

    with open(fullname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
            f.flush()
            pb.update(pb._current_value + 1024)

    return fullname
Example #8
def fit_all_tex(xaxis, cube, cubefrequencies, indices, degeneracies,
                ecube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy,xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count=0

    for ii,jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:,ii,jj])):
            tmap[ii,jj] = np.nan
        else:
            if replace_bad:
                uplims = nupper_of_kkms(replace_bad, cubefrequencies,
                                        einsteinAij[indices], degeneracies,).value
            else:
                uplims = None

            nuppers = nupper_of_kkms(cube[:,ii,jj], cubefrequencies,
                                     einsteinAij[indices], degeneracies,
                                    )
            if ecube is not None:
                nupper_error = nupper_of_kkms(ecube[:,ii,jj], cubefrequencies,
                                              einsteinAij[indices], degeneracies,).value
                uplims = 3 * nupper_error
                if replace_bad:
                    raise ValueError("replace_bad is ignored now...")
            else:
                nupper_error = None

            fit_result = fit_tex(xaxis, nuppers.value,
                                 errors=nupper_error,
                                 uplims=uplims)
            tmap[ii,jj] = fit_result[1].value
            Nmap[ii,jj] = fit_result[0].value
        pb.update(count)
        count+=1

    return tmap,Nmap
Example #9
def extract_poly_slice(cube, polygons):

    nx = len(polygons)
    nz = cube.shape[0]

    total_slice = np.zeros((nz, nx))
    total_area = np.zeros((nz, nx))

    p = ProgressBar(len(polygons))

    for i, polygon in enumerate(polygons):

        p.update()

        # Find bounding box
        bbxmin = int(round(np.min(polygon.x)) - 1)
        bbxmax = int(round(np.max(polygon.x)) + 2)
        bbymin = int(round(np.min(polygon.y)) - 1)
        bbymax = int(round(np.max(polygon.y)) + 2)

        # Clip to cube box
        bbxmin = max(bbxmin, 0)
        bbxmax = min(bbxmax, cube.shape[2])
        bbymin = max(bbymin, 0)
        bbymax = min(bbymax, cube.shape[1])

        # Loop through pixels that might overlap
        for xmin in np.arange(bbxmin, bbxmax):
            for ymin in np.arange(bbymin, bbymax):

                area = square_polygon_overlap_area(xmin - 0.5, xmin + 0.5, ymin - 0.5, ymin + 0.5, polygon.x, polygon.y)

                if area > 0:
                    total_slice[:, i] += cube[:, ymin, xmin] * area
                    total_area[:, i] += area

    total_slice[total_area == 0.0] = np.nan
    total_slice[total_area > 0.0] /= total_area[total_area > 0.0]

    print("")

    return total_slice
Example #10
    def _HEADER_data_size(self, files):
        """
        Given a list of file URLs, return the data size.  This is useful for
        assessing how much data you might be downloading!
        (This is discouraged by the ALMA archive, as it puts unnecessary load
        on their system)
        """
        totalsize = 0 * u.B
        data_sizes = {}
        pb = ProgressBar(len(files))
        for ii, fileLink in enumerate(files):
            response = self._request('HEAD', fileLink, stream=False,
                                     cache=False, timeout=self.TIMEOUT)
            filesize = (int(response.headers['content-length']) * u.B).to(u.GB)
            totalsize += filesize
            data_sizes[fileLink] = filesize
            log.debug("File {0}: size {1}".format(fileLink, filesize))
            pb.update(ii + 1)
            response.raise_for_status()

        return data_sizes, totalsize.to(u.GB)
Example #11
def fit_all_tex(xaxis, cube, cubefrequencies, degeneracies,
                einsteinAij,
                errorcube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy,xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count=0

    for ii,jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:,ii,jj])):
            tmap[ii,jj] = np.nan
        else:
            if replace_bad:
                neg = cube[:,ii,jj] <= 0
                cube[neg,ii,jj] = replace_bad
            nuppers = nupper_of_kkms(cube[:,ii,jj], cubefrequencies,
                                     einsteinAij, degeneracies)
            if errorcube is not None:
                enuppers = nupper_of_kkms(errorcube[:,ii,jj], cubefrequencies,
                                          einsteinAij, degeneracies)
                errors = enuppers.value
            else:
                # No error cube was given, so fit without per-line uncertainties.
                errors = None
            fit_result = fit_tex(xaxis, nuppers.value, errors=errors)
            tmap[ii,jj] = fit_result[1].value
            Nmap[ii,jj] = fit_result[0].value
        pb.update(count)
        count+=1

    return tmap,Nmap
Example #12
def _fast_reader(index_map, data):
    """
    Use scipy.ndimage.find_objects to quickly identify subsets of the data
    to increase speed of dendrogram loading
    """

    flux_by_structure, indices_by_structure = {}, {}

    from scipy import ndimage

    idxs = np.unique(index_map[index_map > -1])

    # ndimage ignores 0 and -1, but we want index 0
    object_slices = ndimage.find_objects(index_map + 1)

    # find_objects returns a tuple that includes many None values that we
    # need to get rid of.
    object_slices = [x for x in object_slices if x is not None]

    index_cube = np.indices(index_map.shape)

    # Need to have same length, otherwise assumptions above are wrong
    assert len(idxs) == len(object_slices)
    log.debug("Creating index maps for {0} indices...".format(len(idxs)))

    p = ProgressBar(len(object_slices))
    for idx, sl in zip(idxs, object_slices):
        match = index_map[sl] == idx
        sl2 = (slice(None),) + sl
        match_inds = index_cube[sl2][:, match]
        coords = list(zip(*match_inds))
        dd = data[sl][match].tolist()
        flux_by_structure[idx] = dd
        indices_by_structure[idx] = coords
        p.update()

    return flux_by_structure, indices_by_structure
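A tiny sketch of the scipy.ndimage.find_objects trick described in the docstring above: labels of 0 and below are ignored by scipy, so the index map is shifted by +1 before the bounding slices are looked up (the toy array is hypothetical).

import numpy as np
from scipy import ndimage

index_map = np.array([[ 0,  0, -1],
                      [ 1,  1, -1],
                      [-1,  1,  2]])
# Shift by +1 so structure index 0 becomes label 1 and is not ignored.
object_slices = ndimage.find_objects(index_map + 1)
# One (row slice, column slice) tuple per label; absent labels come back as None.
object_slices = [sl for sl in object_slices if sl is not None]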
Example #13
class Pool(object):
    """
    The main pool object. Manages a set of specified workers.

    Usage::

        commands = [
            'ls -al',
            'cd /tmp && mkdir foo',
            'date',
            'echo "Hello There."',
            'sleep 2 && echo "Done."'
        ]
        lil = Pool(workers=2)
        lil.run(commands)

    Optionally accepts a ``workers`` kwarg. Default is 1.

    Optionally accepts a ``debug`` kwarg. Default is False.

    Optionally accepts a ``wait_time`` kwarg. Default is 0.1.
    """

    def __init__(self, workers=1, debug=False, wait_time=0.1):
        if workers < 1:
            raise NotEnoughWorkers("You need to use at least one worker.")

        self.workers = workers
        self.pool = {}
        self.commands = []
        self.callback = None
        self.debug = debug
        self.wait_time = wait_time
        self.progressbar = None

    def init_progressbar(self):
        """
        Initialise the progress bar.

        This only happens if run command is called with ``progressbar=True``.
        """
        self.progressbar = ProgressBar(self.command_count())

    def prepare_commands(self, commands):
        """
        A hook to override how the commands are added.

        By default, simply copies the provided command ``list`` to the
        internal ``commands`` list.
        """
        # Make a copy of the commands to run.
        self.commands = commands[:]

    def command_count(self):
        """
        Returns the number of commands to be run.

        Useful as a hook if you use a different structure for the commands.
        """
        return len(self.commands)

    def next_command(self):
        """
        Fetches the next command for processing.

        Will return ``None`` if there are no commands remaining (unless
        ``Pool.debug = True``).
        """
        try:
            return self.commands.pop(0)
        except IndexError:
            if self.debug:
                raise

        return None

    def process_kwargs(self, command):
        """
        A hook to alter the kwargs given to ``subprocess.Process``.

        Takes a ``command`` argument, which is unused by default, but can be
        used to switch the flags used.

        By default, only specifies ``shell=True``.
        """
        return {"shell": True}

    def create_process(self, command):
        """
        Given a provided command (string or list), creates a new process
        to execute the command.
        """
        logging.debug("Starting process to handle command '%s'." % command)
        kwargs = self.process_kwargs(command)
        return subprocess.Popen(command, **kwargs)

    def set_callback(self, callback=None):
        """
        Sets up a callback to be run whenever a process finishes.

        If called with ``None`` or without any args, it will clear any
        existing callback.
        """
        self.callback = callback

    def add_to_pool(self, proc):
        """
        Adds a process to the pool.
        """
        logging.debug("Adding %s to the pool." % proc.pid)
        self.pool[proc.pid] = proc

    def remove_from_pool(self, pid):
        """
        Removes a process from the pool.

        Fails silently if the process id is no longer present (unless
        ``Pool.debug = True``).
        """
        try:
            logging.debug("Removing %s from the pool" % pid)
            del (self.pool[pid])
        except KeyError:
            if self.debug:
                raise

    def inspect_pool(self):
        """
        A hook for inspecting the pool's current status.

        By default, simply makes a log message and returns the length of
        the pool.
        """
        # Call ``len()`` just once.
        pool_size = len(self.pool)
        logging.debug("Current pool size: %s" % pool_size)
        return pool_size

    def busy_wait(self):
        """
        A hook to control how often the busy-wait loop runs.

        By default, sleeps for 0.1 seconds.
        """
        time.sleep(self.wait_time)

    def run(self, commands=None, callback=None, progressbar=False):
        """
        The method to actually execute all the commands with the pool.

        Optionally accepts a ``commands`` kwarg, as a shortcut not to have to
        call ``Pool.prepare_commands``.
        """
        if commands is not None:
            self.prepare_commands(commands)

        if callback is not None:
            self.set_callback(callback)

        if progressbar is True:
            self.init_progressbar()

        keep_running = True

        while keep_running:
            self.inspect_pool()

            if len(self.pool) <= min(self.command_count(), self.workers):
                command = self.next_command()

                if not command:
                    self.busy_wait()
                    continue

                proc = self.create_process(command)
                self.add_to_pool(proc)

            # Iterate over a snapshot of the pids so finished processes can be
            # removed from the pool while looping.
            for pid in list(self.pool.keys()):
                logging.debug("Checking status on %s" % self.pool[pid].pid)

                # poll() returns None while the process is still running.
                if self.pool[pid].poll() is not None:
                    if self.callback:
                        self.callback(self.pool[pid])
                    if progressbar:
                        self.progressbar.update()
                    self.remove_from_pool(pid)

            keep_running = self.command_count() or len(self.pool) > 0
            self.busy_wait()

        if progressbar:
            self.progressbar.__exit__(None, None, None)
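A short usage sketch for the Pool above, with hypothetical shell commands; passing progressbar=True makes run() drive the ProgressBar as each process finishes, and the optional callback receives the finished subprocess.Popen object.

def report(proc):
    # Called once per finished process.
    print("pid {} exited with code {}".format(proc.pid, proc.returncode))

pool = Pool(workers=2)
pool.run(["sleep 1", "echo one", "echo two"], callback=report, progressbar=True)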
Example #14
    multigauss_uncerts = np.zeros((3 * max_gauss_comps, ) + peaktemp.shape)

    multigauss_comps = np.zeros(peaktemp.shape)

    multigauss_model_cube = np.zeros(subcube.shape)

    fit_bics = np.zeros((2, ) + peaktemp.shape)

    show_plots = False
    # show_plots = True

    pbar = ProgressBar(len(mask_positions[0]))

    for y, x in zip(mask_positions[0], mask_positions[1]):

        pbar.update()
        # print(f"{y}, {x}")

        spec = subcube[:, y, x].with_spectral_unit(u.km / u.s)

        # Fit that spectrum.

        thickHI_fit, vels, thickHI_fit_model = \
            fit_isoturbHI_model_simple(spec.spectral_axis,  # [spec_mask],
                                       spec,  # [spec_mask],
                                       peakvels[y, x],
                                       err=noise_val,
                                       delta_vcent=10 * u.km / u.s,
                                       verbose=show_plots,
                                       plot_fit=show_plots,
                                       use_emcee=False,
Example #15
    def compute_bispectrum(self, show_progress=True, use_pyfftw=False,
                           threads=1, nsamples=100, seed=1000,
                           mean_subtract=False, **pyfftw_kwargs):
        '''
        Do the computation.

        Parameters
        ----------
        show_progress : optional, bool
            Show progress bar while sampling the bispectrum.
        use_pyfftw : bool, optional
            Enable to use pyfftw, if it is installed.
        threads : int, optional
            Number of threads to use in FFT when using pyfftw.
        nsamples : int, optional
            Sets the number of samples to take at each vector
            magnitude.
        seed : int, optional
            Sets the seed for the distribution draws.
        mean_subtract : bool, optional
            Subtract the mean from the data before computing. This removes the
            "zero frequency" (i.e., constant) portion of the power, resulting
            in a loss of phase coherence along the k_1=k_2 line.
        pyfft_kwargs : Passed to
            `~turbustat.statistics.rfft_to_fft.rfft_to_fft`. See
            `here <https://hgomersall.github.io/pyFFTW/pyfftw/interfaces/interfaces.html#interfaces-additional-args>`_
            for a list of accepted kwargs.
        '''

        if mean_subtract:
            norm_data = self.data - self.data.mean()
        else:
            norm_data = self.data

        if use_pyfftw:
            if PYFFTW_FLAG:
                if pyfftw_kwargs.get('threads') is not None:
                    pyfftw_kwargs.pop('threads')

                fftarr = fft2(norm_data,
                              threads=threads,
                              **pyfftw_kwargs)
            else:
                warn("pyfftw not installed. Reverting to using numpy.")
                use_pyfftw = False

        if not use_pyfftw:
            fftarr = np.fft.fft2(norm_data)

        conjfft = np.conj(fftarr)

        bispec_shape = (int(self.shape[0] / 2.), int(self.shape[1] / 2.))

        # np.complex / np.float were removed from recent numpy; use the builtins.
        self._bispectrum = np.zeros(bispec_shape, dtype=complex)
        self._bicoherence = np.zeros(bispec_shape, dtype=float)
        self._tracker = np.zeros(self.shape, dtype=np.int16)

        biconorm = np.ones_like(self.bispectrum, dtype=float)

        if show_progress:
            bar = ProgressBar(np.prod(fftarr.shape) / 4.)

        prod = product(range(int(fftarr.shape[0] / 2.)),
                       range(int(fftarr.shape[1] / 2.)))

        with NumpyRNGContext(seed):
            for n, (k1mag, k2mag) in enumerate(prod):
                phi1 = ra.uniform(0, 2 * np.pi, nsamples)
                phi2 = ra.uniform(0, 2 * np.pi, nsamples)

                k1x = np.asarray([int(k1mag * np.cos(angle))
                                  for angle in phi1])
                k2x = np.asarray([int(k2mag * np.cos(angle))
                                  for angle in phi2])
                k1y = np.asarray([int(k1mag * np.sin(angle))
                                  for angle in phi1])
                k2y = np.asarray([int(k2mag * np.sin(angle))
                                  for angle in phi2])

                k3x = np.asarray([int(k1mag * np.cos(ang1) +
                                      k2mag * np.cos(ang2))
                                  for ang1, ang2 in zip(phi1, phi2)])
                k3y = np.asarray([int(k1mag * np.sin(ang1) +
                                      k2mag * np.sin(ang2))
                                  for ang1, ang2 in zip(phi1, phi2)])

                samps = fftarr[k1x, k1y] * fftarr[k2x, k2y] * conjfft[k3x, k3y]

                self._bispectrum[k1mag, k2mag] = np.sum(samps)

                biconorm[k1mag, k2mag] = np.sum(np.abs(samps))

                # Track where we're sampling from in fourier space
                self._tracker[k1x, k1y] += 1
                self._tracker[k2x, k2y] += 1
                self._tracker[k3x, k3y] += 1

                if show_progress:
                    bar.update(n + 1)

        self._bicoherence = (np.abs(self.bispectrum) / biconorm)
        self._bispectrum_amp = np.log10(np.abs(self.bispectrum))
Example #16
def reject(imfile, catfile, threshold):
    """Reject noisy detections.
    
    Parameters
    ----------
    imfile : str
        The path to the radio image file
    catfile : str
        The path to the source catalog, as obtained from detect.py
    threshold : float
        The signal-to-noise threshold below which sources are rejected
    """
    # Extract information from filename
    outfile = os.path.basename(catfile).split('cat_')[1].split('.dat')[0]
    region = outfile.split('region')[1].split('_band')[0]
    band = outfile.split('band')[1].split('_val')[0]
    min_value = outfile.split('val')[1].split('_delt')[0]
    min_delta = outfile.split('delt')[1].split('_pix')[0]
    min_npix = outfile.split('pix')[1]
    print("\nSource rejection for region {} in band {}".format(region, band))

    print("Loading image file")
    contfile = fits.open(imfile)
    data = contfile[0].data.squeeze()
    mywcs = wcs.WCS(contfile[0].header).celestial

    catalog = Table(Table.read(catfile, format='ascii'), masked=True)

    beam = radio_beam.Beam.from_fits_header(contfile[0].header)
    pixel_scale = np.abs(
        mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
    ppbeam = (beam.sr / (pixel_scale**2)).decompose().value

    data = data / ppbeam

    # Remove existing region files
    if os.path.isfile('./reg/reg_' + outfile + '_annulus.reg'):
        os.remove('./reg/reg_' + outfile + '_annulus.reg')
    if os.path.isfile('./reg/reg_' + outfile + '_filtered.reg'):
        os.remove('./reg/reg_' + outfile + '_filtered.reg')

    # Load in manually accepted and rejected sources
    override_accepted = []
    override_rejected = []
    if os.path.isfile('./.override/accept_' + outfile + '.txt'):
        override_accepted = np.loadtxt('./.override/accept_' + outfile +
                                       '.txt').astype('int')
    if os.path.isfile('./.override/reject_' + outfile + '.txt'):
        override_rejected = np.loadtxt('./.override/reject_' + outfile +
                                       '.txt').astype('int')
    print("\nManually accepted sources: ", set(override_accepted))
    print("Manually rejected sources: ", set(override_rejected))

    print('\nCalculating RMS values within aperture annuli')
    pb = ProgressBar(len(catalog))

    data_cube = []
    masks = []
    rejects = []
    snr_vals = []
    mean_backgrounds = []

    for i in range(len(catalog)):
        x_cen = catalog['x_cen'][i] * u.deg
        y_cen = catalog['y_cen'][i] * u.deg
        major_fwhm = catalog['major_fwhm'][i] * u.deg
        minor_fwhm = catalog['minor_fwhm'][i] * u.deg
        position_angle = catalog['position_angle'][i] * u.deg
        dend_flux = catalog['dend_flux_band{}'.format(band)][i]

        annulus_width = 1e-5 * u.deg
        center_distance = 1e-5 * u.deg

        # Define some ellipse properties in pixel coordinates
        position = coordinates.SkyCoord(x_cen,
                                        y_cen,
                                        frame='icrs',
                                        unit=(u.deg, u.deg))
        pix_position = np.array(position.to_pixel(mywcs))
        pix_major_fwhm = major_fwhm / pixel_scale
        pix_minor_fwhm = minor_fwhm / pixel_scale

        # Cutout section of the image we care about, to speed up computation time
        size = (center_distance + annulus_width + major_fwhm) * 2.2
        cutout = Cutout2D(data, position, size, mywcs, mode='partial')
        cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                         cutout.center_cutout[1])

        # Define the aperture regions needed for SNR
        ellipse_reg = regions.EllipsePixelRegion(
            cutout_center,
            pix_major_fwhm * 2.,
            pix_minor_fwhm * 2.,
            angle=position_angle
        )  # Make sure you're running the dev version of regions, otherwise the position angles will be in radians!

        innerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm)
        outerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm +
            annulus_width / pixel_scale)

        # Make masks from aperture regions
        ellipse_mask = mask(ellipse_reg, cutout)
        annulus_mask = mask(outerann_reg, cutout) - mask(innerann_reg, cutout)

        # Plot annulus and ellipse regions
        data_cube.append(cutout.data)
        masks.append([annulus_mask, ellipse_mask])

        # Calculate the SNR and aperture flux sums
        bg_rms = rms(cutout.data[annulus_mask.astype('bool')])
        peak_flux = np.max(cutout.data[ellipse_mask.astype('bool')])
        flux_rms_ratio = peak_flux / bg_rms
        snr_vals.append(flux_rms_ratio)

        # Reject bad sources below some SNR threshold
        rejected = False
        if flux_rms_ratio <= threshold:
            rejected = True

        # Process manual overrides
        if catalog['_idx'][i] in override_accepted:
            rejected = False
        if catalog['_idx'][i] in override_rejected:
            rejected = True
        rejects.append(int(rejected))

        # Add non-rejected source ellipses to a new region file
        fname = './reg/reg_' + outfile + '_filtered.reg'
        with open(fname, 'a') as fh:
            if os.stat(fname).st_size == 0:
                fh.write("icrs\n")
            if not rejected:
                fh.write("ellipse({}, {}, {}, {}, {}) # text={{{}}}\n".format(
                    x_cen.value, y_cen.value, major_fwhm.value,
                    minor_fwhm.value, position_angle.value, i))
        pb.update()

    # Plot the grid of sources
    plot_grid(data_cube, masks, rejects, snr_vals, catalog['_idx'])
    plt.suptitle(
        'region={}, band={}, min_value={}, min_delta={}, min_npix={}, threshold={:.4f}'
        .format(region, band, min_value, min_delta, min_npix, threshold))
    plt.show(block=False)

    # Get overrides from user
    print(
        'Manual overrides example: type "r319, a605" to manually reject source #319 and accept source #605.'
    )
    overrides = input(
        "\nType manual override list, or press enter to continue:\n").split(
            ', ')
    accepted_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('a'), overrides))
    ]
    rejected_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('r'), overrides))
    ]

    # Save the manually accepted and rejected sources
    fname = './.override/accept_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in accepted_list:
            fh.write('\n' + str(num))
    fname = './.override/reject_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in rejected_list:
            fh.write('\n' + str(num))
    print(
        "Manual overrides written to './.override/' and saved to source catalog. New overrides will be displayed the next time the rejection script is run."
    )

    # Process the new overrides, to be saved into the catalog
    rejects = np.array(rejects)
    acc = np.array([a[-2:] for a in accepted_list], dtype=int)
    rej = np.array([r[-2:] for r in rejected_list], dtype=int)
    rejects[acc] = 0
    rejects[rej] = 1

    # Save the catalog with new columns for SNR
    catalog.add_column(Column(snr_vals), name='snr_band' + band)
    catalog.add_column(np.invert(catalog.mask['snr_band' + band]).astype(int),
                       name='detected_band' + band)
    catalog.add_column(Column(rejects), name='rejected')
    catalog.write('./cat/cat_' + outfile + '_filtered.dat', format='ascii')
Example #17
    def compute_dendro(self,
                       show_progress=False,
                       save_dendro=False,
                       dendro_name=None,
                       dendro_obj=None,
                       periodic_bounds=False):
        '''
        Compute the dendrogram and prune to the minimum deltas.
        ** min_deltas must be in ascending order! **

        Parameters
        ----------
        show_progress : optional, bool
            Enables the progress bar in astrodendro.
        save_dendro : optional, bool
            Saves the dendrogram in HDF5 format. **Requires pyHDF5**
        dendro_name : str, optional
            Save name when save_dendro is enabled. ".hdf5" appended
            automatically.
        dendro_obj : Dendrogram, optional
            Input a pre-computed dendrogram object. It is assumed that
            the dendrogram has already been computed!
        periodic_bounds : bool, optional
            Enable when the data is periodic in the spatial dimensions.
        '''

        self._numfeatures = np.empty(self.min_deltas.shape, dtype=int)
        self._values = []

        if dendro_obj is None:
            if periodic_bounds:
                # Find the spatial dimensions
                num_axes = self.data.ndim
                spat_axes = []
                for i, axis_type in enumerate(self._wcs.get_axis_types()):
                    if axis_type["coordinate_type"] == u"celestial":
                        spat_axes.append(num_axes - i - 1)
                neighbours = periodic_neighbours(spat_axes)
            else:
                neighbours = None

            d = Dendrogram.compute(self.data,
                                   verbose=show_progress,
                                   min_delta=self.min_deltas[0],
                                   min_value=self.dendro_params["min_value"],
                                   min_npix=self.dendro_params["min_npix"],
                                   neighbours=neighbours)
        else:
            d = dendro_obj
        self._numfeatures[0] = len(d)
        self._values.append(
            np.array([struct.vmax for struct in d.all_structures]))

        if len(self.min_deltas) > 1:

            # Another progress bar for pruning steps
            if show_progress:
                print("Pruning steps.")
                bar = ProgressBar(len(self.min_deltas[1:]))

            for i, delta in enumerate(self.min_deltas[1:]):
                d.prune(min_delta=delta)
                self._numfeatures[i + 1] = len(d)
                self._values.append(
                    np.array([struct.vmax for struct in d.all_structures]))

                if show_progress:
                    bar.update(i + 1)
Example #18
    def __init__(self,
                 SED,
                 Filters,
                 redshift,
                 force_age=True,
                 madau=True,
                 units=u.uJy,
                 verbose=False):
        """
        
        Parameters
        ----------
        SED : '~smpy.CSP' object
            Built
        Filters : '~smpy.FilterSet' object
            Filter set through which to observe the set of models included
            in SED object
        redshift : float or numpy.array
            Redshift(s) at which models are to be observed
        force_age : boolean
            Require age of the stellar population to be younger than
            the age of the Universe at the desired redshift.
        madau : boolean
            Apply IGM absorption following Madau 1999 prescription
        units : '~astropy.units.Quantity'
            Desired output units, must be in spectral flux density equivalent
        verbose : boolean
            Add additional terminal outputs if true
        
        
        Attributes
        ----------
        fluxes : array
            Apparent fluxes of CSP models observed through 'Filters' at 
            the desired redshifts
        AB : array
            Apparent AB magnitudes of CSP models observed through 'Filters' at
            the desired redshifts
        
        
        Examples
        --------
        
        >>> redshifts = np.linspace(0, 3, 10)
        >>> A = Observe(CSP, Filters, redshifts)
        
        """
        self.F = Filters
        self.redshifts = np.array(redshift, ndmin=1)
        self.wave = SED.wave
        self.Ms = SED.Ms
        self.SFR = SED.SFR

        self.fluxes = np.zeros(
            np.append([len(self.redshifts),
                       len(self.F.filters)], SED.SED.shape[:-1])) * units
        self.AB = np.zeros_like(self.fluxes.value) * u.mag
        self.wl = np.zeros(len(self.F.filters)) * u.AA
        self.fwhm = np.zeros(len(self.F.filters)) * u.AA

        self.dl = cosmo.luminosity_distance(self.redshifts).cgs
        self.dl[self.redshifts == 0] = 10 * c.pc

        if verbose:
            bar = ProgressBar(len(self.redshifts))

        for i, z in enumerate(self.redshifts):
            self.lyman_abs = np.ones(len(self.wave))
            if madau:
                self.lyman_abs = np.clip(tau_madau(self.wave, z), 0., 1.)

            for j, filter in enumerate(self.F.filters):
                self.wl[j] = filter.lambda_c
                self.fwhm[j] = filter.fwhm
                self.fluxes[i, j] = self.calcflux(SED, filter, z, self.dl[i],
                                                  units)

                if not force_age:
                    # Set fluxes for ages older than universe to zero
                    agecut = (SED.tg.to(u.Gyr) > cosmo.age(z))
                    self.fluxes[i, j, :, agecut] = 0.

            if verbose:
                assert isinstance(bar, object)
                bar.update()

        # Convert spectral flux density to AB magnitudes
        self.AB = (-2.5 * np.log10(self.fluxes.to(u.Jy) /
                                   (3631 * u.Jy))) * u.mag
Example #19
    def compute_surface(self, boundary='continuous', show_progress=True):
        '''
        Computes the SCF up to the given lag value. This is an
        expensive operation and could take a long time to calculate.

        Parameters
        ----------
        boundary : {"continuous", "cut"}
            Treat the boundary as continuous (wrap-around) or cut values
            beyond the edge (i.e., for most observational data).
        show_progress : bool, optional
            Show a progress bar when computing the surface.
        '''

        if boundary not in ["continuous", "cut"]:
            raise ValueError("boundary must be 'continuous' or 'cut'.")

        self._scf_surface = np.zeros((self.size, self.size))

        # Convert the lags into pixel units.
        pix_lags = self._to_pixel(self.roll_lags).value

        dx = pix_lags.copy()
        dy = pix_lags.copy()

        if show_progress:
            bar = ProgressBar(len(dx) * len(dy))

        for n, (x_shift, y_shift) in enumerate(product(dx, dy)):

            i, j = np.unravel_index(n, (len(dx), len(dy)))

            if x_shift == 0 and y_shift == 0:
                self._scf_surface[j, i] = 1.

            if x_shift == 0:
                tmp = self.data
            else:
                if float(x_shift).is_integer():
                    shift_func = pixel_shift
                else:
                    shift_func = fourier_shift
                tmp = shift_func(self.data, x_shift, axis=1)

            if y_shift != 0:
                if float(y_shift).is_integer():
                    shift_func = pixel_shift
                else:
                    shift_func = fourier_shift
                tmp = shift_func(tmp, y_shift, axis=2)

            if boundary is "cut":
                # Always round up to the nearest integer.
                x_shift = np.ceil(x_shift).astype(int)
                y_shift = np.ceil(y_shift).astype(int)
                if x_shift < 0:
                    x_slice_data = slice(None, tmp.shape[1] + x_shift)
                    x_slice_tmp = slice(-x_shift, None)
                else:
                    x_slice_data = slice(x_shift, None)
                    x_slice_tmp = slice(None, tmp.shape[1] - x_shift)

                if y_shift < 0:
                    y_slice_data = slice(None, tmp.shape[2] + y_shift)
                    y_slice_tmp = slice(-y_shift, None)
                else:
                    y_slice_data = slice(y_shift, None)
                    y_slice_tmp = slice(None, tmp.shape[2] - y_shift)

                data_slice = (slice(None), x_slice_data, y_slice_data)
                tmp_slice = (slice(None), x_slice_tmp, y_slice_tmp)
            elif boundary is "continuous":
                data_slice = (slice(None),) * 3
                tmp_slice = (slice(None),) * 3

            values = \
                np.nansum(((self.data[data_slice] - tmp[tmp_slice]) ** 2),
                          axis=0) / \
                (np.nansum(self.data[data_slice] ** 2, axis=0) +
                 np.nansum(tmp[tmp_slice] ** 2, axis=0))

            scf_value = 1. - \
                np.sqrt(np.nansum(values) / np.sum(np.isfinite(values)))

            if scf_value > 1:
                raise ValueError("Cannot have a correlation above 1. Check "
                                 "your input data. Contact the TurbuStat "
                                 "authors if the problem persists.")

            self._scf_surface[j, i] = scf_value

            if show_progress:
                bar.update(n + 1)
Example #20
                                                               column_flat, utline303,
                                                               utline321, unoise)):
    if tcube[z,y,x] == 0:
        logh2column = np.log10(col)+22

        mf.set_constraints(ratio303321=rat, eratio303321=erat,
                           #ratio321322=ratio2, eratio321322=eratio2,
                           logh2column=logh2column, elogh2column=elogh2column,
                           logabundance=logabundance, elogabundance=elogabundance,
                           taline303=ta303.value, etaline303=err,
                           taline321=ta321.value, etaline321=err,
                           linewidth=linewidth)
        row_data = mf.get_parconstraints()
        tcube[z,y,x] = row_data['temperature_chi2']
        row_data['ratio303321'] = rat
        row_data['eratio303321'] = erat

        if ii % 100 == 0 or ii < 50:
            log.info("T: [{tmin1sig_chi2:7.2f},{temperature_chi2:7.2f},{tmax1sig_chi2:7.2f}]  R={ratio303321:6.2f}+/-{eratio303321:6.2f}".format(**row_data))
        else:
            pb.update(ii)
        tcube.flush()
    else:
        pb.update(ii)

tcube[tcube==0] = np.nan
tCube = SpectralCube(tcube, cube303.wcs, mask=BooleanArrayMask(np.isfinite(tcube), wcs=cube303.wcs))
tCube.write(hpath('chi2_temperature_cube.fits'), overwrite=True)

print()
Example #21
def dense_scatter(x,
                  y,
                  ax=None,
                  zorder=None,
                  label=None,
                  marker='o',
                  markersize=1.,
                  edgewidth=0.,
                  c='w',
                  edgecolor='k',
                  color_smoothing_box=None,
                  xscale='log',
                  yscale='log',
                  show_progress=False,
                  **kwargs):
    """
    Make scatter plots that handle overlapping data points better.

    This function attempts to make a dense scatter plot prettier by:
    + merging the 'marker edges' of overlapping data points;
    + running a median-filter to homogenize color for 'neighbouring'
      data points (size of the smoothing box specified by the user).

    Parameters
    ----------
    x, y : array_like
        x & y coordinates of the data points
    ax : `~matplotlib.axes.Axes` object, optional
        The Axes object in which to draw the scatter plot.
    zorder : float, optional
    label : string, optional
        Text label to use in the legend
        (ignored if facecolor is not a scalar)
    marker : marker style
        Default: 'o'
    markersize : float, optional
        Default: 1.
    edgewidth : float, optional
        Default: 0.
    c : color or array-like, optional
        Default: 'w'
    edgecolor : color, optional
        Default: 'k'
    color_smoothing_box : None or 2-tuple, optional
        If None, then no color smoothing will be performed.
        If a 2-tuple, then this parameter specifies the full width
        of the color smoothing box along X and Y direction.
    xscale : {'log', 'linear'}, optional
        X axis scale type (default: 'log')
    yscale : {'log', 'linear'}, optional
        Y axis scale type (default: 'log')
    show_progress : bool, optional
        Whether to show the progress bar for color smoothing.
    **kwargs
        Keywords to be passed to `~matplotlib.pyplot.scatter`

    Returns
    -------
    ax : `~matplotlib.axes.Axes` object
        The Axes object in which contours are plotted.
    """
    if ax is None:
        ax = plt.gca()

    ax.set_xscale(xscale)
    ax.set_yscale(yscale)

    if (color_smoothing_box is not None) and (np.size(c) > 1):
        if xscale == 'log':
            x_ = np.log10(x)
        elif xscale == 'linear':
            x_ = x
        else:
            raise ValueError("xscale={} not supported yet".format(xscale))
        if yscale == 'log':
            y_ = np.log10(y)
        elif yscale == 'linear':
            y_ = y
        else:
            raise ValueError("yscale={} not supported yet".format(yscale))
        newc = []
        if show_progress:
            bar = ProgressBar(range(len(x)))
        else:
            bar = None
        for (x0_, y0_) in zip(x_, y_):
            newc.append(
                np.nanmedian(c[(x_ > x0_ - color_smoothing_box[0] / 2)
                               & (x_ < x0_ + color_smoothing_box[0] / 2) &
                               (y_ > y0_ - color_smoothing_box[1] / 2) &
                               (y_ < y0_ + color_smoothing_box[1] / 2)]))
            if show_progress:
                bar.update()
    else:
        newc = c

    if edgewidth == 0:
        ax.scatter(x,
                   y,
                   marker=marker,
                   c=newc,
                   s=markersize**2,
                   linewidths=0,
                   zorder=zorder,
                   **kwargs)
    else:
        ax.scatter(x,
                   y,
                   marker=marker,
                   c=edgecolor,
                   s=(markersize + edgewidth)**2,
                   linewidths=0,
                   zorder=zorder,
                   **kwargs)
        ax.scatter(x,
                   y,
                   marker=marker,
                   c=newc,
                   s=(markersize - edgewidth)**2,
                   linewidths=0,
                   zorder=zorder,
                   **kwargs)

    if label is not None:
        if np.size(c) > 1:
            print("Unable to add legend entry: `c` is not a scalar")
        else:
            ax.plot([], [],
                    marker=marker,
                    mfc=c,
                    mec=edgecolor,
                    ms=markersize,
                    mew=edgewidth,
                    ls='',
                    label=label)

    return ax
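A short usage sketch for dense_scatter, with synthetic data (all names below are placeholders); show_progress=True turns on the ProgressBar while the point colors are median-smoothed.

import numpy as np

rng = np.random.default_rng(0)
x = 10 ** rng.normal(0.0, 0.5, 2000)
y = 10 ** rng.normal(0.0, 0.5, 2000)
c = np.log10(x * y)
# Smooth colors over a 0.2 dex box in both (log-scaled) axes.
ax = dense_scatter(x, y, c=c, color_smoothing_box=(0.2, 0.2),
                   show_progress=True)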
Example #22
def measure_dendrogram_properties(dend=None,
                                  cube303=cube303,
                                  cube321=cube321,
                                  cube13co=cube13co,
                                  cube18co=cube18co,
                                  noise_cube=noise_cube,
                                  sncube=sncube,
                                  suffix="",
                                  last_index=None,
                                  plot_some=True,
                                  line='303',
                                  write=True):

    assert (cube321.shape == cube303.shape == noise_cube.shape ==
            cube13co.shape == cube18co.shape == sncube.shape)
    assert sncube.wcs is cube303.wcs is sncube.mask._wcs

    metadata = {}
    metadata['data_unit'] = u.K
    metadata['spatial_scale'] = 7.2 * u.arcsec
    metadata['beam_major'] = 30 * u.arcsec
    metadata['beam_minor'] = 30 * u.arcsec
    metadata['wavelength'] = 218.22219 * u.GHz
    metadata['velocity_scale'] = u.km / u.s
    metadata['wcs'] = cube303.wcs

    keys = [
        'density_chi2',
        'expected_density',
        'dmin1sig_chi2',
        'dmax1sig_chi2',
        'column_chi2',
        'expected_column',
        'cmin1sig_chi2',
        'cmax1sig_chi2',
        'temperature_chi2',
        'expected_temperature',
        'tmin1sig_chi2',
        'tmax1sig_chi2',
        'eratio321303',
        'ratio321303',
        'logh2column',
        'elogh2column',
        'logabundance',
        'elogabundance',
    ]
    obs_keys = [
        'Stot303',
        'Smin303',
        'Smax303',
        'Stot321',
        'Smean303',
        'Smean321',
        'npix',
        'e303',
        'e321',
        'r321303',
        'er321303',
        '13cosum',
        'c18osum',
        '13comean',
        'c18omean',
        's_ntotal',
        'index',
        'is_leaf',
        'parent',
        'root',
        'lon',
        'lat',
        'vcen',
        'higaldusttem',
        'reff',
        'dustmass',
        'dustmindens',
        'bad',
        #'tkin_turb',
    ]
    columns = {k: [] for k in (keys + obs_keys)}

    log.debug("Initializing dendrogram temperature fitting loop")

    # FORCE wcs to match
    # (technically should reproject here)
    cube13co._wcs = cube18co._wcs = cube303.wcs
    cube13co.mask._wcs = cube18co.mask._wcs = cube303.wcs

    if line == '303':
        maincube = cube303
    elif line == '321':
        maincube = cube321
    else:
        raise ValueError("Unrecognized line: {0}".format(line))

    # Prepare an array to hold the fitted temperatures
    tcubedata = np.empty(maincube.shape, dtype='float32')
    tcubedata[:] = np.nan
    tcubeleafdata = np.empty(maincube.shape, dtype='float32')
    tcubeleafdata[:] = np.nan

    nbad = 0

    catalog = ppv_catalog(dend, metadata)
    pb = ProgressBar(len(catalog))
    for ii, row in enumerate(catalog):
        structure = dend[row['_idx']]
        assert structure.idx == row['_idx'] == ii
        dend_obj_mask = BooleanArrayMask(structure.get_mask(), wcs=cube303.wcs)
        dend_inds = structure.indices()

        view = (
            slice(dend_inds[0].min(), dend_inds[0].max() + 1),
            slice(dend_inds[1].min(), dend_inds[1].max() + 1),
            slice(dend_inds[2].min(), dend_inds[2].max() + 1),
        )
        #view2 = cube303.subcube_slices_from_mask(dend_obj_mask)
        submask = dend_obj_mask[view]
        #assert np.count_nonzero(submask.include()) == np.count_nonzero(dend_obj_mask.include())

        sn = sncube[view].with_mask(submask)
        sntot = sn.sum().value
        #np.testing.assert_almost_equal(sntot, structure.values().sum(), decimal=0)

        c303 = cube303[view].with_mask(submask)
        c321 = cube321[view].with_mask(submask)
        co13sum = cube13co[view].with_mask(submask).sum().value
        co18sum = cube18co[view].with_mask(submask).sum().value
        if hasattr(co13sum, '__len__'):
            raise TypeError(
                ".sum() applied to an array has yielded a non scalar.")

        npix = submask.include().sum()
        assert npix == structure.get_npix()
        Stot303 = c303.sum().value
        if np.isnan(Stot303):
            raise ValueError("NaN in cube.  This can't happen: the data from "
                             "which the dendrogram was derived can't have "
                             "NaN pixels.")
        Smax303 = c303.max().value
        Smin303 = c303.min().value

        Stot321 = c321.sum().value
        if npix == 0:
            raise ValueError("npix=0. This is impossible.")
        Smean303 = Stot303 / npix
        if Stot303 <= 0 and line == '303':
            raise ValueError(
                "The 303 flux is <=0.  This isn't possible because "
                "the dendrogram was derived from the 303 data with a "
                "non-zero threshold.")
        elif Stot303 <= 0 and line == '321':
            Stot303 = 0
            Smean303 = 0
        elif Stot321 <= 0 and line == '321':
            raise ValueError(
                "The 321 flux is <=0.  This isn't possible because "
                "the dendrogram was derived from the 321 data with a "
                "non-zero threshold.")
        if np.isnan(Stot321):
            raise ValueError("NaN in 321 line")
        Smean321 = Stot321 / npix

        #error = (noise_cube[view][submask.include()]).sum() / submask.include().sum()**0.5
        var = ((noise_cube[dend_obj_mask.include()]**2).sum() / npix**2)
        error = var**0.5
        if np.isnan(error):
            raise ValueError("error is nan: this is impossible by definition.")

        if line == '321' and Stot303 == 0:
            r321303 = np.nan
            er321303 = np.nan
        elif Stot321 < 0:
            r321303 = error / Smean303
            er321303 = (r321303**2 * (var / Smean303**2 + 1))**0.5
        else:
            r321303 = Stot321 / Stot303
            er321303 = (r321303**2 *
                        (var / Smean303**2 + var / Smean321**2))**0.5

        for c in columns:
            assert len(columns[c]) == ii

        columns['index'].append(row['_idx'])
        columns['s_ntotal'].append(sntot)
        columns['Stot303'].append(Stot303)
        columns['Smax303'].append(Smax303)
        columns['Smin303'].append(Smin303)
        columns['Stot321'].append(Stot321)
        columns['Smean303'].append(Smean303)
        columns['Smean321'].append(Smean321)
        columns['npix'].append(npix)
        columns['e303'].append(error)
        columns['e321'].append(error)
        columns['r321303'].append(r321303)
        columns['er321303'].append(er321303)
        columns['13cosum'].append(co13sum)
        columns['c18osum'].append(co18sum)
        columns['13comean'].append(co13sum / npix)
        columns['c18omean'].append(co18sum / npix)
        columns['is_leaf'].append(structure.is_leaf)
        columns['parent'].append(
            structure.parent.idx if structure.parent else -1)
        columns['root'].append(get_root(structure).idx)
        s_main = maincube._data[dend_inds]
        x, y, z = maincube.world[dend_inds]
        lon = ((z.value - (360 *
                           (z.value > 180))) * s_main).sum() / s_main.sum()
        lat = (y * s_main).sum() / s_main.sum()
        vel = (x * s_main).sum() / s_main.sum()
        columns['lon'].append(lon)
        columns['lat'].append(lat.value)
        columns['vcen'].append(vel.value)

        mask2d = dend_obj_mask.include().max(axis=0)[view[1:]]
        logh2column = np.log10(
            np.nanmean(column_regridded.data[view[1:]][mask2d]) * 1e22)
        if np.isnan(logh2column):
            log.info("Source #{0} has NaNs".format(ii))
            logh2column = 24
        elogh2column = elogabundance
        columns['higaldusttem'].append(
            np.nanmean(dusttem_regridded.data[view[1:]][mask2d]))

        r_arcsec = row['radius'] * u.arcsec
        reff = (r_arcsec * (8.5 * u.kpc)).to(u.pc, u.dimensionless_angles())
        mass = ((10**logh2column * u.cm**-2) * np.pi * reff**2 * 2.8 *
                constants.m_p).to(u.M_sun)
        density = (mass / (4 / 3. * np.pi * reff**3) / constants.m_p / 2.8).to(
            u.cm**-3)

        columns['reff'].append(reff.value)
        columns['dustmass'].append(mass.value)
        columns['dustmindens'].append(density.value)
        mindens = np.log10(density.value)
        if mindens < 3:
            mindens = 3

        if (r321303 < 0 or np.isnan(r321303)) and line != '321':
            raise ValueError("Ratio <0: This can't happen any more because "
                             "if either num/denom is <0, an exception is "
                             "raised earlier")
            #for k in columns:
            #    if k not in obs_keys:
            #        columns[k].append(np.nan)
        elif (r321303 < 0 or np.isnan(r321303)) and line == '321':
            for k in keys:
                columns[k].append(np.nan)
        else:
            # Replace negatives for fitting
            if Smean321 <= 0:
                Smean321 = error
            mf.set_constraints(
                ratio321303=r321303,
                eratio321303=er321303,
                #ratio321322=ratio2, eratio321322=eratio2,
                logh2column=logh2column,
                elogh2column=elogh2column,
                logabundance=logabundance,
                elogabundance=elogabundance,
                taline303=Smean303,
                etaline303=error,
                taline321=Smean321,
                etaline321=error,
                mindens=mindens,
                linewidth=10)
            row_data = mf.get_parconstraints()
            row_data['ratio321303'] = r321303
            row_data['eratio321303'] = er321303

            for k in row_data:
                columns[k].append(row_data[k])

            # Exclude bad velocities from cubes
            if row['v_cen'] < -80e3 or row['v_cen'] > 180e3:
                # Skip: there is no real structure down here
                nbad += 1
                is_bad = True
            else:
                is_bad = False
                tcubedata[
                    dend_obj_mask.include()] = row_data['expected_temperature']
                if structure.is_leaf:
                    tcubeleafdata[dend_obj_mask.include(
                    )] = row_data['expected_temperature']

            columns['bad'].append(is_bad)

            width = row['v_rms'] * u.km / u.s
            lengthscale = reff

            #REMOVED in favor of despotic version done in dendrograms.py
            # we use the analytic version here; the despotic version is
            # computed elsewhere (with appropriate gcor factors)
            #columns['tkin_turb'].append(heating.tkin_all(10**row_data['density_chi2']*u.cm**-3,
            #                                             width,
            #                                             lengthscale,
            #                                             width/lengthscale,
            #                                             columns['higaldusttem'][-1]*u.K,
            #                                             crir=0./u.s))

        if len(set(len(c) for k, c in columns.items())) != 1:
            print("Columns are different lengths.  This is not allowed.")
            import ipdb
            ipdb.set_trace()

        for c in columns:
            assert len(columns[c]) == ii + 1

        if plot_some and not is_bad and ((ii - nbad) % 100 == 0
                                         or ii - nbad < 50):
            try:
                log.info(
                    "T: [{tmin1sig_chi2:7.2f},{expected_temperature:7.2f},{tmax1sig_chi2:7.2f}]"
                    "  R={ratio321303:8.4f}+/-{eratio321303:8.4f}"
                    "  Smean303={Smean303:8.4f} +/- {e303:8.4f}"
                    "  Stot303={Stot303:8.2e}  npix={npix:6d}".format(
                        Smean303=Smean303,
                        Stot303=Stot303,
                        npix=npix,
                        e303=error,
                        **row_data))

                pl.figure(1)
                pl.clf()
                mf.denstemplot()
                pl.savefig(
                    fpath("dendrotem/diagnostics/{0}_{1}.png".format(
                        suffix, ii)))
                pl.figure(2).clf()
                mf.parplot1d_all(levels=[0.68268949213708585])
                pl.savefig(
                    fpath("dendrotem/diagnostics/1dplot{0}_{1}.png".format(
                        suffix, ii)))
                pl.draw()
                pl.show()
            except Exception as ex:
                print(ex)
        else:
            pb.update(ii + 1)

        if last_index is not None and ii >= last_index:
            break

    if last_index is not None:
        catalog = catalog[:last_index + 1]

    for k in columns:
        if k not in catalog.keys():
            catalog.add_column(table.Column(name=k, data=columns[k]))

    for mid, lo, hi, letter in (('expected_temperature', 'tmin1sig_chi2',
                                 'tmax1sig_chi2', 't'),
                                ('expected_density', 'dmin1sig_chi2',
                                 'dmax1sig_chi2', 'd'),
                                ('expected_column', 'cmin1sig_chi2',
                                 'cmax1sig_chi2', 'c')):
        catalog.add_column(
            table.Column(name='elo_' + letter,
                         data=catalog[mid] - catalog[lo]))
        catalog.add_column(
            table.Column(name='ehi_' + letter,
                         data=catalog[hi] - catalog[mid]))

    if write:
        catalog.write(tpath('PPV_H2CO_Temperature{0}.ipac'.format(suffix)),
                      format='ascii.ipac')

    # Note that there are overlaps in the catalog, which means that ORDER MATTERS
    # in the above loop.  I haven't yet checked whether large scale overwrites
    # small or vice-versa; it may be that both views of the data are interesting.
    tcube = SpectralCube(
        data=tcubedata,
        wcs=cube303.wcs,
        mask=cube303.mask,
        meta={'unit': 'K'},
        header=cube303.header,
    )
    tcubeleaf = SpectralCube(
        data=tcubeleafdata,
        wcs=cube303.wcs,
        mask=cube303.mask,
        meta={'unit': 'K'},
        header=cube303.header,
    )

    if write:
        log.info("Writing TemperatureCube")
        outpath = 'TemperatureCube_DendrogramObjects{0}.fits'
        tcube.write(hpath(outpath.format(suffix)), overwrite=True)

        outpath_leaf = 'TemperatureCube_DendrogramObjects{0}_leaves.fits'
        tcubeleaf.write(hpath(outpath_leaf.format(suffix)), overwrite=True)

    return catalog, tcube
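
# A minimal usage sketch (not part of the original source): reading back the
# products written above, assuming write=True and an empty ``suffix`` so the
# default output names are used; ``tpath`` and ``hpath`` are the same path
# helpers used in the function.
from astropy.table import Table
from spectral_cube import SpectralCube

temcat = Table.read(tpath('PPV_H2CO_Temperature.ipac'), format='ascii.ipac')
print(temcat['index', 'expected_temperature', 'elo_t', 'ehi_t'][:5])
temcube = SpectralCube.read(hpath('TemperatureCube_DendrogramObjects.fits'))
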
def gaussfit_catalog(
    fitsfile,
    region_list,
    radius=1.0 * u.arcsec,
    max_radius_in_beams=2,
    max_offset_in_beams=1,
    background_estimator=np.nanmedian,
    noise_estimator=lambda x: mad_std(x, ignore_nan=True),
    savepath=None,
    prefix="",
    covariance='param_cov',
    raise_for_failure=False,
):
    """
    Given a FITS filename and a list of regions, fit a gaussian to each region
    with an input guess based on the beam size.

    Parameters
    ----------
    fitsfile : str
        Name of the FITS file
    region_list : list
        List of regions (see https://github.com/astropy/regions/)
    radius : angular size
        The radius of the region around the region center to extract and
        include in the fit
    max_radius_in_beams : float
        The maximum allowed source radius in units of beam major axis
        (this is a limit passed to the fitter)
    max_offset_in_beams : float
        The maximum allowed offset of the source center from the guessed
        position
    background_estimator : function
        A function to apply to the background pixels (those not within 1 beam
        HWHM of the center) to estimate the background level.  The background
        will be subtracted before fitting.
    noise_estimator : function
        Function to apply to the whole data set to determine the noise level
        and therefore the appropriate per-pixel weight to get the correct
        normalization for the covariance matrix.
    savepath : str or None
        If specified, plots will be made and saved to this directory using the
        source name from the region metadata
    prefix : str
        The prefix to append to saved source names
    covariance : 'param_cov' or 'cov_x'
        Which covariance matrix should be used to estimate the parameter
        errors?  ``param_cov`` uses the diagonal of the reduced-chi^2-scaled
        covariance matrix to compute the parameter errors, while ``cov_x`` uses
        the unscaled errors.  See http://arxiv.org/abs/1009.2755 for a
        description, and criticism, of using the scaled covariance.
    raise_for_failure : bool
        If the fit was not successful, raise an exception
    """

    # need central coordinates of each object
    coords = coordinates.SkyCoord([reg.center for reg in region_list])

    fh = fits.open(fitsfile)
    data = fh[0].data.squeeze()
    header = fh[0].header
    datawcs = wcs.WCS(header).celestial
    beam = Beam.from_fits_header(header)
    pixscale = wcs.utils.proj_plane_pixel_area(datawcs)**0.5 * u.deg
    bmmin_px = (beam.minor.to(u.deg) / pixscale).decompose()
    bmmaj_px = (beam.major.to(u.deg) / pixscale).decompose()

    noise = noise_estimator(data)

    log.info("Noise estimate is {0} for file {1}".format(noise, fitsfile))

    fit_data = {}

    pb = ProgressBar(len(region_list))

    for ii, reg in enumerate(region_list):

        phot_reg = regions.CircleSkyRegion(center=reg.center, radius=radius)
        pixreg = phot_reg.to_pixel(datawcs)
        mask = pixreg.to_mask()
        mask_cutout = mask.cutout(data)
        if mask_cutout is None:
            log.warning(
                "Skipping region {0} because it failed to produce a cutout.".
                format(reg))
            continue
        cutout = mask_cutout * mask.data
        cutout_mask = mask.data.astype('bool')

        smaller_phot_reg = regions.CircleSkyRegion(center=reg.center,
                                                   radius=beam.major / 2.)  # FWHM -> HWHM
        smaller_pixreg = smaller_phot_reg.to_pixel(datawcs)
        smaller_mask = smaller_pixreg.to_mask()
        smaller_cutout = smaller_mask.cutout(data) * smaller_mask.data

        # mask out (as zeros) neighboring sources within the fitting area
        nearby_matches = phot_reg.contains(coords, datawcs)
        if any(nearby_matches):
            inds = np.where(nearby_matches)[0].tolist()
            inds.remove(ii)
            for ind in inds:
                maskoutreg = regions.EllipseSkyRegion(
                    center=region_list[ind].center,
                    width=beam.major,
                    height=beam.minor,
                    angle=beam.pa + 90 * u.deg,
                )
                mpixreg = maskoutreg.to_pixel(datawcs)
                mmask = mpixreg.to_mask()

                view, mview = slice_bbox_from_bbox(mask.bbox, mmask.bbox)
                cutout_mask[view] &= ~mmask.data.astype('bool')[mview]
                cutout = cutout * cutout_mask

        background_mask = cutout_mask.copy().astype('bool')
        background_mask[sub_bbox_slice(
            mask.bbox, smaller_mask.bbox)] &= ~smaller_mask.data.astype('bool')
        background = background_estimator(cutout[background_mask])

        sz = cutout.shape[0]
        mx = np.nanmax(smaller_cutout)
        ampguess = mx - background
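        # Initial guess: a beam-shaped Gaussian at the cutout center; the bounds
        # tie the fitted size to max_radius_in_beams and the fitted center to
        # max_offset_in_beams, both in units of the beam major axis.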

        p_init = models.Gaussian2D(
            amplitude=ampguess,
            x_mean=sz / 2,
            y_mean=sz / 2,
            x_stddev=bmmaj_px / STDDEV_TO_FWHM,
            y_stddev=bmmin_px / STDDEV_TO_FWHM,
            theta=beam.pa,
            bounds={
                'x_stddev': (bmmin_px / STDDEV_TO_FWHM * 0.75,
                             bmmaj_px * max_radius_in_beams / STDDEV_TO_FWHM),
                'y_stddev': (bmmin_px / STDDEV_TO_FWHM * 0.75,
                             bmmaj_px * max_radius_in_beams / STDDEV_TO_FWHM),
                'x_mean':
                (sz / 2 - max_offset_in_beams * bmmaj_px / STDDEV_TO_FWHM,
                 sz / 2 + max_offset_in_beams * bmmaj_px / STDDEV_TO_FWHM),
                'y_mean':
                (sz / 2 - max_offset_in_beams * bmmaj_px / STDDEV_TO_FWHM,
                 sz / 2 + max_offset_in_beams * bmmaj_px / STDDEV_TO_FWHM),
                'amplitude': (ampguess * 0.9, ampguess * 1.1)
            })

        imtofit = np.nan_to_num((cutout - background) * mask.data)
        result, fit_info, chi2, fitter = gaussfit_image(
            image=imtofit,
            gaussian=p_init,
            weights=1 / noise**2,
            plot=savepath is not None,
        )
        if 'text' in reg.meta:
            sourcename = reg.meta['text'].strip('{}')
        elif 'label' in reg.meta:
            sourcename = reg.meta['label'].strip('{}')
        else:
            raise ValueError("Regions need to have names, either as 'text' or "
                             "'label' entries.")

        if savepath is not None:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', UserWarning)
                bmarr = beam.as_kernel(pixscale=pixscale, x_size=sz,
                                       y_size=sz).array
            assert bmarr.max() > 0
            bm_ellipse = beam.ellipse_to_plot(sz / 2, sz / 2., pixscale)
            bm_ellipse.set_facecolor('none')
            bm_ellipse.set_edgecolor('r')
            pl.gca().add_patch(bm_ellipse)
            #pl.contour(bmarr, levels=[0.317*bmarr.max()], colors=['r'])
            pl.savefig(os.path.join(savepath,
                                    '{0}{1}.png'.format(prefix, sourcename)),
                       bbox_inches='tight')

        if covariance not in fit_info or fit_info[covariance] is None:
            fit_info[covariance] = np.zeros([6, 6])
            success = False
        else:
            success = True

        cx, cy = pixreg.bounding_box.ixmin + result.x_mean, pixreg.bounding_box.iymin + result.y_mean
        clon, clat = datawcs.wcs_pix2world(cx, cy, 0)

        major, minor = (result.x_stddev * STDDEV_TO_FWHM *
                        pixscale.to(u.arcsec), result.y_stddev *
                        STDDEV_TO_FWHM * pixscale.to(u.arcsec))
        majind, minind = 3, 4
        pa = (result.theta * u.rad).to(u.deg)
        if minor > major:
            major, minor = minor, major
            majind, minind = minind, majind
            pa += 90 * u.deg

        fitted_gaussian_as_beam = Beam(major=major, minor=minor, pa=pa)
        try:
            deconv_fit = fitted_gaussian_as_beam.deconvolve(beam)
            deconv_major, deconv_minor, deconv_pa = (deconv_fit.major,
                                                     deconv_fit.minor,
                                                     deconv_fit.pa)
        except ValueError:
            print("Could not deconvolve {0} from {1}".format(
                beam.__repr__(), fitted_gaussian_as_beam.__repr__()))
            deconv_major, deconv_minor, deconv_pa = np.nan, np.nan, np.nan

        fit_data[sourcename] = {
            'amplitude': result.amplitude,
            'center_x': float(clon) * u.deg,
            'center_y': float(clat) * u.deg,
            'fwhm_major': major,
            'fwhm_minor': minor,
            'pa': pa,
            'deconv_fwhm_major': deconv_major,
            'deconv_fwhm_minor': deconv_minor,
            'deconv_pa': deconv_pa,
            'chi2': chi2,
            'chi2/n': chi2 / mask.data.sum(),
            'e_amplitude': fit_info[covariance][0, 0]**0.5,
            'e_center_x': fit_info[covariance][1, 1]**0.5 * pixscale,
            'e_center_y': fit_info[covariance][2, 2]**0.5 * pixscale,
            'e_fwhm_major': (fit_info[covariance][majind, majind]**0.5 *
                             STDDEV_TO_FWHM * pixscale.to(u.arcsec)),
            'e_fwhm_minor': (fit_info[covariance][minind, minind]**0.5 *
                             STDDEV_TO_FWHM * pixscale.to(u.arcsec)),
            'e_pa': fit_info[covariance][5, 5]**0.5 * u.deg,
            'success': success,
            'ampguess': ampguess,
            'peak': mx,
            'fit_info': fit_info,
        }

        if raise_for_failure and not success:
            raise ValueError("Fit failed.")

        pb.update(ii)
        signal.signal(signal.SIGINT, signal_handler)

    return fit_data
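
# A minimal usage sketch (not part of the original source); the FITS image and
# region file names are hypothetical, and each region is assumed to carry a
# 'text' or 'label' name in its metadata.  Reading the region file with
# regions.Regions.read assumes a recent version of the regions package.
import astropy.units as u
import regions

region_list = regions.Regions.read('sources.reg', format='ds9')
results = gaussfit_catalog('continuum.fits', region_list,
                           radius=1.5 * u.arcsec, savepath='gaussfit_plots')
for name, fit in results.items():
    print(name, fit['fwhm_major'], fit['deconv_fwhm_major'], fit['success'])
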
Exemple #24
0
def download_timerange_log(start="2018-03-01",
                           end=None,
                           which="completed",
                           nprocess=1,
                           auth=None,
                           show_progress=True,
                           notebook=True,
                           verbose=True):
    """ Storing and not return forced. See download_completed_log() for individual date downloading. """
    if nprocess is None:
        nprocess = 1
    elif nprocess < 1:
        raise ValueError("nprocess must 1 or higher (None means 1)")

    if auth is not None:
        print("updating skyvision authentication")
        io.set_account("skyvision",
                       username=auth[0],
                       password=auth[1],  # auth is (username, password)
                       test=False)
    if which not in ["completed", "qa"]:
        raise ValueError(f"which can only be completed of qa, {which} given")

    dl_function = eval(f"download_{which}_log")

    # Dates to be downloaded.
    dates = get_daterange(start, end=end)

    if nprocess == 1:
        # Single processing
        if verbose:
            warnings.warn("No parallel downloading")

        _ = [
            dl_function(date, auth=auth, store=True, returns=False)
            for date in dates
        ]
    else:
        # First download manually otherwise it could fail, unclear why
        _ = dl_function(dates[0], auth=auth, store=True, returns=False)
        # Multi processing
        import multiprocessing
        if show_progress:
            from astropy.utils.console import ProgressBar
            bar = ProgressBar(len(dates[1:]), ipython_widget=notebook)
        else:
            bar = None

        if verbose:
            warnings.warn(
                f"parallel downloading ; asking for {nprocess} processes")

        # Passing arguments
        with multiprocessing.Pool(nprocess) as p:
            # Da Loop
            for j, result in enumerate(p.imap(dl_function, dates[1:])):
                if bar is not None:
                    bar.update(j)

            if bar is not None:
                bar.update(len(dates[1:]))
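
# A minimal usage sketch (not part of the original source); the date range and
# process count are hypothetical.  The logs are stored locally, nothing is
# returned.
download_timerange_log(start="2018-03-01", end="2018-03-07",
                       which="completed", nprocess=4, show_progress=True)
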
Exemple #25
0
def combine_matches(table1, table2):
    """
    Iterate through all stars in a stack of two tables, subtracting a test star's x and y from the rest of the catalog's x_cen and y_cen columns, sorting by x_cen, and testing distances starting at the top until the criterion is met."""
    complete_colnames = set(table1.colnames + table2.colnames)
    stack = vstack([table1, table2])
    stack = stack[sorted(list(complete_colnames))]
    print('Combining matches')
    numbefore = len(stack[np.where(stack['rejected'] == 0)])
    pb = ProgressBar(numbefore)
    i = 0
    while True:
        if i == len(stack) - 1:
            break

        if stack[i]['rejected'] == 1:
            i += 1
            continue

        teststar = stack[i]
        diff_table = vstack([stack[:i],
                             stack[i + 1:]])['_idx', 'x_cen', 'y_cen',
                                             'position_angle']
        diff_table['x_cen'] = np.abs(diff_table['x_cen'] - teststar['x_cen'])
        diff_table['y_cen'] = np.abs(diff_table['y_cen'] - teststar['y_cen'])
        diff_table.sort('x_cen')

        threshold = 1e-5
        found_match = False

        dist_col = MaskedColumn(length=len(diff_table),
                                name='distance',
                                mask=True)
        # Speed up the computation by only checking the 10 entries closest in x_cen
        for j in range(10):
            dist_col[j] = dist(diff_table[j]['x_cen'], diff_table[j]['y_cen'])
            if dist_col[j] <= threshold:
                found_match = True
        diff_table.add_column(dist_col)
        diff_table.sort('distance')

        if found_match:
            match_index = getrowindex(diff_table[0]['_idx'],
                                      diff_table[0]['position_angle'], stack)
            match = deepcopy(stack[match_index])
            stack.remove_row(match_index)

            # Find the common bounding ellipse between the match and the test star
            new_x_cen = np.average([match['x_cen'], teststar['x_cen']])
            new_y_cen = np.average([match['y_cen'], teststar['y_cen']])

            # REPLACE WITH COMMON BOUNDING ELLIPSE
            new_major, new_minor, new_pa = commonbeam(
                match['major_fwhm'] * u.deg, match['minor_fwhm'] * u.deg,
                match['position_angle'] * u.deg,
                teststar['major_fwhm'] * u.deg, teststar['minor_fwhm'] * u.deg,
                teststar['position_angle'] * u.deg)

            # Save new info in test star's place
            stack[i]['x_cen'] = new_x_cen
            stack[i]['y_cen'] = new_y_cen
            stack[i]['major_fwhm'] = new_major.value
            stack[i]['minor_fwhm'] = new_minor.value
            stack[i]['position_angle'] = new_pa.value

            # Replace any masked data in the teststar row with available data from the match
            for k, masked in enumerate(stack.mask[i]):  # True where the field is masked
                colname = stack.colnames[k]
                if masked:
                    stack[i][colname] = match[colname]  # fill from the matched star
        i += 1
        pb.update()

    for colname in stack.colnames:  # iterate over columns
        if colname.split('_')[0] == 'detected':  # if it's a detection column
            stack[colname].fill_value = 0  # replace masked values with 0 (False)

    numafter = len(stack[np.where(stack['rejected'] == 0)])
    print("\n{} matches combined".format(numbefore - numafter))
    return stack
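
# A minimal usage sketch (not part of the original source): table1 and table2
# stand for hypothetical astropy Tables of detections from two bands, each
# carrying x_cen, y_cen, major/minor_fwhm, position_angle and rejected columns.
combined = combine_matches(table1, table2)
combined = combined.filled()  # apply the fill values (0 for 'detected_*' columns)
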
Exemple #26
0
def test_files(filename,
               erasebad=True,
               nprocess=1,
               show_progress=True,
               notebook=False,
               redownload=False,
               **kwargs):
    """ 
    
    Parameters
    ----------
    filename: [filepath or list of]
        File(s) to be checked.

    erasebad: [bool] -optional-
        Should corrupted files be removed from your local directory?

    redownload: [bool] -optional-
        Should corrupted files be automatically re-downloaded?
        (only works for IRSA files: '/sci/', '/raw/', '/ref/', '/cal/')

    nprocess: [int] -optional-
        Number of parallel processes.

    show_progress: [bool] -optional-
        Show the progress bar?

    notebook: [bool]
        Are you running from a notebook?
        Ignored if show_progress=False.
       
    Returns
    -------
    list of corrupted/bad files (might already be removed, see erasebad)

    """
    if nprocess is None:
        nprocess = 1
    elif nprocess < 1:
        raise ValueError("nprocess must 1 or higher (None means 1)")

    filename = np.atleast_1d(filename)

    if nprocess == 1:
        fileissue = [
            f for f in filename if not _test_file_(
                f, erasebad=erasebad, redownload=redownload, **kwargs)
        ]
    else:
        import multiprocessing
        if show_progress:
            from astropy.utils.console import ProgressBar
            bar = ProgressBar(len(filename), ipython_widget=notebook)
        else:
            bar = None

        erasebad_ = [erasebad] * len(filename)
        fileissue = []

        with multiprocessing.Pool(nprocess) as p:
            # Da Loop
            for j, isgood in enumerate(
                    p.imap(_test_file_multiprocess_, zip(filename,
                                                         erasebad_))):
                if bar is not None:
                    bar.update(j)
                if not isgood:
                    fileissue.append(filename[j])

            if bar is not None:
                bar.update(len(filename))

    if len(fileissue) > 0:
        warnings.warn("%d file failed" % len(fileissue))
        if redownload:
            from .buildurl import _localsource_to_source_
            to_download_urls, locations = np.asarray([
                _localsource_to_source_(filename) for filename in fileissue
            ]).T
            source_to_dl = ["irsa"]
            for source in source_to_dl:
                source_dl = np.in1d(locations, [source])
                print("Downloading %d files from %s" %
                      (len(source_dl[source_dl]), source))
                download_url(np.asarray(to_download_urls)[source_dl],
                             np.asarray(fileissue)[source_dl],
                             show_progress=show_progress,
                             notebook=notebook,
                             verbose=True,
                             overwrite=True,
                             nprocess=nprocess,
                             cookies=get_cookie(*_load_id_(source)),
                             **kwargs)
            for source_ in np.unique(locations):
                if source_ is not None and source_ not in source_to_dl:
                    warnings.warn(
                        "files from %s have not downloaded (not implemented)."
                        % source_)

        return fileissue
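
# A minimal usage sketch (not part of the original source); the file path is
# hypothetical.  The return value is the list of corrupted files (None if all
# files pass).
bad_files = test_files(["/path/to/ztf_sciimg_example.fits"],
                       erasebad=False, show_progress=False)
print(bad_files)
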
Exemple #27
0
def detect(infile,
           region,
           band,
           min_value=0.000325,
           min_delta=0.0005525,
           min_npix=7.5,
           plot=False,
           verbose=True):

    outfile = 'region{}_band{}_val{:.5g}_delt{:.5g}_pix{}'.format(
        region, band, min_value, min_delta, min_npix)
    contfile = fits.open(infile)  # load in fits image
    da = contfile[0].data.squeeze()  # get rid of extra axes
    print(da)

    mywcs = wcs.WCS(
        contfile[0].header
    ).celestial  # set up world coordinate system, ditch extra dimensions
    beam = radio_beam.Beam.from_fits_header(contfile[0].header)

    d = Dendrogram.compute(da,
                           min_value=min_value,
                           min_delta=min_delta,
                           min_npix=min_npix,
                           wcs=mywcs,
                           verbose=verbose)
    pixel_scale = np.abs(
        mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg

    metadata = {
        'data_unit': u.Jy / u.beam,
        'spatial_scale': pixel_scale,
        'beam_major': beam.major,
        'beam_minor': beam.minor,
        'wavelength': 9.298234612192E+10 * u.Hz,
        'velocity_scale': u.km / u.s,
        'wcs': mywcs,
    }

    cat = pp_catalog(d.leaves, metadata)  # set up position-position catalog
    cat['_idx'] = range(len(cat))

    # Use FWHM for ellipse dimensions instead of sigma
    cat['major_sigma'] = cat['major_sigma'] * np.sqrt(8 * np.log(2))
    cat['minor_sigma'] = cat['minor_sigma'] * np.sqrt(8 * np.log(2))
    cat.rename_column('major_sigma', 'major_fwhm')
    cat.rename_column('minor_sigma', 'minor_fwhm')
    cat.rename_column('flux', 'dend_flux_band{}'.format(band))

    # Rename _idx to include the band number in the hundreds digit
    for i in range(len(cat)):
        cat['_idx'][i] = int('{}{:02d}'.format(band, cat['_idx'][i]))

    # Output the catalog and region files
    cat.write('./cat/cat_' + outfile + '.dat', format='ascii')
    savereg(cat, './reg/reg_' + outfile + '.reg')

    if plot:  # create PDF plots of contour regions, if enabled
        ax = plt.gca()
        ax.cla()
        plt.imshow(da,
                   cmap='gray_r',
                   interpolation='none',
                   origin='lower',
                   vmax=0.01,
                   vmin=-0.001)
        pltr = d.plotter()

        if verbose:
            print("Plotting contours to PDF...")
            pb = ProgressBar(len(d.leaves))

        for struct in d.leaves:  # iterate over each of the leaf structures
            pltr.plot_contour(ax,
                              structure=struct,
                              colors=['r'],
                              linewidths=[0.9],
                              zorder=5)
            if struct.parent:
                while struct.parent:
                    struct = struct.parent
                pltr.plot_contour(ax,
                                  structure=struct,
                                  colors=[(0, 1, 0, 1)],
                                  linewidths=[0.5])
            if verbose:
                pb.update()

        cntr = plt.gca().collections

        plt.setp([x for x in cntr if x.get_color()[0, 0] == 1], linewidth=0.25)
        plt.setp([x for x in cntr if x.get_color()[0, 1] == 1], linewidth=0.25)
        plt.savefig('./contour/contour_' + outfile + '.pdf')
        plt.axis((1125.4006254228616, 1670.3650637799306, 1291.6829155596627,
                  1871.8063499397681))
        plt.setp([x for x in cntr if x.get_color()[0, 0] == 1],
                 linewidth=0.75)  # Red
        plt.setp([x for x in cntr if x.get_color()[0, 1] == 1],
                 linewidth=0.5)  # Green
        plt.savefig('./contour/contour_' + outfile + 'zoom.pdf')
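
# A minimal usage sketch (not part of the original source); the input image and
# the region/band labels are hypothetical, and the ./cat and ./reg output
# directories (plus ./contour when plot=True) are assumed to exist.
detect('region1_band3_continuum.fits', region=1, band=3,
       plot=False, verbose=False)
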
Exemple #28
0
    def compute_dendro(self, show_progress=False, save_dendro=False,
                       dendro_name=None, dendro_obj=None,
                       periodic_bounds=False):
        '''
        Compute the dendrogram and prune to the minimum deltas.
        ** min_deltas must be in ascending order! **

        Parameters
        ----------
        show_progress : optional, bool
            Enables the progress bar in astrodendro.
        save_dendro : optional, bool
            Saves the dendrogram in HDF5 format. **Requires pyHDF5**
        dendro_name : str, optional
            Save name when save_dendro is enabled. ".hdf5" appended
            automatically.
        dendro_obj : Dendrogram, optional
            Input a pre-computed dendrogram object. It is assumed that
            the dendrogram has already been computed!
        periodic_bounds : bool, optional
            Enable when the data is periodic in the spatial dimensions.
        '''

        self._numfeatures = np.empty(self.min_deltas.shape, dtype=int)
        self._values = []

        if dendro_obj is None:
            if periodic_bounds:
                # Find the spatial dimensions
                num_axes = self.data.ndim
                spat_axes = []
                for i, axis_type in enumerate(self._wcs.get_axis_types()):
                    if axis_type["coordinate_type"] == u"celestial":
                        spat_axes.append(num_axes - i - 1)
                neighbours = periodic_neighbours(spat_axes)
            else:
                neighbours = None

            d = Dendrogram.compute(self.data, verbose=show_progress,
                                   min_delta=self.min_deltas[0],
                                   min_value=self.dendro_params["min_value"],
                                   min_npix=self.dendro_params["min_npix"],
                                   neighbours=neighbours)
        else:
            d = dendro_obj
        self._numfeatures[0] = len(d)
        self._values.append(np.array([struct.vmax for struct in
                                      d.all_structures]))

        if len(self.min_deltas) > 1:

            # Another progress bar for pruning steps
            if show_progress:
                print("Pruning steps.")
                bar = ProgressBar(len(self.min_deltas[1:]))

            for i, delta in enumerate(self.min_deltas[1:]):
                d.prune(min_delta=delta)
                self._numfeatures[i + 1] = len(d)
                self._values.append(np.array([struct.vmax for struct in
                                              d.all_structures]))

                if show_progress:
                    bar.update(i + 1)
            dens_slopes.append(dens_slope.slope)

            cube_hdu = make_ppv(velocity,
                                density,
                                vel_disp=np.std(velocity),
                                T=T,
                                threads=4,
                                verbose=True,
                                chan_width=dv_eff / 2.,
                                v_min=-60 * u.km / u.s,
                                v_max=60 * u.km / u.s,
                                max_chan=2000)

            filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}.fits"\
                .format(np.abs(dens), np.abs(vel), i, cube_size)

            cube_hdu.writeto(osjoin(out_dir, filename), overwrite=True)

            bar.update()


    filename = "fBM_3D_velocity_slopes_size_{0}.npy"\
        .format(cube_size)

    np.save(osjoin(out_dir, filename), np.array(vel_slopes))

    filename = "fBM_3D_density_slopes_size_{0}.npy"\
        .format(cube_size)

    np.save(osjoin(out_dir, filename), np.array(dens_slopes))
            logabundance=logabundance,
            elogabundance=elogabundance,
            taline303=ta303.value,
            etaline303=err,
            taline321=ta321.value,
            etaline321=err,
            linewidth=linewidth)
        row_data = mf.get_parconstraints()
        tcube[z, y, x] = row_data['temperature_chi2']
        row_data['ratio303321'] = rat
        row_data['eratio303321'] = erat

        if ii % 100 == 0 or ii < 50:
            log.info(
                "T: [{tmin1sig_chi2:7.2f},{temperature_chi2:7.2f},{tmax1sig_chi2:7.2f}]  R={ratio303321:6.2f}+/-{eratio303321:6.2f}"
                .format(**row_data))
        else:
            pb.update(ii)
        tcube.flush()
    else:
        pb.update(ii)

tcube[tcube == 0] = np.nan
tCube = SpectralCube(tcube,
                     cube303.wcs,
                     mask=BooleanArrayMask(np.isfinite(tcube),
                                           wcs=cube303.wcs))
tCube.write(hpath('chi2_temperature_cube.fits'), overwrite=True)

print()
Exemple #31
0
def var_cov_cube(cube, mean_sub=False, progress_bar=True):
    '''
    Compute the variance-covariance matrix of a data cube, with proper
    handling of NaNs.

    Parameters
    ----------
    cube : numpy.ndarray
        PPV cube. Spectral dimension assumed to be 0th axis.
    mean_sub : bool, optional
        Subtract column means.
    progress_bar : bool, optional
        Show a progress bar, since this operation could be slow for large
        cubes.

    Returns
    -------
    cov_matrix : numpy.ndarray
        Computed covariance matrix.
    '''

    n_velchan = cube.shape[0]

    cov_matrix = np.zeros((n_velchan, n_velchan))

    if progress_bar:
        bar = ProgressBar(n_velchan)

    for i, chan in enumerate(_iter_2D(cube)):
        # Set the nans to tiny values
        chan[np.isnan(chan)] = np.finfo(chan.dtype).eps

        norm_chan = chan
        if mean_sub:
            norm_chan -= np.nanmean(chan)
        for j, chan2 in enumerate(_iter_2D(cube[:i + 1, :, :])):
            norm_chan2 = chan2
            if mean_sub:
                norm_chan2 -= np.nanmean(chan2)

            divisor = np.sum(np.isfinite(norm_chan * norm_chan2))

            # Apply Bessel's correction when mean subtracting
            if mean_sub:
                divisor -= 1.0

            cov_matrix[i, j] = \
                np.nansum(norm_chan * norm_chan2) / divisor

        # Variances
        # Divided in half to account for doubling in line below
        var_divis = np.sum(np.isfinite(norm_chan))
        if mean_sub:
            var_divis -= 1.0

        cov_matrix[i, i] = 0.5 * \
            np.nansum(norm_chan * norm_chan) / var_divis

        if progress_bar:
            bar.update(i + 1)

    cov_matrix = cov_matrix + cov_matrix.T

    return np.nan_to_num(cov_matrix)
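
# A minimal usage sketch (not part of the original source): the channel-channel
# covariance of a small random PPV cube, e.g. as input to a PCA-style
# eigen-decomposition.
import numpy as np

cube = np.random.randn(16, 32, 32)
cov = var_cov_cube(cube, mean_sub=True, progress_bar=False)
eigvals = np.linalg.eigvalsh(cov)  # symmetric matrix -> real eigenvalues
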
Exemple #32
0
def var_cov_cube(cube, mean_sub=False, progress_bar=True):
    '''
    Compute the variance-covariance matrix of a data cube, with proper
    handling of NaNs.

    Parameters
    ----------
    cube : numpy.ndarray
        PPV cube. Spectral dimension assumed to be 0th axis.
    mean_sub : bool, optional
        Subtract column means.
    progress_bar : bool, optional
        Show a progress bar, since this operation could be slow for large
        cubes.

    Returns
    -------
    cov_matrix : numpy.ndarray
        Computed covariance matrix.
    '''

    n_velchan = cube.shape[0]

    cov_matrix = np.zeros((n_velchan, n_velchan))

    if progress_bar:
        bar = ProgressBar(n_velchan)

    for i, chan in enumerate(_iter_2D(cube)):
        norm_chan = chan
        if mean_sub:
            norm_chan -= np.nanmean(chan)
        for j, chan2 in enumerate(_iter_2D(cube[:i + 1, :, :])):
            norm_chan2 = chan2
            if mean_sub:
                norm_chan2 -= np.nanmean(chan2)

            divisor = np.sum(np.isfinite(norm_chan * norm_chan2))

            # Apply Bessel's correction when mean subtracting
            if mean_sub:
                divisor -= 1.0

            cov_matrix[i, j] = \
                np.nansum(norm_chan * norm_chan2) / divisor

        # Variances
        # Divided in half to account for doubling in line below
        var_divis = np.sum(np.isfinite(norm_chan))
        if mean_sub:
            var_divis -= 1.0

        cov_matrix[i, i] = 0.5 * \
            np.nansum(norm_chan * norm_chan) / var_divis

        if progress_bar:
            bar.update(i + 1)

    cov_matrix = cov_matrix + cov_matrix.T

    return np.nan_to_num(cov_matrix)
Exemple #33
0
def add_data_to_cube(cubefilename, data=None, filename=None, fileheader=None,
                     flatheader='header.txt',
                     cubeheader='cubeheader.txt', nhits=None,
                     smoothto=1, baselineorder=5, velocityrange=None,
                     excludefitrange=None, noisecut=np.inf, do_runscript=False,
                     linefreq=None, allow_smooth=True,
                     data_iterator=data_iterator,
                     coord_iterator=coord_iterator,
                     velo_iterator=velo_iterator,
                     progressbar=False, coordsys='galactic',
                     datalength=None,
                     velocity_offset=0.0, negative_mean_cut=None,
                     add_with_kernel=False, kernel_fwhm=None, fsw=False,
                     kernel_function=Gaussian2DKernel,
                     diagnostic_plot_name=None, chmod=False,
                     continuum_prefix=None,
                     debug_breakpoint=False,
                     default_unit=u.km/u.s,
                     make_continuum=True,
                     weightspec=None,
                     varweight=False):
    """
    Given a .fits file that contains a binary table of spectra (e.g., as
    you would get from the GBT mapping "pipeline" or the reduce_map.pro aoidl
    file provided by Adam Ginsburg), adds each spectrum into the cubefile.

    velocity_offset : 0.0
        Amount to add to the velocity vector before adding it to the cube
        (useful for FSW observations)
    weightspec : np.ndarray
        A spectrum with the same size as the input arrays but containing the relative
        weights of the data
    """

    #if not default_unit.is_equivalent(u.km/u.s):
    #    raise TypeError("Default unit is not a velocity equivalent.")

    if type(nhits) is str:
        log.debug("Loading nhits from %s" % nhits)
        nhits = pyfits.getdata(nhits)
    elif type(nhits) is not np.ndarray:
        raise TypeError("nhits must be a .fits file or an ndarray, but it is ",type(nhits))
    naxis2,naxis1 = nhits.shape

    if velocity_offset and not fsw:
        raise ValueError("Using a velocity offset, but obs type is not "
                         "frequency switched; this is almost certainly wrong, "
                         "but if there's a case for it I'll remove this.")
    if not hasattr(velocity_offset,'unit'):
        velocity_offset = velocity_offset*default_unit


    contimage = np.zeros_like(nhits)
    nhits_once = np.zeros_like(nhits)

    log.debug("Loading data cube {0}".format(cubefilename))
    t0 = time.time()
    # rescale image to weight by number of observations
    image = pyfits.getdata(cubefilename)*nhits
    log.debug(" ".join(("nhits statistics: mean, std, nzeros, size",str(nhits.mean()),str(nhits.std()),str(np.sum(nhits==0)), str(nhits.size))))
    log.debug(" ".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size), str(np.sum(np.isnan(image))))))
    log.debug(" ".join(("nhits shape: ",str(nhits.shape))))
    # default is to set empty pixels to NAN; have to set them
    # back to zero
    image[image!=image] = 0.0
    header = pyfits.getheader(cubefilename)
    # debug print "Cube shape: ",image.shape," naxis3: ",header.get('NAXIS3')," nhits shape: ",nhits.shape

    log.debug("".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size))))

    flathead = get_header(flatheader)
    naxis3 = image.shape[0]
    wcs = pywcs.WCS(flathead)
    cwcs = pywcs.WCS(header)
    vwcs = cwcs.sub([pywcs.WCSSUB_SPECTRAL])
    vunit = u.Unit(vwcs.wcs.cunit[vwcs.wcs.spec])
    cubevelo = vwcs.wcs_pix2world(np.arange(naxis3),0)[0] * vunit
    cd3 = vwcs.wcs.cdelt[vwcs.wcs.spec] * vunit

    if not vunit.is_equivalent(default_unit):
        raise ValueError("The units of the cube and the velocity axis are "
                         "possibly not equivalent.  Change default_unit to "
                         "the appropriate unit (probably {0})".format(vunit))

    if add_with_kernel:
        if wcs.wcs.has_cd():
            cd = np.abs(wcs.wcs.cd[1,1])
        else:
            cd = np.abs(wcs.wcs.cdelt[1])
        # Alternative implementation; may not work for .cd?
        #cd = np.abs(np.prod((wcs.wcs.get_cdelt() * wcs.wcs.get_pc().diagonal())))**0.5

    if velocityrange is not None:
        if hasattr(velocityrange, 'unit'):
            v1,v4 = velocityrange
        else:
            v1,v4 = velocityrange * default_unit
        ind1 = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2 = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # stupid hack.  REALLY stupid hack.  Don't crop.
        if np.abs(ind2-image.shape[0]) < 5:
            ind2 = image.shape[0]
        if np.abs(ind1) < 5:
            ind1 = 0

        #print "Velo match for v1,v4 = %f,%f: %f,%f" % (v1,v4,cubevelo[ind1],cubevelo[ind2])
        # print "Updating CRPIX3 from %i to %i. Cropping to indices %i,%i" % (header.get('CRPIX3'),header.get('CRPIX3')-ind1,ind1,ind2)
        # I think this could be disastrous: cubevelo is already set, but now we're changing how it's set in the header!
        # I don't think there's any reason to have this in the first place
        # header.set('CRPIX3',header.get('CRPIX3')-ind1)

        # reset v1,v4 to the points we just selected
        v1 = cubevelo[ind1]
        v4 = cubevelo[ind2-1]
    else:
        ind1=0
        ind2 = image.shape[0]
        v1,v4 = min(cubevelo),max(cubevelo)

    # debug print "Cube has %i v-axis pixels from %f to %f.  Crop range is %f to %f" % (naxis3,cubevelo.min(),cubevelo.max(),v1,v4)

    #if abs(cdelt) < abs(cd3):
    #    print "Spectra have CD=%0.2f, cube has CD=%0.2f.  Will smooth & interpolate." % (cdelt,cd3)

    # Disable progressbar if debug-logging is enabled (they clash)
    if progressbar and 'ProgressBar' in globals() and log.level > 10:
        if datalength is None:
            pb = ProgressBar(len(data))
        else:
            pb = ProgressBar(datalength)
    else:
        progressbar = False

    skipped = []

    for spectrum,pos,velo in zip(data_iterator(data,fsw=fsw),
                                 coord_iterator(data,coordsys_out=coordsys),
                                 velo_iterator(data,linefreq=linefreq)):

        if log.level <= 10:
            t1 = time.time()

        if not hasattr(velo,'unit'):
            velo = velo * default_unit

        glon,glat = pos
        cdelt = velo[1]-velo[0]
        if cdelt < 0:
            # for interpolation, require increasing X axis
            spectrum = spectrum[::-1]
            velo = velo[::-1]
            if log.level < 5:
                log.debug("Reversed spectral axis... ")

        if (velo.max() < cubevelo.min() or velo.min() > cubevelo.max()):
            raise ValueError("Data out of range.")

        if progressbar and log.level > 10:
            pb.update()

        velo += velocity_offset

        if glon != 0 and glat != 0:
            x,y = wcs.wcs_world2pix(glon,glat,0)
            if np.isnan(x) or np.isnan(y):
                log.warning("Skipping NaN point {0}, {1} ...".format(glon, glat))
                continue
            if log.level < 10:
                log.debug("At point {0},{1} ...".format(glon, glat))
            if abs(cdelt) < abs(cd3) and allow_smooth:
                # need to smooth before interpolating to preserve signal
                kernwidth = abs(cd3/cdelt/2.35).decompose().value
                if kernwidth > 2 and kernwidth < 10:
                    xr = kernwidth*5
                    npx = np.ceil(xr*2 + 1)
                elif kernwidth > 10:
                    raise ValueError('Too much smoothing')
                else:
                    xr = 5
                    npx = 11
                #kernel = np.exp(-(np.linspace(-xr,xr,npx)**2)/(2.0*kernwidth**2))
                #kernel /= kernel.sum()
                kernel = Gaussian1DKernel(stddev=kernwidth, x_size=npx)
                smspec = np.convolve(spectrum,kernel,mode='same')
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     smspec)
            else:
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     spectrum)
            OK = (datavect[ind1:ind2] == datavect[ind1:ind2])

            if excludefitrange is None:
                include = OK
            else:
                # Exclude certain regions (e.g., the spectral lines) when computing the noise
                include = OK.copy()

                if not hasattr(excludefitrange,'unit'):
                    excludefitrange = excludefitrange * default_unit

                # Convert velocities to indices
                exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

                # Loop through exclude_inds pairwise
                for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
                    # Do not include the excluded regions
                    include[i1:i2] = False

                if include.sum() == 0:
                    raise ValueError("All data excluded.")

            noiseestimate = datavect[ind1:ind2][include].std()
            contestimate = datavect[ind1:ind2][include].mean()

            if noiseestimate > noisecut:
                log.info("Skipped a data point at %f,%f in file %s because it had excessive noise %f" % (x,y,filename,noiseestimate))
                skipped.append(True)
                continue
            elif negative_mean_cut is not None and contestimate < negative_mean_cut:
                log.info("Skipped a data point at %f,%f in file %s because it had negative continuum %f" % (x,y,filename,contestimate))
                skipped.append(True)
                continue
            elif OK.sum() == 0:
                log.info("Skipped a data point at %f,%f in file %s because it had NANs" % (x,y,filename))
                skipped.append(True)
                continue
            elif OK.sum()/float(abs(ind2-ind1)) < 0.5:
                log.info("Skipped a data point at %f,%f in file %s because it had %i NANs" % (x,y,filename,np.isnan(datavect[ind1:ind2]).sum()))
                skipped.append(True)
                continue
            if log.level < 10:
                log.debug("did not skip...")

            if varweight:
                weight = 1./noiseestimate**2
            else:
                weight = 1.

            if weightspec is None:
                wspec = weight
            else:
                wspec = weight * weightspec


            if 0 < int(np.round(x)) < naxis1 and 0 < int(np.round(y)) < naxis2:
                if add_with_kernel:
                    fwhm = np.sqrt(8*np.log(2))
                    kernel_size = kd = int(np.ceil(kernel_fwhm/fwhm/cd * 5))
                    if kernel_size < 5:
                        kernel_size = kd = 5
                    if kernel_size % 2 == 0:
                        kernel_size = kd = kernel_size+1
                    if kernel_size > 100:
                        raise ValueError("Huge kernel - are you sure?")
                    kernel_middle = mid = (kd-1)/2.
                    xinds,yinds = (np.mgrid[:kd,:kd]-mid+np.array([np.round(x),np.round(y)])[:,None,None]).astype('int')
                    # This kernel is NOT centered, and that's the bloody point.
                    # (I made a very stupid error and used Gaussian2DKernel,
                    # which is strictly centered, in a previous version)
                    kernel2d = np.exp(-((xinds-x)**2+(yinds-y)**2)/(2*(kernel_fwhm/fwhm/cd)**2))

                    dim1 = ind2-ind1
                    vect_to_add = np.outer(datavect[ind1:ind2],kernel2d).reshape([dim1,kd,kd])
                    vect_to_add[~OK] = 0  # zero out the NaN spectral channels

                    # need to slice out edges
                    if yinds.max() >= naxis2 or yinds.min() < 0:
                        yok = (yinds[0,:] < naxis2) & (yinds[0,:] >= 0)
                        xinds,yinds = xinds[:,yok],yinds[:,yok]
                        vect_to_add = vect_to_add[:,:,yok]
                        kernel2d = kernel2d[:,yok]
                    if xinds.max() >= naxis1 or xinds.min() < 0:
                        xok = (xinds[:,0] < naxis1) & (xinds[:,0] >= 0)
                        xinds,yinds = xinds[xok,:],yinds[xok,:]
                        vect_to_add = vect_to_add[:,xok,:]
                        kernel2d = kernel2d[xok,:]

                    image[ind1:ind2,yinds,xinds] += vect_to_add*wspec
                    # NaN spectral bins are not appropriately downweighted... but they shouldn't exist anyway...
                    nhits[yinds,xinds] += kernel2d*weight
                    contimage[yinds,xinds] += kernel2d * contestimate*weight
                    nhits_once[yinds,xinds] += kernel2d*weight

                else:
                    image[ind1:ind2,int(np.round(y)),int(np.round(x))][OK] += datavect[ind1:ind2][OK]*weight
                    nhits[int(np.round(y)),int(np.round(x))] += weight
                    contimage[int(np.round(y)),int(np.round(x))] += contestimate*weight
                    nhits_once[int(np.round(y)),int(np.round(x))] += weight

                if log.level < 10:
                    log.debug("Z-axis indices are %i,%i..." % (ind1,ind2,))
                    log.debug("Added a data point at %i,%i" % (int(np.round(x)),int(np.round(y))))
                skipped.append(False)
            else:
                skipped.append(True)
                log.info("Skipped a data point at x,y=%f,%f "
                         "lon,lat=%f,%f in file %s because "
                         "it's out of the grid" % (x,y,glon,glat,filename))

            if debug_breakpoint:
                import ipdb
                ipdb.set_trace()

        if log.level <= 10:
            dt = time.time() - t1
            log.debug("Completed x,y={x:4.0f},{y:4.0f}"
                      " ({x:6.2f},{y:6.2f}) in {dt:6.2g}s".format(x=float(x),
                                                                  y=float(y),
                                                                  dt=dt))

    log.info("Completed 'add_data' loop for"
             " {0} in {1}s".format(cubefilename, time.time()-t0))

    if data.dtype.names is not None:
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
    else:
        dname = slice(None)

    if excludefitrange is not None:
        # this block redefining "include" is used for diagnostics (optional)
        ind1a = np.argmin(np.abs(np.floor(v1-velo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-velo)))+1
        OK = (data[dname][0,:]==data[dname][0,:])
        OK[:ind1a] = False
        OK[ind2a:] = False

        include = OK

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-velo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")
    else:
        include = slice(None)


    if diagnostic_plot_name:
        from mpl_plot_templates import imdiagnostics

        pylab.clf()

        dd = data[dname][:,include]
        imdiagnostics(dd,axis=pylab.gca())
        pylab.savefig(diagnostic_plot_name, bbox_inches='tight')

        # Save a copy with the bad stuff flagged out; this should tell whether flagging worked
        skipped = np.array(skipped,dtype='bool')
        dd[skipped,:] = -999
        maskdata = np.ma.masked_equal(dd,-999)
        pylab.clf()
        imdiagnostics(maskdata, axis=pylab.gca())
        dpn_pre,dpn_suf = os.path.splitext(diagnostic_plot_name)
        dpn_flagged = dpn_pre+"_flagged"+dpn_suf
        pylab.savefig(dpn_flagged, bbox_inches='tight')

        log.info("Saved diagnostic plot %s and %s" % (diagnostic_plot_name,dpn_flagged))

    log.debug("nhits statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(nhits.mean(),nhits.std(),np.sum(nhits==0), nhits.size))
    log.debug("Image statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(image.mean(),image.std(),np.sum(image==0), image.size))

    imav = image/nhits

    if log.level <= 10:
        nnan = np.count_nonzero(np.isnan(imav))
        log.debug("imav statistics: mean, std, nzeros, size, nnan, ngood: {0} {1} {2} {3} {4} {5}".format(imav.mean(),imav.std(),np.sum(imav==0), imav.size, nnan, imav.size-nnan))
        log.debug("imav shape: {0}".format(imav.shape))

    subcube = imav[ind1:ind2,:,:]

    if log.level <= 10:
        nnan = np.sum(np.isnan(subcube))
        print("subcube statistics: mean, std, nzeros, size, nnan, ngood:",np.nansum(subcube)/subcube.size,np.std(subcube[subcube==subcube]),np.sum(subcube==0), subcube.size, nnan, subcube.size-nnan)
        print("subcube shape: ",subcube.shape)

    H = header.copy()
    if fileheader is not None:
        for k,v in fileheader.items():
            if 'RESTFRQ' in k or 'RESTFREQ' in k:
                header.set(k,v)
            #if k[0] == 'C' and '1' in k and k[-1] != '1':
            #    header.set(k.replace('1','3'), v)
    moreH = get_header(cubeheader)
    for k,v in H.items():
        header.set(k,v)
    for k,v in moreH.items():
        header.set(k,v)
    HDU = pyfits.PrimaryHDU(data=subcube,header=header)
    HDU.writeto(cubefilename,clobber=True,output_verify='fix')

    outpre = cubefilename.replace(".fits","")

    include = np.ones(imav.shape[0],dtype='bool')

    if excludefitrange is not None:
        # this block redefining "include" is used for continuum
        ind1a = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")

    HDU2 = pyfits.PrimaryHDU(data=nhits,header=flathead)
    HDU2.writeto(outpre+"_nhits.fits",clobber=True,output_verify='fix')

    #OKCube = (imav==imav)
    #contmap = np.nansum(imav[naxis3*0.1:naxis3*0.9,:,:],axis=0) / OKCube.sum(axis=0)
    if make_continuum:
        contmap = np.nansum(imav[include,:,:],axis=0) / include.sum()
        HDU2 = pyfits.PrimaryHDU(data=contmap,header=flathead)
        HDU2.writeto(outpre+"_continuum.fits",clobber=True,output_verify='fix')

        if continuum_prefix is not None:
            # Solo continuum image (just this obs set)
            HDU2.data = contimage / nhits_once
            HDU2.writeto(continuum_prefix+"_continuum.fits",clobber=True,output_verify='fix')
            HDU2.data = nhits_once
            HDU2.writeto(continuum_prefix+"_nhits.fits",clobber=True,output_verify='fix')

    log.info("Writing script file {0}".format(outpre+"_starlink.sh"))
    scriptfile = open(outpre+"_starlink.sh",'w')
    outpath,outfn = os.path.split(cubefilename)
    outpath,pre = os.path.split(outpre)
    print(("#!/bin/bash"), file=scriptfile)
    if outpath != '':
        print(('cd %s' % outpath), file=scriptfile)
    print(('. /star/etc/profile'), file=scriptfile)
    print(('kappa > /dev/null'), file=scriptfile)
    print(('convert > /dev/null'), file=scriptfile)
    print(('fits2ndf %s %s' % (outfn,outfn.replace(".fits",".sdf"))), file=scriptfile)
    if excludefitrange is not None:
        v2v3 = ""
        for v2,v3 in zip(excludefitrange[::2],excludefitrange[1::2]):
            v2v3 += "%0.2f %0.2f " % (v2.to(default_unit).value,v3.to(default_unit).value)
        print(('mfittrend %s  ranges=\\\"%0.2f %s %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v2v3,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    else:
        print(('mfittrend %s  ranges=\\\"%0.2f %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    print(('sub %s %s %s' % (outfn.replace(".fits",".sdf"),outfn.replace(".fits","_baseline.sdf"),outfn.replace(".fits","_sub.sdf"))), file=scriptfile)
    print(('sqorst %s_sub mode=pixelscale  axis=3 pixscale=%i out=%s_vrebin' % (pre,smoothto,pre)), file=scriptfile)
    print(('gausmooth %s_vrebin fwhm=1.0 axes=[1,2] out=%s_smooth' % (pre,pre)), file=scriptfile)
    print(('#collapse %s estimator=mean axis="VRAD" low=-400 high=500 out=%s_continuum' % (pre,pre)), file=scriptfile)
    print(('rm %s_sub.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_sub %s_sub.fits' % (pre,pre)), file=scriptfile)
    print(('rm %s_smooth.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_smooth %s_smooth.fits' % (pre,pre)), file=scriptfile)
    print(("# Fix STARLINK's failure to respect header keywords."), file=scriptfile)
    print(('sethead %s_smooth.fits RESTFRQ=`gethead RESTFRQ %s.fits`' % (pre,pre)), file=scriptfile)
    print(('rm %s_baseline.sdf' % (pre)), file=scriptfile)
    print(('rm %s_smooth.sdf' % (pre)), file=scriptfile)
    print(('rm %s_sub.sdf' % (pre)), file=scriptfile)
    print(('rm %s_vrebin.sdf' % (pre)), file=scriptfile)
    print(('rm %s.sdf' % (pre)), file=scriptfile)
    scriptfile.close()

    if chmod:
        scriptfilename = (outpre+"_starlink.sh").replace(" ","")
        #subprocess.call("chmod +x {0}".format(scriptfilename), shell=True)
        st = os.stat(scriptfilename)
        os.chmod(scriptfilename, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR)

    if do_runscript:
        runscript(outpre)

    _fix_ms_kms_file(outpre+"_sub.fits")
    _fix_ms_kms_file(outpre+"_smooth.fits")

    if log.level <= 20:
        log.info("Completed {0} in {1}s".format(pre, time.time()-t0))
Exemple #34
0
    def quick_render_movie(self,
                           outdir,
                           size=256,
                           nframes=30,
                           camera_angle=(0, 0, 1),
                           north_vector=(0, 0, 1),
                           rot_vector=(1, 0, 0),
                           colormap='doom',
                           cmap_range='auto',
                           transfer_function='auto',
                           start_index=0,
                           image_prefix="",
                           output_filename='out.mp4',
                           log_scale=False,
                           rescale=True):
        """
        Create a movie rotating the cube 360 degrees from
        PP -> PV -> PP -> PV -> PP

        Parameters
        ----------
        outdir: str
            The output directory in which the individual image frames and the
            resulting output mp4 file should be stored
        size: int
            The size of the individual output frame in pixels (i.e., size=256
            will result in a 256x256 image)
        nframes: int
            The number of frames in the resulting movie
        camera_angle: 3-tuple
            The initial angle of the camera
        north_vector: 3-tuple
            The vector of 'north' in the data cube.  Default is coincident with
            the spectral axis
        rot_vector: 3-tuple
            The vector around which the camera will be rotated
        colormap: str
            A valid colormap.  See `yt.show_colormaps`
        transfer_function: 'auto' or `yt.visualization.volume_rendering.TransferFunction`
            Either 'auto' to use the colormap specified, or a valid
            TransferFunction instance
        log_scale: bool
            Should the colormap be log scaled?
        rescale: bool
            If True, the images will be rescaled to have a common 95th
            percentile brightness, which can help reduce flickering from having
            a single bright pixel in some projections
        start_index : int
            The number of the first image to save
        image_prefix : str
            A string to prepend to the image name for each image that is output
        output_filename : str
            The movie file name to output.  The suffix may affect the file type
            created.  Defaults to 'out.mp4'.  Will be placed in ``outdir``

        Returns
        -------
        images : list
            The list of rendered image frames, one per camera rotation step.
        """
        if not ytOK:
            raise IOError(
                "yt could not be imported.  Cube renderings are not possible.")

        scale = np.max(self.cube.shape)

        if not os.path.exists(outdir):
            os.makedirs(outdir)
        elif not os.path.isdir(outdir):
            raise OSError(
                "Output directory {0} exists and is not a directory.".format(
                    outdir))

        if cmap_range == 'auto':
            upper = self.cube.max().value
            lower = self.cube.std().value * 3
            cmap_range = [lower, upper]

        if transfer_function == 'auto':
            tfh = self.auto_transfer_function(cmap_range, log=log_scale)
            tfh.tf.map_to_colormap(cmap_range[0],
                                   cmap_range[1],
                                   colormap=colormap)
            tf = tfh.tf
        else:
            tf = transfer_function

        center = self.dataset.domain_center
        cam = self.dataset.h.camera(center,
                                    camera_angle,
                                    scale,
                                    size,
                                    tf,
                                    north_vector=north_vector,
                                    fields='flux')

        im = cam.snapshot()
        images = [im]

        pb = ProgressBar(nframes)
        for ii, im in enumerate(
                cam.rotation(2 * np.pi, nframes, rot_vector=rot_vector)):
            images.append(im)
            im.write_png(os.path.join(
                outdir, "%s%04i.png" % (image_prefix, ii + start_index)),
                         rescale=False)
            pb.update(ii + 1)
        log.info("Rendering complete in {0}s".format(time.time() -
                                                     pb._start_time))

        if rescale:
            _rescale_images(images, os.path.join(outdir, image_prefix))

        pipe = _make_movie(outdir,
                           prefix=image_prefix,
                           filename=output_filename)

        return images
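A hedged usage sketch: assuming the method above is exposed by a yt-backed cube wrapper such as the object returned by spectral_cube's SpectralCube.to_yt() (an assumption here), a call might look like this:

from spectral_cube import SpectralCube

cube = SpectralCube.read("mycube.fits")  # hypothetical input cube
ytcube = cube.to_yt()                    # assumed to expose quick_render_movie
images = ytcube.quick_render_movie("movie_frames",
                                   size=256,
                                   nframes=30,
                                   colormap="viridis",
                                   output_filename="cube_rotation.mp4")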
Exemple #35
0
def convolve_model_dir(model_dir, filters, overwrite=False):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    filters : list
        A list of :class:`~sedfitter.filter.Filter` objects to use for the
        convolution
    overwrite : bool, optional
        Whether to overwrite the output files
    """

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=np.zeros(len(sed_files), dtype='U30' if six.PY3 else 'S30'), apertures=apertures, initialize_arrays=True) for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug('Convolving {0}'.format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info('Rebinning filters')
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table['MODEL_NAME'])
        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
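A hedged usage sketch for the function above, assuming sedfitter is installed; the model directory name and the manual construction of a Filter are illustrative assumptions:

import astropy.units as u
from sedfitter.filter import Filter

f = Filter()                            # assumed to allow attribute assignment
f.name = "IRAC1"                        # required by convolve_model_dir
f.central_wavelength = 3.6 * u.micron   # required by convolve_model_dir
# f.nu and f.response would be filled in from the instrument response curve.

convolve_model_dir("models_r06", filters=[f], overwrite=True)  # hypothetical directory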
Exemple #36
0
    def build(self,
              age,
              sfh,
              dust,
              metal,
              fesc=1.,
              sfh_law=exponential,
              dust_model=Calzetti,
              neb_dust_weight=1.,
              neb_cont=True,
              neb_met=True,
              timesteps=500,
              verbose=False):
        """ Build composite stellar population SED(s) from input SSP models
            
        Parameters
        ----------
            ssp : SSP class
            
            age : '~astropy.units.Quantity' array (with units of time)
                Desired stellar population age(s) since the onset of
                star-formation
            sfh :  array of float or '~astropy.units.Quantity',
                Star-formation history parameter/exponent
            dust : array of floats,
                Strength of dust extinction in Av
            metal_ind : float or float array,
                Metallicity or metallicity range of stellar population
                relative to solar metallicity (Z/Z_sol), min and max
                allowed values set by range of input metallicities in 'ssp' 
                class
            f_esc : float or array of float,
                Escape fraction(s) of nebular emission component
            sfh_law : '~smpy.sfh' or user defined function,
                Star-formation history parametrisation for composite
                stellar population.
            dust_model : '~smpy.dust' or user defined function,
                Dust extinction or attenuation law parametrisation
            neb_cont : boolean, default = True
                Include continuum emission component in nebular emission
                model
            neb_met : boolean, default = True
                Include metal line component in nebular emission model
            
            Returns
            -------
        
        
        """
        try:
            self.tau = u.Quantity(sfh, ndmin=1)
        except:
            self.tau = sfh

        self.tg = u.Quantity(age, ndmin=1).to(u.yr)
        self.tauv = np.array(dust, ndmin=1)
        self.mi = np.array(metal, ndmin=1)
        self.fesc = np.array(fesc, ndmin=1)
        self.sfh_law = sfh_law
        self.inc_cont = neb_cont
        self.inc_met = neb_met
        self.getAttenuation = dust_model
        self.sfr_func = sfh_law

        mu = 0.3

        self.ta = self.ages

        outshape = [
            len(self.mi),
            len(self.tg),
            len(self.tau),
            len(self.tauv),
            len(self.fesc),
            len(self.wave)
        ]
        self.SED = np.zeros(outshape) * self.sed_arr.unit
        self.STR = np.zeros(self.SED.shape[:-1])
        self.SFR = np.zeros(self.SED.shape[:-1]) * u.solMass / u.yr

        # Set up nebular emission arrays -- WILL CHANGE
        if len(self.neb_wave) != len(self.wave):
            self.neb_cont = griddata(self.neb_wave, self.neb_cont, self.wave)

            self.neb_hlines = griddata(self.neb_wave, self.neb_hlines,
                                       self.wave)

            neb_metaln = np.zeros((len(self.wave), 3))
            for i in range(3):
                neb_metaln[:, i] = griddata(self.neb_wave,
                                            self.neb_metal[:, i], self.wave)
            self.neb_metal = neb_metaln
            self.neb_wave = self.wave

        self.neb_cont[self.wave <= 912. * u.AA] = 0.
        self.neb_hlines[self.wave <= 912. * u.AA] = 0.
        self.neb_metal[self.wave <= 912. * u.AA, :] = 0.

        self.Nly_arr = self.calc_lyman(self.wave, self.sed_arr)
        self.neb_sed_arr = np.ones_like(self.sed_arr.value)

        nebrange1 = (self.metallicities <= 0.02)
        nebrange2 = (self.metallicities > 0.02) * (self.metallicities <= 0.2)
        nebrange3 = (self.metallicities > 0.2)

        self.neb_sed_arr[nebrange1, :, :] *= (
            (self.neb_cont * self.inc_cont) + self.neb_hlines +
            (self.neb_metal[:, 0] * self.inc_met))

        self.neb_sed_arr[nebrange2, :, :] *= (
            (self.neb_cont * self.inc_cont) + self.neb_hlines +
            (self.neb_metal[:, 1] * self.inc_met))

        self.neb_sed_arr[nebrange3, :, :] *= (
            (self.neb_cont * self.inc_cont) + self.neb_hlines +
            (self.neb_metal[:, 2] * self.inc_met))

        self.neb_sed_arr *= (c.c.to(u.AA / u.s) / (self.wave**2)).value
        # Convert to Flambda
        self.neb_sed_arr = (self.neb_sed_arr * self.sed_arr.unit /
                            self.Nly_arr.unit)

        # Set up grid for ND-interpolation
        ti, mi = np.meshgrid(
            np.log10(self.ages / u.yr).value, np.log10(self.metallicities))
        self.grid = list(zip(mi.flatten(), ti.flatten()))

        tri_grid = spatial.Delaunay(self.grid)

        # Make cube of interpolated age and SFHs.
        # Uses array slicing and vectorisation to minimise
        # loops where possible.
        sfh_grid_shape = (len(self.mi), timesteps)

        self.me_sfh = np.ones(sfh_grid_shape)

        for met_idx, metal in enumerate(self.mi):
            self.me_sfh[met_idx] *= metal

        if verbose:
            bar = ProgressBar(np.prod(self.SED.shape[1:-1]))

        for idG, age in enumerate(self.tg):
            ta_range = np.logspace(
                np.log10(self.ages / u.yr).min(), np.log10(age / u.yr),
                timesteps)
            self.ta_sfh = np.ones(sfh_grid_shape) * u.yr
            self.ta_sfh *= ta_range[None, :]

            # Calculate Barycentric coordinates for all ages/metallicities in SFH.
            points = np.array(
                list(zip(np.log10(self.me_sfh.flatten()),
                         np.log10(self.ta_sfh.flatten() / u.yr))))

            #if verbose:
            #    print 'Interpolating SEDs at SFH timesteps'
            ss = tri_grid.find_simplex(points)

            X = tri_grid.transform[ss, :2]
            Y = points - tri_grid.transform[ss, 2]

            b = np.einsum('ijk,ik->ij', X, Y)
            self.bc = np.c_[b, 1 - b.sum(axis=1)]
            self.simplices = tri_grid.simplices[ss]

            #if verbose:
            #    print 'Reshaping grids'
            # Interpolate SED, stellar mass fraction and remnant fractions
            # for SFH age grid using calculated Barycentric coordinates (bc).
            #print self.sed_arr.shape
            #print (len(self.metallicities) * len(self.ages), self.iw)

            self.temp = self.sed_arr.reshape(
                len(self.metallicities) * len(self.ages),
                self.iw)[self.simplices]

            self.sed_sfh = (self.temp * self.bc[:, :, None]).sum(1)
            self.sed_sfh = self.sed_sfh.reshape(
                np.append(sfh_grid_shape, self.iw))

            self.temp = self.neb_sed_arr.reshape(
                len(self.metallicities) * len(self.ages),
                self.iw)[self.simplices]

            self.neb_sed_sfh = (self.temp * self.bc[:, :, None]).sum(1)
            self.neb_sed_sfh = self.neb_sed_sfh.reshape(
                np.append(sfh_grid_shape, self.iw))

            self.strm_sfh = np.array(
                self.strm_arr.reshape(
                    len(self.metallicities) * len(self.ages))[self.simplices] *
                self.bc).sum(1)
            self.strm_sfh = self.strm_sfh.reshape(sfh_grid_shape)

            self.rmtm_sfh = np.array(
                self.rmtm_arr.reshape(
                    len(self.metallicities) * len(self.ages))[self.simplices] *
                self.bc).sum(1)

            self.rmtm_sfh = self.rmtm_sfh.reshape(sfh_grid_shape)

            self.Nly_sfh = (self.Nly_arr.reshape(
                len(self.metallicities) * len(self.ages))[self.simplices] *
                            self.bc).sum(1)
            self.Nly_sfh = self.Nly_sfh.reshape(sfh_grid_shape)  #*
            #(1 - self.fesc[None, None, :, None]))

            # Star-formation history

            for idT, t in enumerate(self.tau):
                if type(t) == tuple:
                    tau = t
                else:
                    tau = tuple([t])

                self.sfr_hist = self.sfr_func(self.ta_sfh, *tau)
                # Enforce integrated SFR = 1 Msol.
                self.norm = np.trapz(self.sfr_hist, self.ta_sfh, axis=-1)[:,
                                                                          None]

                self.sfr_hist /= self.norm
                self.weights = np.abs(self.sfr_func(self.tg[None, idG, None] \
                                      - self.ta_sfh, *tau)) / self.norm
                self.sfh_weights = np.ones(sfh_grid_shape) * self.weights

                for idA, Av in enumerate(self.tauv):
                    for idf, fesc in enumerate(self.fesc):
                        self.Att = self.getAttenuation(self.ta_sfh, \
                                                       self.wave, Av)

                        neb_att = self.getAttenuation(self.ta_sfh, \
                                                      self.wave,
                                                      Av * neb_dust_weight)

                        combined_sed = np.copy(
                            self.sed_sfh) * self.sed_sfh.unit

                        # Absorbed LyC photons
                        combined_sed[:, :, self.wave <= 912 * u.AA] *= fesc

                        # Resulting nebular emission
                        combined_sed *= self.Att  # Dust attenuated combined SED
                        combined_sed += (
                            neb_att * ((1 - fesc) * self.Nly_sfh[:, :, None] *
                                       self.neb_sed_sfh))

                        # Integrate over star-formation history
                        self.SED[:, idG, idT, idA, idf, :] = \
                            np.trapz(self.sfh_weights[:, :, None] * \
                            combined_sed, self.ta_sfh[:, :, None], axis=-2)

                        self.STR[:, idG, idT, idA, idf] = \
                            np.trapz(self.sfh_weights * self.strm_sfh,
                                     self.ta_sfh)

                        self.SFR[:, idG, idT, idA, idf] = \
                            self.sfr_hist[:, -1] * u.solMass

                        if verbose:
                            bar.update()

        # Normalise by stellar mass fraction (stars + remnants in case of BC03)
        self.SFR = self.SFR / self.STR
        self.SED = self.SED / self.STR[:, :, :, :, :, None]
        self.Ms = np.ones_like(self.SFR.value) * u.Msun

        self.Nly = self.calc_lyman(self.wave, self.SED).cgs
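A hedged call sketch, assuming `csp` is an already-initialised CSP-style object (e.g. built from BC03 SSP models) that exposes the build method above:

import numpy as np
import astropy.units as u

# `csp` is assumed to exist; the ages, tau values, Av values, metallicities
# and escape fractions below are illustrative only.
csp.build(age=np.array([0.1, 0.5, 1.0]) * u.Gyr,
          sfh=np.array([0.5, 1.0, 5.0]) * u.Gyr,
          dust=np.array([0.0, 0.5, 1.0]),
          metal=np.array([1.0]),
          fesc=np.array([0.2, 1.0]),
          verbose=True)
# csp.SED then has shape (metal, age, sfh, dust, fesc, wavelength),
# matching `outshape` in the method above.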
Exemple #37
0
def render_chem(yth2co321=yth2co321,
                yth2co303=yth2co303,
                ytsio=ytsio,
                outdir='yt_renders_chem3',
                size=512,
                scale=1100.,
                nframes=60,
                north_vector=[1, 0, 0],
                rot_vector=[1, 0, 0],
                movie=True,
                camera_angle=[-0.6, 0.4, 0.6]):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf1 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf1.add_gaussian(0.1,0.05, [1,0,0,1])
    #tf1.map_to_colormap(0, 0.5, scale=1, colormap='Reds')
    tf1.add_gaussian(0.25, 0.01, [1, 0, 0, 1])
    tf1.add_step(0.25, 0.5, [1, 0, 0, 1])
    #tf1.add_step(1,2,[1,1,1,1])
    tf1.map_to_colormap(0.5, 2, scale=1, colormap=red)
    tf2 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf2.add_gaussian(0.1,0.05, [0,1,0,1])
    #tf2.map_to_colormap(0, 0.5, scale=1, colormap='Greens')
    tf2.add_gaussian(0.25, 0.01, [0, 1, 0, 1])
    tf2.add_step(0.25, 0.5, [0, 1, 0, 1])
    tf2.map_to_colormap(0.5, 2, scale=1, colormap=green)
    tf3 = yt.ColorTransferFunction([0.1, 2], grey_opacity=True)
    #tf3.add_gaussian(0.1,0.05, [0,0,1,1])
    #tf3.map_to_colormap(0, 0.5, scale=1, colormap='Blues')
    tf3.add_gaussian(0.25, 0.01, [0, 0, 1, 1])
    tf3.add_step(0.25, 0.5, [0, 0, 1, 1])
    tf3.map_to_colormap(0.5, 2, scale=1, colormap=blue)

    center = yth2co303.dataset.domain_dimensions / 2.
    camh2co303 = yth2co303.dataset.h.camera(center,
                                            camera_angle,
                                            scale,
                                            size,
                                            tf3,
                                            north_vector=north_vector,
                                            fields='flux')
    camh2co321 = yth2co321.dataset.h.camera(center,
                                            camera_angle,
                                            scale,
                                            size,
                                            tf2,
                                            north_vector=north_vector,
                                            fields='flux')
    camsio = ytsio.dataset.h.camera(center,
                                    camera_angle,
                                    scale,
                                    size,
                                    tf1,
                                    north_vector=north_vector,
                                    fields='flux')

    imh2co303 = camh2co303.snapshot()
    imh2co321 = camh2co321.snapshot()
    imsio = camsio.snapshot()

    pl.figure(1)
    pl.clf()
    pl.imshow(imh2co303 + imh2co321 + imsio)
    pl.figure(2)
    pl.clf()
    pl.imshow(imh2co303[:, :, :3] + imh2co321[:, :, :3] + imsio[:, :, :3])

    if movie:
        images_h2co303 = [imh2co303]
        images_h2co321 = [imh2co321]
        images_sio = [imsio]

        r1 = camh2co303.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r2 = camh2co321.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r3 = camsio.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        pb = ProgressBar(nframes * 3)
        for (ii, (imh2co303, imh2co321, imsio)) in enumerate(izip(r1, r2, r3)):
            images_h2co303.append(imh2co303)
            images_h2co321.append(imh2co321)
            images_sio.append(imsio)

            imh2co303 = imh2co303.swapaxes(0, 1)
            imh2co303.write_png(paths.mpath(
                os.path.join(outdir, "h2co303_%04i.png" % (ii))),
                                rescale=False)
            pb.update(ii * 3)
            imsio = imsio.swapaxes(0, 1)
            imsio.write_png(paths.mpath(
                os.path.join(outdir, "sio_%04i.png" % (ii))),
                            rescale=False)
            pb.update(ii * 3 + 1)
            imh2co321 = imh2co321.swapaxes(0, 1)
            imh2co321.write_png(paths.mpath(
                os.path.join(outdir, "h2co321_%04i.png" % (ii))),
                                rescale=False)
            pb.update(ii * 3 + 2)

        pb.next()

        save_images([
            i1 + i2 + i3
            for i1, i2, i3 in izip(images_h2co303, images_h2co321, images_sio)
        ], paths.mpath(outdir))

        make_movie(paths.mpath(outdir))

        return images_h2co303, images_h2co321, images_sio
    else:
        return imh2co303, imh2co321, imsio
Exemple #38
0
    def build(self,
              SED,
              Filters,
              redshift,
              savepath,
              force_age=True,
              madau=True,
              units=u.uJy,
              verbose=False,
              clobber=True):
        """ 
            
        Parameters
        ----------
        SED : '~smpy.CSP' object
            Built CSP model set
        Filters : '~smpy.FilterSet' object
            Filter set through which to observe the set of models included
            in SED object
        redshift : array
            Redshift(s) at which to observe SED set
        savepath : str
            Filename for hdf5 save file
        force_age : boolean
            Require age of the stellar population to be younger than
            the age of the Universe at the desired redshift.
        madau : boolean
            Apply IGM absorption following Madau 1999 prescription
        units : '~astropy.units.Quantity'
            Desired output units, must be in spectral flux density
            equivalent
        verbose : boolean
            Add additional terminal outputs if true
        
        """
        self.F = Filters
        self.redshifts = np.array(redshift, ndmin=1)
        self.wave = SED.wave

        #self.fluxes = np.zeros(gridshape) * units
        #self.AB = np.zeros(gridshape) * u.mag
        self.wl = np.zeros(len(self.F.filters)) * u.AA
        self.fwhm = np.zeros(len(self.F.filters)) * u.AA

        self.dl = cosmo.luminosity_distance(self.redshifts).cgs
        self.dl[self.redshifts == 0] = 10 * c.pc

        assert isinstance(savepath, str), \
            "File save path is not a string: %r" % savepath
        self.savepath = savepath
        if clobber:
            if os.path.isfile(self.savepath):
                os.remove(self.savepath)

        with h5py.File(savepath, 'w') as f:

            gridshape = np.append(
                [len(self.redshifts), len(self.F.filters)], SED.SED.shape[:-1])

            self.fluxes = f.create_dataset("fluxes", gridshape, dtype='f')
            self.fluxes.attrs['unit'] = units.to_string()

            self.AB = f.create_dataset("mags", gridshape, dtype='f')

            for j, filt in enumerate(self.F.filters):
                self.wl[j] = filt.lambda_c
                self.fwhm[j] = filt.fwhm

                print('Filter {0}' \
                      '(Central WL = {1:.1f}):'.format(j+1,
                                                       filt.lambda_c))

                # Find SED wavelength entries within filter range
                wff = np.logical_and(filt.wave[0] < self.wave,
                                     self.wave < filt.wave[-1])
                wft = self.wave[wff]

                # Interpolate to find throughput values at new wavelength points
                tpt = griddata(filt.wave, filt.response, wft)

                # Join arrays and sort w.r.t to wf
                # Also replace units stripped by concatenate
                wf = np.array(np.concatenate((filt.wave, wft))) * u.AA
                tp = np.concatenate((filt.response, tpt))

                order = np.argsort(wf)
                wf = wf[order]
                tp = tp[order]

                if verbose:
                    niters = len(self.redshifts) * len(SED.tg)
                    bar = ProgressBar(niters)

                for i, z in enumerate(self.redshifts):
                    self.lyman_abs = np.ones(len(self.wave))
                    if madau:
                        self.lyman_abs = np.clip(tau_madau(self.wave, z), 0.,
                                                 1.)

                    for a, age in enumerate(SED.tg):
                        if np.logical_or(age < cosmo.age(z), force_age):
                            fluxes = self.calcflux(SED.SED[:, a], wf, tp, z,
                                                   self.dl[i], units)
                            self.fluxes[i, j, :, a] = fluxes.to(units)
                        else:
                            # Set fluxes for ages older
                            # than universe to zero
                            self.fluxes[i, j, :, a] = 0.
                        if verbose:
                            assert isinstance(bar, object)
                            bar.update()
                print('\n')

            for i, z in enumerate(self.redshifts):
                for j, filt in enumerate(self.F.filters):
                    self.AB[i, j] = (-2.5 * np.log10(
                        (self.fluxes[i, j] * units).to(u.Jy) /
                        (3631 * u.Jy))).value
                    zeros = (self.fluxes[i, j] == 0.)
                    self.AB[i, j][zeros] = np.inf

            # Set up hdf5 dimensions based on SED ranges for convenient
            # slicing if not used for fitting.
            f.create_dataset('z', data=self.redshifts)
            f.create_dataset('ages', data=SED.tg.value)
            f['ages'].attrs['unit'] = SED.tg.unit.to_string()

            f.create_dataset('dust', data=SED.tauv)
            f.create_dataset('metallicities', data=SED.mi)
            f.create_dataset('fesc', data=SED.fesc)

            try:
                s = SED.taus.shape
                taus = f.create_dataset('sfh', data=SED.tau)
                taus.attrs['unit'] = SED.tau.unit.to_string()
            except:
                # For multi-parameter SFHs, use indices for scale
                taus = f.create_dataset('sfh', data=np.arange(len(SED.tau)))

            for dataset in ['fluxes', 'mags']:
                f[dataset].dims.create_scale(f['z'], 'z')
                f[dataset].dims.create_scale(f['sfh'], 'sfh')
                f[dataset].dims.create_scale(f['ages'], 'ages')
                f[dataset].dims.create_scale(f['dust'], 'dust')
                f[dataset].dims.create_scale(f['metallicities'], 'met')
                f[dataset].dims.create_scale(f['fesc'], 'fesc')

                f[dataset].dims[0].attach_scale(f['z'])
                f[dataset].dims[2].attach_scale(f['metallicities'])
                f[dataset].dims[3].attach_scale(f['ages'])
                f[dataset].dims[4].attach_scale(f['sfh'])
                f[dataset].dims[5].attach_scale(f['dust'])
                f[dataset].dims[6].attach_scale(f['fesc'])

            f.create_dataset('wl', data=self.wl)
            f.create_dataset('fwhm', data=self.fwhm)

            # Store all CSP attributes in case they are needed later
            for attribute in SED.__dict__.keys():
                if attribute == 'SED':
                    # SED excluded due to large size
                    continue
                try:
                    f.create_dataset(attribute, data=SED.__dict__[attribute])
                except:
                    # in cases of functions etc.
                    continue

        self.load(self.savepath)
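Because everything is written to HDF5, the saved grids can be inspected directly with h5py; a short sketch (the file name is a hypothetical savepath):

import h5py

with h5py.File("observed_grid.hdf5", "r") as f:
    print(f["fluxes"].shape, f["fluxes"].attrs["unit"])  # flux grid and its unit
    print(f["mags"].shape)                               # AB magnitude grid
    print(f["z"][:])                                     # redshift grid
    print(f["wl"][:])                                    # filter central wavelengths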
Exemple #39
0
    def compute_bispectrum(self,
                           show_progress=True,
                           use_pyfftw=False,
                           threads=1,
                           nsamples=100,
                           seed=1000,
                           mean_subtract=False,
                           **pyfftw_kwargs):
        '''
        Do the computation.

        Parameters
        ----------
        show_progress : optional, bool
            Show progress bar while sampling the bispectrum.
        use_pyfftw : bool, optional
            Enable to use pyfftw, if it is installed.
        threads : int, optional
            Number of threads to use in FFT when using pyfftw.
        nsamples : int, optional
            Sets the number of samples to take at each vector
            magnitude.
        seed : int, optional
            Sets the seed for the distribution draws.
        mean_subtract : bool, optional
            Subtract the mean from the data before computing. This removes the
            "zero frequency" (i.e., constant) portion of the power, resulting
            in a loss of phase coherence along the k_1=k_2 line.
        pyfftw_kwargs : dict, optional
            Passed to `~turbustat.statistics.rfft_to_fft.rfft_to_fft`. See
            `here <https://hgomersall.github.io/pyFFTW/pyfftw/interfaces/interfaces.html#interfaces-additional-args>`_
            for a list of accepted kwargs.
        '''

        if mean_subtract:
            norm_data = self.data - self.data.mean()
        else:
            norm_data = self.data

        if use_pyfftw:
            if PYFFTW_FLAG:
                if pyfftw_kwargs.get('threads') is not None:
                    pyfftw_kwargs.pop('threads')

                fftarr = fft2(norm_data, threads=threads, **pyfftw_kwargs)
            else:
                warn("pyfftw not installed. Reverting to using numpy.")
                use_pyfftw = False

        if not use_pyfftw:
            fftarr = np.fft.fft2(norm_data)

        conjfft = np.conj(fftarr)

        bispec_shape = (int(self.shape[0] / 2.), int(self.shape[1] / 2.))

        self._bispectrum = np.zeros(bispec_shape, dtype=complex)
        self._bicoherence = np.zeros(bispec_shape, dtype=float)
        self._tracker = np.zeros(self.shape, dtype=np.int16)

        biconorm = np.ones_like(self.bispectrum, dtype=float)

        if show_progress:
            bar = ProgressBar(np.prod(fftarr.shape) / 4.)

        prod = product(range(int(fftarr.shape[0] / 2.)),
                       range(int(fftarr.shape[1] / 2.)))

        with NumpyRNGContext(seed):
            for n, (k1mag, k2mag) in enumerate(prod):
                phi1 = ra.uniform(0, 2 * np.pi, nsamples)
                phi2 = ra.uniform(0, 2 * np.pi, nsamples)

                k1x = np.asarray(
                    [int(k1mag * np.cos(angle)) for angle in phi1])
                k2x = np.asarray(
                    [int(k2mag * np.cos(angle)) for angle in phi2])
                k1y = np.asarray(
                    [int(k1mag * np.sin(angle)) for angle in phi1])
                k2y = np.asarray(
                    [int(k2mag * np.sin(angle)) for angle in phi2])

                k3x = np.asarray([
                    int(k1mag * np.cos(ang1) + k2mag * np.cos(ang2))
                    for ang1, ang2 in zip(phi1, phi2)
                ])
                k3y = np.asarray([
                    int(k1mag * np.sin(ang1) + k2mag * np.sin(ang2))
                    for ang1, ang2 in zip(phi1, phi2)
                ])

                samps = fftarr[k1x, k1y] * fftarr[k2x, k2y] * conjfft[k3x, k3y]

                self._bispectrum[k1mag, k2mag] = np.sum(samps)

                biconorm[k1mag, k2mag] = np.sum(np.abs(samps))

                # Track where we're sampling from in fourier space
                self._tracker[k1x, k1y] += 1
                self._tracker[k2x, k2y] += 1
                self._tracker[k3x, k3y] += 1

                if show_progress:
                    bar.update(n + 1)

        self._bicoherence = (np.abs(self.bispectrum) / biconorm)
        self._bispectrum_amp = np.log10(np.abs(self.bispectrum))
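Each Monte-Carlo draw in the loop above samples the bispectrum estimator B(k1, k2) = F(k1) F(k2) F*(k1 + k2); a minimal standalone sketch of a single sample on a random image (the wavevector indices are hypothetical):

import numpy as np

img = np.random.random((64, 64))
fftarr = np.fft.fft2(img - img.mean())
conjfft = np.conj(fftarr)

k1 = (3, 0)                     # hypothetical wavevector indices
k2 = (0, 4)
k3 = (k1[0] + k2[0], k1[1] + k2[1])

sample = fftarr[k1] * fftarr[k2] * conjfft[k3]
# The bicoherence normalises |sum(samples)| by sum(|samples|), so many phase
# draws per (|k1|, |k2|) pair are needed, as in the nsamples loop above.
print(sample)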
Exemple #40
0
def convolve_model_dir_monochromatic(model_dir, overwrite=False, max_ram=8,
                                     wav_min=-np.inf * u.micron, wav_max=np.inf * u.micron):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    overwrite : bool, optional
        Whether to overwrite the output files
    max_ram : float, optional
        The maximum amount of RAM that can be used (in GB)
    wav_min : float, optional
        The minimum wavelength to consider. Only wavelengths above this value
        will be output.
    wav_max : float, optional
        The maximum wavelength to consider. Only wavelengths below this value
        will be output.
    """

    modpar = parfile.read(os.path.join(model_dir, 'models.conf'), 'conf')
    if modpar.get('version', 1) > 1:
        raise ValueError("monochromatic filters are no longer used for new-style model directories")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    # Find number of models
    n_models = len(sed_files)

    if n_models == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(n_models, model_dir))

    # Find out apertures and wavelengths
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures
    n_wav = first_sed.n_wav
    wavelengths = first_sed.wav

    # For model grids that are very large, it is not possible to compute all
    # fluxes in one go, so we need to process in chunks in wavelength space.
    chunk_size = min(n_wav, int(np.floor(max_ram * 1024. ** 3 / (4. * 2. * n_models * n_ap))))

    if chunk_size == n_wav:
        log.info("Producing all monochromatic files in one go")
    else:
        log.info("Producing monochromatic files in chunks of {0}".format(chunk_size))

    filters = Table()
    filters['wav'] = wavelengths
    filters['filter'] = np.zeros(wavelengths.shape, dtype='S10')

    # Figure out range of wavelength indices to use
    # (wavelengths array is sorted in reverse order)
    jlo = n_wav - 1 - (wavelengths[::-1].searchsorted(wav_max) - 1)
    jhi = n_wav - 1 - wavelengths[::-1].searchsorted(wav_min)
    chunk_size = min(chunk_size, jhi - jlo + 1)

    # Loop over wavelength chunks
    for jmin in range(jlo, jhi, chunk_size):

        # Find upper wavelength to compute
        jmax = min(jmin + chunk_size - 1, jhi)

        log.info('Processing wavelengths {0} to {1}'.format(jmin, jmax))

        # Set up convolved fluxes
        fluxes = [ConvolvedFluxes(model_names=np.zeros(n_models, dtype='U30' if six.PY3 else 'S30'), apertures=apertures, initialize_arrays=True) for i in range(chunk_size)]

        b = ProgressBar(len(sed_files))

        # Loop over SEDs
        for im, sed_file in enumerate(sed_files):

            b.update()

            log.debug('Processing {0}'.format(os.path.basename(sed_file)))

            # Read in SED
            s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

            # Convolve
            for j in range(chunk_size):

                fluxes[j].central_wavelength = wavelengths[j + jmin]
                fluxes[j].apertures = apertures
                fluxes[j].model_names[im] = s.name

                if n_ap == 1:
                    fluxes[j].flux[im] = s.flux[0, j + jmin]
                    fluxes[j].error[im] = s.error[0, j + jmin]
                else:
                    fluxes[j].flux[im, :] = s.flux[:, j + jmin]
                    fluxes[j].error[im, :] = s.error[:, j + jmin]

        for j in range(chunk_size):
            fluxes[j].sort_to_match(par_table['MODEL_NAME'])
            fluxes[j].write('{0:s}/convolved/MO{1:03d}.fits'.format(model_dir, j + jmin + 1),
                            overwrite=overwrite)
            filters['filter'][j + jmin] = "MO{0:03d}".format(j + jmin + 1)

    return filters
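The chunk size above assumes 4-byte floats and two arrays (flux and error) of size n_models x n_ap per wavelength; a worked example with hypothetical grid sizes:

import numpy as np

max_ram, n_models, n_ap, n_wav = 8, 20000, 50, 250   # hypothetical values
chunk_size = min(n_wav,
                 int(np.floor(max_ram * 1024. ** 3 / (4. * 2. * n_models * n_ap))))
print(chunk_size)   # 250 -> the whole wavelength grid fits in a single chunk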
Exemple #41
0
    def compute_deltavar(self, allow_huge=False, boundary='wrap',
                         min_weight_frac=0.01, nan_treatment='fill',
                         preserve_nan=False,
                         use_pyfftw=False, threads=1,
                         pyfftw_kwargs={},
                         show_progress=True,
                         keep_convolve_arrays=False):
        '''
        Perform the convolution and calculate the delta variance at all lags.

        Parameters
        ----------
        allow_huge : bool, optional
            Passed to `~astropy.convolve.convolve_fft`. Allows operations on
            images larger than 1 Gb.
        boundary : {"wrap", "fill"}, optional
            Use "wrap" for periodic boundaries, and "fill" for non-periodic.
        min_weight_frac : float, optional
            Set the fraction of the peak of the weight array to mask below.
            Default is 0.01. This will remove most edge artifacts, but is
            not guaranteed to! Increase this value if artifacts are
            encountered (this typically results in large spikes in the
            delta-variance curve).
        nan_treatment : {'fill', 'interpolate'}, optional
            How NaNs are handled in the convolution; passed to
            `~astropy.convolution.convolve_fft`. Default is 'fill'.
        use_pyfftw : bool, optional
            Enable to use pyfftw, if it is installed.
        threads : int, optional
            Number of threads to use in FFT when using pyfftw.
        pyfftw_kwargs : dict, optional
            Additional keyword arguments passed to the pyfftw builders. See
            `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
            for a list of accepted kwargs.
        show_progress : bool, optional
            Show a progress bar while convolving the image at each lag.
        keep_convolve_arrays : bool, optional
            Keeps the convolved arrays at each lag. Disabled by default to
            minimize memory usage.

        '''

        self._delta_var = np.empty((len(self.lags)))
        self._delta_var_error = np.empty((len(self.lags)))

        if show_progress:
            bar = ProgressBar(len(self.lags))

        for i, lag in enumerate(self.lags.value):
            core = core_kernel(lag, self.data.shape[0], self.data.shape[1])
            annulus = annulus_kernel(lag, self.diam_ratio, self.data.shape[0],
                                     self.data.shape[1])

            if boundary == "wrap":
                # Don't pad for periodic boundaries
                pad_weights = self.weights
                pad_img = self.data * self.weights
            elif boundary == "fill":
                # Extend to avoid boundary effects from non-periodicity
                pad_weights = np.pad(self.weights, int(lag), padwithzeros)
                pad_img = np.pad(self.data, int(lag), padwithzeros) * \
                    pad_weights
            else:
                raise ValueError("boundary must be 'wrap' or 'fill'. "
                                 "Given {}".format(boundary))

            img_core = \
                convolution_wrapper(pad_img, core, boundary=boundary,
                                    fill_value=np.NaN,
                                    allow_huge=allow_huge,
                                    nan_treatment=nan_treatment,
                                    use_pyfftw=use_pyfftw,
                                    threads=threads,
                                    pyfftw_kwargs=pyfftw_kwargs)
            img_annulus = \
                convolution_wrapper(pad_img, annulus,
                                    boundary=boundary, fill_value=np.NaN,
                                    allow_huge=allow_huge,
                                    nan_treatment=nan_treatment,
                                    use_pyfftw=use_pyfftw,
                                    threads=threads,
                                    pyfftw_kwargs=pyfftw_kwargs)
            weights_core = \
                convolution_wrapper(pad_weights, core,
                                    boundary=boundary, fill_value=np.NaN,
                                    allow_huge=allow_huge,
                                    nan_treatment=nan_treatment,
                                    use_pyfftw=use_pyfftw,
                                    threads=threads,
                                    pyfftw_kwargs=pyfftw_kwargs)
            weights_annulus = \
                convolution_wrapper(pad_weights, annulus,
                                    boundary=boundary, fill_value=np.NaN,
                                    allow_huge=allow_huge,
                                    nan_treatment=nan_treatment,
                                    use_pyfftw=use_pyfftw,
                                    threads=threads,
                                    pyfftw_kwargs=pyfftw_kwargs)

            cutoff_val = min_weight_frac * self.weights.max()
            weights_core[np.where(weights_core <= cutoff_val)] = np.NaN
            weights_annulus[np.where(weights_annulus <= cutoff_val)] = np.NaN

            conv_arr = (img_core / weights_core) - \
                (img_annulus / weights_annulus)
            conv_weight = weights_core * weights_annulus

            if keep_convolve_arrays:
                self._convolved_arrays.append(conv_arr)
                self._convolved_weights.append(weights_core * weights_annulus)

            val, err = _delvar(conv_arr, conv_weight, lag)

            if (val <= 0) or (err <= 0) or np.isnan(val) or np.isnan(err):
                self._delta_var[i] = np.NaN
                self._delta_var_error[i] = np.NaN
            else:
                self._delta_var[i] = val
                self._delta_var_error[i] = err

            if show_progress:
                bar.update(i + 1)
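A hedged usage sketch, assuming the method above is part of turbustat's DeltaVariance class; the constructor call below is an assumption, while the result attributes are those set directly by compute_deltavar above:

from astropy.io import fits
from turbustat.statistics import DeltaVariance

hdu = fits.open("column_density.fits")[0]   # hypothetical 2D image
delvar = DeltaVariance(hdu)                 # assumed constructor signature
delvar.compute_deltavar(boundary="fill", show_progress=True)
print(delvar._delta_var, delvar._delta_var_error)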
def measure_dendrogram_properties(dend=None, cube303=cube303,
                                  cube321=cube321, cube13co=cube13co,
                                  cube18co=cube18co, noise_cube=noise_cube,
                                  sncube=sncube,
                                  suffix="",
                                  last_index=None,
                                  plot_some=True,
                                  line='303',
                                  write=True):

    assert (cube321.shape == cube303.shape == noise_cube.shape ==
            cube13co.shape == cube18co.shape == sncube.shape)
    assert sncube.wcs is cube303.wcs is sncube.mask._wcs

    metadata = {}
    metadata['data_unit'] = u.K
    metadata['spatial_scale'] =  7.2 * u.arcsec
    metadata['beam_major'] =  30 * u.arcsec
    metadata['beam_minor'] =  30 * u.arcsec
    metadata['wavelength'] =  218.22219*u.GHz
    metadata['velocity_scale'] = u.km/u.s
    metadata['wcs'] = cube303.wcs

    keys = [
            'density_chi2',
            'expected_density',
            'dmin1sig_chi2',
            'dmax1sig_chi2',
            'column_chi2',
            'expected_column',
            'cmin1sig_chi2',
            'cmax1sig_chi2',
            'temperature_chi2',
            'expected_temperature',
            'tmin1sig_chi2',
            'tmax1sig_chi2',
            'eratio321303',
            'ratio321303',
            'logh2column',
            'elogh2column',
            'logabundance',
            'elogabundance',
           ]
    obs_keys = [
            'Stot303',
            'Smin303',
            'Smax303',
            'Stot321',
            'Smean303',
            'Smean321',
            'npix',
            'e303',
            'e321',
            'r321303',
            'er321303',
            '13cosum',
            'c18osum',
            '13comean',
            'c18omean',
            's_ntotal',
            'index',
            'is_leaf',
            'parent',
            'root',
            'lon',
            'lat',
            'vcen',
            'higaldusttem',
            'reff',
            'dustmass',
            'dustmindens',
            'bad',
            #'tkin_turb',
    ]
    columns = {k:[] for k in (keys+obs_keys)}

    log.debug("Initializing dendrogram temperature fitting loop")

    # FORCE wcs to match
    # (technically should reproject here)
    cube13co._wcs = cube18co._wcs = cube303.wcs
    cube13co.mask._wcs = cube18co.mask._wcs = cube303.wcs

    if line == '303':
        maincube = cube303
    elif line == '321':
        maincube = cube321
    else:
        raise ValueError("Unrecognized line: {0}".format(line))

    # Prepare an array to hold the fitted temperatures
    tcubedata = np.empty(maincube.shape, dtype='float32')
    tcubedata[:] = np.nan
    tcubeleafdata = np.empty(maincube.shape, dtype='float32')
    tcubeleafdata[:] = np.nan


    nbad = 0

    catalog = ppv_catalog(dend, metadata)
    pb = ProgressBar(len(catalog))
    for ii,row in enumerate(catalog):
        structure = dend[row['_idx']]
        assert structure.idx == row['_idx'] == ii
        dend_obj_mask = BooleanArrayMask(structure.get_mask(), wcs=cube303.wcs)
        dend_inds = structure.indices()

        view = (slice(dend_inds[0].min(), dend_inds[0].max()+1),
                slice(dend_inds[1].min(), dend_inds[1].max()+1),
                slice(dend_inds[2].min(), dend_inds[2].max()+1),)
        #view2 = cube303.subcube_slices_from_mask(dend_obj_mask)
        submask = dend_obj_mask[view]
        #assert np.count_nonzero(submask.include()) == np.count_nonzero(dend_obj_mask.include())

        sn = sncube[view].with_mask(submask)
        sntot = sn.sum().value
        #np.testing.assert_almost_equal(sntot, structure.values().sum(), decimal=0)

        c303 = cube303[view].with_mask(submask)
        c321 = cube321[view].with_mask(submask)
        co13sum = cube13co[view].with_mask(submask).sum().value
        co18sum = cube18co[view].with_mask(submask).sum().value
        if hasattr(co13sum,'__len__'):
            raise TypeError(".sum() applied to an array has yielded a non scalar.")

        npix = submask.include().sum()
        assert npix == structure.get_npix()
        Stot303 = c303.sum().value
        if np.isnan(Stot303):
            raise ValueError("NaN in cube.  This can't happen: the data from "
                             "which the dendrogram was derived can't have "
                             "NaN pixels.")
        Smax303 = c303.max().value
        Smin303 = c303.min().value

        Stot321 = c321.sum().value
        if npix == 0:
            raise ValueError("npix=0. This is impossible.")
        Smean303 = Stot303/npix
        if Stot303 <= 0 and line=='303':
            raise ValueError("The 303 flux is <=0.  This isn't possible because "
                             "the dendrogram was derived from the 303 data with a "
                             "non-zero threshold.")
        elif Stot303 <= 0 and line=='321':
            Stot303 = 0
            Smean303 = 0
        elif Stot321 <= 0 and line=='321':
            raise ValueError("The 321 flux is <=0.  This isn't possible because "
                             "the dendrogram was derived from the 321 data with a "
                             "non-zero threshold.")
        if np.isnan(Stot321):
            raise ValueError("NaN in 321 line")
        Smean321 = Stot321/npix

        #error = (noise_cube[view][submask.include()]).sum() / submask.include().sum()**0.5
        var = ((noise_cube[dend_obj_mask.include()]**2).sum() / npix**2)
        error = var**0.5
        if np.isnan(error):
            raise ValueError("error is nan: this is impossible by definition.")

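        # Error propagation for the line ratio r = Stot321/Stot303:
        #   sigma_r = r * sqrt(var/Smean303**2 + var/Smean321**2)
        # where `var` is the variance of the structure-summed noise. If the
        # 321 flux is negative, the ratio is replaced by a noise-based upper
        # limit with a correspondingly inflated uncertainty.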
        if line == '321' and Stot303 == 0:
            r321303 = np.nan
            er321303 = np.nan
        elif Stot321 < 0:
            r321303 = error / Smean303
            er321303 = (r321303**2 * (var/Smean303**2 + 1))**0.5
        else:
            r321303 = Stot321 / Stot303
            er321303 = (r321303**2 * (var/Smean303**2 + var/Smean321**2))**0.5

        for c in columns:
            assert len(columns[c]) == ii

        columns['index'].append(row['_idx'])
        columns['s_ntotal'].append(sntot)
        columns['Stot303'].append(Stot303)
        columns['Smax303'].append(Smax303)
        columns['Smin303'].append(Smin303)
        columns['Stot321'].append(Stot321)
        columns['Smean303'].append(Smean303)
        columns['Smean321'].append(Smean321)
        columns['npix'].append(npix)
        columns['e303'].append(error)
        columns['e321'].append(error)
        columns['r321303'].append(r321303)
        columns['er321303'].append(er321303)
        columns['13cosum'].append(co13sum)
        columns['c18osum'].append(co18sum)
        columns['13comean'].append(co13sum/npix)
        columns['c18omean'].append(co18sum/npix)
        columns['is_leaf'].append(structure.is_leaf)
        columns['parent'].append(structure.parent.idx if structure.parent else -1)
        columns['root'].append(get_root(structure).idx)
        s_main = maincube._data[dend_inds]
        x,y,z = maincube.world[dend_inds]
        lon = ((z.value-(360*(z.value>180)))*s_main).sum()/s_main.sum()
        lat = (y*s_main).sum()/s_main.sum()
        vel = (x*s_main).sum()/s_main.sum()
        columns['lon'].append(lon)
        columns['lat'].append(lat.value)
        columns['vcen'].append(vel.value)

        mask2d = dend_obj_mask.include().max(axis=0)[view[1:]]
        logh2column = np.log10(np.nanmean(column_regridded.data[view[1:]][mask2d]) * 1e22)
        if np.isnan(logh2column):
            log.info("Source #{0} has NaNs".format(ii))
            logh2column = 24
        elogh2column = elogabundance
        columns['higaldusttem'].append(np.nanmean(dusttem_regridded.data[view[1:]][mask2d]))

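        # Dust-based size and mass estimates: the angular radius is converted
        # to a physical radius assuming a distance of 8.5 kpc, the mass is
        # M = N(H2) * pi * reff**2 * 2.8 * m_p (2.8 accounts for helium), and
        # the minimum density spreads that mass over a uniform sphere of
        # radius reff.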
        r_arcsec = row['radius']*u.arcsec
        reff = (r_arcsec*(8.5*u.kpc)).to(u.pc, u.dimensionless_angles())
        mass = ((10**logh2column*u.cm**-2)*np.pi*reff**2*2.8*constants.m_p).to(u.M_sun)
        density = (mass/(4/3.*np.pi*reff**3)/constants.m_p/2.8).to(u.cm**-3)

        columns['reff'].append(reff.value)
        columns['dustmass'].append(mass.value)
        columns['dustmindens'].append(density.value)
        mindens = np.log10(density.value)
        if mindens < 3:
            mindens = 3

        if (r321303 < 0 or np.isnan(r321303)) and line != '321':
            raise ValueError("Ratio <0: This can't happen any more because "
                             "if either num/denom is <0, an exception is "
                             "raised earlier")
            #for k in columns:
            #    if k not in obs_keys:
            #        columns[k].append(np.nan)
        elif (r321303 < 0 or np.isnan(r321303)) and line == '321':
            for k in keys:
                columns[k].append(np.nan)
        else:
            # Replace negatives for fitting
            if Smean321 <= 0:
                Smean321 = error
            mf.set_constraints(ratio321303=r321303, eratio321303=er321303,
                               #ratio321322=ratio2, eratio321322=eratio2,
                               logh2column=logh2column, elogh2column=elogh2column,
                               logabundance=logabundance, elogabundance=elogabundance,
                               taline303=Smean303, etaline303=error,
                               taline321=Smean321, etaline321=error,
                               mindens=mindens,
                               linewidth=10)
            row_data = mf.get_parconstraints()
            row_data['ratio321303'] = r321303
            row_data['eratio321303'] = er321303

            for k in row_data:
                columns[k].append(row_data[k])

            # Exclude bad velocities from cubes
            if row['v_cen'] < -80e3 or row['v_cen'] > 180e3:
                # Skip: there is no real structure down here
                nbad += 1
                is_bad = True
            else:
                is_bad = False
                tcubedata[dend_obj_mask.include()] = row_data['expected_temperature']
                if structure.is_leaf:
                    tcubeleafdata[dend_obj_mask.include()] = row_data['expected_temperature']

            columns['bad'].append(is_bad)

            width = row['v_rms']*u.km/u.s
            lengthscale = reff

            #REMOVED in favor of despotic version done in dendrograms.py
            # we use the analytic version here; the despotic version is
            # computed elsewhere (with appropriate gcor factors)
            #columns['tkin_turb'].append(heating.tkin_all(10**row_data['density_chi2']*u.cm**-3,
            #                                             width,
            #                                             lengthscale,
            #                                             width/lengthscale,
            #                                             columns['higaldusttem'][-1]*u.K,
            #                                             crir=0./u.s))

        if len(set(len(c) for k,c in columns.items())) != 1:
            print("Columns are different lengths.  This is not allowed.")
            import ipdb; ipdb.set_trace()

        for c in columns:
            assert len(columns[c]) == ii+1

        if plot_some and not is_bad and ((ii-nbad) % 100 == 0 or ii-nbad < 50):
            try:
                log.info("T: [{tmin1sig_chi2:7.2f},{expected_temperature:7.2f},{tmax1sig_chi2:7.2f}]"
                         "  R={ratio321303:8.4f}+/-{eratio321303:8.4f}"
                         "  Smean303={Smean303:8.4f} +/- {e303:8.4f}"
                         "  Stot303={Stot303:8.2e}  npix={npix:6d}"
                         .format(Smean303=Smean303, Stot303=Stot303,
                                 npix=npix, e303=error, **row_data))

                pl.figure(1)
                pl.clf()
                mf.denstemplot()
                pl.savefig(fpath("dendrotem/diagnostics/{0}_{1}.png".format(suffix,ii)))
                pl.figure(2).clf()
                mf.parplot1d_all(levels=[0.68268949213708585])
                pl.savefig(fpath("dendrotem/diagnostics/1dplot{0}_{1}.png".format(suffix,ii)))
                pl.draw()
                pl.show()
            except Exception as ex:
                print(ex)
        else:
            pb.update(ii+1)

        if last_index is not None and ii >= last_index:
            break

    if last_index is not None:
        catalog = catalog[:last_index+1]

    for k in columns:
        if k not in catalog.keys():
            catalog.add_column(table.Column(name=k, data=columns[k]))

    for mid,lo,hi,letter in (('expected_temperature','tmin1sig_chi2','tmax1sig_chi2','t'),
                             ('expected_density','dmin1sig_chi2','dmax1sig_chi2','d'),
                             ('expected_column','cmin1sig_chi2','cmax1sig_chi2','c')):
        catalog.add_column(table.Column(name='elo_'+letter,
                                        data=catalog[mid]-catalog[lo]))
        catalog.add_column(table.Column(name='ehi_'+letter,
                                        data=catalog[hi]-catalog[mid]))

    if write:
        catalog.write(tpath('PPV_H2CO_Temperature{0}.ipac'.format(suffix)), format='ascii.ipac')

    # Note that there are overlaps in the catalog, which means that ORDER MATTERS
    # in the above loop.  I haven't yet checked whether large scale overwrites
    # small or vice-versa; it may be that both views of the data are interesting.
    tcube = SpectralCube(data=tcubedata, wcs=cube303.wcs,
                         mask=cube303.mask, meta={'unit':'K'},
                         header=cube303.header,
                        )
    tcubeleaf = SpectralCube(data=tcubeleafdata, wcs=cube303.wcs,
                         mask=cube303.mask, meta={'unit':'K'},
                         header=cube303.header,
                        )

    if write:
        log.info("Writing TemperatureCube")
        outpath = 'TemperatureCube_DendrogramObjects{0}.fits'
        tcube.write(hpath(outpath.format(suffix)),
                    overwrite=True)

        outpath_leaf = 'TemperatureCube_DendrogramObjects{0}_leaves.fits'
        tcubeleaf.write(hpath(outpath_leaf.format(suffix)),
                    overwrite=True)


    return catalog, tcube
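
A minimal sketch of how the products written above might be read back for later
analysis.  It assumes suffix is an empty string and that the files written via
the project-specific tpath/hpath helpers end up in the current directory (both
of these are assumptions, not part of the original code):

    from astropy.table import Table
    from spectral_cube import SpectralCube

    catalog = Table.read('PPV_H2CO_Temperature.ipac', format='ascii.ipac')
    tcube = SpectralCube.read('TemperatureCube_DendrogramObjects.fits')
    leaves = catalog[catalog['is_leaf']]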
Exemple #43
0
    def quick_render_movie(self, outdir, size=256, nframes=30,
                           camera_angle=(0,0,1), north_vector=(0,0,1),
                           rot_vector=(1,0,0),
                           colormap='doom',
                           cmap_range='auto',
                           transfer_function='auto',
                           start_index=0,
                           image_prefix="",
                           output_filename='out.mp4',
                           log_scale=False,
                           rescale=True):
        """
        Create a movie rotating the cube 360 degrees from
        PP -> PV -> PP -> PV -> PP

        Parameters
        ----------
        outdir: str
            The output directory in which the individual image frames and the
            resulting output mp4 file should be stored
        size: int
            The size of the individual output frame in pixels (i.e., size=256
            will result in a 256x256 image)
        nframes: int
            The number of frames in the resulting movie
        camera_angle: 3-tuple
            The initial angle of the camera
        north_vector: 3-tuple
            The vector of 'north' in the data cube.  Default is coincident with
            the spectral axis
        rot_vector: 3-tuple
            The vector around which the camera will be rotated
        colormap: str
            A valid colormap.  See `yt.show_colormaps`
        transfer_function: 'auto' or `yt.visualization.volume_rendering.TransferFunction`
            Either 'auto' to use the colormap specified, or a valid
            TransferFunction instance
        log_scale: bool
            Should the colormap be log scaled?
        rescale: bool
            If True, the images will be rescaled to have a common 95th
            percentile brightness, which can help reduce flickering from having
            a single bright pixel in some projections
        start_index : int
            The number of the first image to save
        image_prefix : str
            A string to prepend to the image name for each image that is output
        output_filename : str
            The movie file name to output.  The suffix may affect the file type
            created.  Defaults to 'out.mp4'.  Will be placed in ``outdir``

        Returns
        -------
        images : list
            The list of rendered image frames.  The individual frames are also
            written as PNGs to ``outdir`` and assembled into ``output_filename``.
        """
        if not ytOK:
            raise IOError("yt could not be imported.  Cube renderings are not possible.")

        scale = np.max(self.cube.shape)

        if not os.path.exists(outdir):
            os.makedirs(outdir)
        elif not os.path.isdir(outdir):
            raise OSError("Output directory {0} exists and is not a directory.".format(outdir))

        if cmap_range == 'auto':
            upper = self.cube.max().value
            lower = self.cube.std().value * 3
            cmap_range = [lower,upper]

        if transfer_function == 'auto':
            tfh = self.auto_transfer_function(cmap_range, log=log_scale)
            tfh.tf.map_to_colormap(cmap_range[0], cmap_range[1], colormap=colormap)
            tf = tfh.tf
        else:
            tf = transfer_function

        center = self.dataset.domain_center
        cam = self.dataset.h.camera(center, camera_angle, scale, size, tf,
                                    north_vector=north_vector, fields='flux')

        im  = cam.snapshot()
        images = [im]

        pb = ProgressBar(nframes)
        for ii,im in enumerate(cam.rotation(2 * np.pi, nframes,
                                            rot_vector=rot_vector)):
            images.append(im)
            im.write_png(os.path.join(outdir,"%s%04i.png" % (image_prefix,
                                                             ii+start_index)),
                         rescale=False)
            pb.update(ii+1)
        log.info("Rendering complete in {0}s".format(time.time() - pb._start_time))

        if rescale:
            _rescale_images(images, os.path.join(outdir, image_prefix))

        pipe = _make_movie(outdir, prefix=image_prefix,
                           filename=output_filename)
        
        return images
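
A minimal usage sketch for the method above, assuming ytcube is an object that
exposes quick_render_movie (for example a yt-backed wrapper around a spectral
cube; the variable name and its construction are assumptions):

    images = ytcube.quick_render_movie('movie_frames',
                                       size=256,
                                       nframes=60,
                                       output_filename='cube_rotation.mp4')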
Exemple #44
0
def render_13co(
    ytcube=yt13co,
    outdir='yt_renders_13CO',
    size=512,
    scale=1200.,
    nframes=180,
    movie=True,
    camera_angle=[0, 0, 1],
    north_vector=[1, 0, 0],
    rot_vector1=[1, 0, 0],
    rot_vector2=[0.5, 0.5, 0.0],
    rot_vector3=[0.0, 0.5, 0.5],
):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf = yt.ColorTransferFunction([0, 30], grey_opacity=True)
    #tf.map_to_colormap(0.1,5,colormap='Reds')
    tf.add_gaussian(2, 1, [1.0, 0.8, 0.0, 1.0])
    tf.add_gaussian(3, 2, [1.0, 0.5, 0.0, 1.0])
    tf.add_gaussian(5, 3, [1.0, 0.0, 0.0, 1.0])
    tf.add_gaussian(10, 5, [1.0, 0.0, 0.0, 0.5])
    tf.map_to_colormap(10, 30, colormap='Reds', scale=1)

    center = ytcube.dataset.domain_dimensions / 2.
    cam = ytcube.dataset.h.camera(center,
                                  camera_angle,
                                  scale,
                                  size,
                                  tf,
                                  north_vector=north_vector,
                                  fields='flux')

    im = cam.snapshot()

    #images = [im]

    if movie:
        pb = ProgressBar(nframes * 2 + 30)
        for ii, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir, "%04i.png" % (ii))),
                         rescale=False)
            pb.update(ii)
        for jj, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir, "%04i.png" % (ii + jj))),
                         rescale=False)
            pb.update(ii + jj)
        for kk, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir, "%04i.png" % (ii + jj + kk))),
                         rescale=False)
            pb.update(ii + jj + kk)

        TheBrick = ytcube.world2yt([0.253, 0.016, 35])
        for LL, snapshot in enumerate(cam.move_to(TheBrick, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(
                os.path.join(outdir, '%04i.png' % (ii + jj + kk + LL))),
                               rescale=False)
            pb.update(ii + jj + kk + LL)
        for mm, snapshot in enumerate(cam.zoomin(5, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(
                os.path.join(outdir, '%04i.png' % (ii + jj + kk + LL + mm))),
                               rescale=False)
            pb.update(ii + jj + kk + LL + mm)
        for nn, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir,
                             "%04i.png" % (ii + jj + kk + LL + mm + nn))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn)
        for oo, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(outdir,
                             "%04i.png" % (ii + jj + kk + LL + mm + nn + oo))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn + oo)
        for pp, im in enumerate(
                cam.rotation(2 * np.pi, nframes // 3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(
                os.path.join(
                    outdir,
                    "%04i.png" % (ii + jj + kk + LL + mm + nn + oo + pp))),
                         rescale=False)
            pb.update(ii + jj + kk + LL + mm + nn + oo + pp)

        #save_images(images, paths.mpath(outdir))

        pipe = make_movie(paths.mpath(outdir))

        # collecting frames into `images` is commented out above, so return
        # the most recent snapshot instead
        return im
    else:
        return im
            vel_slopes.append(vel_slope.slope)

            dens_spec = threeD_pspec(density.value)
            dens_slope = linregress(np.log10(dens_spec[0][:-1]),
                                    np.log10(dens_spec[1][:-1]))
            dens_slopes.append(dens_slope.slope)

            cube_hdu = make_ppv(velocity, density, vel_disp=np.std(velocity),
                                T=T, threads=4, verbose=True,
                                chan_width=dv_eff / 2.,
                                v_min=-60 * u.km / u.s, v_max=60 * u.km / u.s,
                                max_chan=2000)

            filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}.fits"\
                .format(np.abs(dens), np.abs(vel), i, cube_size)

            cube_hdu.writeto(osjoin(out_dir, filename), overwrite=True)

            bar.update()


    filename = "fBM_3D_velocity_slopes_size_{0}.npy"\
        .format(cube_size)

    np.save(osjoin(out_dir, filename), np.array(vel_slopes))

    filename = "fBM_3D_density_slopes_size_{0}.npy"\
        .format(cube_size)

    np.save(osjoin(out_dir, filename), np.array(dens_slopes))
Exemple #46
0
def match(*args, verbose=True):
    """
    Find sources that match up between any number of dendrocat objects. 
    
    Parameters
    ----------
    obj1 : rsprocess.RadioSource object or rsprocess.MasterCatalog object
        A catalog with which to compare radio sources.
    obj2 : rsprocess.RadioSource object or rsprocess.MasterCatalog object
        A catalog with which to compare radio sources.
        
    Returns
    ----------
    astropy.table.Table object
    """

    from .mastercatalog import MasterCatalog

    current_arg = args[0]
    for k in range(len(args) - 1):
        obj1 = current_arg
        obj2 = args[k + 1]

        all_colnames = set(obj1.catalog.colnames + obj2.catalog.colnames)
        stack = vstack([obj1.catalog, obj2.catalog])

        all_colnames.add('_index')
        try:
            stack.add_column(Column(range(len(stack)), name='_index'))
        except ValueError:
            stack['_index'] = range(len(stack))
        stack = stack[sorted(list(all_colnames))]

        rejected = np.where(stack['rejected'] == 1)[0]

        if verbose:
            print('Combining matches')
            pb = ProgressBar(len(stack) - len(rejected))

        i = 0
        while True:

            if i == len(stack) - 1:
                break

            if i in rejected:
                i += 1
                continue

            teststar = stack[i]
            delta_p = deepcopy(stack[stack['rejected'] == 0]['_idx', '_index',
                                                             'x_cen', 'y_cen'])
            delta_p.remove_rows(
                np.where(delta_p['_index'] == teststar['_index'])[0])
            delta_p['x_cen'] = np.abs(delta_p['x_cen'] - teststar['x_cen'])
            delta_p['y_cen'] = np.abs(delta_p['y_cen'] - teststar['y_cen'])
            delta_p.sort('x_cen')

            threshold = 1e-5
            found_match = False

            dist_col = MaskedColumn(length=len(delta_p),
                                    name='dist',
                                    mask=True)

            for j in range(10):
                dist_col[j] = np.sqrt(delta_p[j]['x_cen']**2. +
                                      delta_p[j]['y_cen']**2)
                if dist_col[j] <= threshold:
                    found_match = True

            delta_p.add_column(dist_col)
            delta_p.sort('dist')

            if found_match:
                match_index = np.where(stack['_index'] == delta_p[0]['_index'])
                match = deepcopy(stack[match_index])
                stack.remove_row(match_index[0][0])

                # Find the common bounding ellipse
                new_x_cen = np.average([match['x_cen'], teststar['x_cen']])
                new_y_cen = np.average([match['y_cen'], teststar['y_cen']])

                # Find new ellipse properties
                new_maj, new_min, new_pa = commonbeam(
                    float(match['major_fwhm']), float(match['minor_fwhm']),
                    float(match['position_angle']),
                    float(teststar['major_fwhm']),
                    float(teststar['minor_fwhm']),
                    float(teststar['position_angle']))

                # Replace properties of test star
                stack[i]['x_cen'] = new_x_cen
                stack[i]['y_cen'] = new_y_cen
                stack[i]['major_fwhm'] = new_maj.value
                stack[i]['minor_fwhm'] = new_min.value
                stack[i]['position_angle'] = new_pa.value

                # Replace masked data with available values from the match
                for k, masked in enumerate(stack.mask[i]):
                    colname = stack.colnames[k]
                    if masked:
                        stack[i][colname] = match[colname]
            i += 1
            if verbose:
                pb.update()

        # Fill masked detection column fields with 'False'
        for colname in stack.colnames:
            if 'detected' in colname:
                stack[colname].fill_value = 0

        stack['_index'] = range(len(stack))
        current_arg = MasterCatalog(obj1, obj2, catalog=stack)
    return current_arg
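
A minimal usage sketch for match(), assuming rs1 and rs2 are already-built
dendrocat RadioSource or MasterCatalog objects (the variable names are
assumptions):

    combined = match(rs1, rs2, verbose=True)
    combined.catalog.write('matched_sources.dat', format='ascii', overwrite=True)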
Exemple #47
0
def pz_to_catalog(pz, zgrid, catalog, verbose=True):

    output = Table()
    pri_peakz = np.zeros_like(catalog['z_spec'])
    pri_upper = np.zeros_like(catalog['z_spec'])
    pri_lower = np.zeros_like(catalog['z_spec'])
    pri_area = np.zeros_like(catalog['z_spec'])

    pri_peakz.name = 'z1_median'
    pri_upper.name = 'z1_max'
    pri_lower.name = 'z1_min'
    pri_area.name = 'z1_area'

    pri_peakz.format = '%.4f'
    pri_upper.format = '%.4f'
    pri_lower.format = '%.4f'
    pri_area.format = '%.3f'

    sec_peakz = np.zeros_like(catalog['z_spec'])
    sec_upper = np.zeros_like(catalog['z_spec'])
    sec_lower = np.zeros_like(catalog['z_spec'])
    sec_area = np.zeros_like(catalog['z_spec'])

    sec_peakz.name = 'z2_median'
    sec_upper.name = 'z2_max'
    sec_lower.name = 'z2_min'
    sec_area.name = 'z2_area'

    sec_peakz.format = '%.4f'
    sec_upper.format = '%.4f'
    sec_lower.format = '%.4f'
    sec_area.format = '%.3f'

    if verbose:
        bar = ProgressBar(len(pz))

    for i, pzi in enumerate(pz):
        peaks, l80s, u80s, areas = get_peak_z(pzi, zgrid)
        peaks = np.array(peaks, ndmin=1)
        l80s = np.array(l80s, ndmin=1)
        u80s = np.array(u80s, ndmin=1)
        areas = np.array(areas, ndmin=1)

        if np.isnan(peaks[0]):
            pri_peakz[i] = -99.
        else:
            pri_peakz[i] = peaks[0]

        pri_upper[i] = u80s[0]
        pri_lower[i] = l80s[0]
        pri_area[i] = areas[0]

        if len(peaks) > 1:
            sec_peakz[i] = peaks[1]
            sec_upper[i] = u80s[1]
            sec_lower[i] = l80s[1]
            sec_area[i] = areas[1]
        else:
            sec_peakz[i] = -99.
            sec_upper[i] = -99.
            sec_lower[i] = -99.
            sec_area[i] = -99.

        if verbose:
            bar.update()

    output.add_column(catalog['id'])
    output.add_column(pri_peakz)
    output.add_column(pri_lower)
    output.add_column(pri_upper)
    output.add_column(pri_area)

    output.add_column(sec_peakz)
    output.add_column(sec_lower)
    output.add_column(sec_upper)
    output.add_column(sec_area)
    return output
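
A minimal usage sketch for pz_to_catalog, assuming pz is a 2D array of P(z)
values (one row per source), zgrid is the matching redshift grid, and catalog
is an astropy Table containing at least 'id' and 'z_spec' columns (all three
inputs are assumptions consistent with how they are used above):

    zpeaks = pz_to_catalog(pz, zgrid, catalog, verbose=True)
    zpeaks.write('photoz_peaks.fits', overwrite=True)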
def fourier_combine_cubes(cube1, cube2, highresextnum=0,
                          highresscalefactor=1.0,
                          lowresscalefactor=1.0, lowresfwhm=1*u.arcmin,
                          return_regridded_cube2=False,
                          return_hdu=False,
                         ):
    """
    Fourier combine two data cubes

    Parameters
    ----------
    cube1 : SpectralCube
    highresfitsfile : str
        The high-resolution FITS file
    cube2 : SpectralCube
    lowresfitsfile : str
        The low-resolution (single-dish) FITS file
    highresextnum : int
        The extension number to use from the high-res FITS file
    highresscalefactor : float
    lowresscalefactor : float
        A factor to multiply the high- or low-resolution data by to match the
        low- or high-resolution data
    lowresfwhm : `astropy.units.Quantity`
        The full-width-half-max of the single-dish (low-resolution) beam;
        or the scale at which you want to try to match the low/high resolution
        data
    return_hdu : bool
        Return an HDU instead of just a cube.  It will contain two image
        planes, one for the real and one for the imaginary data.
    return_regridded_cube2 : bool
        Return the 2nd cube regridded into the pixel space of the first?
    """
    if isinstance(cube1, str):
        cube1 = SpectralCube.read(cube1)
    if isinstance(cube2, str):
        cube2 = SpectralCube.read(cube2)
    #cube1 = spectral_cube.io.fits.load_fits_cube(highresfitsfile,
    #                                             hdu=highresextnum)
    im1 = cube1._data # want the raw data for this
    hd1 = cube1.header
    assert hd1['NAXIS'] == im1.ndim == 3
    w1 = cube1.wcs
    pixscale = np.abs(w1.wcs.get_cdelt()[0]) # REPLACE EVENTUALLY...

    cube2 = cube2.to(cube1.unit)

    assert cube1.unit == cube2.unit, 'Cubes must have same or equivalent unit'
    assert cube1.unit.is_equivalent(u.Jy/u.beam) or cube1.unit.is_equivalent(u.K), "Cubes must have brightness units."

    #f2 = regrid_fits_cube(lowresfitsfile, hd1)
    f2 = regrid_cube_hdu(cube2.hdu, hd1)
    w2 = wcs.WCS(f2.header)

    nax1,nax2,nax3 = (hd1['NAXIS1'],
                      hd1['NAXIS2'],
                      hd1['NAXIS3'])

    dcube1 = im1 * highresscalefactor
    dcube2 = f2.data * lowresscalefactor
    outcube = np.empty_like(dcube1)

    xgrid,ygrid = (np.indices([nax2,nax1])-np.array([(nax2-1.)/2,(nax1-1.)/2.])[:,None,None])
    fwhm = np.sqrt(8*np.log(2))
    # sigma in pixels
    sigma = ((lowresfwhm/fwhm/(pixscale*u.deg)).decompose().value)
    #sigma_fftspace = (1/(4*np.pi**2*sigma**2))**0.5
    sigma_fftspace = (2*np.pi*sigma)**-1
    log.debug('sigma = {0}, sigma_fftspace={1}'.format(sigma, sigma_fftspace))

    kernel = np.fft.fftshift(np.exp(-(xgrid**2+ygrid**2)/(2*sigma**2)))
    # convert the kernel, which is just a gaussian in image space,
    # to its corresponding kernel in fourier space
    kfft = np.abs(np.fft.fft2(kernel)) # should be mostly real
    # normalize the kernel
    kfft/=kfft.max()
    ikfft = 1-kfft

    pb = ProgressBar(dcube1.shape[0])

    for ii,(im1,im2) in enumerate(zip(dcube1, dcube2)):

        fft1 = np.fft.fft2(np.nan_to_num(im1))
        fft2 = np.fft.fft2(np.nan_to_num(im2))
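
        # "feather" the two planes: weight the low-resolution data by the
        # Gaussian kernel (keeping its low spatial frequencies) and the
        # high-resolution data by the complementary weight (keeping its high
        # spatial frequencies)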

        fftsum = kfft*fft2 + ikfft*fft1

        combo = np.fft.ifft2(fftsum)
        outcube[ii,:,:] = combo.real

        pb.update(ii+1)

    if return_regridded_cube2:
        return outcube, f2
    elif return_hdu:
        return fits.PrimaryHDU(data=outcube, header=w1.to_header())
    else:
        return outcube
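
A minimal usage sketch for fourier_combine_cubes, feathering an interferometer
cube with a single-dish cube (the file names are assumptions; both cubes are
assumed to already carry matching brightness units):

    import astropy.units as u

    combined = fourier_combine_cubes('interferometer_cube.fits',
                                     'singledish_cube.fits',
                                     lowresfwhm=30*u.arcsec)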
Exemple #49
0
def _convolve_model_dir_1(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + "/convolved"):
        os.mkdir(model_dir + "/convolved")

    # Find all SED files to convolve
    sed_files = (
        glob.glob(model_dir + "/seds/*.fits.gz")
        + glob.glob(model_dir + "/seds/*/*.fits.gz")
        + glob.glob(model_dir + "/seds/*.fits")
        + glob.glob(model_dir + "/seds/*/*.fits")
    )

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(
            model_names=np.zeros(len(sed_files), dtype="U30" if six.PY3 else "S30"),
            apertures=apertures,
            initialize_arrays=True,
        )
        for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug("Convolving {0}".format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order="nu")

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info("Rebinning filters")
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table["MODEL_NAME"])
        fluxes[i].write(model_dir + "/convolved/" + f.name + ".fits", overwrite=overwrite)
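
A minimal usage sketch for the helper above, assuming filters is a list of
sedfitter-style Filter objects with name and central_wavelength set and
exposing the rebin() method and response attribute used in the loop above,
and that model_dir contains a seds/ subdirectory (both assumptions):

    _convolve_model_dir_1('/path/to/model_dir', filters, overwrite=True)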
Exemple #50
0
def add_data_to_cube(cubefilename, data=None, filename=None, fileheader=None,
                     flatheader='header.txt',
                     cubeheader='cubeheader.txt', nhits=None,
                     smoothto=1, baselineorder=5, velocityrange=None,
                     excludefitrange=None, noisecut=np.inf, do_runscript=False,
                     linefreq=None, allow_smooth=True,
                     data_iterator=data_iterator,
                     coord_iterator=coord_iterator,
                     velo_iterator=velo_iterator,
                     progressbar=False, coordsys='galactic',
                     datalength=None,
                     velocity_offset=0.0, negative_mean_cut=None,
                     add_with_kernel=False, kernel_fwhm=None, fsw=False,
                     kernel_function=Gaussian2DKernel,
                     diagnostic_plot_name=None, chmod=False,
                     continuum_prefix=None,
                     debug_breakpoint=False,
                     default_unit=u.km/u.s,
                     make_continuum=True,
                     weightspec=None,
                     varweight=False):
    """
    Given a .fits file that contains a binary table of spectra (e.g., as
    you would get from the GBT mapping "pipeline" or the reduce_map.pro aoidl
    file provided by Adam Ginsburg), adds each spectrum into the cubefile.

    velocity_offset : 0.0
        Amount to add to the velocity vector before adding it to the cube
        (useful for FSW observations)
    weightspec : np.ndarray
        A spectrum with the same size as the input arrays but containing the relative
        weights of the data
    """

    #if not default_unit.is_equivalent(u.km/u.s):
    #    raise TypeError("Default unit is not a velocity equivalent.")

    if type(nhits) is str:
        log.debug("Loading nhits from %s" % nhits)
        nhits = pyfits.getdata(nhits)
    elif type(nhits) is not np.ndarray:
        raise TypeError("nhits must be a .fits file or an ndarray, but it is ",type(nhits))
    naxis2,naxis1 = nhits.shape

    if velocity_offset and not fsw:
        raise ValueError("Using a velocity offset, but obs type is not "
                         "frequency switched; this is almost certainly wrong, "
                         "but if there's a case for it I'll remove this.")
    if not hasattr(velocity_offset,'unit'):
        velocity_offset = velocity_offset*default_unit


    contimage = np.zeros_like(nhits)
    nhits_once = np.zeros_like(nhits)

    log.debug("Loading data cube {0}".format(cubefilename))
    t0 = time.time()
    # rescale image to weight by number of observations
    image = pyfits.getdata(cubefilename)*nhits
    log.debug(" ".join(("nhits statistics: mean, std, nzeros, size",str(nhits.mean()),str(nhits.std()),str(np.sum(nhits==0)), str(nhits.size))))
    log.debug(" ".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size), str(np.sum(np.isnan(image))))))
    log.debug(" ".join(("nhits shape: ",str(nhits.shape))))
    # default is to set empty pixels to NAN; have to set them
    # back to zero
    image[image!=image] = 0.0
    header = pyfits.getheader(cubefilename)
    # debug print "Cube shape: ",image.shape," naxis3: ",header.get('NAXIS3')," nhits shape: ",nhits.shape

    log.debug("".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size))))

    flathead = get_header(flatheader)
    naxis3 = image.shape[0]
    wcs = pywcs.WCS(flathead)
    cwcs = pywcs.WCS(header)
    vwcs = cwcs.sub([pywcs.WCSSUB_SPECTRAL])
    vunit = u.Unit(vwcs.wcs.cunit[vwcs.wcs.spec])
    cubevelo = vwcs.wcs_pix2world(np.arange(naxis3),0)[0] * vunit
    cd3 = vwcs.wcs.cdelt[vwcs.wcs.spec] * vunit

    if not vunit.is_equivalent(default_unit):
        raise ValueError("The units of the cube and the velocity axis are "
                         "possibly not equivalent.  Change default_unit to "
                         "the appropriate unit (probably {0})".format(vunit))

    if add_with_kernel:
        if wcs.wcs.has_cd():
            cd = np.abs(wcs.wcs.cd[1,1])
        else:
            cd = np.abs(wcs.wcs.cdelt[1])
        # Alternative implementation; may not work for .cd?
        #cd = np.abs(np.prod((wcs.wcs.get_cdelt() * wcs.wcs.get_pc().diagonal())))**0.5

    if velocityrange is not None:
        if hasattr(velocityrange, 'unit'):
            v1,v4 = velocityrange
        else:
            v1,v4 = velocityrange * default_unit
        ind1 = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2 = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # stupid hack.  REALLY stupid hack.  Don't crop.
        if np.abs(ind2-image.shape[0]) < 5:
            ind2 = image.shape[0]
        if np.abs(ind1) < 5:
            ind1 = 0

        #print "Velo match for v1,v4 = %f,%f: %f,%f" % (v1,v4,cubevelo[ind1],cubevelo[ind2])
        # print "Updating CRPIX3 from %i to %i. Cropping to indices %i,%i" % (header.get('CRPIX3'),header.get('CRPIX3')-ind1,ind1,ind2)
        # I think this could be disastrous: cubevelo is already set, but now we're changing how it's set in the header!
        # I don't think there's any reason to have this in the first place
        # header.set('CRPIX3',header.get('CRPIX3')-ind1)

        # reset v1,v4 to the points we just selected
        v1 = cubevelo[ind1]
        v4 = cubevelo[ind2-1]
    else:
        ind1=0
        ind2 = image.shape[0]
        v1,v4 = min(cubevelo),max(cubevelo)

    # debug print "Cube has %i v-axis pixels from %f to %f.  Crop range is %f to %f" % (naxis3,cubevelo.min(),cubevelo.max(),v1,v4)

    #if abs(cdelt) < abs(cd3):
    #    print "Spectra have CD=%0.2f, cube has CD=%0.2f.  Will smooth & interpolate." % (cdelt,cd3)

    # Disable progressbar if debug-logging is enabled (they clash)
    if progressbar and 'ProgressBar' in globals() and log.level > 10:
        if datalength is None:
            pb = ProgressBar(len(data))
        else:
            pb = ProgressBar(datalength)
    else:
        progressbar = False

    skipped = []

    for spectrum,pos,velo in zip(data_iterator(data,fsw=fsw),
                                 coord_iterator(data,coordsys_out=coordsys),
                                 velo_iterator(data,linefreq=linefreq)):

        if log.level <= 10:
            t1 = time.time()

        if not hasattr(velo,'unit'):
            velo = velo * default_unit

        glon,glat = pos
        cdelt = velo[1]-velo[0]
        if cdelt < 0:
            # for interpolation, require increasing X axis
            spectrum = spectrum[::-1]
            velo = velo[::-1]
            if log.level < 5:
                log.debug("Reversed spectral axis... ")

        if (velo.max() < cubevelo.min() or velo.min() > cubevelo.max()):
            raise ValueError("Data out of range.")

        if progressbar and log.level > 10:
            pb.update()

        velo += velocity_offset

        if glon != 0 and glat != 0:
            x,y = wcs.wcs_world2pix(glon,glat,0)
            if np.isnan(x) or np.isnan(y):
                log.warn("".join(("Skipping NaN point {0}, {1} ...".format(glon,glat))))
                continue
            if log.level < 10:
                log.debug("".join(("At point {0},{1} ...".format(glon,glat),)))
            if abs(cdelt) < abs(cd3) and allow_smooth:
                # need to smooth before interpolating to preserve signal
                kernwidth = abs(cd3/cdelt/2.35).decompose().value
                if kernwidth > 2 and kernwidth < 10:
                    xr = kernwidth*5
                    npx = int(np.ceil(xr*2 + 1))
                elif kernwidth > 10:
                    raise ValueError('Too much smoothing')
                else:
                    xr = 5
                    npx = 11
                #kernel = np.exp(-(np.linspace(-xr,xr,npx)**2)/(2.0*kernwidth**2))
                #kernel /= kernel.sum()
                kernel = Gaussian1DKernel(stddev=kernwidth, x_size=npx)
                smspec = np.convolve(spectrum,kernel.array,mode='same')
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     smspec)
            else:
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     spectrum)
            OK = (datavect[ind1:ind2] == datavect[ind1:ind2])

            if excludefitrange is None:
                include = OK
            else:
                # Exclude certain regions (e.g., the spectral lines) when computing the noise
                include = OK.copy()

                if not hasattr(excludefitrange,'unit'):
                    excludefitrange = excludefitrange * default_unit

                # Convert velocities to indices
                exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

                # Loop through exclude_inds pairwise
                for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
                    # Do not include the excluded regions
                    include[i1:i2] = False

                if include.sum() == 0:
                    raise ValueError("All data excluded.")

            noiseestimate = datavect[ind1:ind2][include].std()
            contestimate = datavect[ind1:ind2][include].mean()

            if noiseestimate > noisecut:
                log.info("Skipped a data point at %f,%f in file %s because it had excessive noise %f" % (x,y,filename,noiseestimate))
                skipped.append(True)
                continue
            elif negative_mean_cut is not None and contestimate < negative_mean_cut:
                log.info("Skipped a data point at %f,%f in file %s because it had negative continuum %f" % (x,y,filename,contestimate))
                skipped.append(True)
                continue
            elif OK.sum() == 0:
                log.info("Skipped a data point at %f,%f in file %s because it had NANs" % (x,y,filename))
                skipped.append(True)
                continue
            elif OK.sum()/float(abs(ind2-ind1)) < 0.5:
                log.info("Skipped a data point at %f,%f in file %s because it had %i NANs" % (x,y,filename,np.isnan(datavect[ind1:ind2]).sum()))
                skipped.append(True)
                continue
            if log.level < 10:
                log.debug("did not skip...")

            if varweight:
                weight = 1./noiseestimate**2
            else:
                weight = 1.

            if weightspec is None:
                wspec = weight
            else:
                wspec = weight * weightspec


            if 0 < int(np.round(x)) < naxis1 and 0 < int(np.round(y)) < naxis2:
                if add_with_kernel:
                    fwhm = np.sqrt(8*np.log(2))
                    kernel_size = kd = int(np.ceil(kernel_fwhm/fwhm/cd * 5))
                    if kernel_size < 5:
                        kernel_size = kd = 5
                    if kernel_size % 2 == 0:
                        kernel_size = kd = kernel_size+1
                    if kernel_size > 100:
                        raise ValueError("Huge kernel - are you sure?")
                    kernel_middle = mid = (kd-1)/2.
                    xinds,yinds = (np.mgrid[:kd,:kd]-mid+np.array([np.round(x),np.round(y)])[:,None,None]).astype('int')
                    # This kernel is NOT centered, and that's the bloody point.
                    # (I made a very stupid error and used Gaussian2DKernel,
                    # which is strictly centered, in a previous version)
                    kernel2d = np.exp(-((xinds-x)**2+(yinds-y)**2)/(2*(kernel_fwhm/fwhm/cd)**2))

                    dim1 = ind2-ind1
                    vect_to_add = np.outer(datavect[ind1:ind2],kernel2d).reshape([dim1,kd,kd])
                    vect_to_add[~OK] = 0

                    # need to slice out edges
                    if yinds.max() >= naxis2 or yinds.min() < 0:
                        yok = (yinds[0,:] < naxis2) & (yinds[0,:] >= 0)
                        xinds,yinds = xinds[:,yok],yinds[:,yok]
                        vect_to_add = vect_to_add[:,:,yok]
                        kernel2d = kernel2d[:,yok]
                    if xinds.max() >= naxis1 or xinds.min() < 0:
                        xok = (xinds[:,0] < naxis1) & (xinds[:,0] >= 0)
                        xinds,yinds = xinds[xok,:],yinds[xok,:]
                        vect_to_add = vect_to_add[:,xok,:]
                        kernel2d = kernel2d[xok,:]

                    image[ind1:ind2,yinds,xinds] += vect_to_add*wspec
                    # NaN spectral bins are not appropriately downweighted... but they shouldn't exist anyway...
                    nhits[yinds,xinds] += kernel2d*weight
                    contimage[yinds,xinds] += kernel2d * contestimate*weight
                    nhits_once[yinds,xinds] += kernel2d*weight

                else:
                    image[ind1:ind2,int(np.round(y)),int(np.round(x))][OK] += datavect[ind1:ind2][OK]*weight
                    nhits[int(np.round(y)),int(np.round(x))] += weight
                    contimage[int(np.round(y)),int(np.round(x))] += contestimate*weight
                    nhits_once[int(np.round(y)),int(np.round(x))] += weight

                if log.level < 10:
                    log.debug("Z-axis indices are %i,%i..." % (ind1,ind2,))
                    log.debug("Added a data point at %i,%i" % (int(np.round(x)),int(np.round(y))))
                skipped.append(False)
            else:
                skipped.append(True)
                log.info("Skipped a data point at x,y=%f,%f "
                         "lon,lat=%f,%f in file %s because "
                         "it's out of the grid" % (x,y,glon,glat,filename))

            if debug_breakpoint:
                import ipdb
                ipdb.set_trace()

        if log.level <= 10:
            dt = time.time() - t1
            log.debug("Completed x,y={x:4.0f},{y:4.0f}"
                      " ({x:6.2f},{y:6.2f}) in {dt:6.2g}s".format(x=float(x),
                                                                  y=float(y),
                                                                  dt=dt))

    log.info("Completed 'add_data' loop for"
             " {0} in {1}s".format(cubefilename, time.time()-t0))

    if excludefitrange is not None:
        # this block redefining "include" is used for diagnostics (optional)
        ind1a = np.argmin(np.abs(np.floor(v1-velo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-velo)))+1
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        OK = (data[dname][0,:]==data[dname][0,:])
        OK[:ind1a] = False
        OK[ind2a:] = False

        include = OK

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-velo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")
    else:
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        include = slice(None)


    if diagnostic_plot_name:
        from mpl_plot_templates import imdiagnostics

        pylab.clf()

        dd = data[dname][:,include]
        imdiagnostics(dd,axis=pylab.gca())
        pylab.savefig(diagnostic_plot_name, bbox_inches='tight')

        # Save a copy with the bad stuff flagged out; this should tell whether flagging worked
        skipped = np.array(skipped,dtype='bool')
        dd[skipped,:] = -999
        maskdata = np.ma.masked_equal(dd,-999)
        pylab.clf()
        imdiagnostics(maskdata, axis=pylab.gca())
        dpn_pre,dpn_suf = os.path.splitext(diagnostic_plot_name)
        dpn_flagged = dpn_pre+"_flagged"+dpn_suf
        pylab.savefig(dpn_flagged, bbox_inches='tight')

        log.info("Saved diagnostic plot %s and %s" % (diagnostic_plot_name,dpn_flagged))

    log.debug("nhits statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(nhits.mean(),nhits.std(),np.sum(nhits==0), nhits.size))
    log.debug("Image statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(image.mean(),image.std(),np.sum(image==0), image.size))
    
    imav = image/nhits

    if log.level <= 10:
        nnan = np.count_nonzero(np.isnan(imav))
        log.debug("imav statistics: mean, std, nzeros, size, nnan, ngood: {0} {1} {2} {3} {4} {5}".format(imav.mean(),imav.std(),np.sum(imav==0), imav.size, nnan, imav.size-nnan))
        log.debug("imav shape: {0}".format(imav.shape))

    subcube = imav[ind1:ind2,:,:]

    if log.level <= 10:
        nnan = np.sum(np.isnan(subcube))
        print("subcube statistics: mean, std, nzeros, size, nnan, ngood:",np.nansum(subcube)/subcube.size,np.std(subcube[subcube==subcube]),np.sum(subcube==0), subcube.size, nnan, subcube.size-nnan)
        print("subcube shape: ",subcube.shape)

    H = header.copy()
    if fileheader is not None:
        for k,v in fileheader.items():
            if 'RESTFRQ' in k or 'RESTFREQ' in k:
                header.set(k,v)
            #if k[0] == 'C' and '1' in k and k[-1] != '1':
            #    header.set(k.replace('1','3'), v)
    moreH = get_header(cubeheader)
    for k,v in H.items():
        header.set(k,v)
    for k,v in moreH.items():
        header.set(k,v)
    HDU = pyfits.PrimaryHDU(data=subcube,header=header)
    HDU.writeto(cubefilename,clobber=True,output_verify='fix')

    outpre = cubefilename.replace(".fits","")

    include = np.ones(imav.shape[0],dtype='bool')

    if excludefitrange is not None:
        # this block redefining "include" is used for continuum
        ind1a = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")

    HDU2 = pyfits.PrimaryHDU(data=nhits,header=flathead)
    HDU2.writeto(outpre+"_nhits.fits",clobber=True,output_verify='fix')

    #OKCube = (imav==imav)
    #contmap = np.nansum(imav[naxis3*0.1:naxis3*0.9,:,:],axis=0) / OKCube.sum(axis=0)
    if make_continuum:
        contmap = np.nansum(imav[include,:,:],axis=0) / include.sum()
        HDU2 = pyfits.PrimaryHDU(data=contmap,header=flathead)
        HDU2.writeto(outpre+"_continuum.fits",clobber=True,output_verify='fix')

        if continuum_prefix is not None:
            # Solo continuum image (just this obs set)
            HDU2.data = contimage / nhits_once
            HDU2.writeto(continuum_prefix+"_continuum.fits",clobber=True,output_verify='fix')
            HDU2.data = nhits_once
            HDU2.writeto(continuum_prefix+"_nhits.fits",clobber=True,output_verify='fix')

    log.info("Writing script file {0}".format(outpre+"_starlink.sh"))
    scriptfile = open(outpre+"_starlink.sh",'w')
    outpath,outfn = os.path.split(cubefilename)
    outpath,pre = os.path.split(outpre)
    print(("#!/bin/bash"), file=scriptfile)
    if outpath != '':
        print(('cd %s' % outpath), file=scriptfile)
    print(('. /star/etc/profile'), file=scriptfile)
    print(('kappa > /dev/null'), file=scriptfile)
    print(('convert > /dev/null'), file=scriptfile)
    print(('fits2ndf %s %s' % (outfn,outfn.replace(".fits",".sdf"))), file=scriptfile)
    if excludefitrange is not None:
        v2v3 = ""
        for v2,v3 in zip(excludefitrange[::2],excludefitrange[1::2]):
            v2v3 += "%0.2f %0.2f " % (v2.to(default_unit).value,v3.to(default_unit).value)
        print(('mfittrend %s  ranges=\\\"%0.2f %s %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v2v3,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    else:
        print(('mfittrend %s  ranges=\\\"%0.2f %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    print(('sub %s %s %s' % (outfn.replace(".fits",".sdf"),outfn.replace(".fits","_baseline.sdf"),outfn.replace(".fits","_sub.sdf"))), file=scriptfile)
    print(('sqorst %s_sub mode=pixelscale  axis=3 pixscale=%i out=%s_vrebin' % (pre,smoothto,pre)), file=scriptfile)
    print(('gausmooth %s_vrebin fwhm=1.0 axes=[1,2] out=%s_smooth' % (pre,pre)), file=scriptfile)
    print(('#collapse %s estimator=mean axis="VRAD" low=-400 high=500 out=%s_continuum' % (pre,pre)), file=scriptfile)
    print(('rm %s_sub.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_sub %s_sub.fits' % (pre,pre)), file=scriptfile)
    print(('rm %s_smooth.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_smooth %s_smooth.fits' % (pre,pre)), file=scriptfile)
    print(("# Fix STARLINK's failure to respect header keywords."), file=scriptfile)
    print(('sethead %s_smooth.fits RESTFRQ=`gethead RESTFRQ %s.fits`' % (pre,pre)), file=scriptfile)
    print(('rm %s_baseline.sdf' % (pre)), file=scriptfile)
    print(('rm %s_smooth.sdf' % (pre)), file=scriptfile)
    print(('rm %s_sub.sdf' % (pre)), file=scriptfile)
    print(('rm %s_vrebin.sdf' % (pre)), file=scriptfile)
    print(('rm %s.sdf' % (pre)), file=scriptfile)
    scriptfile.close()

    if chmod:
        scriptfilename = (outpre+"_starlink.sh").replace(" ","")
        #subprocess.call("chmod +x {0}".format(scriptfilename), shell=True)
        st = os.stat(scriptfilename)
        os.chmod(scriptfilename, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR)

    if do_runscript:
        runscript(outpre)

    _fix_ms_kms_file(outpre+"_sub.fits")
    _fix_ms_kms_file(outpre+"_smooth.fits")

    if log.level <= 20:
        log.info("Completed {0} in {1}s".format(pre, time.time()-t0))
        cube = SpectralCube.read(interferometer_fn).with_spectral_unit(u.km/u.s,
                                                                       velocity_convention='radio')
        cube.beam_threshold = 100
        # try to avoid contamination... won't work universally; need to examine
        # individual cubes and have this as a parameter
        #med = cube.spectral_slab(90*u.km/u.s, 160*u.km/u.s).median(axis=0).value
        med = cube.spectral_slab(velocity_ranges[species][0]*u.km/u.s,
                                 velocity_ranges[species][1]*u.km/u.s).median(axis=0).value
        os.system('cp {0} {1}'.format(interferometer_fn, medsubfn))
        fh = fits.open(medsubfn, mode='update')
        log.info("Median subtracting")
        pb = ProgressBar(len(fh[0].data))
        for ii,imslice in enumerate(fh[0].data):
            fh[0].data[ii] = imslice - med
            fh.flush()
            pb.update()
        fh.close()

    cube = SpectralCube.read(medsubfn).with_spectral_unit(u.GHz)

    if hasattr(cube, 'beam'):
        jtok = cube.beam.jtok(cube.spectral_axis).value
        print("Jansky/beam -> Kelvin factor = {0}".format(jtok))
    else:
        jtok = np.array([bm.jtok(x).value for bm,x in zip(cube.beams,
                                                          cube.spectral_axis)])
        print("Median Jansky/beam -> Kelvin factor = {0}".format(np.median(jtok)))

    minghz, maxghz = cube.spectral_extrema

    OK = False
Exemple #52
0
def convolve_model_dir_monochromatic(model_dir,
                                     overwrite=False,
                                     max_ram=8,
                                     wav_min=-np.inf * u.micron,
                                     wav_max=np.inf * u.micron):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    overwrite : bool, optional
        Whether to overwrite the output files
    max_ram : float, optional
        The maximum amount of RAM that can be used (in Gb)
    wav_min : float, optional
        The minimum wavelength to consider. Only wavelengths above this value
        will be output.
    wav_max : float, optional
        The maximum wavelength to consider. Only wavelengths below this value
        will be output.
    """

    modpar = parfile.read(os.path.join(model_dir, 'models.conf'), 'conf')
    if modpar.get('version', 1) > 1:
        raise ValueError(
            "monochromatic filters are no longer used for new-style model directories"
        )

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = sorted(
        glob.glob(model_dir + '/seds/*.fits.gz') +
        glob.glob(model_dir + '/seds/*/*.fits.gz') +
        glob.glob(model_dir + '/seds/*.fits') +
        glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    # Find number of models
    n_models = len(sed_files)

    if n_models == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(n_models, model_dir))

    # Find out apertures and wavelengths
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures
    n_wav = first_sed.n_wav
    wavelengths = first_sed.wav

    # For model grids that are very large, it is not possible to compute all
    # fluxes in one go, so we need to process in chunks in wavelength space.
    chunk_size = min(
        n_wav, int(np.floor(max_ram * 1024.**3 / (4. * 2. * n_models * n_ap))))

    if chunk_size == n_wav:
        log.info("Producing all monochromatic files in one go")
    else:
        log.info("Producing monochromatic files in chunks of {0}".format(
            chunk_size))

    filters = Table()
    filters['wav'] = wavelengths
    filters['filter'] = np.zeros(wavelengths.shape, dtype='S10')

    # Figure out range of wavelength indices to use
    # (wavelengths array is sorted in reverse order)
    jlo = n_wav - 1 - (wavelengths[::-1].searchsorted(wav_max) - 1)
    jhi = n_wav - 1 - wavelengths[::-1].searchsorted(wav_min)
    chunk_size = min(chunk_size, jhi - jlo + 1)

    # Loop over wavelength chunks
    for jmin in range(jlo, jhi, chunk_size):

        # Find upper wavelength to compute
        jmax = min(jmin + chunk_size - 1, jhi)

        log.info('Processing wavelengths {0} to {1}'.format(jmin, jmax))

        # Set up convolved fluxes
        fluxes = [
            ConvolvedFluxes(model_names=np.zeros(
                n_models, dtype='U30' if six.PY3 else 'S30'),
                            apertures=apertures,
                            initialize_arrays=True) for i in range(chunk_size)
        ]

        b = ProgressBar(len(sed_files))

        # Loop over SEDs
        for im, sed_file in enumerate(sed_files):

            b.update()

            log.debug('Processing {0}'.format(os.path.basename(sed_file)))

            # Read in SED
            s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

            # Convolve
            for j in range(chunk_size):

                fluxes[j].central_wavelength = wavelengths[j + jmin]
                fluxes[j].apertures = apertures
                fluxes[j].model_names[im] = s.name

                if n_ap == 1:
                    fluxes[j].flux[im] = s.flux[0, j + jmin]
                    fluxes[j].error[im] = s.error[0, j + jmin]
                else:
                    fluxes[j].flux[im, :] = s.flux[:, j + jmin]
                    fluxes[j].error[im, :] = s.error[:, j + jmin]

        for j in range(chunk_size):
            fluxes[j].sort_to_match(par_table['MODEL_NAME'])
            fluxes[j].write('{0:s}/convolved/MO{1:03d}.fits'.format(
                model_dir, j + jmin + 1),
                            overwrite=overwrite)
            filters['filter'][j + jmin] = "MO{0:03d}".format(j + jmin + 1)

    return filters
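
A minimal usage sketch for convolve_model_dir_monochromatic, restricting the
output to a chosen wavelength range (the model directory path is an
assumption):

    import astropy.units as u

    filt_table = convolve_model_dir_monochromatic('/path/to/model_dir',
                                                  overwrite=True,
                                                  wav_min=1 * u.micron,
                                                  wav_max=1000 * u.micron)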
def fit_all_tex(xaxis,
                cube,
                cubefrequencies,
                indices,
                degeneracies,
                ecube=None,
                replace_bad=False):
    """
    Parameters
    ----------
    replace_bad : bool
        Attempt to replace bad (negative) values with their upper limits?
    """

    tmap = np.empty(cube.shape[1:])
    Nmap = np.empty(cube.shape[1:])

    yy, xx = np.indices(cube.shape[1:])
    pb = ProgressBar(xx.size)
    count = 0

    for ii, jj in (zip(yy.flat, xx.flat)):
        if any(np.isnan(cube[:, ii, jj])):
            tmap[ii, jj] = np.nan
        else:
            if replace_bad:
                uplims = nupper_of_kkms(
                    replace_bad,
                    cubefrequencies,
                    einsteinAij[indices],
                    degeneracies,
                ).value
            else:
                uplims = None

            nuppers = nupper_of_kkms(
                cube[:, ii, jj],
                cubefrequencies,
                einsteinAij[indices],
                degeneracies,
            )
            if ecube is not None:
                nupper_error = nupper_of_kkms(
                    ecube[:, ii, jj],
                    cubefrequencies,
                    einsteinAij[indices],
                    degeneracies,
                ).value
                uplims = 3 * nupper_error
                if replace_bad:
                    raise ValueError("replace_bad is ignored now...")
            else:
                nupper_error = None

            fit_result = fit_tex(xaxis,
                                 nuppers.value,
                                 errors=nupper_error,
                                 uplims=uplims)
            tmap[ii, jj] = fit_result[1].value
            Nmap[ii, jj] = fit_result[0].value
        pb.update(count)
        count += 1

    return tmap, Nmap
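fit_tex and nupper_of_kkms are helpers defined elsewhere in this module. For
orientation, the sketch below shows the standard rotational-diagram
(population-diagram) fit that a function like fit_tex presumably performs;
rot_diagram_fit and every number in the example are illustrative stand-ins,
not part of the module.

import numpy as np

def rot_diagram_fit(eupper_K, nupper, degeneracy):
    # Rotational-diagram relation: ln(N_u / g_u) = ln(N_tot / Q(T_ex)) - E_u / T_ex,
    # with E_u expressed in Kelvin. A straight-line fit of ln(N_u/g_u) against
    # E_u gives T_ex from the slope and N_tot/Q(T_ex) from the intercept.
    y = np.log(nupper / degeneracy)
    slope, intercept = np.polyfit(eupper_K, y, 1)
    tex = -1.0 / slope               # excitation temperature [K]
    ntot_over_Q = np.exp(intercept)  # multiply by the molecule's partition
                                     # function Q(T_ex) to recover N_tot
    return tex, ntot_over_Q

# Hypothetical three-transition example with made-up column densities
tex, ntot_over_Q = rot_diagram_fit(np.array([20., 60., 120.]),
                                   np.array([4e13, 2e13, 6e12]),
                                   np.array([5., 9., 13.]))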
Exemple #54
0
    results = {}

    if linename in cached_gaussfit_results:
        print("Loading {0} from cache".format(linename))
        results = cached_gaussfit_results[linename]
    else:
        print("Fitting {0}, which is not in cache".format(linename))
        pb = ProgressBar(cube.shape[0])

        for vel,vslice in (zip(cube.spectral_axis, cube)):
            closest = np.argmin(np.abs(vel-velocities))
            if np.abs(velocities[closest] - vel) > vdiff:
                #print("Skipping velocity {0}, closest is {1} -> {2}".format(vel, closest,
                #                                                            velocities[closest]))
                pb.update()
                continue

            thisvel = velocities[closest].value
            guess_regs = guesses[thisvel]

            ampguess = vslice.max().value

            model_list = []
            for reg in guess_regs:

                p_init = models.Gaussian2D(amplitude=ampguess,
                                           x_mean=reg.center.x,
                                           y_mean=reg.center.y,
                                           x_stddev=bmmaj_px/STDDEV_TO_FWHM*0.75,
                                           y_stddev=bmmin_px/STDDEV_TO_FWHM*0.75,
Exemple #55
0
    def compute_spatial_distrib(self,
                                radius=None,
                                periodic=True,
                                min_frac=0.8,
                                show_progress=True):
        '''
        Compute the moments over a circular region with the specified radius.

        Parameters
        ----------
        radius : `~astropy.units.Quantity`, optional
            Override the radius size of the region.
        periodic : bool, optional
            Specify whether the boundaries can be wrapped. Default is True.
        min_frac : float, optional
            A number between 0 and 1 that sets the minimum fraction of data in
            each region that must be finite. A value of 1.0 requires that the
            region contain no NaNs.
        show_progress : bool, optional
            Show a progress bar while the moments are computed.
        '''

        # Require the fraction to be > 0 and <=1
        if min_frac <= 0.0 or min_frac > 1.:
            raise ValueError("min_frac must be larger than 0 and less than "
                             "or equal to 1.")

        self._mean_array = np.empty(self.data.shape)
        self._variance_array = np.empty(self.data.shape)
        self._skewness_array = np.empty(self.data.shape)
        self._kurtosis_array = np.empty(self.data.shape)

        # Use the new radius when another given
        if radius is not None:
            self.radius = radius

        # Convert to pixels. We need this to be an integer, so round up to
        # the nearest integer value.
        pix_rad = np.ceil(self._to_pixel(self.radius).value).astype(int)

        if periodic:
            pad_img = np.pad(self.data, pix_rad, mode="wrap")
            pad_weights = np.pad(self.weights, pix_rad, mode="wrap")
        else:
            pad_img = np.pad(self.data, pix_rad, padwithnans)
            pad_weights = np.pad(self.weights, pix_rad, padwithnans)

        circle_mask = circular_region(pix_rad)

        if show_progress:
            bar = ProgressBar((pad_img.shape[0] - 2 * pix_rad) *
                              (pad_img.shape[1] - 2 * pix_rad))

        # Loop through every point within the non-padded shape.
        prod = product(range(pix_rad, pad_img.shape[0] - pix_rad),
                       range(pix_rad, pad_img.shape[1] - pix_rad))

        for n, (i, j) in enumerate(prod):
            img_slice = pad_img[i - pix_rad:i + pix_rad + 1,
                                j - pix_rad:j + pix_rad + 1]
            wgt_slice = pad_weights[i - pix_rad:i + pix_rad + 1,
                                    j - pix_rad:j + pix_rad + 1]

            valid_img_frac = \
                np.isfinite(img_slice).sum() / float(img_slice.size)
            valid_wgt_frac = \
                np.isfinite(wgt_slice).sum() / float(wgt_slice.size)

            if valid_img_frac < min_frac or valid_wgt_frac < min_frac:
                self.mean_array[i - pix_rad, j - pix_rad] = np.NaN
                self.variance_array[i - pix_rad, j - pix_rad] = np.NaN
                self.skewness_array[i - pix_rad, j - pix_rad] = np.NaN
                self.kurtosis_array[i - pix_rad, j - pix_rad] = np.NaN

            else:
                img_slice = img_slice * circle_mask
                wgt_slice = wgt_slice * circle_mask

                moments = compute_moments(img_slice, wgt_slice)

                self.mean_array[i - pix_rad, j - pix_rad] = moments[0]
                self.variance_array[i - pix_rad, j - pix_rad] = moments[1]
                self.skewness_array[i - pix_rad, j - pix_rad] = moments[2]
                self.kurtosis_array[i - pix_rad, j - pix_rad] = moments[3]

            if show_progress:
                bar.update(n + 1)
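compute_moments and circular_region are helpers defined elsewhere in this
module. As a rough, self-contained illustration of the per-region statistics
gathered above, here is one plausible NaN-aware weighted moment calculation;
weighted_moments is a hypothetical stand-in, and whether the real helper
returns plain or excess kurtosis is not shown in this snippet.

import numpy as np

def weighted_moments(img, weights):
    # Weighted mean, variance, skewness and (excess) kurtosis of a 2-D patch,
    # ignoring non-finite pixels.
    good = np.isfinite(img) & np.isfinite(weights)
    x, w = img[good], weights[good]
    mean = np.sum(w * x) / np.sum(w)
    var = np.sum(w * (x - mean) ** 2) / np.sum(w)
    std = np.sqrt(var)
    skew = np.sum(w * ((x - mean) / std) ** 3) / np.sum(w)
    kurt = np.sum(w * ((x - mean) / std) ** 4) / np.sum(w) - 3.0
    return mean, var, skew, kurt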
Exemple #56
0
def _convolve_model_dir_1(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(model_names=np.zeros(
            len(sed_files), dtype='U30' if six.PY3 else 'S30'),
                        apertures=apertures,
                        initialize_arrays=True) for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug('Convolving {0}'.format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value,
                                                      binned_nu.value, 100)
        except AssertionError:
            log.info('Rebinning filters')
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum(
                    (s.error * f.response)**2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(
                    np.sum((s.error * f.response)**2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table['MODEL_NAME'])
        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
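The per-filter convolution above is just a response-weighted sum over the
SED's frequency grid. A minimal illustration with made-up arrays, assuming
(as the sum form suggests) that the rebinned response is normalised so the
weights sum to one over the band:

import numpy as np

nu = np.logspace(11, 13, 500)                  # Hz, made-up frequency grid
flux = 1e3 * (nu / nu.max()) ** 2              # mJy, made-up SED
response = np.exp(-0.5 * ((nu - 3e12) / 3e11) ** 2)
response /= response.sum()                     # assumed normalisation

convolved_flux = np.sum(flux * response)       # same form as fluxes[i].flux[im]
convolved_error = np.sqrt(np.sum((0.05 * flux * response) ** 2))  # 5% errors, made up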
Exemple #57
0
def render_13co(ytcube=yt13co, outdir='yt_renders_13CO',
                size=512, scale=1200., nframes=180,
                movie=True,
                camera_angle=[0, 0, 1],
                north_vector = [1, 0, 0],
                rot_vector1=[1,0,0],
                rot_vector2=[0.5,0.5,0.0],
                rot_vector3=[0.0,0.5,0.5],
               ):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf = yt.ColorTransferFunction([0,30], grey_opacity=True)
    #tf.map_to_colormap(0.1,5,colormap='Reds')
    tf.add_gaussian(2, 1, [1.0, 0.8, 0.0, 1.0])
    tf.add_gaussian(3, 2, [1.0, 0.5, 0.0, 1.0])
    tf.add_gaussian(5, 3, [1.0, 0.0, 0.0, 1.0])
    tf.add_gaussian(10, 5, [1.0, 0.0, 0.0, 0.5])
    tf.map_to_colormap(10, 30, colormap=red, scale=1)


    center = ytcube.dataset.domain_dimensions /2.
    cam = ytcube.dataset.h.camera(center, camera_angle, scale, size, tf,
                      north_vector=north_vector, fields='flux')

    im  = cam.snapshot()

    #images = [im]

    if movie:
        pb = ProgressBar(nframes*2+30)
        for ii,im in enumerate(cam.rotation(2 * np.pi, nframes/3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" % (ii))),
                         rescale=False)
            pb.update(ii)
        for jj,im in enumerate(cam.rotation(2 * np.pi, nframes/3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" %
                                                  (ii+jj))), rescale=False)
            pb.update(ii+jj)
        for kk,im in enumerate(cam.rotation(2 * np.pi, nframes/3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" %
                                                  (ii+jj+kk))), rescale=False)
            pb.update(ii+jj+kk)

        TheBrick = ytcube.world2yt([0.253, 0.016, 35])
        for LL, snapshot in enumerate(cam.move_to(TheBrick, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(os.path.join(outdir,'%04i.png' %
                                                        (ii+jj+kk+LL))),
                               rescale=False)
            pb.update(ii+jj+kk+LL)
        for mm, snapshot in enumerate(cam.zoomin(5, 15)):
            #images.append(snapshot)
            snapshot.write_png(paths.mpath(os.path.join(outdir,'%04i.png' %
                                                        (ii+jj+kk+LL+mm))),
                               rescale=False)
            pb.update(ii+jj+kk+LL+mm)
        for nn,im in enumerate(cam.rotation(2*np.pi, nframes/3, rot_vector=rot_vector1)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" % (ii+jj+kk+LL+mm+nn))),
                         rescale=False)
            pb.update(ii+jj+kk+LL+mm+nn)
        for oo,im in enumerate(cam.rotation(2 * np.pi, nframes/3, rot_vector=rot_vector2)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" %
                                                  (ii+jj+kk+LL+mm+nn+oo))), rescale=False)
            pb.update(ii+jj+kk+LL+mm+nn+oo)
        for pp,im in enumerate(cam.rotation(2 * np.pi, nframes/3, rot_vector=rot_vector3)):
            #images.append(im)
            im.write_png(paths.mpath(os.path.join(outdir,"%04i.png" %
                                                  (ii+jj+kk+LL+mm+nn+oo+pp))), rescale=False)
            pb.update(ii+jj+kk+LL+mm+nn+oo+pp)

        #save_images(images, paths.mpath(outdir))

        pipe = make_movie(paths.mpath(outdir))

        # Accumulating images in memory is commented out above, so return the
        # last rendered frame rather than the (undefined) `images` list.
        return im
    else:
        return im
def dataGen2(f):
    r"""
	Generates a dictionary of galaxy objects from a file

	Parameters
		f - Type: str. The name of the file

	Returns
		galaxies - Type: dict. A dictionary of galaxy objects with object IDs as keys
	"""

    data = pd.read_csv(f)
    #print(data['objID'])

    galaxies = {}
    # As of now, this column is always included in datasets not including
    # object IDs of nearby galaxies, and never in datasets that do include
    # them.
    if 'nearbyID' not in data.columns:
        print("If this shows up, it's a problem")  # Debug
        raise ValueError("Expected a 'nearbyID' column in the input file")
        # Include number of nearby neighbors if it is present
        if 'modelMag_u' in data.columns:
            if 'Column1' in data.columns:
                for i in range(len(data['objID'])):
                    galaxies[int(data['objID'][i])] = galaxy(
                        int(data['objID'][i]),
                        data['ra'][i],
                        data['dec'][i],
                        data['z'][i],
                        0.,
                        0.,
                        data['bpt'][i],
                        u=data['modelMag_u'][i],
                        r=data['modelMag_r'][i],
                        nearby=(int(data['Column1'][i]) - 1))
            # Exclude number of nearby neighbors if it is not present
            else:
                for i in range(len(data['objID'])):
                    galaxies[int(data['objID'][i])] = galaxy(
                        int(data['objID'][i]),
                        data['ra'][i],
                        data['dec'][i],
                        data['z'][i],
                        0.,
                        0.,
                        data['bpt'][i],
                        u=data['modelMag_u'][i],
                        r=data['modelMag_r'][i])
        else:
            if 'Column1' in data.columns:
                for i in range(len(data['objID'])):
                    galaxies[int(data['objID'][i])] = galaxy(
                        int(data['objID'][i]),
                        data['ra'][i],
                        data['dec'][i],
                        data['z'][i],
                        0.,
                        0.,
                        data['bpt'][i],
                        nearby=(int(data['Column1'][i]) - 1))
            # Exclude number of nearby neighbors if it is not present
            else:
                for i in range(len(data['objID'])):
                    galaxies[int(data['objID'][i])] = galaxy(
                        int(data['objID'][i]), data['ra'][i], data['dec'][i],
                        data['z'][i], 0., 0., data['bpt'][i])
    # This works slightly different when we want to get IDs of nearby galaxies
    else:
        sampleKey = 1237645879551066262  # Debug
        sampleKey2 = 1237645941824356443  # Debug
        print("Checkpoint 1")  # Debug
        bar = ProgressBar(len(data['objID']))
        for i in range(len(data['objID'])):
            objid = int(data['objID'][i])
            nearID = int(data['nearbyID'][i])
            #if data['nearbyID'][i] in data['objID']:
            if objid in galaxies:
                print("\nCheckpoint 2")  # Debug
                print("Target ID:", objid)  # Debug
                print("Nearby ID:", nearID)  # Debug
                print("Key 1:", galaxies[objid].nearbyIDs)  # Debug
                galaxies[objid].nearbyIDs.append(nearID)
                print("Key 1:", galaxies[objid].objId)  # Debug
                print("Key 1:", galaxies[objid].nearbyIDs)  # Debug
                #print("Key 1:", galaxies[sampleKey].nearbyIDs)   # Debug
                if sampleKey2 in galaxies:  # Debug
                    print("Found key 2!")  # Debug
                    print("Key 2:", galaxies[sampleKey2].nearbyIDs)  # Debug
            else:
                print("\nCheckpoint 3")  # Debug
                print("Target ID:", objid)  # Debug
                print("Nearby ID:", nearID)  # Debug
                galaxies[objid] = galaxy(objid, data['ra'][i], data['dec'][i],
                                         data['z'][i], 0., 0., data['bpt'][i])
                #galaxies[objID].nearbyIDs.append(nearID)
                print("Key 1:", galaxies[sampleKey].nearbyIDs)  # Debug
                if sampleKey2 in galaxies:
                    print("Found key 2!")  # Debug
                    print("Key 2:", galaxies[sampleKey2].nearbyIDs)  # Debug
            bar.update()
            del objid
            del nearID

        print("\n")
        for key in galaxies:  # Iterate through dictionary of galaxies
            galaxies[key].nearby = len(
                galaxies[key].nearbyIDs
            )  # Set number of nearby neighbors as length of list of nearby neighbors

        print("Key 1 before pass:"******"Key 1 Mr before pass:"******"Key 2 before pass:", galaxies[sampleKey2].nearbyIDs)  # Debug

    return galaxies
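The galaxy class instantiated above is defined elsewhere. A hypothetical
minimal stub, inferred only from how dataGen2 constructs and uses the objects
(the meaning of the two 0. positional arguments is not shown in this snippet,
so they are kept as unnamed placeholders):

class galaxy(object):
    """Hypothetical stand-in for the real galaxy class."""

    def __init__(self, objId, ra, dec, z, val1, val2, bpt,
                 u=None, r=None, nearby=0):
        self.objId = objId
        self.ra = ra
        self.dec = dec
        self.z = z
        self.val1 = val1      # placeholder; real attribute name unknown
        self.val2 = val2      # placeholder; real attribute name unknown
        self.bpt = bpt
        self.u = u
        self.r = r
        self.nearby = nearby
        self.nearbyIDs = []   # dataGen2 appends nearby object IDs here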
Exemple #59
0
def render_chem(yth2co321=yth2co321, yth2co303=yth2co303, ytsio=ytsio,
                outdir='yt_renders_chem3', size=512, scale=1100.,
                nframes=60,
                north_vector=[1,0,0],
                rot_vector=[1,0,0],
                movie=True, camera_angle=[-0.6, 0.4, 0.6]):

    if not os.path.exists(paths.mpath(outdir)):
        os.makedirs(paths.mpath(outdir))

    tf1 = yt.ColorTransferFunction([0.1,2], grey_opacity=True)
    #tf1.add_gaussian(0.1,0.05, [1,0,0,1])
    #tf1.map_to_colormap(0, 0.5, scale=1, colormap='Reds')
    tf1.add_gaussian(0.25, 0.01, [1,0,0,1])
    tf1.add_step(0.25,0.5,[1,0,0,1])
    #tf1.add_step(1,2,[1,1,1,1])
    tf1.map_to_colormap(0.5,2,scale=1,colormap=red)
    tf2 = yt.ColorTransferFunction([0.1,2], grey_opacity=True)
    #tf2.add_gaussian(0.1,0.05, [0,1,0,1])
    #tf2.map_to_colormap(0, 0.5, scale=1, colormap='Greens')
    tf2.add_gaussian(0.25, 0.01, [0,1,0,1])
    tf2.add_step(0.25,0.5,[0,1,0,1])
    tf2.map_to_colormap(0.5,2,scale=1,colormap=green)
    tf3 = yt.ColorTransferFunction([0.1,2], grey_opacity=True)
    #tf3.add_gaussian(0.1,0.05, [0,0,1,1])
    #tf3.map_to_colormap(0, 0.5, scale=1, colormap='Blues')
    tf3.add_gaussian(0.25, 0.01, [0,0,1,1])
    tf3.add_step(0.25,0.5,[0,0,1,1])
    tf3.map_to_colormap(0.5,2,scale=1,colormap=blue)

    center = yth2co303.dataset.domain_dimensions /2.
    camh2co303 = yth2co303.dataset.h.camera(center, camera_angle, scale, size, tf3,
                                    north_vector=north_vector, fields='flux')
    camh2co321 = yth2co321.dataset.h.camera(center, camera_angle, scale, size, tf2,
                                    north_vector=north_vector, fields='flux')
    camsio = ytsio.dataset.h.camera(center, camera_angle, scale, size, tf1,
                            north_vector=north_vector, fields='flux')

    imh2co303  = camh2co303.snapshot()
    imh2co321  = camh2co321.snapshot()
    imsio  = camsio.snapshot()
    
    pl.figure(1)
    pl.clf()
    pl.imshow(imh2co303+imh2co321+imsio)
    pl.figure(2)
    pl.clf()
    pl.imshow(imh2co303[:,:,:3]+imh2co321[:,:,:3]+imsio[:,:,:3])

    if movie:
        images_h2co303 = [imh2co303]
        images_h2co321 = [imh2co321]
        images_sio = [imsio]

        r1 = camh2co303.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r2 = camh2co321.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        r3 = camsio.rotation(2 * np.pi, nframes, rot_vector=rot_vector)
        pb = ProgressBar(nframes * 3)
        for (ii,(imh2co303,imh2co321,imsio)) in enumerate(izip(r1, r2, r3)):
            images_h2co303.append(imh2co303)
            images_h2co321.append(imh2co321)
            images_sio.append(imsio)

            imh2co303=imh2co303.swapaxes(0,1)
            imh2co303.write_png(paths.mpath(os.path.join(outdir,"h2co303_%04i.png" % (ii))),
                                        rescale=False)
            pb.update(ii*3)
            imsio=imsio.swapaxes(0,1)
            imsio.write_png(paths.mpath(os.path.join(outdir,"sio_%04i.png" % (ii))),
                                        rescale=False)
            pb.update(ii*3+1)
            imh2co321=imh2co321.swapaxes(0,1)
            imh2co321.write_png(paths.mpath(os.path.join(outdir,"h2co321_%04i.png" % (ii))),
                                        rescale=False)
            pb.update(ii*3+2)

        pb.next()


        save_images([i1+i2+i3
                     for i1,i2,i3 in izip(images_h2co303, images_h2co321, images_sio)],
                     paths.mpath(outdir))

        make_movie(paths.mpath(outdir))

        return images_h2co303,images_h2co321,images_sio
    else:
        return imh2co303,imh2co321,imsio
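All of the snippets above lean on astropy's ProgressBar. For reference, a
minimal self-contained usage pattern (the sleep is only there so the bar is
visible when run interactively):

import time
from astropy.utils.console import ProgressBar

# Calling update() with no argument advances the bar by one step, as in the
# loops above; passing an explicit running total also works.
pb = ProgressBar(50)
for i in range(50):
    time.sleep(0.01)
    pb.update()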