Example #1
    def read(self, max_events=None):
        """
        Read the file using the appropriate method depending on the file origin

        Parameters
        ----------
        max_events : int
            Maximum number of events to read

        Returns
        -------
        source : generator
            A generator that can be iterated over to obtain events
        """

        # Obtain relevant source
        log.debug("[file] Reading file...")
        if max_events:
            log.info("[file] Max events being read = {}".format(max_events))
        switch = {
            'hessio':
                lambda: hessio_event_source(get_path(self.input_path),
                                            max_events=max_events),
            'targetio':
                lambda: targetio_source(self.input_path,
                                        max_events=max_events),
        }
        try:
            source = switch[self.origin]()
        except KeyError:
            log.exception("unknown file origin '{}'".format(self.origin))
            raise
        log.debug("[file] Reading complete")

        return source
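The `switch` dict of lambdas above is a common stand-in for a switch statement: each origin maps to a zero-argument callable, nothing is evaluated until a branch is selected, and an unknown origin surfaces as a `KeyError`. A minimal, generic sketch of the same dispatch pattern (the origin names and sources below are hypothetical, not part of ctapipe):

import logging

log = logging.getLogger(__name__)

def open_source(origin, path, max_events=None):
    # Each branch is wrapped in a lambda so only the selected one runs.
    switch = {
        'lines': lambda: iter(open(path)),
        'range': lambda: iter(range(max_events or 0)),
    }
    try:
        source = switch[origin]()
    except KeyError:
        log.exception("unknown origin '{}'".format(origin))
        raise
    return source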
Example #2
    def to_region(self):
        """
        Convert to a ``regions.Region`` object
        """

        coords = self.convert_coords()
        log.debug(coords)
        viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize',
                        'symbol', 'fontsize', 'fontstyle', 'usetex',
                        'labelpos', 'labeloff', 'linewidth', 'linestyle',
                        'point', 'textangle', 'fontweight']

        if isinstance(coords[0], SkyCoord):
            reg = self.shape_to_sky_region[self.region_type](*coords)
        elif isinstance(coords[0], PixCoord):
            reg = self.shape_to_pixel_region[self.region_type](*coords)
        else:
            self._raise_error("No central coordinate")

        reg.visual = RegionVisual()
        reg.meta = RegionMeta()

        # both 'text' and 'label' should be set to the same value, where we
        # default to the 'text' value since that is the one used by ds9 regions
        label = self.meta.get('text',
                              self.meta.get('label', ""))
        if label != '':
            reg.meta['label'] = label
        for key in self.meta:
            if key in viz_keywords:
                reg.visual[key] = self.meta[key]
            else:
                reg.meta[key] = self.meta[key]
        reg.meta['include'] = self.include
        return reg
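The nested `dict.get` call above is what encodes the 'text'-over-'label' precedence with an empty-string fallback. A tiny illustration with made-up metadata dictionaries:

for meta in ({'text': 'A', 'label': 'B'}, {'label': 'B'}, {}):
    label = meta.get('text', meta.get('label', ""))
    print(repr(label))  # 'A', then 'B', then ''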
Example #3
    def __call__(self, *args, **kwargs):

        log.debug("Fitter called with args={0} and kwargs={1}".format(args, kwargs))
        use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
        if use_lmfit:
            return self.lmfitter(*args,**kwargs)
        return self.fitter(*args,**kwargs)
Example #4
    def to_region(self):
        """
        Convert to a ``regions.Region`` object
        """

        coords = self.convert_coords()
        log.debug(coords)
        viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize',
                        'fontsize', 'fontstyle', 'usetex',
                        'labelpos', 'labeloff', 'linewidth', 'linestyle']

        if isinstance(coords[0], BaseCoordinateFrame):
            reg = self.shape_to_sky_region[self.region_type](*coords)
        elif isinstance(coords[0], PixCoord):
            reg = self.shape_to_pixel_region[self.region_type](*coords)
        else:
            self._raise_error("No central coordinate")

        reg.visual = RegionVisual()
        reg.meta = RegionMeta()
        for key in self.meta.keys():
            if key in viz_keywords:
                reg.visual[key] = self.meta[key]
            else:
                reg.meta[key] = self.meta[key]
        reg.meta['include'] = self.include
        return reg
Example #5
def status(XCov_filename):
    ll_name = "xd_log_likelihood"
    with h5py.File(XCov_filename, mode='r') as f:
        if ll_name not in f['search']:
            logger.info("0 done")
            return

        ll = f['search'][ll_name]
        ndone = np.isfinite(ll).sum()
        nnot = np.isnan(ll).sum()
        logger.info("{} done, {} not done".format(ndone, nnot))

        # check what blocks are unfinished
        if nnot != 0:
            idx, = np.where(np.isnan(ll))
            diff = idx[1:]-idx[:-1]
            derp, = np.where(diff > 1)
            if 0 not in derp:
                derp = np.concatenate(([0], derp, [len(idx)-1]))

            logger.debug("Unfinished blocks:")
            blocks = []
            for d1,d2 in zip(derp[:-1],derp[1:]):
                if d1 == 0:
                    blocks.append("{}-{}".format(idx[d1], idx[d2]))
                else:
                    blocks.append("{}-{}".format(idx[d1+1], idx[d2]))
            logger.debug(", ".join(blocks))
Example #6
def _fast_reader(index_map, data):
    """
    Use scipy.ndimage.find_objects to quickly identify subsets of the data
    to increase speed of dendrogram loading
    """

    flux_by_structure, indices_by_structure = {},{}

    from scipy import ndimage
    idxs = np.unique(index_map[index_map > -1])

    # ndimage ignores 0 and -1, but we want index 0
    object_slices = ndimage.find_objects(index_map+1)
    index_cube = np.indices(index_map.shape)

    # Need to have same length, otherwise assumptions above are wrong
    assert len(idxs) == len(object_slices)
    log.debug('Creating index maps for {0} indices...'.format(len(idxs)))

    for idx,sl in ProgressBar(zip(idxs, object_slices)):
        match = index_map[sl] == idx
        sl2 = (slice(None),) + sl
        match_inds = index_cube[sl2][:, match]
        coords = list(zip(*match_inds))
        dd = data[sl][match].tolist()
        flux_by_structure[idx] = dd
        indices_by_structure[idx] = coords

    return flux_by_structure, indices_by_structure
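`scipy.ndimage.find_objects` returns one tuple of slices per label (1..max), which is what lets the reader above restrict `index_map == idx` to a small bounding box instead of the whole array. A minimal demonstration, independent of any dendrogram data:

import numpy as np
from scipy import ndimage

index_map = np.array([[-1, 0, 0],
                      [-1, 1, 1],
                      [ 2, 2, -1]])

# find_objects ignores 0 and negative labels, so shift everything up by one
for label, sl in enumerate(ndimage.find_objects(index_map + 1)):
    print(label, sl, index_map[sl])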
Example #7
    def get_event(self, event_req, id_flag=False):
        """
        Loop through events until the requested event is found

        Parameters
        ----------
        event_req : int
            Event index requested
        id_flag : bool
            'event_req' refers to event_id instead of event_index

        Returns
        -------
        event : `ctapipe` event-container

        """
        if not id_flag:
            log.info("[file][read] Finding event index {}...".format(event_req))
        else:
            log.info("[file][read] Finding event id {}...".format(event_req))
        source = self.read()
        for event in source:
            event_id = event.dl0.event_id
            index = event.count if not id_flag else event_id
            if not index == event_req:
                log.debug("[event_id] skipping event: {}".format(event_id))
                continue
            log.info("[file] Event {} found".format(event_req))
            return event
        log.info("[file][read] Event does not exist!")
        return None
Example #8
    def button2action(self, event=None, debug=False, subtract=True,
                      powerlaw=None, fit_original=False,
                      spline=False,
                      spline_sampling=None,
                      spline_downsampler=np.median,
                      baseline_fit_color='orange', **kwargs):
        """
        Do the baseline fitting and save and plot the results.

        """
        if debug: print "Button 2/3 Baseline.  Subtract=",subtract
        if self.subtracted:
            self.unsubtract()

        # interactive guesspeakwidth passes this on; it should be excised
        if 'nwidths' in kwargs:
            kwargs.pop('nwidths')

        if powerlaw is None:
            powerlaw = self.powerlaw

        self.clear_highlights()

        self._xfit_units = self.Spectrum.xarr.unit

        log.debug("Fitting baseline: powerlaw={0} "
                  "spline={1}".format(self.powerlaw, spline))
        self.fit(powerlaw=powerlaw, includemask=self.includemask,
                 order=self.order, spline=spline,
                 spline_sampling=spline_sampling,
                 spline_downsampler=spline_downsampler)

        if subtract:
            if self.subtracted and fit_original:
                # use the spectrum with the old baseline added in (that's what we fit to)
                self.Spectrum.data = self.spectofit - self.basespec
            else:
                self.Spectrum.data -= self.basespec
            self.subtracted = True
        else:
            if self.subtracted:
                self.unsubtract()
            self.subtracted = False

        if self.Spectrum.plotter.axis is not None:
            if debug: print "Plotting baseline"
            if event is not None:
                # preserve frame if fitting interactively
                kwargs.update({'use_window_limits':True})
            self.plot_baseline(baseline_fit_color=baseline_fit_color, **kwargs)

        # disconnect interactive window (and more importantly, reconnect to
        # original interactive cmds)
        self.clear_all_connections()

        if hasattr(self.Spectrum,'header'):
            history.write_history(self.Spectrum.header,
                    "BASELINE order=%i pars=%s" % (self.order,
                        ",".join([str(s) for s in self.baselinepars])) +
                        ("(powerlaw)" if self.powerlaw else ""))
def get_guesses(index, catalog, max_comp=3):
    """
    put the guesses into flat format and limit their numbers
    """
    amp,vc,vsig,r = catalog['Smean303', 'v_cen', 'v_rms',
                            'r303321'][index].columns.values()
    keep_velo = (vsig < 10) & (vsig > 1)
    if np.count_nonzero(keep_velo) > max_comp:
        # Also exclude small objects when there are many overlaps
        big_objs = catalog[index]['npix'] > 100
        # but only if excluding them isn't overly restrictive
        if np.count_nonzero(big_objs & keep_velo) >= max_comp:
            keep_velo &= big_objs
    keep_inds = np.argsort(amp[keep_velo])[-max_comp:]
    log.debug('Kept {0}, amps {1}'.format(np.array(index)[keep_velo][keep_inds],
                                          np.array(amp)[keep_velo][keep_inds]))
    amp,vc,vsig,r = [a[keep_velo][keep_inds] for a in (amp,vc,vsig,r)]
    glon = [xc - 360 if xc > 180 else xc
            for xc in catalog['x_cen']]
    # TODO: make sure the velocity units remain consistent
    # The logic here is to filter out things at vlsr<-50 km/s that are not in Sgr C;
    # these are mostly other lines in Sgr B2
    result = [pars
              for pars,glon in zip(zip(*[x.data for x in [amp,vc/1e3,vsig,r,amp]]),
                                      glon)
             ]

    return result
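The component-limiting step above keeps only the strongest surviving peaks via `np.argsort(...)[-max_comp:]`. A toy version with made-up numbers:

import numpy as np

amp = np.array([0.2, 5.0, 1.5, 3.0, 0.1])
vsig = np.array([0.5, 2.0, 4.0, 12.0, 3.0])
max_comp = 2

keep_velo = (vsig < 10) & (vsig > 1)            # [False, True, True, False, True]
keep_inds = np.argsort(amp[keep_velo])[-max_comp:]
print(amp[keep_velo][keep_inds])                # the two largest kept amps: [1.5 5.]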
Example #10
    def annotations(self, shortvarnames=None, debug=False):
        """
        Return a list of TeX-formatted labels

        The values and errors are formatted so that only the significant digits
        are displayed.  Rounding is performed using the decimal package.

        Parameters
        ----------
        shortvarnames : list
            A list of variable names (tex is allowed) to include in the
            annotations.  Defaults to self.shortvarnames

        Examples
        --------
        >>> # Annotate a Gaussian
        >>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
        """
        from decimal import Decimal  # for formatting

        svn = self.shortvarnames if shortvarnames is None else shortvarnames
        # if pars need to be replicated....
        if len(svn) < self.npeaks * self.npars:
            svn = svn * self.npeaks

        parvals = self.parinfo.values
        parerrs = self.parinfo.errors

        loop_list = [
            (
                parvals[ii + jj * self.npars + self.vheight],
                parerrs[ii + jj * self.npars + self.vheight],
                svn[ii + jj * self.npars],
                self.parinfo.fixed[ii + jj * self.npars + self.vheight],
                jj,
            )
            for jj in range(self.npeaks)
            for ii in range(self.npars)
        ]

        label_list = []
        for (value, error, varname, fixed, varnumber) in loop_list:
            log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
            if fixed or error == 0:
                label = "$%s(%i)$=%8s" % (
                    varname,
                    varnumber,
                    Decimal("%g" % value).quantize(Decimal("%0.6g" % (value))),
                )
            else:
                label = "$%s(%i)$=%8s $\\pm$ %8s" % (
                    varname,
                    varnumber,
                    Decimal("%g" % value).quantize(Decimal("%0.2g" % (min(np.abs([value, error]))))),
                    Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),
                )
            label_list.append(label)

        labels = tuple(mpcb.flatten(label_list))
        return labels
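The label strings above use `decimal.Decimal.quantize` so a fitted value is rounded to the precision implied by its error (two significant digits of the smaller of the two). A short stand-alone sketch of that rounding:

from decimal import Decimal

value, error = 3.14159265, 0.0123
# quantize the value to the exponent implied by the error
rounded_value = Decimal("%g" % value).quantize(Decimal("%0.2g" % error))
rounded_error = Decimal("%g" % error).quantize(Decimal("%0.2g" % error))
print(rounded_value, rounded_error)   # 3.142 0.012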
Example #11
    def solar_system_shapiro_delay(self, toas, acc_delay=None):
        """
        Returns the total Shapiro delay due to solar system objects.
        If the PLANET_SHAPIRO model param is set to True then
        planets are included, otherwise only the value for the
        Sun is calculated.

        Requires Astrometry or similar model that provides the
        ssb_to_psb_xyz method for direction to pulsar.

        If planets are to be included, TOAs.compute_posvels() must
        have been called with the planets=True argument.
        """
        # Start out with 0 delay with units of seconds
        tbl = toas.table
        delay = numpy.zeros(len(tbl))
        for ii, key in enumerate(tbl.groups.keys):
            grp = tbl.groups[ii]
            obs = tbl.groups.keys[ii]['obs']
            loind, hiind = tbl.groups.indices[ii:ii+2]
            if key['obs'].lower() == 'barycenter':
                log.debug("Skipping Shapiro delay for Barycentric TOAs")
                continue
            psr_dir = self.ssb_to_psb_xyz_ICRS(epoch=grp['tdbld'].astype(numpy.float64))
            delay[loind:hiind] += self.ss_obj_shapiro_delay(grp['obs_sun_pos'],
                                    psr_dir, self._ss_mass_sec['sun'])
            if self.PLANET_SHAPIRO.value:
                for pl in ('jupiter', 'saturn', 'venus', 'uranus'):
                    delay[loind:hiind] += self.ss_obj_shapiro_delay(grp['obs_'+pl+'_pos'],
                                                   psr_dir, self._ss_mass_sec[pl])
        return delay * u.second
Example #12
    def containment_radius_image(self, fraction):
        """Compute containment radius image.

        Parameters
        ----------
        fraction : float
            Containment fraction

        Returns
        -------
        image : `numpy.ndarray`
            Containment radius image
        """
        out = np.zeros(self.shape, dtype=float)
        npix = self.size
        for ii in range(npix):
            if (100 * ii) % npix == 0:
                percent = 100. * ii / npix
                log.debug('Processing pixel {ii:5d} of {npix:5d} ({percent:5.2f}%)'
                          ''.format(**locals()))
            try:
                psf = self._get_psf(ii)
                out.flat[ii] = psf.containment_radius(fraction)
            except ValueError:
                # This is what happens to pixels in the map without PSF info
                out.flat[ii] = np.nan

        return out
Example #13
    def test(self):
        for n in range(self.N):
            logger.debug("Orbit {}".format(n))

            # x,v = self.w[:3,:,n],self.w[3:,:,n]
            x = self.w.pos.value[...,n]
            v = self.w.vel.value[...,n]
            actions,angles,freqs = isochrone_xv_to_aa(x, v, self.potential)

            for i in range(3):
                assert np.allclose(actions[i,1:], actions[i,0], rtol=1E-5)

            # Compare to genfunc
            s_v = (v*u.kpc/u.Myr).to(u.km/u.s).value
            s_w = np.vstack((x,s_v))
            m = self.potential.parameters['m'] / 1E11
            b = self.potential.parameters['b']
            aa = np.array([toy_potentials.angact_iso(s_w[:,i].T, params=(m,b)) for i in range(s_w.shape[1])])
            s_actions = (aa[:,:3]*u.km/u.s*u.kpc).decompose(galactic).value
            s_angles = aa[:,3:]

            assert np.allclose(actions, s_actions.T, rtol=1E-8)
            assert_angles_allclose(angles, s_angles.T, rtol=1E-8)

            # test roundtrip
            x2,v2 = isochrone_aa_to_xv(actions, angles, self.potential)

            assert np.allclose(x, x2, rtol=1E-8)
            assert np.allclose(v, v2, rtol=1E-8)
Example #14
    def get_charge_resolution(self):
        """
        Calculate and obtain the charge resolution graph arrays.

        Returns
        -------
        true_charge : ndarray
            The X axis true charges.
        chargeres : ndarray
            The Y axis charge resolution values.
        chargeres_error : ndarray
            The error on the charge resolution.
        scaled_chargeres : ndarray
            The Y axis charge resolution divided by the Goal.
        scaled_chargeres_error : ndarray
            The error on the charge resolution divided by the Goal.
        """
        log.debug('[chargeres] Calculating charge resolution')
        true_charge = np.fromiter(iter(self.sum_dict.keys()), dtype=int)
        summed_charge = np.fromiter(iter(self.sum_dict.values()), dtype=float)
        num = np.fromiter(iter(self.n_dict.values()), dtype=int)

        chargeres = np.sqrt((summed_charge / num) + true_charge) / true_charge
        chargeres_error = chargeres * (1 / np.sqrt(2 * num))

        scale = self.goal(true_charge)
        scaled_chargeres = chargeres/scale
        scaled_chargeres_error = chargeres_error/scale

        return true_charge, chargeres, chargeres_error, \
            scaled_chargeres, scaled_chargeres_error
Example #15
    def test(self):
        """
            !!!!! NOTE !!!!!
            For Harmonic Oscillator, Sanders' code works for the units I use...
        """
        for n in range(self.N):
            logger.debug("Orbit {}".format(n))

            # x,v = self.w[:3,:,n],self.w[3:,:,n]
            x = self.w.pos.value[...,n]
            v = self.w.vel.value[...,n]
            actions,angles = harmonic_oscillator_xv_to_aa(x, v, self.potential)

            for i in range(3):
                assert np.allclose(actions[i,1:], actions[i,0], rtol=1E-5)

            # Compare to genfunc
            s_w = np.vstack((x,v))
            omega = self.potential.parameters['omega']
            aa = np.array([toy_potentials.angact_ho(s_w[:,i].T, omega=omega) for i in range(s_w.shape[1])])
            s_actions = aa[:,:3]
            s_angles = aa[:,3:]

            assert np.allclose(actions, s_actions.T, rtol=1E-8)
            assert_angles_allclose(angles, s_angles.T, rtol=1E-8)
Example #16
def to_table(container):
    """
    Convert a `ctapipe.core.Container` to an `astropy.Table` with one row

    Parameters
    ----------
    container: ctapipe.core.Container

    Returns
    -------
    Table: astropy.Table
    """
    names = list()
    columns = list()
    for k, v in writeable_items(container).items():

        v_arr = np.array(v)
        v_arr = v_arr.reshape((1,) + v_arr.shape)
        log.debug("Creating column for item '{0}' of shape {1}".
                  format(k, v_arr.shape))
        names.append(k)
        columns.append(Column(v_arr))

    return Table(data=columns,  # dtypes are inferred by columns
                 names=names,
                 meta=container.meta)
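The conversion above reshapes every item to have a leading length-1 axis so that each becomes a single-row column. A minimal sketch of the same trick with plain values (no ctapipe Container involved):

import numpy as np
from astropy.table import Column, Table

items = {'event_id': 42, 'charge': np.array([1.0, 2.0, 3.0])}

names, columns = [], []
for k, v in items.items():
    v_arr = np.array(v)
    v_arr = v_arr.reshape((1,) + v_arr.shape)   # one row, original shape preserved
    names.append(k)
    columns.append(Column(v_arr))

t = Table(data=columns, names=names)
print(len(t), t.colnames)   # 1 ['event_id', 'charge']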
Example #17
    def test(self):
        """
            !!!!! NOTE !!!!!
            For Harmonic Oscillator, Sanders' code works for the units I use...
        """
        for n in range(self.N):
            logger.debug("Orbit {}".format(n))

            x,v = self.w[:,n,:3],self.w[:,n,3:]
            ww = self.w[:,n]
            actions,angles = harmonic_oscillator_xv_to_aa(x, v, self.potential)

            for i in range(3):
                assert np.allclose(actions[1:,i], actions[0,i], rtol=1E-5)

            # Compare to genfunc
            omega = self.potential.parameters['omega']
            aa = np.array([toy_potentials.angact_ho(w, omega=omega) for w in ww])
            s_actions = aa[:,:3]
            s_angles = aa[:,3:]

            assert np.allclose(actions, s_actions, rtol=1E-8)
            assert np.allclose(angles, s_angles, rtol=1E-8)

            # test roundtrip
            # x2,v2 = harmonic_oscillator_aa_to_xv(actions, angles, self.potential)

            # TODO: figure out transform back
            continue

            rel_err_x = np.abs((x2 - x) / x)
            rel_err_v = np.abs((v2 - v) / v)

            assert rel_err_x.max() < (1E-8)
            assert rel_err_v.max() < (1E-8)
Example #18
    def _run_wrapper(self, index):
        logger.info("Orbit {0}".format(index))

        # unpack input argument dictionary
        import gary.potential as gp
        potential = gp.load(os.path.join(self.cache_path, self.config.potential_filename))

        # read out just this initial condition
        norbits = len(self.w0)
        allfreqs = np.memmap(self.cache_file, mode='r',
                             shape=(norbits,), dtype=self.cache_dtype)

        # short-circuit if this orbit is already done
        if allfreqs['success'][index]:
            logger.debug("Orbit {0} already successfully completed.".format(index))
            return None

        # Only pass in things specified in _run_kwargs (w0 and potential required)
        kwargs = dict([(k,self.config[k]) for k in self.config.keys() if k in self._run_kwargs])
        res = self.run(w0=self.w0[index], potential=potential, **kwargs)
        res['index'] = index

        # cache res into a tempfile, return name of tempfile
        tmpfile = os.path.join(self._tmpdir, "{0}-{1}.pickle".format(self.__class__.__name__, index))
        with open(tmpfile, 'wb') as f:
            pickle.dump(res, f)
        return tmpfile
Example #19
    def test_actions(self):
        # t = self.t[::10]
        t = self.t

        N_max = 6
        for n in range(self.N):
            print("\n\n")
            logger.info("======================= Orbit {} =======================".format(n))
            # w = self.w[:,::10,n]
            w = self.w[...,n]
            orb = self.orbit[:,n]
            circ = orb.circulation()

            # get values from Sanders' code
            logger.debug("Computing actions from genfunc...")
            s_actions,s_angles,s_freqs,toy_potential = sanders_act_ang_freq(t, w, circ, N_max=N_max)

            logger.debug("Computing actions...")
            ret = find_actions(orb, N_max=N_max, toy_potential=toy_potential)
            actions = ret['actions']
            angles = ret['angles']
            freqs = ret['freqs']

            logger.info("Action ratio: {}".format(actions / s_actions))
            logger.info("Angle ratio: {}".format(angles / s_angles))
            logger.info("Freq ratio: {}".format(freqs / s_freqs))

            assert np.allclose(actions.value, s_actions, rtol=1E-5)
            assert np.allclose(angles.value, s_angles, rtol=1E-5)
            assert np.allclose(freqs.value, s_freqs, rtol=1E-5)
Example #20
def make_action_files(t, w, potential, suffix="", overwrite=False,
                      force_harmonic_oscillator=False, N_max=6):

    action_filename = os.path.join(plot_path, "actions{}.npy".format(suffix))

    if overwrite and os.path.exists(action_filename):
        os.remove(action_filename)

    if not os.path.exists(action_filename):
        # compute the actions and angles for the orbit
        actions,angles,freqs = sd.cross_validate_actions(t, w[:,0], N_max=N_max, nbins=100,
                                    force_harmonic_oscillator=force_harmonic_oscillator,
                                    units=potential.units, skip_failures=True,
                                    overlap=0) #w.shape[0]//100)

        # now compute for the full time series
        r = sd.find_actions(t, w[:,0], N_max=N_max, units=potential.units, return_Sn=True,
                            force_harmonic_oscillator=force_harmonic_oscillator)
        full_actions,full_angles,full_freqs = r[:3]
        Sn,dSn_dJ,nvecs = r[3:]

        np.save(action_filename, (actions,angles,freqs) + r)
        logger.debug("Actions computed and saved to file: {}".format(action_filename))
    else:
        r = np.load(action_filename)
        actions,angles,freqs = r[:3]
        full_actions,full_angles,full_freqs = r[3:6]
        Sn,dSn_dJ,nvecs = r[6:]
        logger.debug("Actions read from file: {}".format(action_filename))

    return actions,angles,freqs,full_actions,full_angles,full_freqs
Example #21
    def test(self):
        for n in range(self.N):
            logger.debug("Orbit {}".format(n))

            x,v = self.w[:,n,:3],self.w[:,n,3:]
            s_v = (v*u.kpc/u.Myr).to(u.km/u.s).value
            s_w = np.hstack((x,s_v))
            actions,angles = isochrone_xv_to_aa(x, v, self.potential)

            for i in range(3):
                assert np.allclose(actions[1:,i], actions[0,i], rtol=1E-5)

            # Compare to genfunc
            m = self.potential.parameters['m'] / 1E11
            b = self.potential.parameters['b']
            aa = np.array([toy_potentials.angact_iso(w, params=(m,b)) for w in s_w])
            s_actions = (aa[:,:3]*u.km/u.s*u.kpc).decompose(galactic).value
            s_angles = aa[:,3:]

            assert np.allclose(actions, s_actions, rtol=1E-8)
            assert np.allclose(angles, s_angles, rtol=1E-8)

            # test roundtrip
            x2,v2 = isochrone_aa_to_xv(actions, angles, self.potential)

            rel_err_x = np.abs((x2 - x) / x)
            rel_err_v = np.abs((v2 - v) / v)

            assert rel_err_x.max() < (1E-8)
            assert rel_err_v.max() < (1E-8)
Example #22
def _matplotlib_pil_bug_present():
    """
    Determine whether PIL images should be pre-flipped due to a bug in Matplotlib.

    Prior to Matplotlib 1.2.0, RGB images provided as PIL objects were
    oriented wrongly. This function tests whether the bug is present.
    """

    from matplotlib.image import pil_to_array

    try:
        from PIL import Image
    except:
        import Image

    from astropy import log

    array1 = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    image = Image.fromarray(array1)
    array2 = pil_to_array(image)

    if np.all(array1 == array2):
        log.debug("PIL Image flipping bug not present in Matplotlib")
        return False
    elif np.all(array1 == array2[::-1, :]):
        log.debug("PIL Image flipping bug detected in Matplotlib")
        return True
    else:
        log.warn("Could not properly determine Matplotlib behavior for RGB images - image may be flipped incorrectly")
        return False
Example #23
def main(iso_filename, XCov_filename, interpolate=True, overwrite=False):

    # FOR PARSEC ISOCHRONE (reversing it for interpolation)
    iso = ascii.read(iso_filename, header_start=13)[:114][::-1]
    iso = nprf.stack_arrays((iso[:25], iso[27:]),usemask=False) # because of stupid red clump turnaround

    # FOR DARTMOUTH ISOCHRONE (reversing it for interpolation)
    # iso = ascii.read(iso_filename, header_start=8)[::-1]

    # output hdf5 file
    with h5py.File(XCov_filename, mode='r+') as f:

        # feature and covariance matrices for all stars
        X = ps1_isoc_to_XCov(iso, W=mixing_matrix, interpolate=interpolate)

        if 'isochrone' in f and overwrite:
            f.__delitem__('isochrone')
            logger.debug("Overwriting isochrone data")

        if 'isochrone' not in f:
            g = f.create_group('isochrone')
        else:
            g = f['isochrone']

        if 'X' not in f['isochrone']:
            g.create_dataset('X', X.shape, dtype='f', data=X)

        f.flush()
        logger.debug("Saved isochrone to {}".format(XCov_filename))
def _load_kernel_link(ephem, link=''):
    load_kernel = False # a flag for checking if the kernel has been loaded
    # search_list = [link,jpl_kernel_http, jpl_kernel_ftp]
    # NOTE the JPL ftp site is disabled. Instead, we duplicated the JPL ftp
    # site on nanograv server.
    search_list = [link, nanograv_http, jpl_kernel_http, jpl_kernel_ftp]
    if link != '':
        search_list.append('')
    for l in search_list:
        if l == '':
            ephem_link = ephem # Astropy default ephem does not like .bsp at the end.
        else:
            ephem_link = l+"%s.bsp" % ephem
        if load_kernel:
            break
        try:
            log.debug('Trying to set astropy ephemeris to {0}'.format(ephem_link))
            coor.solar_system_ephemeris.set(ephem_link)
            load_kernel = True
        except Exception as ex:
            #log.info('Exception! {0} {1} {2}'.format(type(ex), ex.args, ex))
            try:
                log.debug('Trying to download and set astropy ephemeris to {0}'.format(ephem_link))
                aut.data.download_file(ephem_link, timeout=300, cache=True)
                coor.solar_system_ephemeris.set(ephem_link)
                load_kernel = True
            except Exception as ex2:
                #log.info('Exception2! {0} {1} {2}'.format(type(ex2), ex2.args, ex2))
                load_kernel = False
    return load_kernel
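The kernel loader above walks a list of mirrors and stops at the first one astropy accepts, optionally downloading the file first. A stripped-down sketch of that fallback loop (the URLs are placeholders, not the real kernel mirrors):

def set_first_working(candidates, setter):
    """Try each candidate in order; return True as soon as one works."""
    for candidate in candidates:
        try:
            setter(candidate)
            return True
        except Exception:
            continue
    return False

# e.g. set_first_working(['https://mirror-a/de421.bsp',    # hypothetical URLs
#                         'https://mirror-b/de421.bsp'],
#                        coor.solar_system_ephemeris.set)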
Example #25
def cleanup_git_directory(directory='database/', delete_untracked=False,
                          allow_fail=True):

    uncommitted_files = subprocess.call(['git', 'diff-files', '--quiet'],
                                        cwd=directory)
    if uncommitted_files:
        log.debug("Found uncommitted file changes in {0}.".format(directory))
        reset = subprocess.call(['git', 'reset', '--hard', 'HEAD'],
                                cwd=directory)
        if allow_fail:
            assert reset == 0

        if delete_untracked:
            untracked_deleted = subprocess.call(['git', 'clean', '-f'],
                                                cwd=directory)
            if allow_fail:
                assert untracked_deleted == 0

    uncommited_staged_changes = \
        subprocess.call(['git', 'diff-index', '--quiet', '--cached', 'HEAD'],
                        cwd=directory)
    if uncommited_staged_changes:
        log.debug("Found uncommitted, staged changes in {0}."
                  .format(directory))
        reset = subprocess.call(['git', 'reset', '--hard', 'HEAD'],
                                cwd=directory)
        if allow_fail:
            assert reset == 0
Example #26
def _slow_reader(index_map, data):
    """
    Loop over each valid pixel in the index_map and add its coordinates and
    data to the flux_by_structure and indices_by_structure dicts

    This is slower than _fast_reader but faster than that implementation would
    be without find_objects.  The bottleneck is doing `index_map == idx` N
    times.
    """
    flux_by_structure, indices_by_structure = {}, {}
    # Do a fast iteration through d.data, adding the indices and data values
    # to the two dictionaries declared above:
    indices = np.array(np.where(index_map > -1)).transpose()

    log.debug("Creating index maps for {0} coordinates...".format(len(indices)))
    for coord in ProgressBar(indices):
        coord = tuple(coord)
        idx = index_map[coord]
        if idx in flux_by_structure:
            flux_by_structure[idx].append(data[coord])
            indices_by_structure[idx].append(coord)
        else:
            flux_by_structure[idx] = [data[coord]]
            indices_by_structure[idx] = [coord]

    return flux_by_structure, indices_by_structure
Example #27
    def __init__(self, name, FPorbname, tt2tdb_mode='pint'):
        if FPorbname.startswith('@'):
            # Read multiple orbit files names
            FPlist = []
            fnames = [ll.strip() for ll in open(FPorbname[1:]).readlines()]
            for fn in fnames:
                FPlist.append(load_FPorbit(fn))
            self.FPorb = vstack(FPlist)
            # Make sure full table is sorted
            self.FPorb.sort('MJD_TT')
        else:
            self.FPorb = load_FPorbit(FPorbname)
        # Now build the interpolator here:
        self.X = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['X'])
        self.Y = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Y'])
        self.Z = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Z'])
        self.Vx = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vx'])
        self.Vy = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vy'])
        self.Vz = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vz'])
        super(NICERObs, self).__init__(name=name, tt2tdb_mode=tt2tdb_mode)
        # Print this warning once, mainly for @paulray
        if self.tt2tdb_mode.lower().startswith('pint'):
            log.debug('Using location=None for TT to TDB conversion (pint mode)')
        elif self.tt2tdb_mode.lower().startswith('geo'):
            log.warning('Using location geocenter for TT to TDB conversion')
Example #28
def main(argv=None):
    parser = argparse.ArgumentParser(description="PINT tool for command-line barycentering calculations.")

    parser.add_argument("time",help="MJD (UTC, by default)")
    parser.add_argument("--timescale",default="utc",
        help="Time scale for MJD argument ('utc', 'tt', 'tdb'), default=utc")
    parser.add_argument("--format",
        help="Format for time argument ('mjd' or any astropy.Time format (e.g. 'isot'), see <http://docs.astropy.org/en/stable/time/#time-format>)",
        default="mjd")
    parser.add_argument("--freq",type=float,default=np.inf,
        help="Frequency to use, MHz")
    parser.add_argument("--obs",default="Geocenter",
        help="Observatory code (default = Geocenter)")
    parser.add_argument("--parfile",help="par file to read model from",default=None)
    parser.add_argument("--ra",
        help="RA to use (e.g. '12h22m33.2s' if not read from par file)")
    parser.add_argument("--dec",
        help="Decl. to use (e.g. '19d21m44.2s' if not read from par file)")
    parser.add_argument("--dm",
        help="DM to use (if not read from par file)",type=float,default=0.0)
    parser.add_argument("--ephem",default="DE421",help="Ephemeris to use")
    parser.add_argument("--use_gps",default=False,action='store_true',help="Apply GPS to UTC clock corrections")
    parser.add_argument("--use_bipm",default=False,action='store_true',help="Use TT(BIPM) instead of TT(TAI)")


    args = parser.parse_args(argv)

    if args.format in ("mjd","jd", "unix"):
        # These formats require conversion from string to longdouble first
        fmt = args.format
        # Never allow format == 'mjd' because it fails when scale is 'utc'
        # Change 'mjd' to 'pulsar_mjd' to deal with this.
        if fmt == "mjd":
            fmt = "pulsar_mjd"
        t = Time(np.longdouble(args.time),scale=args.timescale,format=fmt,
            precision=9)
        print(t)
    else:
        t = Time(args.time,scale=args.timescale,format=args.format, precision=9)
    log.debug(t.iso)

    t = toa.TOA(t,freq=args.freq,obs=args.obs)
    # Build TOAs and compute TDBs and positions from ephemeris
    ts = toa.get_TOAs_list([t],ephem=args.ephem, include_bipm=args.use_bipm,
        include_gps=args.use_gps, planets=False)

    if args.parfile is not None:
        m=pint.models.get_model(args.parfile)
    else:
        # Construct model by hand
        m=pint.models.StandardTimingModel
        # Should check if 12:13:14.2 syntax is used and support that as well!
        m.RAJ.quantity = Angle(args.ra)
        m.DECJ.quantity = Angle(args.dec)
        m.DM.quantity = args.dm*u.parsec/u.cm**3

    tdbtimes = m.get_barycentric_toas(ts)

    print("{0:.16f}".format(tdbtimes[0].value))
    return
Example #29
    def get_fitvals(p, plot=False, order=order, second_ratio=second_ratio,
                    outfile=outfile, lock=lock):
        if tuple(p) in fitted_positions:
            return

        log.debug("Fitting position {0}".format(p))
        result = fit_position(p, dendrogram=dendrogram, catalog=catalog,
                              pcube=pcube,
                              plot=False, order=order,
                              second_ratio=second_ratio)

        fitted_positions.append(tuple(p))
        if result is None:
            parvalues.append(None)
            parerrors.append(None)
            if outfile is not None:
                with lock:
                    with open(outfilename, 'a') as outfile:
                        outfile.write("{0}, {1}, {2}, {3}\n".format(p[0], p[1], None, None))
                        outfile.flush()
            return
        else:
            parvalues.append(result.specfit.parinfo.values)
            parerrors.append(result.specfit.parinfo.errors)
            if outfile is not None:
                with lock:
                    with open(outfilename, 'a') as outfile:
                        outfile.write("{0}, {1}, {2}, {3}\n".format(p[0], p[1],
                                                                    result.specfit.parinfo.values,
                                                                    result.specfit.parinfo.errors))
                        outfile.flush()
            return result.specfit.parinfo.values, result.specfit.parinfo.errors
Example #30
def merge_light_catalogue():
    """Merge the light tiled catalogues into one big file."""
    output_filename = os.path.join(constants.DESTINATION,
                                   'concatenated',
                                   'iphas-dr2-light.fits')

    instring = ''
    for lon in np.arange(25, 215+1, constants.STRIPWIDTH):
        for part in ['a', 'b']:
            path = os.path.join(constants.DESTINATION,
                                'concatenated',
                                'light',
                                'iphas-dr2-{0:03d}{1}-light.fits'.format(
                                                                    lon, part))
            instring += 'in={0} '.format(path)

    # Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set
    param = {'stilts': constants.STILTS,
             'in': instring,
             'out': output_filename}

    cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'
    mycmd = cmd.format(**param)
    log.debug(mycmd)
    status = os.system(mycmd)
    log.info('concat: '+str(status))

    return status
Example #31
def to_cache(response, cache_file):
    log.debug("Caching data to {0}".format(cache_file))
    with open(cache_file, "wb") as f:
        pickle.dump(response, f)
Example #32
    def _nowcs_header(self):
        """
        Return a copy of the header with no WCS information attached
        """
        log.debug("Stripping WCS from header")
        return wcs_utils.strip_wcs_from_header(self._header)
Example #33
    def _activate_form(self, response, form_index=0, inputs={}, cache=True,
                       method=None):
        """
        Parameters
        ----------
        method: None or str
            Can be used to override the form-specified method
        """
        # Extract form from response
        root = BeautifulSoup(response.content, 'html5lib')
        form = root.find_all('form')[form_index]
        # Construct base url
        form_action = form.get('action')
        if "://" in form_action:
            url = form_action
        elif form_action.startswith('/'):
            url = '/'.join(response.url.split('/', 3)[:3]) + form_action
        else:
            url = response.url.rsplit('/', 1)[0] + '/' + form_action
        # Identify payload format
        fmt = None
        form_method = form.get('method').lower()
        if form_method == 'get':
            fmt = 'get'  # get(url, params=payload)
        elif form_method == 'post':
            if 'enctype' in form.attrs:
                if form.attrs['enctype'] == 'multipart/form-data':
                    fmt = 'multipart/form-data'  # post(url, files=payload)
                elif form.attrs['enctype'] == 'application/x-www-form-urlencoded':
                    fmt = 'application/x-www-form-urlencoded'  # post(url, data=payload)
            else:
                fmt = 'post'  # post(url, params=payload)
        # Extract payload from form
        payload = []
        for form_elem in form.find_all(['input', 'select', 'textarea']):
            value = None
            is_file = False
            tag_name = form_elem.name
            key = form_elem.get('name')
            if tag_name == 'input':
                is_file = (form_elem.get('type') == 'file')
                value = form_elem.get('value')
                if form_elem.get('type') in ['checkbox','radio']:
                    if form_elem.has_attr('checked'):
                        if not value:
                            value = 'on'
                    else:
                        value = None
            elif tag_name == 'select':
                if form_elem.get('multiple') is not None:
                    value = []
                    for option in form_elem.select('option[value]'):
                        if option.get('selected') is not None:
                            value.append(option.get('value'))
                else:
                    for option in form_elem.select('option[value]'):
                        if option.get('selected') is not None:
                            value = option.get('value')
                    # select the first option field if none is selected
                    if value is None:
                        value = form_elem.select('option[value]')[0].get('value')

            if key in inputs:
                value = str(inputs[key])
            if (key is not None) and (value is not None):
                if fmt == 'multipart/form-data':
                    if is_file:
                        payload.append(
                            (key, ('', '', 'application/octet-stream')))
                    else:
                        if type(value) is list:
                            for v in value:
                                payload.append((key, ('', v)))
                        else:
                            payload.append((key, ('', value)))
                else:
                    if type(value) is list:
                        for v in value:
                            payload.append((key, v))
                    else:
                        payload.append((key, value))

        # for future debugging
        self._payload = payload
        log.debug("Form: payload={0}".format(payload))

        if method is not None:
            fmt = method

        # Send payload
        if fmt == 'get':
            response = self._request("GET", url, params=payload, cache=cache)
        elif fmt == 'post':
            response = self._request("POST", url, params=payload, cache=cache)
        elif fmt == 'multipart/form-data':
            response = self._request("POST", url, files=payload, cache=cache)
        elif fmt == 'application/x-www-form-urlencoded':
            response = self._request("POST", url, data=payload, cache=cache)

        return response
Example #34
    def query_instrument(self, instrument, column_filters={}, columns=[],
                         open_form=False, help=False, cache=True, **kwargs):
        """
        Query instrument specific raw data contained in the ESO archive.

        Parameters
        ----------
        instrument : string
            Name of the instrument to query, one of the names returned by
            `list_instruments()`.
        column_filters : dict
            Constraints applied to the query.
        columns : list of strings
            Columns returned by the query.
        open_form : bool
            If `True`, opens in your default browser the query form
            for the requested instrument.
        help : bool
            If `True`, prints all the parameters accepted in
            `column_filters` and `columns` for the requested `instrument`.
        cache : bool
            Cache the response for faster subsequent retrieval

        Returns
        -------
        table : `~astropy.table.Table`
            A table representing the data available in the archive for the
            specified instrument, matching the constraints specified in
            ``kwargs``. The number of rows returned is capped by the
            ROW_LIMIT configuration item.

        """

        if instrument in ('feros','harps'):
            url = 'http://archive.eso.org/wdb/wdb/eso/repro/form'
        else:
            url = "http://archive.eso.org/wdb/wdb/eso/{0}/form".format(instrument)
        table = None
        if open_form:
            webbrowser.open(url)
        elif help:
            self._print_help(url, instrument)
        else:
            instrument_form = self._request("GET", url, cache=cache)
            query_dict = {}
            query_dict.update(column_filters)
            # TODO: replace this with individually parsed kwargs
            query_dict.update(kwargs)
            query_dict["wdbo"] = "csv/download"

            # Default to returning the DP.ID since it is needed for header acquisition
            query_dict['tab_dp_id'] = (kwargs.pop('tab_dp_id')
                                       if 'tab_dp_id' in kwargs
                                       else 'on')

            for k in columns:
                query_dict["tab_" + k] = True
            if self.ROW_LIMIT >= 0:
                query_dict["max_rows_returned"] = self.ROW_LIMIT
            else:
                query_dict["max_rows_returned"] = 10000
            instrument_response = self._activate_form(instrument_form,
                                                      form_index=0,
                                                      inputs=query_dict, cache=cache)
            content = instrument_response.content
            #First line is always garbage
            content = content.split(b'\n', 1)[1]
            log.debug("Response content:\n{0}".format(content))
            if _check_response(content):
                try:
                    table = Table.read(BytesIO(content), format="ascii.csv", comment='^#')
                except Exception as ex:
                    # astropy 0.3.2 raises an anonymous exception; this is
                    # intended to prevent that from causing real problems
                    if 'No reader defined' in ex.args[0]:
                        table = Table.read(BytesIO(content), format="ascii",
                                           delimiter=',')
                    else:
                        raise ex
                return table
            else:
                warnings.warn("Query returned no results", NoResultsWarning)
Example #35
def _get_timeref(hdu):
    event_hdr = hdu.header

    timeref = event_hdr['TIMEREF']
    log.debug("TIMEREF {0}".format(timeref))
    return timeref
Example #36
def _get_timesys(hdu):
    event_hdr = hdu.header
    timesys = event_hdr['TIMESYS']
    log.debug("TIMESYS {0}".format(timesys))
    return timesys
Example #37
def ammonia(xarr,
            trot=20,
            tex=None,
            ntot=14,
            width=1,
            xoff_v=0.0,
            fortho=0.0,
            tau=None,
            fillingfraction=None,
            return_tau=False,
            background_tb=TCMB,
            verbose=False,
            return_components=False,
            debug=False,
            line_names=line_names):
    """
    Generate a model Ammonia spectrum based on input temperatures, column, and
    gaussian parameters

    Parameters
    ----------
    xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
        Array of wavelength/frequency values
    trot: float
        The rotational temperature of the lines.  This is the excitation
        temperature that governs the relative populations of the rotational
        states.
    tex: float or None
        Excitation temperature. Assumed LTE if unspecified (``None``) or if
        tex>trot.  This is the excitation temperature for *all* of the modeled
        lines, which means we are explicitly assuming T_ex is the same for all
        lines.
    ntot: float
        Total log column density of NH3.  Can be specified as a float in the
        range 5-25
    width: float
        Line width in km/s
    xoff_v: float
        Line offset in km/s
    fortho: float
        Fraction of NH3 molecules in ortho state.  Default assumes all para
        (fortho=0).
    tau: None or float
        If tau (optical depth in the 1-1 line) is specified, ntot is NOT fit
        but is set to a fixed value.  The optical depths of the other lines are
        fixed relative to tau_oneone
    fillingfraction: None or float
        fillingfraction is an arbitrary scaling factor to apply to the model
    return_tau: bool
        Return a dictionary of the optical depths in each line instead of a
        synthetic spectrum
    return_components: bool
        Return a list of arrays, one for each hyperfine component, instead of
        just one array
    background_tb : float
        The background brightness temperature.  Defaults to TCMB.
    verbose: bool
        More messages
    debug: bool
        For debugging.

    Returns
    -------
    spectrum: `numpy.ndarray`
        Synthetic spectrum with same shape as ``xarr``
    component_list: list
        List of `numpy.ndarray`'s, one for each hyperfine component
    tau_dict: dict
        Dictionary of optical depth values for the various lines
        (if ``return_tau`` is set)
    """

    from .ammonia_constants import (ckms, ccms, h, kb, Jortho, Jpara, Brot,
                                    Crot)

    # Convert X-units to frequency in GHz
    if xarr.unit.to_string() != 'GHz':
        xarr = xarr.as_unit('GHz')

    if tex is None:
        log.warning("Assuming tex=trot")
        tex = trot
    elif isinstance(tex, dict):
        for k in tex:
            assert k in line_names, "{0} not in line list".format(k)
        line_names = tex.keys()

    from .ammonia_constants import line_name_indices, line_names as original_line_names

    # recreate line_names keeping only lines with a specified tex
    # using this loop instead of tex.keys() preserves the order & data type
    line_names = [k for k in original_line_names if k in line_names]

    if 5 <= ntot <= 25:
        # allow ntot to be specified as a logarithm.  This is
        # safe because ntot < 1e10 gives a spectrum of all zeros, and the
        # plausible range of columns is not outside the specified range
        lin_ntot = 10**ntot
    else:
        raise ValueError("ntot, the logarithmic total column density,"
                         " must be in the range 5 - 25")

    tau_dict = {}
    """
    Column density is the free parameter.  It is used in conjunction with
    the full partition function to compute the optical depth in each band
    """
    Zpara = (2 * Jpara + 1) * np.exp(-h * (Brot * Jpara * (Jpara + 1) +
                                           (Crot - Brot) * Jpara**2) /
                                     (kb * trot))
    Zortho = 2 * (2 * Jortho + 1) * np.exp(-h * (Brot * Jortho * (Jortho + 1) +
                                                 (Crot - Brot) * Jortho**2) /
                                           (kb * trot))
    Qpara = Zpara.sum()
    Qortho = Zortho.sum()

    log.debug("Partition Function: Q_ortho={0}, Q_para={1}".format(
        Qortho, Qpara))

    for linename in line_names:
        if ortho_dict[linename]:
            # define variable "ortho_or_para_frac" that will be the ortho
            # fraction in the case of an ortho transition or the para
            # fraction for a para transition
            ortho_or_parafrac = fortho
            Z = Zortho
            Qtot = Qortho
        else:
            ortho_or_parafrac = 1.0 - fortho
            Z = Zpara
            Qtot = Qpara

        # for a complete discussion of these equations, please see
        # https://github.com/keflavich/pyspeckit/blob/ammonia_equations/examples/AmmoniaLevelPopulation.ipynb
        # and
        # http://low-sky.github.io/ammoniacolumn/
        # and
        # https://github.com/pyspeckit/pyspeckit/pull/136

        # short variable names for readability
        frq = freq_dict[linename]
        partition = Z[line_name_indices[linename]]
        aval = aval_dict[linename]

        # Total population of the higher energy inversion transition
        population_rotstate = lin_ntot * ortho_or_parafrac * partition / Qtot

        if isinstance(tex, dict):
            expterm = ((1 - np.exp(-h * frq / (kb * tex[linename]))) /
                       (1 + np.exp(-h * frq / (kb * tex[linename]))))
        else:
            expterm = ((1 - np.exp(-h * frq / (kb * tex))) /
                       (1 + np.exp(-h * frq / (kb * tex))))
        fracterm = (ccms**2 * aval / (8 * np.pi * frq**2))
        widthterm = (ckms / (width * frq * (2 * np.pi)**0.5))

        tau_i = population_rotstate * fracterm * expterm * widthterm
        tau_dict[linename] = tau_i

        log.debug("Line {0}: tau={1}, expterm={2}, pop={3},"
                  " partition={4}".format(linename, tau_i, expterm,
                                          population_rotstate, partition))

    # allow tau(11) to be specified instead of ntot
    # in the thin case, this is not needed: ntot plays no role
    # this process allows you to specify tau without using the approximate equations specified
    # above.  It should remove ntot from the calculations anyway...
    if tau is not None:
        tau11_temp = tau_dict['oneone']
        # re-scale all optical depths so that tau is as specified, but the relative taus
        # are set by the kinetic temperature and partition functions
        for linename, t in iteritems(tau_dict):
            tau_dict[linename] = t * tau / tau11_temp

    if return_tau:
        return tau_dict

    model_spectrum = _ammonia_spectrum(xarr,
                                       tex,
                                       tau_dict,
                                       width,
                                       xoff_v,
                                       fortho,
                                       line_names,
                                       background_tb=background_tb,
                                       fillingfraction=fillingfraction,
                                       return_components=return_components)

    if model_spectrum.min() < 0 and background_tb == TCMB:
        raise ValueError("Model dropped below zero.  That is not possible "
                         " normally.  Here are the input values: " +
                         ("tex: {0} ".format(tex)) + ("trot: %f " % trot) +
                         ("ntot: %f " % ntot) + ("width: %f " % width) +
                         ("xoff_v: %f " % xoff_v) + ("fortho: %f " % fortho))

    if verbose or debug:
        log.info("trot: %g  tex: %s  ntot: %g  width: %g  xoff_v: %g  "
                 "fortho: %g  fillingfraction: %g" %
                 (trot, tex, ntot, width, xoff_v, fortho, fillingfraction))

    return model_spectrum
Example #38
    def retrieve_data(self,
                      datasets,
                      continuation=False,
                      destination=None,
                      with_calib='none'):
        """
        Retrieve a list of datasets from the ESO archive.

        Parameters
        ----------
        datasets : list of strings or string
            List of datasets strings to retrieve from the archive.
        destination: string
            Directory where the files are copied.
            Files already found in the destination directory are skipped,
            unless continuation=True.
            Defaults to the astropy cache.
        continuation : bool
            Force the retrieval of data that are present in the destination
            directory.
        with_calib : string
            Retrieve associated calibration files: 'none' (default), 'raw' for
            raw calibrations, or 'processed' for processed calibrations.

        Returns
        -------
        files : list of strings or string
            List of files that have been locally downloaded from the archive.

        Examples
        --------
        >>> dptbl = Eso.query_instrument('apex', pi_coi='ginsburg')
        >>> dpids = [row['DP.ID'] for row in dptbl if 'Map' in row['Object']]
        >>> files = Eso.retrieve_data(dpids)

        """
        calib_options = {
            'none': '',
            'raw': 'CalSelectorRaw2Raw',
            'processed': 'CalSelectorRaw2Master'
        }

        if with_calib not in calib_options:
            raise ValueError("invalid value for 'with_calib', "
                             "it must be 'none', 'raw' or 'processed'")

        if isinstance(datasets, six.string_types):
            return_list = False
            datasets = [datasets]
        else:
            return_list = True
        if not isinstance(datasets, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        # First: Detect datasets already downloaded
        log.info("Detecting already downloaded datasets...")
        datasets_to_download, files = self._check_existing_files(
            datasets, continuation=continuation, destination=destination)

        # Second: Check that the datasets to download are in the archive
        log.info("Checking availability of datasets to download...")
        valid_datasets = [
            self.verify_data_exists(ds) for ds in datasets_to_download
        ]
        if not all(valid_datasets):
            invalid_datasets = [
                ds for ds, v in zip(datasets_to_download, valid_datasets)
                if not v
            ]
            raise ValueError("The following data sets were not found on the "
                             "ESO servers: {0}".format(invalid_datasets))

        # Third: Download the other datasets
        log.info("Downloading datasets...")
        if datasets_to_download:
            if not self.authenticated():
                self.login()
            url = "http://archive.eso.org/cms/eso-data/eso-data-direct-retrieval.html"
            with suspend_cache(self):  # Never cache staging operations
                log.info("Contacting retrieval server...")
                retrieve_data_form = self._request("GET", url, cache=False)
                retrieve_data_form.raise_for_status()
                log.info("Staging request...")
                inputs = {"list_of_datasets": "\n".join(datasets_to_download)}
                data_confirmation_form = self._activate_form(
                    retrieve_data_form,
                    form_index=-1,
                    inputs=inputs,
                    cache=False)

                data_confirmation_form.raise_for_status()

                root = BeautifulSoup(data_confirmation_form.content,
                                     'html5lib')
                login_button = root.select('input[value=LOGIN]')
                if login_button:
                    raise LoginError("Not logged in. "
                                     "You must be logged in to download data.")
                inputs = {}
                if with_calib != 'none':
                    inputs['requestCommand'] = calib_options[with_calib]

                # TODO: There may be another screen for Not Authorized; that
                # should be included too
                # form name is "retrieve"; no id
                data_download_form = self._activate_form(
                    data_confirmation_form,
                    form_index=-1,
                    inputs=inputs,
                    cache=False)
                log.info("Staging form is at {0}".format(
                    data_download_form.url))
                root = BeautifulSoup(data_download_form.content, 'html5lib')
                state = root.select('span[id=requestState]')[0].text
                t0 = time.time()
                while state not in ('COMPLETE', 'ERROR'):
                    time.sleep(2.0)
                    data_download_form = self._request("GET",
                                                       data_download_form.url,
                                                       cache=False)
                    root = BeautifulSoup(data_download_form.content,
                                         'html5lib')
                    state = root.select('span[id=requestState]')[0].text
                    print("{0:20.0f}s elapsed".format(time.time() - t0),
                          end='\r')
                    sys.stdout.flush()
                if state == 'ERROR':
                    raise RemoteServiceError("There was a remote service "
                                             "error; perhaps the requested "
                                             "file could not be found?")

            if with_calib != 'none':
                # when files are requested with calibrations, some javascript is
                # used to display the files, which prevents retrieving the files
                # directly. So instead we retrieve the download script provided
                # in the web page, and use it to extract the list of files.
                # The benefit of this is also that in the download script the
                # list of files is de-duplicated, whereas on the web page the
                # calibration files would be duplicated for each exposure.
                link = root.select('a[href$=/script]')[0]
                if 'downloadRequest' not in link.text:
                    # Make sure that we found the correct link
                    raise RemoteServiceError(
                        "A link was found in the download file for the "
                        "calibrations that is not a downloadRequest link "
                        "and therefore appears invalid.")

                href = link.attrs['href']
                script = self._request("GET", href, cache=False)
                fileLinks = re.findall(
                    r'"(https://dataportal.eso.org/dataPortal/api/requests/.*)"',
                    script.text)

                # urls with api/ require using Basic Authentication, though
                # it's easier for us to reuse the existing requests session (to
                # avoid asking again for a username/password if it is not
                # stored). So we remove api/ from the urls:
                fileLinks = [
                    f.replace(
                        'https://dataportal.eso.org/dataPortal/api/requests',
                        'https://dataportal.eso.org/dataPortal/requests')
                    for f in fileLinks
                ]

                log.info("Detecting already downloaded datasets, "
                         "including calibrations...")
                fileIds = [f.rsplit('/', maxsplit=1)[1] for f in fileLinks]
                filteredIds, files = self._check_existing_files(
                    fileIds,
                    continuation=continuation,
                    destination=destination)

                fileLinks = [
                    f for f, fileId in zip(fileLinks, fileIds)
                    if fileId in filteredIds
                ]
            else:
                fileIds = root.select('input[name=fileId]')
                fileLinks = [
                    "http://dataportal.eso.org/dataPortal" +
                    fileId.attrs['value'].split()[1] for fileId in fileIds
                ]

            log.debug("Files:\n{}".format('\n'.join(fileLinks)))
            for fileLink in fileLinks:
                fileId = fileLink.rsplit('/', maxsplit=1)[1]
                log.info("Downloading file {0}...".format(fileId))
                filename = self._request("GET",
                                         fileLink,
                                         save=True,
                                         continuation=True)

                if filename.endswith(('.gz', '.7z', '.bz2', '.xz')):
                    log.info("Unzipping file {0}...".format(fileId))
                    filename = system_tools.gunzip(filename)

                if destination is not None:
                    log.info("Copying file {0} to {1}...".format(
                        fileId, destination))
                    destfile = os.path.join(destination,
                                            os.path.basename(filename))
                    shutil.move(filename, destfile)
                    files.append(destfile)
                else:
                    files.append(filename)

        # Empty the redirect cache of this request session
        # Only available and needed for requests versions < 2.17
        try:
            self._session.redirect_cache.clear()
        except AttributeError:
            pass
        log.info("Done!")
        if (not return_list) and (len(files) == 1):
            files = files[0]
        return files
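
A slightly fuller usage sketch than the docstring example; the dataset ID, username, and destination path below are placeholders, and ``login()`` is only needed for proprietary data:

from astroquery.eso import Eso

eso = Eso()
eso.login("my_username")  # placeholder account name; only needed for proprietary data
files = eso.retrieve_data(['ADP.2016-07-29T01:01:01.001'],  # hypothetical dataset ID
                          with_calib='raw',                 # also fetch raw calibrations
                          destination='/tmp/eso_data')      # copy here instead of the cache
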
    def query_surveys(self,
                      surveys='',
                      cache=True,
                      help=False,
                      open_form=False,
                      **kwargs):
        """
        Query survey Phase 3 data contained in the ESO archive.

        Parameters
        ----------
        surveys : string or list
            Name of the survey(s) to query.  Should be one or more of the
            names returned by `~astroquery.eso.EsoClass.list_surveys`.  If
            specified as a string, should be a comma-separated list of
            survey names.
        cache : bool
            Cache the response for faster subsequent retrieval

        Returns
        -------
        table : `~astropy.table.Table` or `None`
            A table representing the data available in the archive for the
            specified survey, matching the constraints specified in ``kwargs``.
            The number of rows returned is capped by the ROW_LIMIT
            configuration item. `None` is returned when the query has no
            results.

        """

        url = "http://archive.eso.org/wdb/wdb/adp/phase3_main/form"
        if open_form:
            webbrowser.open(url)
        elif help:
            self._print_surveys_help(url, cache=cache)
        else:
            survey_form = self._request("GET", url, cache=cache)
            query_dict = kwargs
            query_dict["wdbo"] = "csv/download"
            if isinstance(surveys, six.string_types):
                surveys = surveys.split(",")
            query_dict['collection_name'] = surveys
            if self.ROW_LIMIT >= 0:
                query_dict["max_rows_returned"] = int(self.ROW_LIMIT)
            else:
                query_dict["max_rows_returned"] = 10000

            survey_response = self._activate_form(survey_form,
                                                  form_index=0,
                                                  form_id='queryform',
                                                  inputs=query_dict,
                                                  cache=cache)

            content = survey_response.content
            # First line is always garbage
            content = content.split(b'\n', 1)[1]
            log.debug("Response content:\n{0}".format(content))
            if _check_response(content):
                table = Table.read(BytesIO(content),
                                   format="ascii.csv",
                                   comment="^#")
                return table
            else:
                warnings.warn("Query returned no results", NoResultsWarning)
Exemple #40
0
    def query_instrument(self,
                         instrument,
                         column_filters={},
                         columns=[],
                         open_form=False,
                         help=False,
                         cache=True,
                         **kwargs):
        """
        Query instrument specific raw data contained in the ESO archive.

        Parameters
        ----------
        instrument : string
            Name of the instrument to query, one of the names returned by
            `~astroquery.eso.EsoClass.list_instruments`.
        column_filters : dict
            Constraints applied to the query.
        columns : list of strings
            Columns returned by the query.
        open_form : bool
            If `True`, opens in your default browser the query form
            for the requested instrument.
        help : bool
            If `True`, prints all the parameters accepted in
            ``column_filters`` and ``columns`` for the requested
            ``instrument``.
        cache : bool
            Cache the response for faster subsequent retrieval.

        Returns
        -------
        table : `~astropy.table.Table`
            A table representing the data available in the archive for the
            specified instrument, matching the constraints specified in
            ``kwargs``. The number of rows returned is capped by the
            ROW_LIMIT configuration item.

        """

        if instrument in ('feros', 'harps', 'grond'):
            url = 'http://archive.eso.org/wdb/wdb/eso/eso_archive_main/form'
        else:
            url = ("http://archive.eso.org/wdb/wdb/eso/{0}/form".format(
                instrument))
        table = None
        if open_form:
            webbrowser.open(url)
        elif help:
            self._print_instrument_help(url, instrument)
        else:
            instrument_form = self._request("GET", url, cache=cache)
            query_dict = {}
            query_dict.update(column_filters)
            # TODO: replace this with individually parsed kwargs
            query_dict.update(kwargs)
            query_dict["wdbo"] = "csv/download"

            # Default to returning the DP.ID since it is needed for header
            # acquisition
            query_dict['tab_dp_id'] = kwargs.pop('tab_dp_id', 'on')

            if instrument in ('feros', 'harps', 'grond'):
                query_dict['instrument'] = instrument.upper()

            for k in columns:
                query_dict["tab_" + k] = True
            if self.ROW_LIMIT >= 0:
                query_dict["max_rows_returned"] = self.ROW_LIMIT
            else:
                query_dict["max_rows_returned"] = 10000
            # used to be form 0, but now there's a new 'logout' form at the top
            # (form_index = -1 and 0 both work now that form_id is included)
            instrument_response = self._activate_form(instrument_form,
                                                      form_index=-1,
                                                      form_id='queryform',
                                                      inputs=query_dict,
                                                      cache=cache)

            content = instrument_response.content
            # First line is always garbage
            content = content.split(b'\n', 1)[1]
            log.debug("Response content:\n{0}".format(content))
            if _check_response(content):
                table = Table.read(BytesIO(content),
                                   format="ascii.csv",
                                   comment='^#')
                return table
            else:
                warnings.warn("Query returned no results", NoResultsWarning)
Exemple #41
0
def fits_key_arithmetic(meta,
                        operand1,
                        operation,
                        operand2,
                        keylist=None,
                        handle_image=None):
    """Apply arithmetic to FITS keywords

    meta : ordered_dict

        FITS header of operand1 *after* processing by other arithmetic
        operations.  Sensible use of this feature requires
        ``handle_meta`` to be set to 'first_found' or callable that
        returns a FITS header  

    operand1 : `NDData`-like instance
        Generally the self of the calling object

    operation : callable
            The operation that is performed on the `NDData`. Supported are
            `numpy.add`, `numpy.subtract`, `numpy.multiply` and
            `numpy.true_divide`.

    operand2 : `NDData`-like instance
        Generally the self of the calling object

    keylist : list

        List of FITS keywords to apply ``operation`` to.  Each keyword
        value stands in the place of ``operand1`` and a new keyword
        value is calculated using the ``operation`` and ``operand2.``
        If ``operand2`` is an image, ``handle_image`` will be called
        to convert it to a scalar or ``None`` (see ``handle_image``)

    handle_image : callable

        Called with arguments of fits_key_arithmetic (minus
        ``handle_image``) when ``operand2`` is an image.  Return value
        of ``None`` signifies application of ``operation`` would
        nullify keywords in ``keylist,`` which are then removed.  If
        transformation of ``operand2`` into a scalar is possible

    """
    if meta is None or keylist is None:
        return meta
    # Get a list of non-None values for our keylist
    kvlist = [
        kv for kv in [(k, meta.get(k)) for k in keylist] if kv[1] is not None
    ]
    if not kvlist:
        return meta
    dimso2 = sum(list(operand2.shape))
    if dimso2 == 0:
        # Scalar
        o2 = operand2.data
    else:
        if handle_image is None:
            o2 = None
        else:
            o2 = handle_image(meta,
                              operand1,
                              operation,
                              operand2,
                              keylist=keylist)
    for k, v in kvlist:
        if o2 is None:
            del meta[k]
            log.debug(
                f'Cannot express operand2 as single number, deleting {k}')
        else:
            try:
                unit = meta.cards(k).unit
            except Exception:
                unit = operand1.unit

            # Do the calculation with or without units
            if operand1.unit is None and operand2.unit is None:
                v = operation(v, o2)
            elif operand1.unit is None:
                v = operation(v * u.dimensionless_unscaled, o2 * operand2.unit)
            elif operand2.unit is None:
                v = operation(v * operand1.unit, o2 * u.dimensionless_unscaled)
            else:
                v = operation(v * operand1.unit, o2 * operand2.unit)

            try:
                meta[k] = v
            except Exception:
                meta[k] = v.value
    return meta
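
A minimal, self-contained sketch of how ``fits_key_arithmetic`` behaves, using a bare-bones stand-in operand class (the real operands would be CCDData/NDData objects); the keyword name and values are illustrative only:

import numpy as np


class _FakeOperand:
    """Stand-in exposing only the attributes fits_key_arithmetic touches."""
    def __init__(self, data, unit=None):
        self.data = np.asarray(data)
        self.shape = self.data.shape
        self.unit = unit


meta = {'EXPTIME': 10.0}              # keyword that should track the arithmetic
op1 = _FakeOperand(np.ones((5, 5)))   # "image" being divided (unitless here)
op2 = _FakeOperand(2.0)               # scalar divisor; shape == () selects the scalar branch
meta = fits_key_arithmetic(meta, op1, np.true_divide, op2, keylist=['EXPTIME'])
print(meta['EXPTIME'])                # -> 5.0
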
Exemple #42
0
def make_spw_cube(spw='spw{0}', spwnum=0, fntemplate='OrionSourceI',
                  overwrite_existing=False, bmaj_limits=None,
                  fnsuffix="", filesuffix='image.pbcor.fits',
                  first_endchannel='*',
                  cropends=False,
                  minimize=True,
                  debug_mode=False,
                  check_last_plane=False,
                  add_beam_info=True):
    """
    Parameters
    ----------
    spw : str
        String template for the input/output name
    spwnum : int
        The spectral window number
    fntemplate : str
        Filename template (goes into the glob)
    overwrite_existing : bool
        Overwrite data in the output cube?
    cropends : bool or int
        Number of pixels to crop off the ends of an image
    minimize : bool
        Compute the spatial minimal subcube before building the cube?  Slices
        for all subsequent cubes will be computed from the first cube.
    """
    if debug_mode:
        lvl = log.getEffectiveLevel()
        log.setLevel('DEBUG')

    spw = spw.format(spwnum)

    big_filename = '{1}_{0}{2}_lines.fits'.format(spw, fntemplate, fnsuffix)

    header_fn = glob.glob('OrionSourceI.B7.{0}.lines0-{4}.maskedclarkclean1000.{3}'
                          .format(spw, fntemplate, fnsuffix, filesuffix,
                                  first_endchannel))
    if len(header_fn) != 1:
        raise ValueError("Found too many or too few matches: {0}".format(header_fn))
    else:
        header_fn = header_fn[0]

    # First set up an empty file
    if not os.path.exists(big_filename):
        log.info("Creating large cube based on header {0}".format(header_fn))

        if minimize:
            cube0 = SpectralCube.read(header_fn)
            slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                    spatial_only=True)
            # use the calculated 3rd dimension, plus the difference of the
            # x and y slices
            #header['NAXIS2'] = slices[1].stop-slices[1].start
            #header['NAXIS1'] = slices[2].stop-slices[2].start
            header = cube0[slices].header
        else:
            header = fits.getheader(header_fn)

        # Make an arbitrary, small data array before prepping the header
        data = np.zeros((100, 100), dtype=np.float32)
        hdu = fits.PrimaryHDU(data=data, header=header)
        cdelt_sign = np.sign(hdu.header['CDELT3'])
        # Set the appropriate output size (this can be extracted from the LISTOBS)
        naxis3_in = header['NAXIS3']
        header['NAXIS3'] = nchans_total[spwnum]
        header_wcs = wcs.WCS(fits.getheader(header_fn))
        header_specwcs = header_wcs.sub([wcs.WCSSUB_SPECTRAL])
        if cdelt_sign == -1:
            ind0, ind1 = getinds(header_fn)
            #5/20/2017: redoing some of this, and the text below is frightening but no longer relevant
            # a +1 was on the next line before an edit on 4/10/2017
            # it may have been rendered irrelevant when I included +1
            # channel in each cube?  Not clear - the arithmetic no longer
            # makes sense but is empirically necessary.
            assert ind0 == 0

            # these reindex the cube so that it has an increasing cdelt.
            header['CRPIX3'] = 1 #nchans_total[spwnum]
            header['CRVAL3'] = header_specwcs.wcs_pix2world([nchans_total[spwnum]],1)[0][0]
            header['CDELT3'] = np.abs(header_specwcs.wcs.cdelt[0])

            # ensure that the new CRVAL evaluated at its own position matches
            # the CRVAL3.  This should be impossible to fail unless WCS itself
            # fails
            newheaderspecwcs = wcs.WCS(header).sub([wcs.WCSSUB_SPECTRAL])
            crval3 = newheaderspecwcs.wcs_pix2world([header['CRPIX3']], 1)[0][0]
            np.testing.assert_array_almost_equal_nulp(crval3, header['CRVAL3'])


        shape = (header['NAXIS3'], header['NAXIS2'], header['NAXIS1'])



        # Write to disk
        header.tofile(big_filename)
        # Using the 'append' io method, update the *header*
        with open(big_filename, 'rb+') as fobj:
            # Seek past the length of the header, plus the length of the
            # data we want to write.
            # The -1 is to account for the final byte that we are about to
            # write:
            # 'seek' works on bytes, so divide #bits / (bytes/bit)
            fobj.seek(len(header.tostring()) + (shape[0] *
                                                shape[1] *
                                                shape[2] *
                                                int(np.abs(header['BITPIX'])/8)) -
                      1)
            fobj.write(b'\0')

        big_cube = SpectralCube.read(big_filename)
        header_cube = SpectralCube.read(header_fn)
        # in both cases, SpectralCube sorts the extrema
        if cdelt_sign == 1:
            np.testing.assert_array_almost_equal_nulp(big_cube.spectral_extrema[0].value,
                                                      header_cube.spectral_extrema[0].value)
            np.testing.assert_array_almost_equal_nulp(big_cube.wcs.wcs.cdelt,
                                                      header_cube.wcs.wcs.cdelt)
        elif cdelt_sign == -1:
            np.testing.assert_array_almost_equal_nulp(big_cube.spectral_extrema[1].value,
                                                      header_cube.spectral_extrema[1].value)
            np.testing.assert_array_almost_equal_nulp(big_cube.wcs.wcs.cdelt[-1]*-1,
                                                      header_cube.wcs.wcs.cdelt[-1])

        log.info("Cube creation completed.  Now moving on to populating it.")


    # Find the appropriate files (this is NOT a good way to do this!  Better to
    # provide a list.  But wildcards are quick & easy...)
    fileglob = "OrionSourceI.B7.{0}.lines*{3}".format(spw, fntemplate, fnsuffix,
                                                  filesuffix)
    files = glob.glob(fileglob)
    log.info("Files to be merged with glob {0}: ".format(fileglob))
    log.info(str(files))

    # open the file in update mode (it should have the right dims now)
    hdul = fits.open(big_filename, mode='update')
    main_wcs = wcs.WCS(hdul[0].header).sub([wcs.WCSSUB_SPECTRAL])

    if add_beam_info:
        shape = hdul[0].data.shape[0]
        if len(hdul) > 1 and isinstance(hdul[1], fits.BinTableHDU):
            pass
        else:
            hdul.append(fits.BinTableHDU(np.recarray(shape,
                                                     names=['BMAJ','BMIN','BPA','CHAN','POL'],
                                                     formats=['f4','f4','f4','i4','i4'])))

    # sorted so that we deal with zero first, since it has potential to be a problem.
    for fn in ProgressBar(sorted(files)):
        log.info("inds={0} fn={1}".format(getinds(fn), fn))
        ind0,ind1 = getinds(fn)

        # this is not correct...?
        # or maybe it only applies if cropends is set....
        # if ind0 == 0:
        #     ind1 = ind1 + 1

        cdelt = fits.getheader(fn)['CDELT3']
        if 'cdelt_sign' not in locals():
            cdelt_sign = np.sign(cdelt)
            log.warn("cdelt_sign was not defined: overwriting a"
                     " previously-existing file.  "
                     "This may not be what you want; the data could be going "
                     "opposite the parent cube.  Check that the original "
                     "header is OK. sign(CDELT) is now {0}, "
                     "while for the big header it is {1}"
                     .format(cdelt_sign,
                             np.sign(fits.getheader(big_filename)['CDELT3'])))

        if cropends:
            # don't crop 1st or last pixel in full cube
            if ind0 > 0:
                log.debug("ind0 going from {0} to {1}".format(ind0,ind0+cropends))
                ind0 = ind0 + cropends
                if cdelt_sign == 1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
                else:
                    dataind1 = -cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
            else:
                if cdelt_sign == 1:
                    dataind0 = 0
                    log.debug("dataind0 going to {0}".format(0))
                elif cdelt_sign == -1:
                    log.debug("dataind1 going to {0}".format(None))
                    dataind1 = None

            if (ind1 < nchans_total[spwnum] - 1):
                log.debug("ind1 going from {0} to {1}".format(ind1,ind1-cropends))
                ind1 = ind1 - cropends
                if cdelt_sign == 1:
                    dataind1 = - cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
                elif cdelt_sign == -1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
            else:
                if cdelt_sign == 1:
                    dataind1 = None
                else:
                    log.debug("dataind0 going to {0}".format(0))
                    dataind0 = 0
        else:
            dataind0 = 0
            dataind1 = None

        if cdelt_sign == -1:
            log.debug("Reversing indices from {0} {1} to ".format(ind0,ind1))
            ind1, ind0 = (nchans_total[spwnum] - ind0,
                          nchans_total[spwnum] - ind1)
            log.debug("{0} {1}".format(ind0, ind1))
            if ind0 < 0:
                ind0 = 0

        log.info("inds have been remapped to {0}, {1}".format(ind0, ind1))


        plane = hdul[0].data[ind0]
        lastplane = hdul[0].data[ind1-1]
        if np.all(plane == 0) or overwrite_existing or (check_last_plane and np.all(lastplane==0)):
            log.info("Replacing indices {0}->{2} {1}"
                     .format(getinds(fn), fn, (ind0,ind1)))

            data = fits.getdata(fn)
            dwcs = wcs.WCS(fits.getheader(fn)).sub([wcs.WCSSUB_SPECTRAL])

            dataind1 = data.shape[0]+(dataind1 or 0)

            # handle the case where I made the indices NOT match the cube...
            # this is really stupid and should be removed because I should have
            # made the input cubes correct.  Oh well.
            if np.abs(ind1 - ind0) < np.abs(dataind1 - dataind0):
                dataind1 = dataind0 + np.abs(ind1-ind0)

            if cdelt_sign == -1:
                dataind0, dataind1 = dataind1, dataind0
                dwcs0 = dwcs.wcs_pix2world([dataind0-1], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1], 0)[0][0]
            else:
                dwcs0 = dwcs.wcs_pix2world([dataind0], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1-1], 0)[0][0]
            hwcs0 = main_wcs.wcs_pix2world([ind0], 0)[0][0]
            hwcs1 = main_wcs.wcs_pix2world([ind1-1], 0)[0][0]
            
            if not np.isclose(hwcs0, dwcs0, atol=0.5*np.abs(cdelt), rtol=0):
                log.error("current data, big cube indices: {0},{1} and {2},{3}"
                          .format(dataind0,dataind1,ind0,ind1))
                raise ValueError("World coordinates of first pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                                 .format(dwcs0,hwcs0,dwcs0-hwcs0,(dwcs0-hwcs0)/cdelt))
            if not np.isclose(hwcs1, dwcs1, atol=0.5*np.abs(cdelt), rtol=0):
                log.error("current data, big cube indices: {0},{1} and {2},{3}"
                          .format(dataind0,dataind1,ind0,ind1))
                raise ValueError("World coordinates of last pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                                 .format(dwcs1,hwcs1,dwcs1-hwcs1,(dwcs1-hwcs1)/cdelt))

            if 'slices' not in locals():
                if minimize:
                    log.info("Determining slices")
                    cube0 = SpectralCube.read(header_fn)
                    slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                            spatial_only=True)
                    log.info("Slices are {0}".format(slices))
                else:
                    slices = (slice(None),)*3


            if bmaj_limits is not None:
                log.info("Identifying acceptable beams")
                beamtable = fits.open(fn)[1]
                ok_beam = ((beamtable.data['BMAJ'] > bmaj_limits[0]) &
                           (beamtable.data['BMAJ'] < bmaj_limits[1]))
                data[~ok_beam] = np.nan
                log.info("Found {0} bad beams of {1}".format((~ok_beam).sum(),
                                                             ok_beam.size))

            if cdelt_sign == -1:
                if dataind1 == 0:
                    dataslice = slice(dataind0-1, None, -1)
                elif dataind1 >= 1:
                    dataslice = slice(dataind0-1, dataind1-1, -1)
                else:
                    raise ValueError("Something is wrong with dataind0")
            else:
                dataslice = slice(dataind0, dataind1, 1)
            log.info("Dataslice is {0}".format(dataslice))

            assert hdul[0].data[ind0:ind1].shape == data[dataslice, slices[1], slices[2]].shape

            if not debug_mode:
                if add_beam_info:
                    log.info("Adding beam information")
                    beamtable = fits.open(fn)[1]
                    hdul[1].data[ind0:ind1] = beamtable.data[dataslice]


                log.info("Inserting data")
                hdul[0].data[ind0:ind1,:,:] = data[dataslice, slices[1], slices[2]]
                log.info("Flushing")
                hdul.flush()
                log.info("Done with iteration for {0}".format(fn))

    if debug_mode:
        log.setLevel(lvl)
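
A hedged invocation sketch; it assumes the module-level helpers the function relies on (``getinds``, the ``nchans_total`` dict) and the per-channel sub-cubes on disk are already in place:

# Hypothetical call: assemble the full spw3 cube from the OrionSourceI B7 sub-cubes.
make_spw_cube(spw='spw{0}', spwnum=3,
              fntemplate='OrionSourceI',
              cropends=1,          # trim one edge channel from each sub-cube
              minimize=True,       # build on the minimal spatial subcube
              add_beam_info=True)  # carry the per-channel beams into a BinTableHDU
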
Exemple #43
0
    def stage_data(self, uids, expand_tarfiles=False, return_json=False):
        """
        Obtain table of ALMA files

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'
        expand_tarfiles : bool
            Expand the tarfiles to obtain lists of all contained files.  If
            this is specified, the parent tarfile will *not* be included
        return_json : bool
            Return a list of the JSON data sets returned from the query.  This
            is primarily intended as a debug routine, but may be useful if there
            are unusual scheduling block layouts.

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """

        dataarchive_url = self._get_dataarchive_url()

        # allow for the uid to be specified as single entry
        if isinstance(uids, str):
            uids = [uids]

        tables = []
        for uu in uids:
            log.debug("Retrieving metadata for {0}".format(uu))
            uid = clean_uid(uu)
            req = self._request(
                'GET',
                '{dataarchive_url}/rh/data/expand/{uid}'.format(
                    dataarchive_url=dataarchive_url, uid=uid),
                cache=False)
            req.raise_for_status()
            try:
                jdata = req.json()
            # Note this exception does not work in Python 2.7
            except json.JSONDecodeError:
                if 'Central Authentication Service' in req.text or 'recentRequests' in req.url:
                    # this indicates a wrong server is being used;
                    # the "pre-feb2020" stager will be phased out
                    # when the new services are deployed
                    raise RemoteServiceError(
                        "Failed query!  This shouldn't happen - please "
                        "report the issue as it may indicate a change in "
                        "the ALMA servers.")
                else:
                    raise

            if return_json:
                tables.append(jdata)
            else:
                if jdata['type'] != 'PROJECT':
                    log.error(
                        "Skipped uid {uu} because it is not a project and"
                        "lacks the appropriate metadata; it is a "
                        "{jdata}".format(uu=uu, jdata=jdata['type']))
                    continue
                if expand_tarfiles:
                    table = uid_json_to_table(
                        jdata, productlist=['ASDM', 'PIPELINE_PRODUCT'])
                else:
                    table = uid_json_to_table(jdata,
                                              productlist=[
                                                  'ASDM', 'PIPELINE_PRODUCT',
                                                  'PIPELINE_PRODUCT_TARFILE',
                                                  'PIPELINE_AUXILIARY_TARFILE'
                                              ])
                table['sizeInBytes'].unit = u.B
                table.rename_column('sizeInBytes', 'size')
                table.add_column(
                    Column(data=[
                        '{dataarchive_url}/dataPortal/{name}'.format(
                            dataarchive_url=dataarchive_url, name=name)
                        for name in table['name']
                    ],
                           name='URL'))

                isp = self.is_proprietary(uid)
                table.add_column(
                    Column(data=[isp for row in table], name='isProprietary'))

                tables.append(table)
                log.debug("Completed metadata retrieval for {0}".format(uu))

        if len(tables) == 0:
            raise ValueError("No valid UIDs supplied.")

        if return_json:
            return tables

        table = table_vstack(tables)

        return table
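
A hedged usage sketch; the UID below is the placeholder form shown in the docstring, not a guaranteed public dataset:

from astroquery.alma import Alma

alma = Alma()
staged = alma.stage_data('uid://A002/X391d0b/X7b', expand_tarfiles=False)
print(staged['URL', 'size'])   # columns added/renamed by the method above
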
Exemple #44
0
def main(data_file, hdf5_key, cache_filename):

    # Read the orbital parameters!
    samples_filename = "{}.h5".format(os.path.splitext(cache_filename)[0])
    _path,_basename = os.path.split(samples_filename)
    if not _path:
        samples_filename = os.path.join(paths.root, "cache", samples_filename)
    logger.debug("Reading orbital parameter samples from '{}'".format(samples_filename))

    name = os.path.splitext(_basename)[0]
    plot_path = os.path.join(paths.plots, name)
    logger.info("Saving plots to: {}".format(plot_path))
    os.makedirs(plot_path, exist_ok=True)

    with h5py.File(samples_filename, 'r') as g:
        jitter = g.attrs['jitter_m/s']*u.m/u.s
        P_min = g.attrs['P_min_day']*u.day
        P_max = g.attrs['P_max_day']*u.day

        # read the orbital parameters
        opars = OrbitalParams.from_hdf5(g)

    # ------------------------------------------------------------------------
    # Read the data
    full_path = os.path.abspath(data_file)
    logger.debug("Reading data from input file at '{}'".format(full_path))
    with h5py.File(full_path, 'r') as f:
        if hdf5_key is not None:
            g = f[hdf5_key]
        else:
            g = f

        bmjd = g['mjd'][:]
        rv = quantity_from_hdf5(g, 'rv')
        rv_err = quantity_from_hdf5(g, 'rv_err')
    data = RVData(bmjd, rv, stddev=rv_err)
    data_jitter = RVData(bmjd, rv, stddev=np.sqrt(rv_err**2 + jitter**2))

    # ------------------------------------------------------------------------
    # plot RV curves
    dmjd = bmjd.max() - bmjd.min()
    t_grid = np.linspace(bmjd.min()-0.1*dmjd, bmjd.max()+0.1*dmjd, 1024)
    fig,ax = plt.subplots(1,1,figsize=(15,5))

    # HACK: where to set the units to plot in?
    rv_unit = u.km/u.s

    # UNDER-plot the data with jitter error-bars
    data_jitter.plot(ax=ax, rv_unit=rv_unit,
                     marker=None, ecolor='#de2d26', alpha=0.5)

    # HACK: where to set the number of lines to plot?
    plot_rv_curves(opars[:512], t_grid, rv_unit=rv_unit,
                   data=data, ax=ax)

    ax.set_title(name)
    fig.tight_layout()
    fig.savefig(os.path.join(plot_path, 'rv-curves.png'), dpi=300)

    # ------------------------------------------------------------------------
    # Corner plot!
    fig = plot_corner(opars, alpha=0.1) # TODO: kwargs??
    fig.suptitle(name, fontsize=26)
    fig.savefig(os.path.join(plot_path, 'corner.png'), dpi=300)
    def __init__(self, tbackground=2.73, gridsize=[250., 101., 100.]):
        t0 = time.time()
        self.texgrid303 = texgrid303 = fits.getdata(
            gpath('fjdu_pH2CO_303_tex_5kms.fits'))
        self.taugrid303 = taugrid303 = fits.getdata(
            gpath('fjdu_pH2CO_303_tau_5kms.fits'))
        self.texgrid321 = texgrid321 = fits.getdata(
            gpath('fjdu_pH2CO_321_tex_5kms.fits'))
        self.taugrid321 = taugrid321 = fits.getdata(
            gpath('fjdu_pH2CO_321_tau_5kms.fits'))
        self.texgrid322 = texgrid322 = fits.getdata(
            gpath('fjdu_pH2CO_322_tex_5kms.fits'))
        self.taugrid322 = taugrid322 = fits.getdata(
            gpath('fjdu_pH2CO_322_tau_5kms.fits'))
        self.texgrid404 = texgrid404 = fits.getdata(
            gpath('fjdu_pH2CO_404_tex_5kms.fits'))
        self.taugrid404 = taugrid404 = fits.getdata(
            gpath('fjdu_pH2CO_404_tau_5kms.fits'))
        self.texgrid422 = texgrid422 = fits.getdata(
            gpath('fjdu_pH2CO_422_tex_5kms.fits'))
        self.taugrid422 = taugrid422 = fits.getdata(
            gpath('fjdu_pH2CO_422_tau_5kms.fits'))
        self.texgrid423 = texgrid423 = fits.getdata(
            gpath('fjdu_pH2CO_423_tex_5kms.fits'))
        self.taugrid423 = taugrid423 = fits.getdata(
            gpath('fjdu_pH2CO_423_tau_5kms.fits'))
        self.hdr = hdr = hdrb = fits.getheader(
            gpath('fjdu_pH2CO_303_tex_5kms.fits'))

        t1 = time.time()
        log.debug("Loading grids took {0:0.1f} seconds".format(t1 - t0))

        self.Tbackground = tbackground
        self.tline303a = ((1.0 - np.exp(-np.array(self.taugrid303))) *
                          (self.texgrid303 - self.Tbackground))
        self.tline321a = ((1.0 - np.exp(-np.array(self.taugrid321))) *
                          (self.texgrid321 - self.Tbackground))
        self.tline322a = ((1.0 - np.exp(-np.array(self.taugrid322))) *
                          (self.texgrid322 - self.Tbackground))
        self.tline404a = ((1.0 - np.exp(-np.array(self.taugrid404))) *
                          (self.texgrid404 - self.Tbackground))
        self.tline423a = ((1.0 - np.exp(-np.array(self.taugrid423))) *
                          (self.texgrid423 - self.Tbackground))
        self.tline422a = ((1.0 - np.exp(-np.array(self.taugrid422))) *
                          (self.texgrid422 - self.Tbackground))

        zinds, yinds, xinds = np.indices(self.tline303a.shape)
        upsample_factor = np.array(
            [
                gridsize[0] / self.tline303a.shape[0],  # temperature
                gridsize[1] / self.tline303a.shape[1],  # density
                gridsize[2] / self.tline303a.shape[2]
            ],  # column
            dtype='float')
        uzinds, uyinds, uxinds = upsinds = np.indices(
            [int(round(x * us)) for x, us in zip(self.tline303a.shape, upsample_factor)],
            dtype='float')
        self.tline303 = map_coordinates(self.tline303a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')
        self.tline321 = map_coordinates(self.tline321a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')
        self.tline322 = map_coordinates(self.tline322a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')
        self.tline404 = map_coordinates(self.tline404a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')
        self.tline422 = map_coordinates(self.tline422a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')
        self.tline423 = map_coordinates(self.tline423a,
                                        upsinds /
                                        upsample_factor[:, None, None, None],
                                        mode='nearest')

        self.tline = {
            303: self.tline303,
            321: self.tline321,
            322: self.tline322,
            422: self.tline422,
            423: self.tline423,
            404: self.tline404,
        }

        assert self.hdr['CTYPE2'].strip() == 'LOG-DENS'
        assert self.hdr['CTYPE1'].strip() == 'LOG-COLU'

        self.columnarr = (
            (uxinds + self.hdr['CRPIX1'] - 1) * self.hdr['CDELT1'] /
            float(upsample_factor[2]) + self.hdr['CRVAL1'])  # log column
        self.densityarr = (
            (uyinds + self.hdr['CRPIX2'] - 1) * self.hdr['CDELT2'] /
            float(upsample_factor[1]) + self.hdr['CRVAL2'])  # log density
        self.temparr = (
            (uzinds + self.hdr['CRPIX3'] - 1) * self.hdr['CDELT3'] /
            float(upsample_factor[0]) + self.hdr['CRVAL3'])  # lin temperature
        self.drange = [self.densityarr.min(), self.densityarr.max()]
        self.crange = [self.columnarr.min(), self.columnarr.max()]
        self.trange = [self.temparr.min(), self.temparr.max()]
        self.darr = self.densityarr[0, :, 0]
        self.carr = self.columnarr[0, 0, :]
        self.tarr = self.temparr[:, 0, 0]
        self.axes = {'dens': self.darr, 'col': self.carr, 'tem': self.tarr}
        self.labels = {
            'dens': r'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]',
            'col': 'p-H$_2$CO [log cm$^{-2}$/(km s$^{-1}$ pc)]',
            'tem': 'Temperature (K)'
        }

        # While the individual lines are subject to filling factor uncertainties, the
        # ratio is not.
        self.modelratio1 = self.tline321 / self.tline303
        self.modelratio2 = self.tline322 / self.tline321
        self.modelratio_423_404 = self.tline423 / self.tline404
        self.modelratio_422_404 = self.tline422 / self.tline404
        self.modelratio_404_303 = self.tline404 / self.tline303

        self.model_logabundance = np.log10(10**self.columnarr / u.pc.to(u.cm) /
                                           10**self.densityarr)

        t2 = time.time()
        log.debug("Grid initialization took {0:0.1f} seconds total,"
                  " {1:0.1f} since loading grids.".format(t2 - t0, t2 - t1))
Exemple #46
0
    def download_files(self,
                       files,
                       savedir=None,
                       cache=True,
                       continuation=True,
                       skip_unauthorized=True):
        """
        Given a list of file URLs, download them

        Note: Given a list with repeated URLs, each will only be downloaded
        once, so the return may have a different length than the input list

        Parameters
        ----------
        files : list
            List of URLs to download
        savedir : None or str
            The directory to save to.  Default is the cache location.
        cache : bool
            Cache the download?
        continuation : bool
            Attempt to continue where the download left off (if it was broken)
        skip_unauthorized : bool
            If you receive "unauthorized" responses for some of the download
            requests, skip over them.  If this is False, an exception will be
            raised.
        """

        if self.USERNAME:
            auth = self._get_auth_info(self.USERNAME)
        else:
            auth = None

        downloaded_files = []
        if savedir is None:
            savedir = self.cache_location
        for fileLink in unique(files):
            try:
                log.debug("Downloading {0} to {1}".format(fileLink, savedir))
                check_filename = self._request('HEAD',
                                               fileLink,
                                               auth=auth,
                                               stream=True)
                check_filename.raise_for_status()
                if 'text/html' in check_filename.headers['Content-Type']:
                    raise ValueError("Bad query.  This can happen if you "
                                     "attempt to download proprietary "
                                     "data when not logged in")

                filename = self._request("GET",
                                         fileLink,
                                         save=True,
                                         savedir=savedir,
                                         timeout=self.TIMEOUT,
                                         cache=cache,
                                         auth=auth,
                                         continuation=continuation)
                downloaded_files.append(filename)
            except requests.HTTPError as ex:
                if ex.response.status_code == 401:
                    if skip_unauthorized:
                        log.info("Access denied to {url}.  Skipping to"
                                 " next file".format(url=fileLink))
                        continue
                    else:
                        raise (ex)
                elif ex.response.status_code == 403:
                    log.error("Access denied to {url}".format(url=fileLink))
                    if 'dataPortal' in fileLink and 'sso' not in fileLink:
                        log.error("The URL may be incorrect.  Try using "
                                  "{0} instead of {1}".format(
                                      fileLink.replace('dataPortal/',
                                                       'dataPortal/sso/'),
                                      fileLink))
                    raise ex
                elif ex.response.status_code == 500:
                    # empirically, this works the second time most of the time...
                    filename = self._request("GET",
                                             fileLink,
                                             save=True,
                                             savedir=savedir,
                                             timeout=self.TIMEOUT,
                                             cache=cache,
                                             auth=auth,
                                             continuation=continuation)
                    downloaded_files.append(filename)
                else:
                    raise ex
        return downloaded_files
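
A hedged usage sketch of ``download_files``; the URL is illustrative only, and ``skip_unauthorized=True`` simply logs and skips files you lack access to:

from astroquery.alma import Alma

alma = Alma()
urls = ['https://almascience.eso.org/dataPortal/somefile.tar']   # placeholder URL
local_paths = alma.download_files(urls, savedir='/tmp/alma',
                                  skip_unauthorized=True)
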
Exemple #47
0
        raise ValueError("IERS B data requested for future MJD {}".format(mjd))
    might_be_old = is_url_in_cache(IERS_B_URL)
    iers_b = IERS_B.open(download_file(IERS_B_URL, cache=True))
    if might_be_old and iers_b[-1]["MJD"].to_value(u.d) < mjd:
        # Try wiping the download and re-downloading
        clear_download_cache(IERS_B_URL)
        iers_b = IERS_B.open(download_file(IERS_B_URL, cache=True))
    if iers_b[-1]["MJD"].to_value(u.d) < mjd:
        raise ValueError(
            "IERS B data not yet available for MJD {}".format(mjd))
    return iers_b


# On import, make sure the IERS and leap seconds tables are updated.
log.debug(
    "Running get_iers_up_to_date() to update IERS B table, and checking for updated leap seconds."
)
get_iers_up_to_date()
astropy.time.update_leap_seconds()

SECS_PER_DAY = erfa.DAYSEC
# Earth rotation rate in radians per UT1 second
#
# This is from Capitaine, Guinot, McCarthy, 2000 and is
# in IAU Resolution B1.8 on the Earth Rotation Angle (ERA)
# and the relation of it to UT1.  The number 1.00273781191135448
# below is a defining constant.  See here:
# http://iau-c31.nict.go.jp/pdf/2009_IAUGA_JD6/JD06_capitaine_wallace.pdf
OM = 1.00273781191135448 * 2.0 * np.pi / SECS_PER_DAY
# arcsec to radians
asec2rad = 4.84813681109536e-06
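
A quick numerical sanity check of the constants above: OM corresponds to roughly 1.0027378 revolutions per UT1 day, and asec2rad is simply one arcsecond expressed in radians:

import numpy as np
import erfa

SECS_PER_DAY = erfa.DAYSEC
OM = 1.00273781191135448 * 2.0 * np.pi / SECS_PER_DAY
print(OM * SECS_PER_DAY / (2.0 * np.pi))                            # ~ 1.0027378 rev/day
print(np.isclose(np.radians(1.0 / 3600.0), 4.84813681109536e-06))   # -> True
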
Exemple #48
0
while i<len(gti_t0):
    span = (gti_t1[i]-t0)*86400
    gap = (gti_t0[i]-t1)*86400
    #log.info("     {0}  {1:.6f} {2:.6f} span {3} gap {4}".format(i,gti_t0[i],gti_t1[i],span,gap))
    if span<args.maxspan and gap<args.maxgap:
        # If still within MAX, just move end time to end of this GTI
        t1 = gti_t1[i]
        exp += gti_dt[i]
        i+=1
    else:
        span = (t1-t0)*86400
        #log.info("DEBUG: {0} {1} {2} {3}".format(exp,args.minexp,span,args.minspan))
        if (exp>args.minexp) and (span>args.minspan):
            # This GTI pushes us beyond MAX, so write chunk and start new
            log.info("Writing chunk {0} - {1} (span {2}, exp {3})".format(t0,t1,(t1-t0)*86400,exp))
            log.debug("METs {} - {}, MJDREF {}".format(mjd2met(t0),mjd2met(t1),MJDREF))
            write_chunk(args.eventname,mjd2met(t0),mjd2met(t1),args.outbase,fileidx)
            fileidx += 1
        else:
            log.info("DISCARDING chunk {0} - {1} (span {2}, exp {3})".format(t0,t1,(t1-t0)*86400,exp))
        t0 = gti_t0[i]
        t1 = gti_t0[i]
        exp = 0.0
# If there is one segment left, do it.
if (exp>args.minexp) and (span>args.minspan):
    log.info("Writing final chunk {0} - {1} (span {2}, exp {3})".format(t0,t1,(t1-t0)*86400,exp))
    write_chunk(args.eventname,mjd2met(t0),mjd2met(t1),args.outbase,fileidx)
    fileidx += 1
else:
    log.info("DISCARDING final chunk {0} - {1} (span {2}, exp {3})".format(t0,t1,(t1-t0)*86400,exp))
Exemple #49
0
    def stage_data(self, uids):
        """
        Stage ALMA data

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """
        """
        With log.set_level(10)
        INFO: Staging files... [astroquery.alma.core]
        DEBUG: First request URL: https://almascience.eso.org/rh/submission [astroquery.alma.core]
        DEBUG: First request payload: {'dataset': [u'ALMA+uid___A002_X3b3400_X90f']} [astroquery.alma.core]
        DEBUG: First response URL: https://almascience.eso.org/rh/checkAuthenticationStatus/3f98de33-197e-4692-9afa-496842032ea9/submission [astroquery.alma.core]
        DEBUG: Request ID: 3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        DEBUG: Submission URL: https://almascience.eso.org/rh/submission/3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        .DEBUG: Data list URL: https://almascience.eso.org/rh/requests/anonymous/786823226 [astroquery.alma.core]
        """

        if isinstance(uids, six.string_types):
            uids = [uids]
        if not isinstance(uids, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        log.info("Staging files...")

        self._get_dataarchive_url()

        url = urljoin(self.dataarchive_url, 'rh/submission')
        log.debug("First request URL: {0}".format(url))
        #'ALMA+uid___A002_X391d0b_X7b'
        #payload = [('dataset','ALMA+'+clean_uid(uid)) for uid in uids]
        payload = {'dataset': ['ALMA+' + clean_uid(uid) for uid in uids]}
        log.debug("First request payload: {0}".format(payload))

        self._staging_log = {'first_post_url': url}

        # Request staging for the UIDs
        # This component cannot be cached, since the returned data can change
        # if new data are uploaded
        response = self._request('POST',
                                 url,
                                 data=payload,
                                 timeout=self.TIMEOUT,
                                 cache=False)
        self._staging_log['initial_response'] = response
        log.debug("First response URL: {0}".format(response.url))
        response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            time.sleep(1)
            # CANNOT cache this stage: it not a real data page!  results in
            # infinite loops
            response = self._request('POST',
                                     url,
                                     data=payload,
                                     timeout=self.TIMEOUT,
                                     cache=False)
            self._staging_log['initial_response'] = response
            if 'j_spring_cas_security_check' in response.url:
                log.warn("Staging request was not successful.  Try again?")
            response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            raise RemoteServiceError("Could not access data.  This error "
                                     "can arise if the data are private and "
                                     "you do not have access rights or are "
                                     "not logged in.")

        request_id = response.url.split("/")[-2]
        assert len(request_id) == 36
        self._staging_log['request_id'] = request_id
        log.debug("Request ID: {0}".format(request_id))

        # Submit a request for the specific request ID identified above
        submission_url = urljoin(self.dataarchive_url,
                                 os.path.join('rh/submission', request_id))
        log.debug("Submission URL: {0}".format(submission_url))
        self._staging_log['submission_url'] = submission_url
        staging_submission = self._request('GET', submission_url, cache=True)
        self._staging_log['staging_submission'] = staging_submission
        staging_submission.raise_for_status()

        data_page_url = staging_submission.url
        self._staging_log['data_page_url'] = data_page_url
        dpid = data_page_url.split("/")[-1]
        assert len(dpid) == 9
        self._staging_log['staging_page_id'] = dpid

        # CANNOT cache this step: please_wait will happen infinitely
        data_page = self._request('GET', data_page_url, cache=False)
        self._staging_log['data_page'] = data_page
        data_page.raise_for_status()

        has_completed = False
        while not has_completed:
            time.sleep(1)
            summary = self._request('GET',
                                    os.path.join(data_page_url, 'summary'),
                                    cache=False)
            summary.raise_for_status()
            print(".", end='')
            sys.stdout.flush()
            has_completed = summary.json()['complete']

        self._staging_log['summary'] = summary
        summary.raise_for_status()
        self._staging_log['json_data'] = json_data = summary.json()

        username = getattr(self, '_username', 'anonymous')

        # templates:
        # https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
        # 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
        # uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar

        url_decomposed = urlparse(data_page_url)
        base_url = ('{uri.scheme}://{uri.netloc}/'
                    'dataPortal/requests/{username}/'
                    '{staging_page_id}/ALMA'.format(
                        uri=url_decomposed,
                        staging_page_id=dpid,
                        username=username,
                    ))
        tbl = self._json_summary_to_table(json_data, base_url=base_url)

        # staging_root = BeautifulSoup(data_page.content)
        # downloadFileURL = staging_root.find('form').attrs['action']
        # data_list_url = os.path.split(downloadFileURL)[0]

        # # Old version, unreliable: data_list_url = staging_submission.url
        # log.debug("Data list URL: {0}".format(data_list_url))
        # self._staging_log['data_list_url'] = data_list_url

        # time.sleep(1)
        # data_list_page = self._request('GET', data_list_url, cache=True)
        # self._staging_log['data_list_page'] = data_list_page
        # data_list_page.raise_for_status()

        # if 'Error' in data_list_page.text:
        #     errormessage = staging_root.find('div', id='errorContent').string.strip()
        #     raise RemoteServiceError(errormessage)

        # tbl = self._parse_staging_request_page(data_list_page)

        return tbl
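A hedged usage sketch; the snippet above does not show its own name, so it is assumed to
be the stage_data method of astroquery.alma.Alma, and the UID below is illustrative
(it echoes the format in the comment near the top of the method):

from astroquery.alma import Alma

alma = Alma()
# alma.login('myusername')   # only needed for proprietary data
staged_files = alma.stage_data(['uid://A002/X391d0b/X7b'])   # table of staged file URLs
print(staged_files.colnames)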
Exemple #50
0
 def join_catalog(self, catalog, r_tol, bandpass, no_new=False):
     """Join a specific observational catalog to the Star table.
     
     Parameters
     ----------
     catalog : :class:`starplex.database.models.Catalog`
         The catalog to join to the `Star` table.
     r_tol : float
         Join search radius in arcseconds.
     bandpass : :class:`starplex.database.models.Bandpass`
         The bandpass used to sort the stars being added by brightness.
     no_new : bool
         If ``True`` then stars from this catalog will be matched to stars
         in the ``Star`` table, but unmatched stars will not create new
         entries. This option can be useful for matching observed star
         catalogs to a reference catalog. Default is ``False``.
     """
     r_tol_m = degree_to_meter(r_tol / 3600.)
     matched_count = 0
     new_count = 0
     # Query catalog stars for this catalog that are not already in the Star
     # table, ordered brightest to faintest
     cstar_query = self._s.query(CatalogStar).\
         join(Observation, Observation.catalogstar_id == CatalogStar.id).\
         filter(CatalogStar.catalog == catalog).\
         filter(CatalogStar.star == None).\
         order_by(Observation.mag.desc())
     log.debug("cstar_query.count {0:d}".format(cstar_query.count()))
     for i, cstar in enumerate(cstar_query):
         # FIXME returns too many stars
         q = self._s.query(Star).\
                 filter(cstar.coord.ST_DWithin(
                     Star.coord, r_tol_m, False)).\
                 order_by(cstar.coord.ST_Distance(Star.coord))
         _ingested = False
         if i % 100 == 0:
             log.debug("{0:d}, {1:d}".format(i, q.count()))
         if q.count() > 0:
             for matched_star in q:
                 # Check this star is not already included in this catalog
                 # FIXME this query seems wrong?
                 star_catalogs_q = self._s.query(Catalog.id).\
                         join(CatalogStar,
                                 CatalogStar.catalog_id == Catalog.id).\
                         join(Star,
                                 Star.id == CatalogStar.star_id).\
                         filter(matched_star.id == Star.id)
                 # unpack to a list of Catalog ids
                 catalog_ids = [x[0] for x in star_catalogs_q.all()]
                 if i % 100 == 0:
                     log.debug("\t{}".format(str(matched_star)))
                     log.debug("\tcount of member catalogs: {0:d}".
                             format(star_catalogs_q.count()))
                     log.debug("\t{}".format(str(catalog_ids)))
                 if cstar.catalog_id not in catalog_ids:
                     # This star can be joined
                     self._add_to_star(cstar, matched_star)
                     matched_count += 1
                     _ingested = True
                     break
         if not _ingested and not no_new:
             # No match; add this star directly
             self._add_new_star(cstar)
             new_count += 1
         if i % 100 == 0:
             log.debug("\tmatched %i new %i" % (matched_count, new_count))
Exemple #51
0
    def from_config(cls, config):
        """ Construct a StreamModel from a configuration dictionary.
            Typically comes from a YAML file via `streams.io.read_config`.

            Parameters
            ----------
            config : dict
        """

        if not isinstance(config, dict):
            from ..io import read_config
            config = read_config(config)

        # Set the log level based on config file - default is debug
        log_level = config.get('log_level', "DEBUG")
        logger.setLevel(getattr(logging, log_level.upper()))

        # Use a seed for random number generators
        seed = config.get('seed', np.random.randint(100000))
        logger.debug("Using seed: {}".format(seed))
        np.random.seed(seed)
        random.seed(seed)

        # Read star data from specified file
        star_file = config.get('star_data')
        if not os.path.exists(star_file):
            star_file = os.path.join(config['streams_path'], star_file)
        star_data = np.genfromtxt(star_file, names=True)

        prog_file = config.get('progenitor_data')
        if not os.path.exists(prog_file):
            prog_file = os.path.join(config['streams_path'], prog_file)
        prog_data = np.genfromtxt(prog_file, names=True)

        # If limiting the number of stars
        # TODO: allow selecting on some expression
        try:
            nstars = int(config.get('nstars'))
        except TypeError:
            nstars = None

        if nstars is not None:
            star_data = star_data[:nstars]
        logger.info("Using {} stars".format(len(star_data)))

        # Turn star and progenitor tables into 6D arrays with proper structure
        stars_obs = np.vstack([star_data[name] for name in heliocentric_names]).T.copy()
        try:
            stars_err = np.vstack([star_data["err_{}".format(name)] for name in heliocentric_names]).T.copy()
        except ValueError:
            logger.warning("Star data uncertainty columns misnamed or don't exist.")
            stars_err = None

        stars = StreamComponent(stars_obs, err=stars_err,
                                parameters=OrderedDict([('tail',star_data['tail'])]))

        # -------------------------------------------------------------------------------
        prog_cfg = config.get('progenitor')
        prog_obs = np.vstack([prog_data[name] for name in heliocentric_names]).T.copy()
        try:
            prog_err = np.vstack([prog_data["err_{}".format(name)] for name in heliocentric_names]).T.copy()
        except ValueError:
            logger.warning("Progenitor data uncertainty columns misnamed or don't exist.")
            prog_err = None

        # Progenitor mass:
        try:
            m0 = float(prog_cfg['mass'])
        except (TypeError, ValueError):
            m0 = eval(prog_cfg['mass'])

        if isinstance(m0, BasePrior):
            prior = m0
            m0 = ModelParameter(name="m0", shape=(1,), prior=prior)
        else:
            frozen = m0
            m0 = ModelParameter(name="m0", shape=(1,), prior=BasePrior())
            m0.frozen = frozen

        # Progenitor mass-loss:
        try:
            mdot = float(prog_cfg['mass_loss_rate'])
        except (TypeError, ValueError):
            mdot = eval(prog_cfg['mass_loss_rate'])
        if isinstance(mdot, BasePrior):
            prior = mdot
            mdot = ModelParameter(name="mdot", shape=(1,), prior=prior)
        else:
            frozen = mdot
            mdot = ModelParameter(name="mdot", shape=(1,), prior=BasePrior())
            mdot.frozen = frozen

        prog = StreamComponent(prog_obs, err=prog_err,
                               parameters=OrderedDict([('m0',m0), ('mdot',mdot)]))

        # -------------------------------------------------------------------------------

        # Read integration stuff
        dt = float(config['integration'].get('dt'))
        nintegrate = int(config['integration'].get('nsteps'))
        logger.debug("Will integrate for {} steps, with a timestep of {} Myr"
                     .format(nintegrate, dt))
        logger.info("Integration time: {} Myr".format(nintegrate*dt))

        # -------------------------------------------------------------------------------

        # Potential
        try:
            Potential = getattr(sp, config["potential"]["class"])
        except AttributeError:
            Potential = eval(config["potential"]["class"])
        logger.info("Using potential '{}'...".format(Potential))

        # potential parameters to vary
        vary_pars = config["potential"].get("parameters", dict())
        for k,v in vary_pars.items():
            vary_pars[k] = eval(v)

        # get fixed parameters
        fixed_pars = config["potential"].get("fixed", dict())

        potential = RewinderPotential(Potential, priors=vary_pars, fixed_pars=fixed_pars)

        # -------------------------------------------------------------------------------

        # Hyper-parameters
        hyperpars = config.get('hyperparameters', dict())
        for name,v in hyperpars.items():
            logger.debug("Adding hyper-parameter: {}".format(name))
            try:
                prior = float(v)
                logger.debug("--- {} = {}".format(name,prior))
            except ValueError:
                prior = eval(v)
                logger.debug("--- prior passed in for {}: {}".format(name,prior))

            if isinstance(prior, BasePrior):
                hyperpars[name] = ModelParameter(name=name, prior=prior)
            else:
                hyperpars[name] = ModelParameter(name=name, prior=BasePrior())
                hyperpars[name].frozen = prior

        # -------------------------------------------------------------------------------

        # Initialize the model
        model = cls(rewinder_potential=potential,
                    progenitor=prog, stars=stars,
                    dt=dt, nsteps=nintegrate,
                    extra_parameters=hyperpars)

        model.config = config

        return model
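A minimal configuration sketch covering only the keys read above; the class name
StreamModel, the potential class, and the prior expressions are illustrative assumptions:

config = {
    'log_level': 'INFO',
    'seed': 42,
    'streams_path': '/path/to/data',
    'star_data': 'stars.txt',                 # genfromtxt-readable with named columns
    'progenitor_data': 'progenitor.txt',
    'nstars': 128,
    'progenitor': {
        'mass': '2.5e4',                                 # float() succeeds -> frozen value
        'mass_loss_rate': 'LogUniformPrior(1e-3, 1.)',   # eval()'d -> must yield a BasePrior
    },
    'integration': {'dt': -1.0, 'nsteps': 6000},
    'potential': {
        'class': 'LM10Potential',             # resolved via getattr(sp, ...) or eval()
        'parameters': {'v_halo': 'LogUniformPrior(0.1, 0.2)'},
        'fixed': {'q1': 1.38},
    },
    'hyperparameters': {'alpha': '1.1'},
}
model = StreamModel.from_config(config)       # StreamModel is the assumed owning class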
Exemple #52
0
    def __new__(self, xarr, unit=None, refX=None, redshift=None,
                refX_unit=None, velocity_convention=None, use128bits=False,
                bad_unit_response='raise', equivalencies=u.spectral(),
                center_frequency=None, center_frequency_unit=None):
        """
        Make a new spectroscopic axis instance
        Default units Hz

        Parameters
        ----------
        xarr : np.ndarray
            An array of X-axis values in whatever unit specified
        unit : str
            Any valid spectroscopic X-axis unit (km/s, Hz, angstroms, etc.).
            Spaces will be removed.
        refX : float
            Reference frequency/wavelength
        refX_unit : str | astropy.units.Unit
            Units of the reference frequency/wavelength
        center_frequency : float
            The reference frequency for determining a velocity.
            Required for conversions between frequency/wavelength/energy and velocity.
            (redundant with refX)
        center_frequency_unit : str
            If converting between velocity and any other spectroscopic type, the
            central frequency around which that velocity is calculated must be
            specified.
            (redundant with refX_unit)
        equivalencies : list
            astropy equivalencies list containing tuples of the form:
            (from_unit, to_unit, forward, backward)
            forward and backward are functions that convert values between those units
        bad_unit_response : 'raise' | 'pixel'
            What should pyspeckit do if the units are not recognized?  Default
            is to raise an exception; 'pixel' falls back to pixel units instead.
        """
        if use128bits:
            dtype='float128'
        else:
            dtype='float64'

        # Only need to convert xarr to array if it's not already one (e.g., if
        # it's a list)
        if not isinstance(xarr, np.ndarray):
            subarr = np.array(xarr, dtype=dtype)
            log.debug("Created subarr from a non-ndarray {0}".format(type(xarr)))
        else:
            if not xarr.flags['OWNDATA']:
                # xarr does not own its data; we nearly always have to copy it. =(
                #log.warning("The X array does not 'own' its data."
                #            "  It will therefore be copied.")
                #warnings.warn("The X array does not 'own' its data."
                #              "  It will therefore be copied.")
                subarr = xarr.copy()
            else:
                log.debug("xarr owns its own data.  Continuing as normal.")
                subarr = xarr

        subarr = subarr.view(self)

        # Only need to set xarr's unit if it's not a quantity
        # or if it is unitless
        if not isinstance(xarr, u.Quantity) or xarr.unit == u.dimensionless_unscaled:
            subarr._unit = self.validate_unit(unit, bad_unit_response)

        subarr.refX = refX

        if refX_unit is None:
            if hasattr(refX, 'unit'):
                refX_unit = refX.unit
            elif subarr._unit in frequency_dict:
                refX_unit = subarr.unit
            else:
                refX_unit = 'Hz'
        subarr.refX_unit = refX_unit

        subarr.redshift = redshift
        subarr.wcshead = {}
        subarr.velocity_convention = velocity_convention

        if center_frequency is None:
            if refX is not None:
                center_frequency = refX
        if center_frequency_unit is None:
            if refX_unit is not None:
                center_frequency_unit = refX_unit
            else:
                center_frequency_unit = unit
        if center_frequency is not None:
            subarr.center_frequency = u.Quantity(center_frequency, center_frequency_unit)
        else:
            subarr.center_frequency = None

        temp1, temp2 = self.find_equivalencies(subarr.velocity_convention,
                                               subarr.center_frequency,
                                               equivalencies)
        subarr.center_frequency, subarr._equivalencies = temp1,temp2

        return subarr
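A hedged usage sketch, assuming the __new__ above belongs to pyspeckit's
SpectroscopicAxis (the CO 1-0 rest frequency below is illustrative):

import numpy as np
from pyspeckit.spectrum.units import SpectroscopicAxis

xarr = SpectroscopicAxis(np.linspace(110.0, 115.0, 1024), unit='GHz',
                         refX=115.271202, refX_unit='GHz',
                         velocity_convention='radio')
print(xarr.unit, xarr.refX, xarr.center_frequency)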
Exemple #53
0
    def slice(self,
              start=None,
              stop=None,
              unit='pixel',
              copy=True,
              xcopy=True,
              preserve_fits=False):
        """
        Slicing the spectrum

        .. WARNING:: this is the same as cropping right now, but it returns a
            copy instead of cropping inplace

        Parameters
        ----------
        start : float, int, or `~astropy.units.Quantity`
            start of slice
        stop : float, int, or `~astropy.units.Quantity`
            stop of slice
        unit : str
            allowed values are any supported physical unit, 'pixel'
        copy : bool
            If True (default), operate on and return a copy of the spectrum;
            if False, slice in place and return the same object.
        xcopy : bool
            Also copy the x-axis array?  (A copy is always made when ``copy``
            is True.)
        preserve_fits : bool
            Save the fitted parameters from self.fitter?
        """

        if hasattr(start, 'unit'):
            start_ind = self.xarr.x_to_pix(start)
        elif unit in ('pixel', 'pixels'):
            start_ind = start
        else:
            start_ind = self.xarr.x_to_pix(start, xval_unit=unit)

        if hasattr(stop, 'unit'):
            stop_ind = self.xarr.x_to_pix(stop)
        elif unit in ('pixel', 'pixels'):
            stop_ind = stop
        else:
            stop_ind = self.xarr.x_to_pix(stop, xval_unit=unit)

        if start_ind > stop_ind:
            start_ind, stop_ind = stop_ind, start_ind

        spectrum_slice = slice(start_ind, stop_ind)

        log.debug("Slicing from {start} to {stop} with unit {unit} and copy="
                  "{copy}, xcopy={xcopy}, preserve_fits={preserve_fits}."
                  "start_ind = {start_ind}, stop_ind= {stop_ind}".format(
                      start=start,
                      stop=stop,
                      unit=unit,
                      copy=copy,
                      xcopy=xcopy,
                      preserve_fits=preserve_fits,
                      start_ind=start_ind,
                      stop_ind=stop_ind))

        if copy:
            sp = self.copy()
        else:
            sp = self
        sp.data = sp.data[spectrum_slice]
        if sp.error is not None:
            sp.error = sp.error[spectrum_slice]
        if copy or xcopy:
            sp.xarr = sp.xarr[spectrum_slice].copy()
        else:
            sp.xarr = sp.xarr[spectrum_slice]

        if copy:
            # create new specfit / baseline instances (otherwise they'll be the wrong length)
            sp._register_fitters(registry=self.Registry)
            sp.baseline = baseline.Baseline(sp)
            sp.specfit = fitters.Specfit(sp, Registry=sp.Registry)
        else:
            # inplace modification
            sp.baseline.crop(start_ind, stop_ind)
            sp.specfit.crop(start_ind, stop_ind)

        if preserve_fits:
            sp.specfit.modelpars = self.specfit.modelpars
            sp.specfit.parinfo = self.specfit.parinfo
            sp.baseline.baselinepars = self.baseline.baselinepars
            sp.baseline.order = self.baseline.order

        return sp
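A hedged usage sketch, assuming the method above lives on pyspeckit's Spectrum class
(the synthetic data and velocity range are illustrative):

import numpy as np
import pyspeckit

sp = pyspeckit.Spectrum(xarr=np.linspace(-100, 100, 201), data=np.random.randn(201),
                        xarrkwargs={'unit': 'km/s'})
sub = sp.slice(-30, 30, unit='km/s')          # sliced copy over [-30, 30] km/s
sp.slice(50, 150, unit='pixel', copy=False)   # in-place crop in pixel units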
Exemple #54
0
    def compute_posvels(self, ephem="DE421", planets=False):
        """Compute positions and velocities of the observatories and Earth.

        Compute the positions and velocities of the observatory (wrt
        the Geocenter) and the center of the Earth (referenced to the
        SSB) for each TOA.  The JPL solar system ephemeris can be set
        using the 'ephem' parameter.  The positions and velocities are
        set with PosVel class instances which have astropy units.
        """
        # Record the planets choice for this instance
        self.planets = planets
        log.info(
            'Compute positions and velocities of observatories and Earth (planets = {0}), using {1} ephemeris'
            .format(planets, ephem))
        # Remove any existing columns
        cols_to_remove = ['ssb_obs_pos', 'ssb_obs_vel', 'obs_sun_pos']
        for c in cols_to_remove:
            if c in self.table.colnames:
                log.info('Column {0} already exists. Removing...'.format(c))
                self.table.remove_column(c)
        for p in ('jupiter', 'saturn', 'venus', 'uranus'):
            name = 'obs_' + p + '_pos'
            if name in self.table.colnames:
                log.info('Column {0} already exists. Removing...'.format(name))
                self.table.remove_column(name)

        self.table.meta['ephem'] = ephem
        ssb_obs_pos = table.Column(name='ssb_obs_pos',
                                   data=numpy.zeros((self.ntoas, 3),
                                                    dtype=numpy.float64),
                                   unit=u.km,
                                   meta={
                                       'origin': 'SSB',
                                       'obj': 'OBS'
                                   })
        ssb_obs_vel = table.Column(name='ssb_obs_vel',
                                   data=numpy.zeros((self.ntoas, 3),
                                                    dtype=numpy.float64),
                                   unit=u.km / u.s,
                                   meta={
                                       'origin': 'SSB',
                                       'obj': 'OBS'
                                   })
        obs_sun_pos = table.Column(name='obs_sun_pos',
                                   data=numpy.zeros((self.ntoas, 3),
                                                    dtype=numpy.float64),
                                   unit=u.km,
                                   meta={
                                       'origin': 'OBS',
                                       'obj': 'SUN'
                                   })
        if planets:
            plan_poss = {}
            for p in ('jupiter', 'saturn', 'venus', 'uranus'):
                name = 'obs_' + p + '_pos'
                plan_poss[name] = table.Column(name=name,
                                               data=numpy.zeros(
                                                   (self.ntoas, 3),
                                                   dtype=numpy.float64),
                                               unit=u.km,
                                               meta={
                                                   'origin': 'OBS',
                                                   'obj': p
                                               })

        # Now step through in observatory groups
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii + 2]
            site = get_observatory(obs)
            tdb = time.Time(grp['tdb'], precision=9)
            ssb_obs = site.posvel(tdb, ephem)
            log.debug("SSB obs pos {0}".format(ssb_obs.pos[:, 0]))
            ssb_obs_pos[loind:hiind, :] = ssb_obs.pos.T.to(u.km)
            ssb_obs_vel[loind:hiind, :] = ssb_obs.vel.T.to(u.km / u.s)
            sun_obs = objPosVel_wrt_SSB('sun', tdb, ephem) - ssb_obs
            obs_sun_pos[loind:hiind, :] = sun_obs.pos.T.to(u.km)
            if planets:
                for p in ('jupiter', 'saturn', 'venus', 'uranus'):
                    name = 'obs_' + p + '_pos'
                    dest = p
                    pv = objPosVel_wrt_SSB(dest, tdb, ephem) - ssb_obs
                    plan_poss[name][loind:hiind, :] = pv.pos.T.to(u.km)
        cols_to_add = [ssb_obs_pos, ssb_obs_vel, obs_sun_pos]
        if planets:
            cols_to_add += plan_poss.values()
        log.info('Adding columns ' + ' '.join([cc.name for cc in cols_to_add]))
        self.table.add_columns(cols_to_add)
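A hedged usage sketch, assuming the method above is part of PINT's TOAs class (the .tim
file name is illustrative):

import pint.toa as toa

t = toa.get_TOAs('pulsar.tim', planets=True)
t.compute_posvels(ephem='DE421', planets=True)
print(t.table['ssb_obs_pos'][:3])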
Exemple #55
0
"""
This script is obsolete (though it was used at one time).  tp_7m_12m_combine.py is an
improved and more efficient version of it.
"""
import numpy as np
from spectral_cube import SpectralCube
from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
from uvcombine import spectral_regrid, feather_simple, fourier_combine_cubes
from astropy import units as u
from astropy import log

log.setLevel("DEBUG")
log.debug('Reading...')
cube = SpectralCube.read('SgrB2_b3_7M_12M.HC3N.image.pbcor_medsub.fits').with_spectral_unit(u.GHz)
cube.allow_huge_operations=True
log.debug("Read file")
cube_k = cube.to(u.K, cube.beam.jtok_equiv(cube.spectral_axis))
log.debug("Converted first file to K")


tpcube = SpectralCube.read('../tp/tp_concat.spw17.image.fits').with_spectral_unit(u.GHz)
crop_freqs = cube_k.spectral_axis[0], cube_k.spectral_axis[-1]
crop_channels = sorted((tpcube.closest_spectral_channel(crop_freqs[0]), tpcube.closest_spectral_channel(crop_freqs[1])))
tpcube = tpcube[crop_channels[0]-1:crop_channels[1]+1]
log.debug("Read tp freq")
tpcube_k = tpcube.to(u.K, tpcube.beam.jtok_equiv(tpcube.spectral_axis))
log.debug("Converted TP to K")
# determine smooth factor
kw = cube_k.spectral_axis.diff().mean() / tpcube_k.spectral_axis.diff().mean()
log.debug("determined kernel")
tpcube_k_smooth = tpcube_k.spectral_smooth(Gaussian1DKernel(kw/(8*np.log(2))**0.5))
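The unused imports above (spectral_regrid, feather_simple, fourier_combine_cubes) hint at
the intended next steps; a hedged continuation sketch, with the uvcombine call signature
treated as an assumption that may differ between versions:

# put the smoothed TP cube onto the 7m+12m spectral grid, then combine in the Fourier domain
tpcube_k_rg = tpcube_k_smooth.spectral_interpolate(cube_k.spectral_axis)
log.debug("Spectrally regridded the TP cube")
combined_hdu = fourier_combine_cubes(cube_k, tpcube_k_rg, return_hdu=True)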
Exemple #56
0
    def __init__(self, rewinder_potential, progenitor, stars, dt, nsteps, extra_parameters,
                 selfgravity=True, nsamples=128):
        """ Model for tidal streams that uses backwards integration to Rewind
            the positions of stars.

            Parameters
            ----------
            rewinder_potential : streams.RewinderPotential
            progenitor : streams.Progenitor
            stars : streams.Stars
            dt, nsteps : float
                Integration parameters.

            Other Parameters
            ----------------
            extra_parameters : dict
                Any additional parameters for the model (added to the 'hyper'
                parameter group).
            selfgravity : bool
                Include self-gravity (default True).
            nsamples : int
                Number of importance samples to draw per star.

        """
        # integration
        self.dt = dt
        self.nsteps = nsteps
        self.args = (self.dt, self.nsteps)

        self.parameters = OrderedDict()

        self.rewinder_potential = rewinder_potential
        self.progenitor = progenitor
        self.stars = stars
        self.nstars = len(stars.data)
        self.selfgravity = selfgravity

        for name,p in self.rewinder_potential.parameters.items():
            logger.debug("Adding parameter {}".format(name))
            self.add_parameter(p, group='potential')

        for name,p in self.progenitor.parameters.items():
            logger.debug("Adding parameter {}".format(name))
            self.add_parameter(p, group='progenitor')

        for name,p in extra_parameters.items():
            logger.debug("Adding parameter {}".format(name))
            self.add_parameter(p, group='hyper')

        self.perfect_stars = False
        if self.stars.err is None:
            self.perfect_stars = True
            logger.warning("No uncertainties on stars")

        self.perfect_prog = False
        if self.progenitor.err is None:
            self.perfect_prog = True
            logger.warning("No uncertainties on progenitor")

        self.perfect_data = self.perfect_stars and self.perfect_prog
        if self.perfect_data:
            logger.warning("Perfect data!")

        self._ln_likelihood_tmp = np.empty((self.nsteps, self.nstars))

        if not self.perfect_prog:
            # add progenitor position as parameters
            for i,name in enumerate(heliocentric_names):
                logger.debug("Adding progenitor parameter {}".format(name))
                p = ModelParameter(name=name, prior=BasePrior())
                self.add_parameter(p, group='progenitor')

        self.nsamples = nsamples
        if not self.perfect_stars:
            tot_samples = 10000

            # draw samples for each star
            impo_samples_hel = np.zeros((self.nstars,tot_samples,6))
            impo_samples_hel[...,:2] = self.stars.data[:,np.newaxis,:2]  # copy over l,b
            impo_samples_hel[...,2:] = np.random.normal(self.stars.data[:,None,2:],
                                                        self.stars.err[:,None,2:],
                                                        size=(self.nstars,tot_samples,4)) # TODO: missing data!!??
            impo_samples = hel_to_gal(impo_samples_hel.reshape(self.nstars*tot_samples,6))

            # compute prior probabilities for the samples
            ldp = np.array([self.stars.ln_data_prob(impo_samples_hel[:,i]) for i in range(tot_samples)])
            self.impo_samples_lnprob = ldp.T

            # transform to galactocentric
            self.impo_samples_gal = impo_samples.reshape(self.nstars,tot_samples,6)

            # TODO: pre-allocate? set the tail assignments
            # tail = np.array(self.stars.parameters['tail'])

            self._ln_likelihood_tmp = np.zeros((self.nsteps, self.nsamples))
Exemple #57
0
    def _parse_kwargs(self,
                      min_frequency=None,
                      max_frequency=None,
                      band='any',
                      top20=None,
                      chemical_name=None,
                      chem_re_flags=0,
                      energy_min=None,
                      energy_max=None,
                      energy_type=None,
                      intensity_lower_limit=None,
                      intensity_type=None,
                      transition=None,
                      version=None,
                      exclude=None,
                      only_NRAO_recommended=None,
                      line_lists=None,
                      line_strengths=None,
                      energy_levels=None,
                      export=None,
                      export_limit=None,
                      noHFS=None,
                      displayHFS=None,
                      show_unres_qn=None,
                      show_upper_degeneracy=None,
                      show_molecule_tag=None,
                      show_qn_code=None,
                      show_lovas_labref=None,
                      show_lovas_obsref=None,
                      show_orderedfreq_only=None,
                      show_nrao_recommended=None,
                      parse_chemistry_locally=True):
        """
        The Splatalogue service returns lines with rest frequencies in the
        range [min_frequency, max_frequency].

        Parameters
        ----------
        min_frequency : `astropy.units.Quantity`
            Minimum frequency (or any spectral() equivalent)
        max_frequency : `astropy.units.Quantity`
            Maximum frequency (or any spectral() equivalent)
        band : str
            The observing band.  If it is not 'any', it overrides
            minfreq/maxfreq.
        top20: str
            One of ``'comet'``, ``'planet'``, ``'top20'``, ``'ism_hotcore'``,
            ``'ism_darkcloud'``, ``'ism_diffusecloud'``.
            Overrides chemical_name
        chemical_name : str
            Name of the chemical to search for. Treated as a regular
            expression.  An empty set ('', (), [], {}) will match *any*
            species. Examples:

            ``'H2CO'`` - 13 species have H2CO somewhere in their formula.

            ``'Formaldehyde'`` - There are 8 isotopologues of Formaldehyde
                                 (e.g., H213CO).

            ``'formaldehyde'`` - Thioformaldehyde, Cyanoformaldehyde.

            ``'formaldehyde',chem_re_flags=re.I`` - Formaldehyde, thioformaldehyde,
                                                    and Cyanoformaldehyde.

            ``' H2CO '`` - Just 1 species, H2CO. The spaces prevent including
                           others.
        parse_chemistry_locally : bool
            Attempt to determine the species ID #'s locally before sending the
            query?  This will prevent queries that have no matching species.
            It also performs a more flexible regular expression match to the
            species IDs.  See the examples in `get_species_ids`
        chem_re_flags : int
            See the `re` module
        energy_min : `None` or float
            Energy range to include.  See energy_type
        energy_max : `None` or float
            Energy range to include.  See energy_type
        energy_type : ``'el_cm1'``, ``'eu_cm1'``, ``'eu_k'``, ``'el_k'``
            Type of energy to restrict.  L/U for lower/upper state energy,
            cm/K for *inverse* cm, i.e. wavenumber, or K for Kelvin
        intensity_lower_limit : `None` or float
            Lower limit on the intensity.  See intensity_type
        intensity_type : `None` or ``'sij'``, ``'cdms_jpl'``, ``'aij'``
            The type of intensity on which to place a lower limit
        transition : str
            e.g. 1-0
        version : ``'v1.0'``, ``'v2.0'``, ``'v3.0'`` or ``'vall'``
            Data version
        exclude : list
            Types of lines to exclude.  Default is:
            (``'potential'``, ``'atmospheric'``, ``'probable'``)
            Can also exclude ``'known'``.
            To exclude nothing, use 'none', not the python object None, since
            the latter is meant to indicate 'leave as default'
        only_NRAO_recommended : bool
            Show only NRAO recommended species?
        line_lists : list
            Options:
            Lovas, SLAIM, JPL, CDMS, ToyoMA, OSU, Recomb, Lisa, RFI
        line_strengths : list
            * CDMS/JPL Intensity : ls1
            * Sij : ls3
            * Aij : ls4
            * Lovas/AST : ls5
        energy_levels : list
            * E_lower (cm^-1) : el1
            * E_lower (K) : el2
            * E_upper (cm^-1) : el3
            * E_upper (K) : el4
        export : bool
            Set up arguments for the export server (as opposed to the HTML
            server)?
        export_limit : int
            Maximum number of lines in output file
        noHFS : bool
            No HFS Display
        displayHFS : bool
            Display HFS Intensity
        show_unres_qn : bool
            Display Unresolved Quantum Numbers
        show_upper_degeneracy : bool
            Display Upper State Degeneracy
        show_molecule_tag : bool
            Display Molecule Tag
        show_qn_code : bool
            Display Quantum Number Code
        show_lovas_labref : bool
            Display Lab Ref
        show_lovas_obsref : bool
            Display Obs Ref
        show_orderedfreq_only : bool
            Display Ordered Frequency ONLY
        show_nrao_recommended : bool
            Display NRAO Recommended Frequencies

        Returns
        -------
        payload : dict
            Dictionary of the parameters to send to the SPLAT page

        """

        payload = {
            'submit': 'Search',
            'frequency_units': 'GHz',
        }

        if band != 'any':
            if band not in self.FREQUENCY_BANDS:
                raise ValueError("Invalid frequency band.")
            if min_frequency is not None or max_frequency is not None:
                warnings.warn("Band was specified, so the frequency "
                              "specification is overridden")
            payload['band'] = band
        elif min_frequency is not None and max_frequency is not None:
            # allow setting payload without having *ANY* valid frequencies set
            min_frequency = min_frequency.to(u.GHz, u.spectral())
            max_frequency = max_frequency.to(u.GHz, u.spectral())
            if min_frequency > max_frequency:
                min_frequency, max_frequency = max_frequency, min_frequency

            payload['from'] = min_frequency.value
            payload['to'] = max_frequency.value

        if top20 is not None:
            if top20 in self.TOP20_LIST:
                payload['top20'] = top20
            else:
                raise ValueError("Top20 is not one of the allowed values")
        elif chemical_name in ('', {}, (), [], set()):
            # include all
            payload['sid[]'] = []
        elif chemical_name is not None:
            if parse_chemistry_locally:
                species_ids = self.get_species_ids(chemical_name,
                                                   chem_re_flags)
                if len(species_ids) == 0:
                    raise ValueError("No matching chemical species found.")
                payload['sid[]'] = list(species_ids.values())
            else:
                payload['chemical_name'] = chemical_name

        if energy_min is not None:
            payload['energy_range_from'] = float(energy_min)
        if energy_max is not None:
            payload['energy_range_to'] = float(energy_max)
        if energy_type is not None:
            validate_energy_type(energy_type)
            payload['energy_range_type'] = energy_type

        if intensity_type is not None:
            payload['lill'] = 'lill_' + intensity_type
            if intensity_lower_limit is not None:
                payload[payload['lill']] = intensity_lower_limit

        if transition is not None:
            payload['tran'] = transition

        if version in self.versions:
            payload['data_version'] = version
        elif version is not None:
            raise ValueError("Invalid version specified.  Allowed versions "
                             "are {vers}".format(vers=str(self.versions)))

        if exclude == 'none':
            for e in ('potential', 'atmospheric', 'probable', 'known'):
                # Setting a keyword value to 'None' removes it (see query_lines_async)
                log.debug("Setting no_{0} to None".format(e))
                payload['no_' + e] = None
        elif exclude is not None:
            for e in exclude:
                payload['no_' + e] = 'no_' + e

        if only_NRAO_recommended:
            payload['include_only_nrao'] = 'include_only_nrao'

        if line_lists is not None:
            if type(line_lists) not in (tuple, list):
                raise TypeError("Line lists should be a list of linelist "
                                "names.  See Splatalogue.ALL_LINE_LISTS")
            for L in self.ALL_LINE_LISTS:
                kwd = 'display' + L
                if L in line_lists:
                    payload[kwd] = kwd
                else:
                    payload[kwd] = ''

        if line_strengths is not None:
            for LS in line_strengths:
                payload[LS] = LS

        if energy_levels is not None:
            for EL in energy_levels:
                payload[EL] = EL

        for b in ("noHFS", "displayHFS", "show_unres_qn",
                  "show_upper_degeneracy", "show_molecule_tag", "show_qn_code",
                  "show_lovas_labref", "show_orderedfreq_only",
                  "show_lovas_obsref", "show_nrao_recommended"):
            if locals()[b]:
                payload[b] = b

        # default arg, unmodifiable...
        payload['jsMath'] = 'font:symbol,warn:0'
        payload['__utma'] = ''
        payload['__utmc'] = ''

        if export:
            payload['submit'] = 'Export'
            payload['export_delimiter'] = 'colon'  # or tab or comma
            payload['export_type'] = 'current'
            payload['offset'] = 0
            payload['range'] = 'on'

        if export_limit is not None:
            payload['limit'] = export_limit
        else:
            payload['limit'] = self.LINES_LIMIT

        return payload
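A hedged usage sketch; the public entry point wrapping _parse_kwargs is assumed to be
astroquery.splatalogue.Splatalogue.query_lines, and the frequency range and filters
below are illustrative:

from astropy import units as u
from astroquery.splatalogue import Splatalogue

lines = Splatalogue.query_lines(114 * u.GHz, 116 * u.GHz,
                                chemical_name=' CO ',
                                energy_max=500, energy_type='eu_k',
                                line_lists=['CDMS', 'JPL'])
print(lines[:5])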
def make_finder_chart_from_image_and_catalog(
    image,
    catalog,
    save_prefix,
    alma_kwargs={
        'public': False,
        'science': False
    },
    bands=(3, 4, 5, 6, 7, 8, 9, 10),
    private_band_colors=(
        'maroon',
        'red',
        'orange',
        'coral',
        'brown',
        'yellow',
        'mediumorchid',
        'palegoldenrod',
    ),
    public_band_colors=(
        'blue',
        'cyan',
        'green',
        'turquoise',
        'teal',
        'darkslategrey',
        'chartreuse',
        'lime',
    ),
    integration_time_contour_levels=np.logspace(0, 5, base=2, num=6),
    save_masks=False,
    use_saved_masks=False,
    linewidth=1,
):
    """
    Create a "finder chart" showing where ALMA has pointed in various bands,
    including different color coding for public/private data and each band.

    Contours are set at various integration times.

    Parameters
    ----------
    image : fits.PrimaryHDU or fits.ImageHDU object
        The image to overlay onto
    catalog : astropy.Table object
        The catalog of ALMA observations
    save_prefix : str
        The prefix for the output files.  Both .reg and .png files will be
        written.  The .reg files will have the band numbers and
        public/private appended, while the .png file will be named
        prefix_almafinderchart.png
    alma_kwargs : dict
        Keywords to pass to the ALMA archive when querying.
    private_band_colors / public_band_colors : tuple
        A tuple or list of colors to be associated with private/public
        observations in the various bands
    integration_time_contour_levels : list or np.array
        The levels at which to draw contours in units of seconds.  Default is
        log-spaced (2^n) seconds: [1., 2., 4., 8., 16., 32.]
    """
    import aplpy

    import pyregion

    all_bands = bands
    bands = used_bands = [int(band) for band in np.unique(catalog['Band'])]
    log.info("The bands used include: {0}".format(used_bands))
    band_colors_priv = dict(zip(all_bands, private_band_colors))
    band_colors_pub = dict(zip(all_bands, public_band_colors))
    log.info("Color map private: {0}".format(band_colors_priv))
    log.info("Color map public: {0}".format(band_colors_pub))

    if use_saved_masks:
        hit_mask_public = {}
        hit_mask_private = {}

        for band in bands:
            pubfile = '{0}_band{1}_public.fits'.format(save_prefix, band)
            if os.path.exists(pubfile):
                hit_mask_public[band] = fits.getdata(pubfile)
            privfile = '{0}_band{1}_private.fits'.format(save_prefix, band)
            if os.path.exists(privfile):
                hit_mask_private[band] = fits.getdata(privfile)

    else:
        today = np.datetime64('today')

        # At least temporarily obsolete
        # private_circle_parameters = {
        #     band: [(row['RA'], row['Dec'], np.mean(rad).to(u.deg).value)
        #            for row, rad in zip(catalog, primary_beam_radii)
        #            if not row['Release date'] or
        #            (np.datetime64(row['Release date']) > today and row['Band'] == band)]
        #     for band in bands}

        # public_circle_parameters = {
        #     band: [(row['RA'], row['Dec'], np.mean(rad).to(u.deg).value)
        #            for row, rad in zip(catalog, primary_beam_radii)
        #            if row['Release date'] and
        #            (np.datetime64(row['Release date']) <= today and row['Band'] == band)]
        #     for band in bands}

        # unique_private_circle_parameters = {
        #     band: np.array(list(set(private_circle_parameters[band])))
        #     for band in bands}
        # unique_public_circle_parameters = {
        #     band: np.array(list(set(public_circle_parameters[band])))
        #     for band in bands}

        release_dates = np.array(catalog['Release date'], dtype=np.datetime64)

        for band in bands:
            log.info("BAND {0}".format(band))
            privrows = sum((catalog['Band'] == band) & (release_dates > today))
            pubrows = sum((catalog['Band'] == band) & (release_dates <= today))
            log.info("PUBLIC:  Number of rows: {0}".format(pubrows, ))
            log.info("PRIVATE: Number of rows: {0}.".format(privrows))

        log.debug('Creating regions')
        prv_regions = {
            band: pyregion.ShapeList([
                add_meta_to_reg(fp, {'integration': row['Integration']})
                for row in catalog for fp in footprint_to_reg(row['Footprint'])
                if (not row['Release date']) or (np.datetime64(
                    row['Release date']) > today and row['Band'] == band)
            ])
            for band in bands
        }
        pub_regions = {
            band: pyregion.ShapeList([
                add_meta_to_reg(fp, {'integration': row['Integration']})
                for row in catalog for fp in footprint_to_reg(row['Footprint'])
                if row['Release date'] and (np.datetime64(row['Release date'])
                                            <= today and row['Band'] == band)
            ])
            for band in bands
        }

        log.debug('Creating masks')
        prv_mask = {
            band:
            fits.PrimaryHDU(prv_regions[band].get_mask(image).astype('int'),
                            header=image.header)
            for band in bands if prv_regions[band]
        }
        pub_mask = {
            band:
            fits.PrimaryHDU(pub_regions[band].get_mask(image).astype('int'),
                            header=image.header)
            for band in bands if pub_regions[band]
        }

        hit_mask_public = {
            band: np.zeros_like(image.data)
            for band in pub_mask
        }
        hit_mask_private = {
            band: np.zeros_like(image.data)
            for band in prv_mask
        }
        mywcs = wcs.WCS(image.header)

        for band in bands:
            log.debug(
                'Adding integration-scaled masks for Band: {0}'.format(band))

            shapes = prv_regions[band]
            for shape in shapes:
                # private: release_date = 'sometime' says when it will be released
                (xlo, xhi, ylo,
                 yhi), mask = pyregion_subset(shape, hit_mask_private[band],
                                              mywcs)
                log.debug("{0},{1},{2},{3}: {4}".format(
                    xlo, xhi, ylo, yhi, mask.sum()))
                hit_mask_private[band][
                    ylo:yhi, xlo:xhi] += shape.meta['integration'] * mask

            if save_masks:
                shapes.write('{0}_band{1}_private.reg'.format(
                    save_prefix, band))

            shapes = pub_regions[band]
            for shape in shapes:
                # public: release_date = '' should mean already released
                (xlo, xhi, ylo,
                 yhi), mask = pyregion_subset(shape, hit_mask_public[band],
                                              mywcs)
                log.debug("{0},{1},{2},{3}: {4}".format(
                    xlo, xhi, ylo, yhi, mask.sum()))
                hit_mask_public[band][
                    ylo:yhi, xlo:xhi] += shape.meta['integration'] * mask

            if save_masks:
                shapes.write('{0}_band{1}_public.reg'.format(
                    save_prefix, band))

        if save_masks:
            for band in bands:
                if band in hit_mask_public:
                    if hit_mask_public[band].any():
                        hdu = fits.PrimaryHDU(data=hit_mask_public[band],
                                              header=image.header)
                        hdu.writeto('{0}_band{1}_public.fits'.format(
                            save_prefix, band),
                                    overwrite=True)
                if band in hit_mask_private:
                    if hit_mask_private[band].any():
                        hdu = fits.PrimaryHDU(data=hit_mask_private[band],
                                              header=image.header)
                        hdu.writeto('{0}_band{1}_private.fits'.format(
                            save_prefix, band),
                                    overwrite=True)

    fig = aplpy.FITSFigure(fits.HDUList(image), convention='calabretta')
    fig.show_grayscale(stretch='arcsinh', vmid=np.nanmedian(image.data))
    for band in bands:
        if band in hit_mask_public:
            if hit_mask_public[band].any():
                fig.show_contour(fits.PrimaryHDU(data=hit_mask_public[band],
                                                 header=image.header),
                                 levels=integration_time_contour_levels,
                                 colors=[band_colors_pub[int(band)]] *
                                 len(integration_time_contour_levels),
                                 linewidth=linewidth,
                                 convention='calabretta')
        if band in hit_mask_private:
            if hit_mask_private[band].any():
                fig.show_contour(fits.PrimaryHDU(data=hit_mask_private[band],
                                                 header=image.header),
                                 levels=integration_time_contour_levels,
                                 colors=[band_colors_priv[int(band)]] *
                                 len(integration_time_contour_levels),
                                 linewidth=linewidth,
                                 convention='calabretta')

    fig.save('{0}_almafinderchart.png'.format(save_prefix))

    return image, catalog, hit_mask_public, hit_mask_private
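A hedged usage sketch for the finder-chart function above; the image file and the query
target are illustrative, and the catalog must contain the 'Band', 'Release date',
'Footprint', and 'Integration' columns used above:

from astropy import coordinates, units as u
from astropy.io import fits
from astroquery.alma import Alma

image = fits.open('w51_2mass_k.fits')[0]
catalog = Alma.query_region(coordinates.SkyCoord.from_name('W51'), radius=15 * u.arcmin)
make_finder_chart_from_image_and_catalog(image, catalog, save_prefix='W51')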
    def _activate_form(self,
                       response,
                       form_index=0,
                       form_id=None,
                       inputs={},
                       cache=True,
                       method=None):
        """
        Parameters
        ----------
        method : None or str
            Can be used to override the form-specified method
        """
        # Extract form from response
        root = BeautifulSoup(response.content, 'html5lib')
        if form_id is None:
            form = root.find_all('form')[form_index]
        else:
            form = root.find_all('form', id=form_id)[form_index]
        # Construct base url
        form_action = form.get('action')
        if "://" in form_action:
            url = form_action
        elif form_action.startswith('/'):
            url = '/'.join(response.url.split('/', 3)[:3]) + form_action
        else:
            url = response.url.rsplit('/', 1)[0] + '/' + form_action
        # Identify payload format
        fmt = None
        form_method = form.get('method').lower()
        if form_method == 'get':
            fmt = 'get'  # get(url, params=payload)
        elif form_method == 'post':
            if 'enctype' in form.attrs:
                if form.attrs['enctype'] == 'multipart/form-data':
                    fmt = 'multipart/form-data'  # post(url, files=payload)
                elif form.attrs[
                        'enctype'] == 'application/x-www-form-urlencoded':
                    fmt = 'application/x-www-form-urlencoded'  # post(url, data=payload)
                else:
                    raise Exception("enctype={0} is not supported!".format(
                        form.attrs['enctype']))
            else:
                fmt = 'application/x-www-form-urlencoded'  # post(url, data=payload)
        # Extract payload from form
        payload = []
        for form_elem in form.find_all(['input', 'select', 'textarea']):
            value = None
            is_file = False
            tag_name = form_elem.name
            key = form_elem.get('name')
            if tag_name == 'input':
                is_file = (form_elem.get('type') == 'file')
                value = form_elem.get('value')
                if form_elem.get('type') in ['checkbox', 'radio']:
                    if form_elem.has_attr('checked'):
                        if not value:
                            value = 'on'
                    else:
                        value = None
            elif tag_name == 'select':
                if form_elem.get('multiple') is not None:
                    value = []
                    if form_elem.select('option[value]'):
                        for option in form_elem.select('option[value]'):
                            if option.get('selected') is not None:
                                value.append(option.get('value'))
                    else:
                        for option in form_elem.select('option'):
                            if option.get('selected') is not None:
                                # bs4 NavigableString types have bad,
                                # undesirable properties that result
                                # in recursion errors when caching
                                value.append(str(option.string))
                else:
                    if form_elem.select('option[value]'):
                        for option in form_elem.select('option[value]'):
                            if option.get('selected') is not None:
                                value = option.get('value')
                        # select the first option field if none is selected
                        if value is None:
                            value = form_elem.select('option[value]')[0].get(
                                'value')
                    else:
                        # survey form just uses text, not value
                        for option in form_elem.select('option'):
                            if option.get('selected') is not None:
                                value = str(option.string)
                        # select the first option field if none is selected
                        if value is None:
                            value = str(form_elem.select('option')[0].string)

            if key in inputs:
                if isinstance(inputs[key], list):
                    # list input is accepted (for array uploads)
                    value = inputs[key]
                else:
                    value = str(inputs[key])

            if (key is not None):  # and (value is not None):
                if fmt == 'multipart/form-data':
                    if is_file:
                        payload.append(
                            (key, ('', '', 'application/octet-stream')))
                    else:
                        if type(value) is list:
                            for v in value:
                                entry = (key, ('', v))
                                # Prevent redundant key, value pairs
                                # (can happen if the form repeats them)
                                if entry not in payload:
                                    payload.append(entry)
                        elif value is None:
                            entry = (key, ('', ''))
                            if entry not in payload:
                                payload.append(entry)
                        else:
                            entry = (key, ('', value))
                            if entry not in payload:
                                payload.append(entry)
                else:
                    if type(value) is list:
                        for v in value:
                            entry = (key, v)
                            if entry not in payload:
                                payload.append(entry)
                    else:
                        entry = (key, value)
                        if entry not in payload:
                            payload.append(entry)

        # for future debugging
        self._payload = payload
        log.debug("Form: payload={0}".format(payload))

        if method is not None:
            fmt = method

        log.debug("Method/format = {0}".format(fmt))

        # Send payload
        if fmt == 'get':
            response = self._request("GET", url, params=payload, cache=cache)
        elif fmt == 'multipart/form-data':
            response = self._request("POST", url, files=payload, cache=cache)
        elif fmt == 'application/x-www-form-urlencoded':
            response = self._request("POST", url, data=payload, cache=cache)

        return response
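A hedged usage sketch; _activate_form is meant to be called from inside a query class
that also provides _request (astroquery's BaseQuery does), and the subclass name, URL,
and form field names below are hypothetical:

from astroquery.query import BaseQuery

class ExampleArchiveQuery(BaseQuery):
    # _activate_form as defined above would live on this (hypothetical) class

    def search(self, target):
        # fetch the search page, then submit its first form with two fields overridden
        response = self._request('GET', 'https://archive.example.org/search', cache=False)
        return self._activate_form(response, form_index=0,
                                    inputs={'target': target, 'radius': '0.5'},
                                    cache=False)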
Exemple #60
0
def _single_orbit_find_actions(orbit, N_max, toy_potential=None,
                               force_harmonic_oscillator=False):
    """
    Find approximate actions and angles for samples of a phase-space orbit,
    `w`, at times `t`. Uses toy potentials with known, analytic action-angle
    transformations to approximate the true coordinates as a Fourier sum.

    This code is adapted from Jason Sanders'
    `genfunc <https://github.com/jlsanders/genfunc>`_

    .. todo::

        Wrong shape for w -- should be (6,n) as usual...

    Parameters
    ----------
    orbit : `~gala.dynamics.Orbit`
    N_max : int
        Maximum integer Fourier mode vector length, |n|.
    toy_potential : Potential (optional)
        Fix the toy potential class.
    force_harmonic_oscillator : bool (optional)
        Force using the harmonic oscillator potential as the toy potential.
    """

    if orbit.norbits > 1:
        raise ValueError("must be a single orbit")

    if toy_potential is None:
        toy_potential = fit_toy_potential(orbit, force_harmonic_oscillator=force_harmonic_oscillator)

    else:
        logger.debug("Using *fixed* toy potential: {}".format(toy_potential.parameters))

    if isinstance(toy_potential, IsochronePotential):
        orbit_align = orbit.align_circulation_with_z()
        w = orbit_align.w()

        dxyz = (1,2,2)
        circ = np.sign(w[0,0]*w[4,0]-w[1,0]*w[3,0])
        sign = np.array([1.,circ,1.])
        orbit = orbit_align
    elif isinstance(toy_potential, HarmonicOscillatorPotential):
        dxyz = (2,2,2)
        sign = 1.
        w = orbit.w()
    else:
        raise ValueError("Invalid toy potential.")

    t = orbit.t.value

    # Now find toy actions and angles
    aaf = toy_potential.action_angle(orbit)

    if aaf[0].ndim > 2:
        aa = np.vstack((aaf[0].value[...,0], aaf[1].value[...,0]))
    else:
        aa = np.vstack((aaf[0].value, aaf[1].value))

    if np.any(np.isnan(aa)):
        ix = ~np.any(np.isnan(aa),axis=0)
        aa = aa[:,ix]
        t = t[ix]
        warnings.warn("NaN value in toy actions or angles!")
        if sum(ix) > 1:
            raise ValueError("Too many NaN value in toy actions or angles!")

    t1 = time.time()
    A,b,nvecs = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2])
    actions = np.array(solve(A,b))
    logger.debug("Action solution found for N_max={}, size {} symmetric"
                 " matrix in {} seconds"
                 .format(N_max,len(actions),time.time()-t1))

    t1 = time.time()
    A,b,nvecs = _angle_prepare(aa, t, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2], sign=sign)
    angles = np.array(solve(A,b))
    logger.debug("Angle solution found for N_max={}, size {} symmetric"
                 " matrix in {} seconds"
                 .format(N_max,len(angles),time.time()-t1))

    # Just some checks
    if len(angles) > len(aa):
        warnings.warn("More unknowns than equations!")

    J = actions[:3]  # * sign
    theta = angles[:3]
    freqs = angles[3:6]  # * sign

    return dict(actions=J*aaf[0].unit, angles=theta*aaf[1].unit, freqs=freqs*aaf[2].unit,
                Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs)
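A hedged usage sketch, assuming the standard gala API for building and integrating an
orbit (the potential, initial conditions, and N_max below are illustrative):

import astropy.units as u
import gala.potential as gp
import gala.dynamics as gd
from gala.units import galactic

pot = gp.IsochronePotential(m=1e11 * u.Msun, b=5. * u.kpc, units=galactic)
w0 = gd.PhaseSpacePosition(pos=[8., 0., 0.2] * u.kpc, vel=[0., 200., 10.] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=0.5, n_steps=20000)
aaf = _single_orbit_find_actions(orbit, N_max=6)
print(aaf['actions'], aaf['freqs'])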