Example #1
def proj_ci(id=None, sigma=1, verbose=True):
    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id
    get_proj().sigma = sigma
    proj(id=idval)
    res = get_proj_results()
    parnames = np.array(res.parnames)
    npars = parnames.size
    fits = np.array(res.parvals)
    lower = np.array(res.parmins)
    upper = np.array(res.parmaxes)
    out = np.zeros((npars, 3))
    out[:, 0] = fits
    out[:, 1] = fits + lower   # parmins are negative offsets from the best fit
    out[:, 2] = fits + upper
    if sigma == 1:
        percent = '68%'
    elif sigma == 2:
        percent = '95%'
    else:
        # avoid an unbound name for other sigma values
        percent = '{}-sigma'.format(sigma)
    if verbose:
        print('projection ' + percent + ' ci')
        hfmt = '%-12s %12s %12s %12s \n'
        s = hfmt % ('Param', 'Best-Fit', 'Lower Bound', 'Upper Bound')
        s += hfmt % ('-' * 5, '-' * 8, '-' * 11, '-' * 11)
        for name, val, lo, hi in zip(parnames, out[:, 0],
                                     out[:, 1], out[:, 2]):
            s += '%-12s %12g %12g %12g \n' % (name, val, lo, hi)
        print(s)
    result = {}
    for i in range(npars):
        result[parnames[i]] = out[i, [1, 2]]
    return result
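A minimal usage sketch (hedged: the PHA file and model instance names are hypothetical, and it assumes a session where `np`, `ui`, and the Sherpa UI routines `proj`, `get_proj`, and `get_proj_results` used above are in scope):

from sherpa.astro import ui
from sherpa.astro.ui import proj, get_proj, get_proj_results
import numpy as np

ui.load_pha("src.pi")            # hypothetical PHA file
ui.set_source(ui.powlaw1d.p1)
ui.fit()
ci = proj_ci(sigma=2)            # 95% projection bounds
lo, hi = ci["p1.gamma"]          # lower/upper bound for one parameter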
Example #2
def get_draws(id=None, niter=1000, df=4, verbose=True, normalize=True, file=None, params=None):
    """Calculates and returns the draws.

    If params is None then use the fit and covariance values for the
    given dataset (id parameter), otherwise use the information stored
    in params (which should be the return value of get_parameter_info).

    If normalize=True then the on-screen output (when verbose is True)
    and file output (when file is not None) are normalized by the
    best-fit values (this is for the statistic and parameter values).
    """

    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id
    if params is None:
        pinfo = get_parameter_info(id=idval)
    else:
        pinfo = params

    return mh.mht(
        pinfo["parnames"],
        pinfo["parvals"],
        pinfo["covar"],
        id=idval,
        niter=niter,
        df=df,
        normalize=normalize,
        verbose=verbose,
        file=file,
    )
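A hedged usage sketch: `get_draws` requires a completed Cash-statistic fit (see `get_parameter_info` later in this listing), so a session might look like this, with a hypothetical file name:

ui.load_pha("src.pi")                    # hypothetical file
ui.set_source(ui.xsphabs.gal * ui.powlaw1d.pl)
ui.set_stat("cash")
ui.fit()
ui.covariance()
draws = get_draws(niter=5000)
print("acceptance fraction:", draws["accept"][1:].mean())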
Example #3
def projection_ci(id=None, verbose=True):
    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id

    projection(id=idval)
    res = get_proj_results()
    parnames = np.array(res.parnames)   # needed for the verbose output
    mode = np.array(res.parvals)
    low = np.array(res.parmins)
    hi = np.array(res.parmaxes)
    npars = len(hi)
    out = np.zeros((npars, 3))
    out[:, 0] = mode + low
    out[:, 1] = mode
    out[:, 2] = mode + hi
    if verbose:
        print('projection ci')
        hfmt = '%-12s %12s %12s %12s \n'
        s = hfmt % ('Param', 'Best-Fit', 'Lower Bound', 'Upper Bound')
        s += hfmt % ('-' * 5, '-' * 8, '-' * 11, '-' * 11)
        # note the column order differs from proj_ci: lower, best-fit, upper
        for name, lower, val, upper in zip(parnames, out[:, 0],
                                           out[:, 1], out[:, 2]):
            s += '%-12s %12g %12g %12g \n' % (name, val, lower, upper)
        print(s)
    return out
Example #4
    def __init__(self, id=None, fluxtype="photon"):
        "If id is None the default id will be used."

        if id is None:
            self.id = ui.get_default_id()
        else:
            self.id = id

        if fluxtype in self._valid_fluxtypes:
            self.fluxtype = fluxtype
        else:
            emsg = "fluxtype set to {} but must be one of: {}".format(
                fluxtype, " ".join(self._valid_fluxtypes))
            raise ValueError(emsg)

        # Set up the xlo/xhi/xmid arrays
        d = ui.get_data(self.id)
        self._calc_bins(d)
        self._apply_mask(d)

        # Important to use get_source and not get_model as we do not
        # want to apply any instrument model to the evaluation.
        #
        # Note that we do not hold onto the model expression object,
        # which is probably not an issue here.
        #
        mdl = ui.get_source(self.id)
        self.modelexpr = mdl.name

        # We do not use xlo/xhi but the _xlo/_xhi attributes, which
        # contain an extra bin to support XSPEC models.
        #
        src = mdl(self._xlo, self._xhi)[:-1]
        if np.any(src < 0.0):
            emsg = "There are negative values in your source " + \
                "model (id={0})!".format(self.id)
            raise RuntimeError(emsg)
        if np.all(src <= 0.0):
            emsg = "The source model for id={0} ".format(self.id) + \
                "evaluates to 0!"
            raise RuntimeError(emsg)

        # Conversion to a single datatype is a bit excessive here.
        #
        dtype = src.dtype

        if self.fluxtype == "erg":
            norm = _charge_e * np.sum(src * self.xmid)
        else:
            norm = np.sum(src)

        self.weight = src / norm

        self.weight = self.weight.astype(dtype)
        self.xlo = self.xlo.astype(dtype)
        self.xhi = self.xhi.astype(dtype)
        self.xmid = self.xmid.astype(dtype)
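For clarity, here is a standalone sketch of the normalization step the constructor performs. The numeric value of `_charge_e` is an assumption (the keV-to-erg conversion factor) and the flux values are made up:

import numpy as np

_charge_e = 1.602176634e-9   # erg per keV (assumed value of the module constant)

def instmap_weights(src, xmid, fluxtype="photon"):
    # src: model flux per bin; xmid: bin mid-point energies in keV
    if fluxtype == "erg":
        norm = _charge_e * np.sum(src * xmid)   # total energy flux (erg)
    else:
        norm = np.sum(src)                      # total photon flux
    return src / norm

w = instmap_weights(np.array([4.0, 3.0, 2.0, 1.0]),
                    np.array([0.55, 0.65, 0.75, 0.85]))
assert np.isclose(w.sum(), 1.0)   # photon weights sum to 1; erg weights do not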
Example #5
def get_instmap_weights(id=None, fluxtype="photon"):
    """Returns the weights information for use by mkinstmap.

    Parameters
    ----------
    id : int or string
        If id is None then the default Sherpa id is used. This
        dataset must have a grid and source model defined.
    fluxtype : 'photon' or 'erg'
        The units of the instrument map are
        cm^2 count / ``fluxtype``.

    Return
    ------
    weights
        A weights object. When ``fluxtype="photon"`` the
        weights will sum to 1.

    See Also
    --------
    estimate_weighted_expmap
    plot_instmap_weights
    save_instmap_weights

    Notes
    -----
    An error will be thrown if the model evaluates to
    a negative value, or there is no flux.

    This is intended for use with a dataset "faked" using::

        dataspace1d(elow, ehigh, estep)
        set_source(...)

    although there is an attempt to support DataPHA objects
    (either for spectra that have been loaded in or "faked"
    using ``dataspace1d``, specifying the data type explicitly).
    """

    if id is None:
        id = ui.get_default_id()

    # Since sherpa.astro.data.DataPHA is a subclass of
    # sherpa.data.Data1DInt we need to check for it first.
    #
    d = ui.get_data(id)
    if isinstance(d, DataPHA):
        return InstMapWeightsPHA(id, fluxtype=fluxtype)
    elif isinstance(d, Data1DInt):
        return InstMapWeights1DInt(id, fluxtype=fluxtype)
    else:
        emsg = "Unable to calculate weights from a dataset " + \
            "of type {0}.{1}".format(d.__class__.__module__,
                                     d.__class__.__name__)
        raise RuntimeError(emsg)
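A hedged usage sketch following the "faked" dataset pattern from the Notes:

from sherpa.astro import ui

ui.dataspace1d(0.3, 8.0, 0.1)
ui.set_source(ui.xsphabs.gal * ui.powlaw1d.pl)
wgts = get_instmap_weights()                # default id, fluxtype="photon"
print(wgts.modelexpr, wgts.weight.sum())    # photon weights sum to 1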
Example #6
def mht(parnames, mu, sigma, niter=1000, id=None,
        file=None, verbose=True, normalize=True,
        draw=draw_t, accept=accept_tcash, **kwargs):
    """Metropolis-Hastings.

    The default proposal distribution is the t distribution, and the
    statistic is assumed to be the Cash statistic.

    The kwargs are passed through to the draw and accept routines.

    The draw routine is used to create a new proposal.
    The accept routine is used to determine whether to accept the proposal.

    If verbose is True then the iteration results are printed to STDOUT
    after each iteration. If file is not None then the iteration results
    are printed to the given file each iteration. If normalize is True
    then the displayed results (whether to STDOUT or file) are relative
    to the best-fit values rather than absolute ones (so the values for
    the xpos parameter are written out as xpos-xpos_0 where xpos_0 is
    the value from the input mu argument). This also holds for the
    statistic value (so the results are statistic-statistic_0). The
    reason for normalize is to try to avoid loss of information
    without having to display numbers to 15 decimal places.
    """

    # Should we just change to cash here instead of throwing an error?
    #
    if ui.get_stat_name() != "cash":
        raise RuntimeError("Statistic must be cash, not %s" % ui.get_stat_name())

    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id

    # Output storage
    #
    nelem = niter + 1
    npars = mu.size
    if npars != len(parnames):
        raise RuntimeError("mu.size = %d  len(parnames) = %d!" % (npars, len(parnames)))

    params = np.zeros((nelem, npars))
    stats = np.zeros(nelem)
    alphas = np.zeros(nelem)

    # Using a bool is technically nicer, but stick with an int8 here for
    # easier processing of the output.
    #
    acceptflag = np.zeros(nelem, dtype=np.int8)

    params[0] = mu.copy()
    current = mu.copy()
    alphas[0] = 0

    _set_par_vals(parnames, current)
    stats[0] = ui.calc_stat(id=idval)

    if normalize:
        outstr = "# iteration accept d_statistic %s" % " d_".join(parnames)
    else:
        outstr = "# iteration accept statistic %s" % " ".join(parnames)
    if verbose:
        print(outstr)
    if file is not None:
        fout = open(file, "w")
        fout.write(outstr)
        fout.write("\n")

    def draw_to_string(idx):
        "Return the given draw as a string for display/output"
        if normalize:
            pstr = " ".join(["%g" % (v - v0)
                             for (v, v0) in zip(params[idx], params[0])])
            return "%-6d %1d %g %s" % (idx, acceptflag[idx],
                                       stats[idx] - stats[0], pstr)
        pstr = " ".join(["%g" % v for v in params[idx]])
        return "%-6d %1d %g %s" % (idx, acceptflag[idx], stats[idx], pstr)

    # Iterations
    # - no burn in at present
    # - the 0th element of the params array is the input value
    # - we loop until all parameters are within the allowable
    #   range; should there be some check to ensure we are not
    #   rejecting a huge number of proposals, which would indicate
    #   that the limits need increasing or very low s/n data?
    #
    for i in range(1, nelem):

        current = params[i - 1]

        # Create a proposal and set the parameter values. If any lie
        # outside the allowed range then catch this (ParameterErr)
        # and create a new proposal.
        #
        while True:
            try:
                proposal = draw(mu, current, sigma, **kwargs)
                _set_par_vals(parnames, proposal)
                break
            except ParameterErr:
                pass

        # Do we accept this proposal?
        #
        stat_temp = ui.calc_stat(id=idval)
        alphas[i] = np.exp(-0.5 * stat_temp + dmvt(current, mu, sigma, 4)
                           + 0.5 * stats[i - 1] - dmvt(proposal, mu, sigma, 4))

        if accept(current, stats[i - 1], proposal, stat_temp,
                  mu, sigma, **kwargs):
            params[i] = proposal.copy()
            stats[i] = stat_temp
            acceptflag[i] = 1
        else:
            params[i] = params[i - 1]
            stats[i] = stats[i - 1]
            acceptflag[i] = 0

        # Report this iteration
        outstr = draw_to_string(i)
        if verbose:
            print(outstr)
        if file is not None:
            fout.write(outstr)
            fout.write("\n")

    if file is not None:
        fout.close()
        print("Created: %s" % file)

    # Return a dictionary containing the draws
    #
    out = {"parnames": parnames, "statistic": stats, "accept": acceptflag,
           "alphas": alphas, "iteration": np.arange(0, nelem, 1)}
    for (idx, name) in enumerate(parnames):
        if name in out:
            raise RuntimeError("Unexpected name clash: parameter '%s'" % name)
        out[name] = params[:, idx]
    return out
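For intuition, a simplified, Sherpa-free sketch of the Metropolis acceptance rule for a Cash statistic (which is -2 log L). This is the symmetric-proposal special case; `accept_tcash` above also folds in the t proposal-density terms, as the `alphas` computation shows:

import numpy as np

rng = np.random.default_rng(0)

def accept_cash(stat_old, stat_new, rng):
    # Likelihood ratio of proposal to current point for a -2 log L statistic.
    alpha = np.exp(-0.5 * (stat_new - stat_old))
    return rng.uniform() < min(1.0, alpha)

print(accept_cash(100.0, 98.0, rng))    # always True: the proposal is better
print(accept_cash(100.0, 120.0, rng))   # True with probability exp(-10) ~ 4.5e-5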
Example #7
def calc_profile(model_image_fn,
                 id=None,
                 rmin=None,
                 rmax=None,
                 rstep=None,
                 rlo=None,
                 rhi=None,
                 model=None,
                 xpos=None,
                 ypos=None,
                 ellip=None,
                 theta=None,
                 grouptype=None):
    """Calculate the data needed to create the desired radial or
    elliptical profile.

    model_image_fn should be None or one of get_source_image or
    get_model_image, depending on whether you want the source or model image to
    be used for calculating properties.

    If grouptype is not None then it should be a tuple
        (method name, parameter value)
    where the supported values are
        ("counts", minimum-counts-per-bin)
        ("snr", minimum-snr-per-bin)

    """

    # Throw an error if input is invalid.
    #
    if id is None:
        id = ui.get_default_id()

    data = ui.get_data(id)
    if not isinstance(data, sherpa.astro.data.DataIMG):
        raise TypeError("data set {0} does not contain 2-D data".format(id))

    # Access model information
    #
    if model is None:
        model = _find_model(id)

    # What profile parameters do we use?
    #
    (xpos, ypos, ellip,
     theta) = _process_profile_details(id, xpos, ypos, ellip, theta, model)
    ellipflag = ellip > 0.0

    # Calculate the separation of each pixel from the center
    #
    if ellipflag:
        dr2 = _calculate_distances2(data, xpos, ypos, ellip=ellip, theta=theta)
    else:
        dr2 = _calculate_distances2(data, xpos, ypos)

    # Filter out "bad" points, but only for the evaluation of min/max.
    # - this could be doine in _calculate_distances2 as it would save some
    #   time, but would need to make sure the filtering was done the
    #   same as with the data/models
    #
    mask = data.mask
    if isinstance(mask, (bool, np.bool_)):
        if mask:
            dr2mask = dr2
        else:
            raise ValueError(
                "All data appears to have been filtered out for dataset id={0}"
                .format(id))

    else:
        if not np.any(mask):
            raise ValueError(
                "All data appears to have been filtered out for dataset id={0}"
                .format(id))

        dr2mask = dr2[mask]

    drmin = np.sqrt(dr2mask.min())
    drmax = np.sqrt(dr2mask.max())
    dr2mask = None

    (pixsize, pixarea) = _calc_pixel_size(data.x0, data.x1)
    drmin = _normalize_pixel(drmin, pixsize, False)
    drmax = _normalize_pixel(drmax, pixsize, True)

    # Calculate the binning (before grouping)
    #
    if rlo is not None:
        if (np.diff(rlo) <= 0).any():
            raise ValueError("rlo array must be in ascending order")
        if rhi is None:
            bins_lo = np.asarray(rlo[:-1])
            bins_hi = np.asarray(rlo[1:])
        else:
            if len(rlo) != len(rhi):
                raise ValueError(
                    "rlo and rhi arrays must have the same length: rlo has {0} and rhi has {1} elements"
                    .format(len(rlo), len(rhi)))

            if not (np.diff(rhi) > 0).all():
                raise ValueError("rhi array must be in ascending order")

            bins_lo = np.asarray(rlo)
            bins_hi = np.asarray(rhi)
            if ((bins_hi - bins_lo) <= 0).any():
                raise ValueError(
                    "Have a rhi element <= the corresponding rlo element")
    else:
        if rstep is None:
            rstep = pixsize

        (bins_lo, bins_hi) = _get_bin_edges(rmin, rmax, rstep, drmin, drmax)

    if model_image_fn is None:
        mdata = None
    else:
        mdata = model_image_fn(id)

    rprof = _calc_radial_profile(data,
                                 mdata,
                                 dr2,
                                 bins_lo,
                                 bins_hi,
                                 pixarea,
                                 grouptype=grouptype)

    # Mixing presentational and data concerns here, which is not ideal
    #
    rprof["coord"] = ui.get_coord(id)
    rprof["id"] = id
    rprof["datafile"] = data.name

    if ellipflag:
        rprof["xlabel"] = "Major axis ({0} pixel)".format(rprof["coord"])
    else:
        rprof["xlabel"] = "Radius ({0} pixel)".format(rprof["coord"])

    rprof["xpos"] = xpos
    rprof["ypos"] = ypos
    rprof["labels"] = [
        _set_subscript('x', '0', xpos),
        _set_subscript('y', '0', ypos)
    ]

    if ellipflag:
        rprof["ellip"] = ellip
        rprof["theta"] = theta
        rprof['labels'].extend(
            [_set_symbol(r'\epsilon', ellip),
             _set_symbol(r'\theta', theta)])

    return rprof
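A hedged usage sketch (the image file and model names are hypothetical):

from sherpa.astro import ui

ui.load_image("img.fits")                      # hypothetical image file
ui.set_source(ui.beta2d.clus)
ui.fit()
prof = calc_profile(ui.get_model_image,        # profile of the model image
                    rstep=2,
                    grouptype=("counts", 20))  # at least 20 counts per bin
print(prof["xlabel"], prof["datafile"])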
Example #8
def get_parameter_info(id=None):
    """Returns the parameter information needed for calling mht.
    
    This routine will call covariance() if needed, but not
    fit().

    For now only works with a single dataset, and requires
    the Cash statistic.
    """

    if id is None:
        idval = ui.get_default_id()
    else:
        idval = id

    # Employ a lot of safety checks
    #
    fr = ui.get_fit_results()
    if fr is None:
        raise RuntimeError("No fit results available!")
    if len(fr.datasets) != 1:
        raise RuntimeError("Fit is for multiple datasets (%s) which we do not support!" % fr.datasets)
    if fr.datasets[0] != idval:
        raise RuntimeError("Fit results are for dataset %s, not %s" % (fr.datasets[0], idval))
    if fr.statname != "cash":
        raise RuntimeError("Fit was run using statistic=%s rather than cash!" % fr.statname)
    if not fr.succeeded:
        # Should use standard sherpa logging
        print("Warning: fit to dataset %s did not complete successfully:\n%s" % (idval, fr.message))

    cr = ui.get_covar_results()
    if cr is None or len(cr.datasets) != 1 or cr.datasets[0] != idval:
        # Should use standard sherpa logging
        print("Running covariance for dataset %s" % idval)
        ui.covariance(idval)
        cr = ui.get_covar_results()

    if cr.statname != "cash":
        raise RuntimeError("Covariance was run using statistic=%s rather than cash!" % cr.statname)

    if len(fr.parnames) != len(cr.parnames):
        raise RuntimeError("Number of parameters used in fit and covariance analysis do not agree!\n  fit=%s\n  covar=%s\n" % (fr.parnames, cr.parnames))
    for (p1, p2) in zip(fr.parnames, cr.parnames):
        if p1 != p2:
            raise RuntimeError("Order of fit/covariance parameters does not match: %s vs %s" % (p1, p2))
    for (pname, v1, v2) in zip(fr.parnames, fr.parvals, cr.parvals):
        if v1 != v2:
            raise RuntimeError("Value of fit/covariance parameters does not match for parameter %s: %g vs %g" % (pname, v1, v2))

    if not hasattr(cr, "extra_output") or cr.extra_output is None:
        raise RuntimeError("get_covar_results has no .extra_output or it is None; is this CIAOX?")

    # Store the information; we explicitly copy all items to avoid
    # problems if fit/covariance are run again. This is done by
    # converting all tuples to numpy arrays, even for strings, which
    # is more than is strictly needed.
    #
    out = {
        "dataset": idval,
        "npars": len(fr.parnames),
        "parnames": np.asarray(fr.parnames),
        "parvals": np.asarray(fr.parvals),
        "parmins": np.asarray(cr.parmins),
        "parmaxes": np.asarray(cr.parmaxes),
        "sigma": cr.sigma,
        "covar": cr.extra_output.copy(),
        "statval": fr.statval,
    }
    return out
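The returned dictionary feeds directly into `mht`; a hedged sketch of the hand-off (assuming `mh` is the module holding `mht`, as in `get_draws` above):

ui.set_stat("cash")
ui.fit()
pinfo = get_parameter_info()
# pinfo["covar"] is the (npars, npars) matrix used as the proposal scale
draws = mh.mht(pinfo["parnames"], pinfo["parvals"], pinfo["covar"],
               niter=1000)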
Example #9
def _get_chart_spectrum(id=None,
                        elow=None,
                        ehigh=None,
                        ewidth=None,
                        norm=None):
    """Helper routine for *_chart_spectrum."""

    # What source expression are we using?
    # get_model/source will throw an IdentifierErr if the expression
    # is not defined; we do not, at present catch/re-throw this
    #
    if id is None:
        id = s.get_default_id()

    mdl = s.get_source(id)

    # What energy grid to use?  Since we do not want to restrict users
    # to only using PHA datasets (i.e. if I just want to create
    # something simple) then we have to look for a range of errors
    # from get_arf
    #
    if elow is None or ehigh is None or ewidth is None:
        try:
            arf = s.get_arf(id)
        except (IdentifierErr, ArgumentErr):
            # a) PHA dataset, no ARF
            # b) Assume this means the dataset is not derived from the
            #    PHA class
            arf = None

        if arf is None:
            emsg = "No ARF found for dataset {} ".format(repr(id)) + \
                "so unable to create energy grid"
            raise TypeError(emsg)

        if elow is None:
            elow = arf.energ_lo[0]
        if ehigh is None:
            ehigh = arf.energ_hi[-1]
        if ewidth is None:
            # Assume constant grid spacing in the ARF
            de = arf.energ_hi[-1] - arf.energ_lo[0]
            nelem = np.size(arf.energ_lo)
            ewidth = de * 1.0 / nelem

    if elow >= ehigh:
        emsg = "elow is >= ehigh: " + \
            "elow={}  ehigh={}".format(elow, ehigh)
        raise TypeError(emsg)
    if ewidth <= 0.0:
        raise TypeError("ewidth is <= 0.0: ewidth={0}".format(ewidth))

    # The following is wasteful if we have an ARF and the user
    # supplies no elow, ehigh, or ewidth arguments.
    #
    # Should I check that nbins is a sensible number (e.g. >= 2)?
    #
    nbins = int(np.rint((ehigh - elow) / ewidth)) + 1
    erange = elow + ewidth * np.arange(nbins)
    elo = erange[:-1]
    ehi = erange[1:]

    flux = mdl(elo, ehi)
    emid = 0.5 * (ehi + elo)

    # do we want to renormalize?
    if norm is not None:
        flux *= norm
    return {
        "x": emid,
        "xlo": elo,
        "xhi": ehi,
        "y": flux,
        "id": id,
        "model": mdl.name
    }
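A quick worked example of the grid construction above, with made-up values:

import numpy as np

elow, ehigh, ewidth = 0.5, 1.0, 0.1
nbins = int(np.rint((ehigh - elow) / ewidth)) + 1   # 6 bin edges
erange = elow + ewidth * np.arange(nbins)           # [0.5 0.6 0.7 0.8 0.9 1.0]
elo, ehi = erange[:-1], erange[1:]                  # 5 bins
emid = 0.5 * (elo + ehi)                            # mid-points for plotting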
Example #10
def estimate_weighted_expmap(id=None,
                             arf=None,
                             elo=None,
                             ehi=None,
                             specresp=None,
                             fluxtype="photon",
                             par=None,
                             pvals=None):
    """Estimate the weighted exposure map value for an ARF.

    Parameters
    ----------
    id : int, string, or None
        The Sherpa dataset to use. If ``None`` then the default
        dataset is used.
    arf : string, TABLECrate, or None
        The ARF to use. It must contain the following columns:
        ``energ_lo``, ``energ_hi``, and ``specresp``.
    elo, ehi, specresp : array of numbers or None
        The ARF, where the bin edges are in keV and the response is
        in cm^2. These are only checked if arf is None, in which
        case all three must be given and have the same size
        (one dimensional).
    fluxtype : 'photon' or 'erg'
        The units of the exposure map are
        cm^2 count / ``fluxtype``. The default is ``photon``.
    par : Sherpa parameter object or None
        If not given then the exposure map is calculated at the
        current parameter settings. If given, it is the Sherpa
        parameter to loop over, using pvals (which must be set).
    pvals : array of numbers or None
        If par is set, calculate the exposure map at the current
        parameter settings whilst setting the par parameter to
        each of the values in pvals. The parameter value is reset
        to its original value when the routine exits.

    Return
    ------
    expmap : scalar or array of numbers
        When par is None then a scalar, otherwise an array the same
        size as pvals.

    See Also
    --------
    get_instmap_weights
    plot_instmap_weights
    save_instmap_weights

    Notes
    -----
    The ARF is interpolated onto the energy grid of the dataspace.

    Examples
    --------

    Calculate the exposure map over the range gamma = 0.5 to 5,
    with 0.1 step increments, for an absorbed power-law model and
    with the ARF in the file "arf.fits".

    >>> dataspace1d(0.3, 8.0, 0.1)
    >>> set_source(xsphabs.gal * powlaw1d.pl)
    >>> gal.nh = 0.087
    >>> pl.gamma = 1.2
    >>> gvals = np.arange(0.5,5,0.1)
    >>> evals = estimate_weighted_expmap(arf="arf.fits", par=pl.gamma,
    ...                                  pvals=gvals)

    """

    # Usage errors. We can not catch them all before doing actual
    # work.
    #
    if arf is None and \
       (elo is not None or ehi is not None or specresp is not None):
        # we only worry about elo/ehi/specresp if arf is NOT given
        if elo is None or ehi is None or specresp is None:
            emsg = "Missing one or more of elo, ehi, and specresp."
            raise TypeError(emsg)

    if id is None:
        id = ui.get_default_id()

    if par is not None or pvals is not None:

        if par is None or pvals is None:
            emsg = "Either both par and pvals are set or they " + \
                "are both None."
            raise TypeError(emsg)

        if not isinstance(par, Parameter):
            emsg = "par argument must be a Sherpa model parameter."
            raise TypeError(emsg)

        if not hasattr(pvals, "__iter__"):
            emsg = "pvals argument must be an iterable (array/list)."
            raise TypeError(emsg)

        smdl = ui.get_source(id)
        if par not in smdl.pars:
            emsg = "par argument is not a parameter of the " + \
                "source model"
            raise TypeError(emsg)

    wgts = get_instmap_weights(id, fluxtype=fluxtype)
    if isinstance(wgts, InstMapWeights1DInt) and arf is None and \
            elo is None:
        emsg = "The arf parameter or the elo,ehi,specresp " + \
            "parameters must be given."
        raise TypeError(emsg)

    if arf is None:
        if elo is None:
            args = []
        else:
            args = [elo, ehi, specresp]
    else:
        args = [arf]

    if par is None:
        return wgts.estimate_expmap(*args)

    else:
        # Ugh: we have to create an object for each evaluation, which
        # is rather wasteful.
        #
        orig = par.val
        out = []
        try:
            for pval in pvals:
                par.val = pval
                wgts = get_instmap_weights(id, fluxtype=fluxtype)
                out.append(wgts.estimate_expmap(*args))

        finally:
            par.val = orig

        return np.asarray(out)
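A hedged sketch of the array-based calling convention (all values made up; a grid and source model must already be defined, as in the docstring example):

import numpy as np
from sherpa.astro import ui

ui.dataspace1d(0.3, 8.0, 0.1)
ui.set_source(ui.xsphabs.gal * ui.powlaw1d.pl)
elo = np.arange(0.3, 8.0, 0.01)
ehi = elo + 0.01
specresp = np.full(elo.size, 450.0)    # flat 450 cm^2 effective area
expmap = estimate_weighted_expmap(elo=elo, ehi=ehi, specresp=specresp)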
Example #11
def plot_instmap_weights(id=None,
                         fluxtype="photon",
                         overplot=False,
                         clearwindow=True,
                         **kwargs):
    """Plot the weights values.

    Parameters
    ----------
    id : int, string, or None
        The Sherpa dataset to use. If ``None`` then the default
        dataset is used.
    fluxtype : 'photon' or 'erg'
        The units of the instrument map are
        cm^2 count / ``fluxtype``. The default is ``photon``.
    overplot : bool, optional
        If ``True`` then the data is added to the current plot,
        otherwise a new plot is created.
    clearwindow : bool, optional
        If ``True`` then clear out the current plot area of all
        existing plots. This is not used if ``overplot`` is set.
    **kwargs
        Override the histogram plot preferences

    See Also
    --------
    estimate_weighted_expmap
    get_instmap_weights
    save_instmap_weights

    Notes
    -----
    The data plot preferences are used to control the
    appearance of the plot: at present the following fields
    are used::

        xlog
        ylog
        color

    Examples
    --------

    Show the weights for an absorbed powerlaw.

    >>> dataspace1d(0.5, 7.0, 0.1)
    >>> set_source(xsphabs.gal * xspowerlaw.pl)
    >>> gal.nh = 0.12
    >>> pl.phoindex = 1.7
    >>> plot_instmap_weights()

    Change the model to an absorbed APEC model and overplot it.

    >>> set_source(gal * xsapec.gal)
    >>> gal.kt = 1.2
    >>> plot_instmap_weights(overplot=True)

    Compare the weights when using the photon and erg weighting
    schemes (the normalization is significantly different).

    >>> plot_instmap_weights()
    >>> plot_instmap_weights(fluxtype='erg')

    Plot the weights with a log scale on the y axis, and then overplot
    the weights using erg weighting and drawn with a dotted line:

    >>> plot_instmap_weights(ylog=True)
    >>> plot_instmap_weights(fluxtype='erg', overplot=True, linestyle='dotted')

    """

    if id is None:
        id = ui.get_default_id()

    wgts = get_instmap_weights(id, fluxtype=fluxtype)
    wgts.plot(overplot=overplot, clearwindow=clearwindow, **kwargs)
Example #12
def save_instmap_weights(*args, **kwargs):
    """Save a weights file in a format usable by mkinstmap.

    This routine does not have a typical Python interface, in that
    up to three arguments are positional, although they can be
    explicitly named. When used positionally, the following
    orders are allowed::

        filename
        id, filename
        filename, clobber
        id, filename, clobber

    Parameters
    ----------
    id : int, string, or None
        The Sherpa dataset to use. If ``None`` then the default
        dataset is used.
    filename : string
        The name of the file to create.
    fluxtype : 'photon' or 'erg'
        The units of the instrument map are
        cm^2 count / ``fluxtype``. The default is ``photon``.
    clobber : bool, optional
        If the output file already exists, should it be deleted
        (``True``) or an IOError raised? The default is ``True``.

    See Also
    --------
    estimate_weighted_expmap
    get_instmap_weights
    plot_instmap_weights

    Notes
    -----
    The output file contains a header (comment character is '#')
    containing some metadata, and then two columns of data, the
    mid-point of the bin and the weight value for the bin. It is
    compatible with the CIAO ASCII file support (use with ``[opt
    colname=first]``).

    Examples
    --------

    Save the weights to the file wgt.dat, erroring out if the file
    already exists. The weights are written out so as to create an
    instrument map with units of cm^2 count / erg.

    >>> dataspace1d(0.5, 7.0, 0.1)
    >>> set_source(xsphabs.gal * xspowerlaw.pl)
    >>> gal.nh = 0.12
    >>> pl.phoindex = 1.7
    >>> save_instmap_weights("wgt.dat", fluxtype="erg", clobber=False)

    """

    fname = "save_instmap_weights"
    nargs = len(args)
    if nargs == 0:
        emsg = "{}() takes at least 1 argument ".format(fname) + \
            "(0 given)"
        raise TypeError(emsg)

    if nargs > 3:
        emsg = "{}() takes at most 3 arguments ".format(fname) + \
            "({} given)".format(nargs)
        raise TypeError(emsg)

    # The default values
    user = {
        "id": ui.get_default_id(),
        "filename": None,
        "clobber": True,
        "fluxtype": "photon"
    }
    argnames = user.keys()

    if nargs == 1:
        user["filename"] = args[0]

    elif nargs == 3:
        user["id"] = args[0]
        user["filename"] = args[1]
        user["clobber"] = args[2]

    elif _is_boolean(args[1]):
        user["filename"] = args[0]
        user["clobber"] = args[1]

    elif isinstance(args[1], int):
        # This was needed in CIAO 4.2 and earlier since S-Lang would
        # end up using integers for boolean values. Left in in CIAO
        # 4.3 in case there is any old code or documentation relying
        # on this. Note that Python treats non-zero integers as True
        # so this is perhaps not needed (and slightly harmful, as
        # clobber=2 will not map to True).
        #
        user["filename"] = args[0]
        user["clobber"] = args[1] == 1

    else:
        user["id"] = args[0]
        user["filename"] = args[1]

    for (n, v) in kwargs.items():
        if n not in argnames:
            emsg = "{}() got an unexpected ".format(fname) + \
                "keyword argument '{}'".format(n)
            raise TypeError(emsg)
        user[n] = v

    wgts = get_instmap_weights(user["id"], fluxtype=user["fluxtype"])
    wgts.save(user["filename"], clobber=user["clobber"])
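A sketch of the call patterns the docstring allows (the id and file name are hypothetical):

save_instmap_weights("wgt.dat")             # filename
save_instmap_weights(2, "wgt.dat")          # id, filename
save_instmap_weights("wgt.dat", False)      # filename, clobber
save_instmap_weights(2, "wgt.dat", False)   # id, filename, clobber
save_instmap_weights("wgt.dat", fluxtype="erg", clobber=True)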
Example #13
def mainloop(mymodel, fwhm, id=None, maxiter=5, mindist=0.,
             smoothwindow='flat', sigma_threshold=2., do_plots=0):
    # smoothwindow and sigma_threshold were free (undefined) names in the
    # original; they are exposed here as keyword arguments with assumed
    # defaults.

    if id is None:
        id = ui.get_default_id()
    data = ui.get_data(id)
    wave = data.get_indep()[0]
    error = data.get_error()[0]

    # model could have been initialized with arbitrary values
    ui.fit(id)

    for i in range(maxiter):
        oldmodel = smh.get_model_parts(id)
        res_flux = ui.get_resid_plot(id).y
        if smoothwindow is not None:
            fwhminpix = int(fwhm / np.diff(wave).mean())
            y = smooth(res_flux / error, window_len=3 * fwhminpix,
                       window=smoothwindow)
            guessflux = smooth(res_flux, window_len=3 * fwhminpix,
                               window=smoothwindow)
        else:
            y = res_flux / error
            guessflux = res_flux
        peaks = findlines(wave, y, fwhm, smoothwindow=None,
                          sigma_threshold=sigma_threshold)
        if has_mpl and (do_plots > 2):
            plt.figure()
            plt.plot(wave, res_flux / error, 's')
            for pos in mymodel.line_value_list('pos'):
                plt.plot([pos, pos], plt.ylim(), 'k:')
            for peak in peaks:
                plt.plot([wave[peak], wave[peak]], plt.ylim())
            plt.plot(wave, y)
            plt.draw()

        for peak in peaks:
            pos_list = np.asarray(mymodel.line_value_list('pos'))
            if (pos_list.size == 0) or \
                    (np.abs(pos_list - wave[peak]).min() >= mindist):
                mymodel.add_line(**mymodel.guess(wave, guessflux, peak,
                                                 fwhm=fwhm))
        newmodel = smh.get_model_parts(id)
        print('Iteration {0:3n}: {1:3n} lines added'.format(
            i, len(newmodel) - len(oldmodel)))

        if set(newmodel) == set(oldmodel):
            print('No new lines added this step - fitting finished')
            break
        # Now do the fitting in Sherpa
        # ui.set_method('simplex')
        ui.fit(id)
        # ui.set_method('moncar')
        # ui.fit(id)

        if has_mpl and (do_plots > 0):
            if do_plots > 1:
                plt.figure()
            else:
                plt.clf()
            ui.plot_fit(id)
            for pos in mymodel.line_value_list('pos'):
                plt.plot([pos, pos], plt.ylim(), 'k:')
            for peak in peaks:
                plt.plot([wave[peak], wave[peak]], plt.ylim())
            plt.plot(wave, res_flux)
            plt.draw()

    else:
        print('Max number of iterations reached')
    # mymodel.cleanup()  # remove lines running to 0 etc.
    return mymodel
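A heavily hedged usage sketch: the `smh` helper module and the line-list model class are not shown in this listing, so the constructor below is hypothetical; any object with the `add_line`, `guess`, and `line_value_list` methods used above would do:

from sherpa.astro import ui

ui.load_data("spectrum.dat")     # hypothetical 1-D spectrum
mymodel = smh.LineModel()        # hypothetical line-list model object
fitted = mainloop(mymodel, fwhm=0.3, maxiter=10, mindist=0.5, do_plots=1)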