Ejemplo n.º 1
0
 def image(self, array, shape=None, newframe=False, tile=False):
     """Send *array* to the image backend, reshaping it first when
     *shape* is given. *newframe* and *tile* are coerced to bool."""
     newframe = bool_cast(newframe)
     tile = bool_cast(tile)
     payload = array if shape is None else array.reshape(shape)
     backend.image(payload, newframe, tile)
Ejemplo n.º 2
0
 def image(self, array, shape=None, newframe=False, tile=False):
     """Display *array* via the image backend.

     When *shape* is supplied the array is reshaped before display;
     the boolean flags are normalised with bool_cast first.
     """
     newframe = bool_cast(newframe)
     tile = bool_cast(tile)
     if shape is not None:
         array = array.reshape(shape)
     backend.image(array, newframe, tile)
Ejemplo n.º 3
0
 def get_indep(self, filter=False):
     """Return the six independent-axis arrays (lo/hi per axis),
     using the filtered copies when *filter* is true."""
     if bool_cast(filter):
         return (self._x0lo, self._x1lo, self._x2lo,
                 self._x0hi, self._x1hi, self._x2hi)
     return (self.x0lo, self.x1lo, self.x2lo,
             self.x0hi, self.x1hi, self.x2hi)
Ejemplo n.º 4
0
 def get_dep(self, filter=False):
     """Return an array of dependent variable values."""
     values = getattr(self, 'dep', None)
     if bool_cast(filter):
         values = self.apply_filter(values)
     return values
Ejemplo n.º 5
0
    def notice(self, mins, maxes, axislist, ignore=False):
        """Notice or ignore the region bounded by mins/maxes on axislist.

        Parameters
        ----------
        mins, maxes : sequence
            Per-axis lower and upper bounds; string values are rejected.
        axislist : sequence of arrays
            The independent axes the bounds are applied against.
        ignore : bool, optional
            If `True` the selected region is removed from the mask
            rather than added to it.
        """
        ignore = bool_cast(ignore)
        # Reject string-valued bounds/axes up front.
        # NOTE: 'min'/'max' shadow the builtins, but only inside the
        # comprehensions.
        if str in [type(min) for min in mins]:
            raise DataErr('typecheck', 'lower bound')
        elif str in [type(max) for max in maxes]:
            raise DataErr('typecheck', 'upper bound')
        elif str in [type(axis) for axis in axislist]:
            raise DataErr('typecheck', 'grid')

        mask = filter_bins(mins, maxes, axislist)

        if mask is None:
            # No bounds given: notice or ignore everything.
            self.mask = not ignore
        elif not ignore:
            # Grow the noticed region (in place when possible).
            if self.mask is True:
                self.mask = mask
            else:
                self.mask |= mask
        else:
            # Shrink the noticed region by the complement of the selection.
            mask = ~mask
            if self.mask is False:
                self.mask = mask
            else:
                self.mask &= mask
Ejemplo n.º 6
0
    def get_indep(self, filter=False, model=None):
        """Return the independent axes of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.

        Returns
        -------
        axis : tuple of arrays
           The coordinates of each point in the data set, one array
           per independent axis.

        See Also
        --------
        get_dep : Return the dependent axis of a data set.

        """
        axes = getattr(self, 'indep', None)
        if bool_cast(filter):
            axes = tuple(self.apply_filter(axis) for axis in axes)
        return axes
Ejemplo n.º 7
0
    def _get_data_space(self, filter):
        """Return an EvaluationSpace1D view of this object's data space.

        Called by get_x and get_indep, which need an EvaluationSpace1D
        representation of the data. This is a hook method: subclasses for
        which the default does not apply should override it.

        Parameters
        ----------
        filter : bool or string to be parsed as bool
            Whether the filtered x values should be used.

        Returns
        -------
        data_space : EvaluationSpace1D
            The data space for this object.
        """
        use_filtered = bool_cast(filter)
        axis = self._x if use_filtered else self.x
        return EvaluationSpace1D(axis)
Ejemplo n.º 8
0
    def get_dep(self, filter=False):
        """Return the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.

        Returns
        -------
        axis : array
           The value of each point in the data set.

        See Also
        --------
        get_indep : Return the independent axis of a data set.
        get_error : Return the errors on the dependent axis of a data set.
        get_staterror : Return the statistical errors on the dependent axis of a data set.
        get_syserror : Return the systematic errors on the dependent axis of a data set.

        """
        values = getattr(self, 'dep', None)
        if bool_cast(filter):
            values = self.apply_filter(values)
        return values
Ejemplo n.º 9
0
    def get_syserror(self, filter=False):
        """Return the systematic error on the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           Should the filter attached to the data set be applied to
           the return value or not. The default is `False`.

        Returns
        -------
        axis : array or `None`
           The systematic error for each data point. A value of
           `None` is returned if the data set has no systematic
           errors.

        See Also
        --------
        get_error : Return the errors on the dependent axis of a data set.
        get_indep : Return the independent axis of a data set.
        get_staterror : Return the statistical errors on the dependent axis of a data set.

        """
        syserr = getattr(self, 'syserror', None)
        filter = bool_cast(filter)
        if filter:
            syserr = self.apply_filter(syserr)
        return syserr
Ejemplo n.º 10
0
    def get_indep(self, filter=False, model=None):
        """Return the independent axes of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.

        Returns
        -------
        axis : tuple of arrays
           The coordinates of each point in the data set, one array
           per independent axis.

        See Also
        --------
        get_dep : Return the dependent axis of a data set.

        """
        axes = getattr(self, 'indep', None)
        if bool_cast(filter):
            axes = tuple(self.apply_filter(axis) for axis in axes)
        return axes
Ejemplo n.º 11
0
    def get_dep(self, filter=False):
        """Return the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.

        Returns
        -------
        axis : array
           The value of each point in the data set.

        See Also
        --------
        get_indep : Return the independent axis of a data set.
        get_error : Return the errors on the dependent axis of a data set.
        get_staterror : Return the statistical errors on the dependent axis of a data set.
        get_syserror : Return the systematic errors on the dependent axis of a data set.

        """
        values = getattr(self, 'dep', None)
        if bool_cast(filter):
            values = self.apply_filter(values)
        return values
Ejemplo n.º 12
0
    def notice(self, mins, maxes, axislist, ignore=False):
        """Notice (ignore=False) or ignore (ignore=True) the region
        bounded by mins and maxes along the given axes, updating
        ``self.mask`` accordingly.
        """
        ignore = bool_cast(ignore)

        # String-valued bounds or axes are a user error; check each
        # group in turn.
        for values, label in ((mins, 'lower bound'),
                              (maxes, 'upper bound'),
                              (axislist, 'grid')):
            if str in [type(value) for value in values]:
                raise DataErr('typecheck', label)

        mask = filter_bins(mins, maxes, axislist)

        if mask is None:
            # No bounds given: select or deselect everything.
            self.mask = not ignore
            return

        if not ignore:
            # Grow the noticed region (in place when possible).
            if self.mask is True:
                self.mask = mask
            else:
                self.mask |= mask
        else:
            # Shrink the noticed region by the complement.
            mask = ~mask
            if self.mask is False:
                self.mask = mask
            else:
                self.mask &= mask
Ejemplo n.º 13
0
    def get_syserror(self, filter=False):
        """Return the systematic error on the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           Should the filter attached to the data set be applied to
           the return value or not. The default is `False`.

        Returns
        -------
        axis : array or `None`
           The systematic error for each data point. A value of
           `None` is returned if the data set has no systematic
           errors.

        See Also
        --------
        get_error : Return the errors on the dependent axis of a data set.
        get_indep : Return the independent axis of a data set.
        get_staterror : Return the statistical errors on the dependent axis of a data set.

        """
        syserr = getattr(self, 'syserror', None)
        filter = bool_cast(filter)
        if filter:
            syserr = self.apply_filter(syserr)
        return syserr
Ejemplo n.º 14
0
    def _get_data_space(self, filter):
        """Return an EvaluationSpace1D view of this object's data space.

        Called by get_x and get_indep, which need an EvaluationSpace1D
        representation of the data. This is a hook method: subclasses for
        which the default does not apply should override it.

        Parameters
        ----------
        filter : bool or string to be parsed as bool
            Whether the filtered x values should be used.

        Returns
        -------
        data_space : EvaluationSpace1D
            The data space for this object.
        """
        use_filtered = bool_cast(filter)
        axis = self._x if use_filtered else self.x
        return EvaluationSpace1D(axis)
Ejemplo n.º 15
0
 def get_indep(self, filter=False):
     """Return a tuple containing the independent variables/axes."""
     axes = getattr(self, 'indep', None)
     if bool_cast(filter):
         axes = tuple(self.apply_filter(axis) for axis in axes)
     return axes
Ejemplo n.º 16
0
 def get_syserror(self, filter=False):
     """Return the systematic error array, filtered if requested."""
     errors = getattr(self, 'syserror', None)
     if bool_cast(filter):
         errors = self.apply_filter(errors)
     return errors
Ejemplo n.º 17
0
    def get_staterror(self, filter=False, staterrfunc=None):
        """Return the statistical error array, optionally filtered.

        When no error array is stored and *staterrfunc* is given, the
        errors are computed from the (filtered) dependent values.
        """
        apply_mask = bool_cast(filter)
        staterror = getattr(self, 'staterror', None)
        if apply_mask:
            staterror = self.apply_filter(staterror)

        if staterror is None and staterrfunc is not None:
            dep = self.get_dep()
            if apply_mask:
                dep = self.apply_filter(dep)
            staterror = staterrfunc(dep)
        return staterror
Ejemplo n.º 18
0
Archivo: data.py Proyecto: nplee/sherpa
    def get(self, filter=False):
        """
        Get a filtered representation of this data set. If `filter` is
        `False` this object is returned unchanged.

        Parameters
        ----------
        filter : bool
            whether the data set should be filtered before being returned

        Returns
        -------
        DataSpaceND
        """
        if not bool_cast(filter):
            return self

        filtered_axes = tuple(self.filter.apply(axis)
                              for axis in self.indep)
        return DataSpaceND(self.filter, filtered_axes)
Ejemplo n.º 19
0
    def get_staterror(self, filter=False, staterrfunc=None):
        """Return the statistical error on the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.
        staterrfunc : function
           Used to calculate the errors from the dependent axis when
           no statistical error has been set.

        Returns
        -------
        axis : array or `None`
           The statistical error for each data point, or `None` when
           there is no statistical error array and `staterrfunc` is
           `None`.

        See Also
        --------
        get_error : Return the errors on the dependent axis of a data set.
        get_indep : Return the independent axis of a data set.
        get_syserror : Return the systematic errors on the dependent axis of a data set.

        """
        apply_mask = bool_cast(filter)
        staterror = getattr(self, 'staterror', None)
        if apply_mask:
            staterror = self.apply_filter(staterror)

        # Fall back to the supplied error function, fed the (possibly
        # filtered) dependent values.
        if staterror is None and staterrfunc is not None:
            dep = self.get_dep()
            if apply_mask:
                dep = self.apply_filter(dep)
            staterror = staterrfunc(dep)
        return staterror
Ejemplo n.º 20
0
    def get_staterror(self, filter=False, staterrfunc=None):
        """Return the statistical error on the dependent axis of a data set.

        Parameters
        ----------
        filter : bool, optional
           If `True`, apply the data set's filter to the returned
           values. Defaults to `False`.
        staterrfunc : function
           Used to calculate the errors from the dependent axis when
           no statistical error has been set.

        Returns
        -------
        axis : array or `None`
           The statistical error for each data point, or `None` when
           there is no statistical error array and `staterrfunc` is
           `None`.

        See Also
        --------
        get_error : Return the errors on the dependent axis of a data set.
        get_indep : Return the independent axis of a data set.
        get_syserror : Return the systematic errors on the dependent axis of a data set.

        """
        apply_mask = bool_cast(filter)
        staterror = getattr(self, 'staterror', None)
        if apply_mask:
            staterror = self.apply_filter(staterror)

        # Fall back to the supplied error function, fed the (possibly
        # filtered) dependent values.
        if staterror is None and staterrfunc is not None:
            dep = self.get_dep()
            if apply_mask:
                dep = self.apply_filter(dep)
            staterror = staterrfunc(dep)
        return staterror
Ejemplo n.º 21
0
 def get_indep(self, filter=False):
     """Return the (x0lo, x1lo, x0hi, x1hi) independent axes,
     using the filtered copies when *filter* is true."""
     if bool_cast(filter):
         return (self._x0lo, self._x1lo, self._x0hi, self._x1hi)
     return (self.x0lo, self.x1lo, self.x0hi, self.x1hi)
Ejemplo n.º 22
0
 def get_indep(self, filter=False):
     """Return the (lo, hi, x1, x2) independent axes, using the
     filtered copies when *filter* is true."""
     if bool_cast(filter):
         return (self._lo, self._hi, self._x1, self._x2)
     return (self.xlo, self.xhi, self.x1, self.x2)
Ejemplo n.º 23
0
 def _get_data_space(self, filter=False):
     """Build the 1D evaluation space from the (lo, hi) axes,
     using the filtered copies when *filter* is true."""
     if bool_cast(filter):
         return EvaluationSpace1D(self._lo, self._hi)
     return EvaluationSpace1D(self.xlo, self.xhi)
Ejemplo n.º 24
0
 def get_indep(self, filter=False, model=None):
     """Return the (x0, x1) independent axes, using the filtered
     copies when *filter* is true."""
     if bool_cast(filter):
         return (self._x0, self._x1)
     return (self.x0, self.x1)
Ejemplo n.º 25
0
 def get_dep(self, filter=False):
     """Return the dependent values, filtered if requested."""
     values = self.y
     if bool_cast(filter):
         values = self.apply_filter(values)
     return values
Ejemplo n.º 26
0
 def calc(self, *args, **kwargs):
     """Evaluate the model by delegating to the compiled
     ``_modelfcts.hr`` routine, forwarding all arguments.

     The model's ``integrate`` setting is normalised to a bool and
     passed along in the keyword arguments.
     """
     kwargs['integrate'] = bool_cast(self.integrate)
     return _modelfcts.hr(*args, **kwargs)
Ejemplo n.º 27
0
 def get_indep(self, filter=False, model=None):
     """Return the (x0, x1) independent axes, using the filtered
     copies when *filter* is true."""
     if bool_cast(filter):
         return (self._x0, self._x1)
     return (self.x0, self.x1)
Ejemplo n.º 28
0
 def get_indep(self, filter=False):
     """Return the independent axis as a one-element tuple, using
     the filtered copy when *filter* is true."""
     if bool_cast(filter):
         return (self._x, )
     return (self.x, )
Ejemplo n.º 29
0
 def get_dep(self, filter=False):
     """Return the dependent values, filtered if requested."""
     values = self.y
     if bool_cast(filter):
         values = self.apply_filter(values)
     return values
Ejemplo n.º 30
0
    def fold(self, data):
        """Prepare the PSF for convolution with the given data set.

        Determines the kernel and data shapes, builds the keyword
        arguments describing the kernel (size, center, origin, norm,
        padding, ...) and stores the resulting kernel model in
        ``self.model`` (a Kernel, PSFKernel or RadialProfileKernel).
        Raises AttributeError when the PSF has a worse pixel
        resolution than the data, and PSFErr when no usable kernel
        is set.
        """
        # FIXME how will we know the native dimensionality of the
        # raveled model without the values?
        kargs = {}

        kshape = None
        dshape = data.get_dims()

        (size, center, origin, kargs['norm'],
         radial) = (self.size, self.center, self.origin,
                    bool_cast(self.norm.val), int(self.radial.val))

        kargs['size'] = size
        kargs['center'] = center
        kargs['origin'] = origin
        kargs['is_model'] = False
        kargs['do_pad'] = False

        kargs['args'] = data.get_indep()

        # Compare PSF and data pixel sizes to pick the evaluation space.
        pixel_size_comparison = self._check_pixel_size(data)

        if pixel_size_comparison == self.SAME_RESOLUTION:  # Don't do anything special
            self.data_space = EvaluationSpace2D(*data.get_indep())
            self._must_rebin = False
        elif pixel_size_comparison == self.BETTER_RESOLUTION:  # Evaluate model in PSF space
            self.data_space = EvaluationSpace2D(*data.get_indep())
            self.psf_space = PSFSpace2D(self.data_space, self, data.sky.cdelt)
            kargs['args'] = self.psf_space.grid
            dshape = self.psf_space.shape
            self._must_rebin = True
        else:  # PSF has worse resolution, error out
            raise AttributeError(
                "The PSF has a worse resolution than the data.")

        if isinstance(self.kernel, Data):
            # The kernel is a data set: take its extent directly.
            kshape = self.kernel.get_dims()
            # (kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(self.kernel.get_indep(), kshape)

            kargs['lo'] = [1] * len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1] * len(kshape)

            if center is None:
                kargs['center'] = [int(dim / 2.) for dim in kshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = kshape
                # update size param to default
                self.size = kargs['size']

        else:
            # Otherwise the kernel must be a callable model.
            if (self.kernel is None) or (not callable(self.kernel)):
                raise PSFErr('nopsf', self._name)
            kshape = data.get_dims()
            # (kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(kargs['args'], dshape)

            kargs['lo'] = [1] * len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1] * len(kshape)

            if center is None:
                kargs['center'] = [int(dim / 2.) for dim in dshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = dshape
                # update size param to default
                self.size = kargs['size']

            kargs['is_model'] = True
            if hasattr(self.kernel, 'pars'):
                # freeze all PSF model parameters if not already.
                for par in self.kernel.pars:
                    par.freeze()

            if hasattr(self.kernel, 'thawedpars'):
                kargs['frozen'] = (len(self.kernel.thawedpars) == 0)

        # A 1D unnormalised model kernel gets the plain Kernel class.
        is_kernel = (kargs['is_model'] and not kargs['norm']
                     and len(kshape) == 1)
        # Handle noticed regions for convolution
        if numpy.iterable(data.mask):
            kargs['do_pad'] = True
            kargs['pad_mask'] = data.mask

        if is_kernel:
            # Drop the arguments the plain Kernel class does not accept.
            for id in ['is_model', 'lo', 'hi', 'width', 'size']:
                kargs.pop(id)
            self.model = Kernel(dshape, kshape, **kargs)
            return

        if radial:
            self.model = RadialProfileKernel(dshape, kshape, **kargs)
            return

        self.model = PSFKernel(dshape, kshape, **kargs)
        return
Ejemplo n.º 31
0
 def _get_data_space(self, filter=False):
     """Build the 1D evaluation space from the (lo, hi) axes,
     using the filtered copies when *filter* is true."""
     if bool_cast(filter):
         return EvaluationSpace1D(self._lo, self._hi)
     return EvaluationSpace1D(self.xlo, self.xhi)
Ejemplo n.º 32
0
 def get_indep(self, filter=False):
     """Return the (lo, hi) independent axes, using the filtered
     copies when *filter* is true."""
     if bool_cast(filter):
         return (self._lo, self._hi)
     return (self.xlo, self.xhi)
Ejemplo n.º 33
0
 def get_indep(self, filter=False):
     """Return the independent axis as a one-element tuple, using
     the filtered copy when *filter* is true."""
     if bool_cast(filter):
         return (self._x,)
     return (self.x,)
Ejemplo n.º 34
0
    def fold(self, data):
        """Prepare the PSF for convolution with the given data set.

        Determines the kernel and data shapes, builds the keyword
        arguments describing the kernel (size, center, origin, norm,
        padding, ...) and stores the resulting kernel model in
        ``self.model`` (a Kernel, PSFKernel or RadialProfileKernel).
        Raises PSFErr when no usable kernel is set.
        """
        # FIXME how will we know the native dimensionality of the
        # raveled model without the values?
        kargs={}

        kshape = None
        dshape = data.get_dims()

        (size, center, origin,
         kargs['norm'], radial) = (self.size, self.center, self.origin,
                                   bool_cast(self.norm.val),
                                   int(self.radial.val))

        kargs['size'] = size
        kargs['center'] = center
        kargs['origin'] = origin
        kargs['is_model']=False
        kargs['do_pad']=False

        kargs['args'] = data.get_indep()
        if isinstance(self.kernel, Data):
            # The kernel is a data set: take its extent directly.
            kshape = self.kernel.get_dims()
            #(kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(self.kernel.get_indep(), kshape)

            kargs['lo'] = [1]*len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1]*len(kshape)

            if center is None:
                kargs['center'] = [int(dim/2.) for dim in kshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = kshape
                # update size param to default
                self.size = kargs['size']

        else:
            # Otherwise the kernel must be a callable model.
            if (self.kernel is None) or (not callable(self.kernel)):
                raise PSFErr('nopsf', self._name)
            kshape = data.get_dims()
            #(kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(kargs['args'], dshape)

            kargs['lo'] = [1]*len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1]*len(kshape)

            if center is None:
                kargs['center'] = [int(dim/2.) for dim in dshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = dshape
                # update size param to default
                self.size = kargs['size']

            kargs['is_model']=True
            if hasattr(self.kernel, 'pars'):
                # freeze all PSF model parameters if not already.
                for par in self.kernel.pars:
                    par.freeze()

            if hasattr(self.kernel, 'thawedpars'):
                kargs['frozen'] = (len(self.kernel.thawedpars) == 0)


        # check size of self.size to ensure <= dshape for 2D
#        if len(dshape) > 1:
#            dsize = numpy.asarray(dshape)
#            ksize = numpy.asarray(self.size)
#            if True in (ksize>dsize):
#                raise PSFErr('badsize', ksize, dsize)

        # A 1D unnormalised model kernel gets the plain Kernel class.
        is_kernel = (kargs['is_model'] and not kargs['norm'] and
                     len(kshape) == 1)
        # Handle noticed regions for convolution
        if numpy.iterable(data.mask):
            kargs['do_pad'] = True
            kargs['pad_mask'] = data.mask

        if is_kernel:
            # Drop the arguments the plain Kernel class does not accept.
            for id in ['is_model','lo','hi','width','size']:
                kargs.pop(id)
            self.model = Kernel(dshape, kshape, **kargs)
            return

        if radial:
            self.model = RadialProfileKernel(dshape, kshape, **kargs)
            return

        self.model = PSFKernel(dshape, kshape, **kargs)
        return
0
    def fold(self, data):
        """Prepare the PSF for convolution with the given data set.

        Checks the pixel size, determines the kernel and data shapes,
        builds the keyword arguments describing the kernel (size,
        center, origin, norm, padding, ...) and stores the resulting
        kernel model in ``self.model`` (a Kernel, PSFKernel or
        RadialProfileKernel). Raises PSFErr when no usable kernel is
        set.
        """
        # FIXME how will we know the native dimensionality of the
        # raveled model without the values?
        self._check_pixel_size(data)

        kargs = {}

        kshape = None
        dshape = data.get_dims()

        (size, center, origin,
         kargs['norm'], radial) = (self.size, self.center, self.origin,
                                   bool_cast(self.norm.val),
                                   int(self.radial.val))

        kargs['size'] = size
        kargs['center'] = center
        kargs['origin'] = origin
        kargs['is_model'] = False
        kargs['do_pad'] = False

        kargs['args'] = data.get_indep()
        if isinstance(self.kernel, Data):
            # The kernel is a data set: take its extent directly.
            kshape = self.kernel.get_dims()
            # (kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(self.kernel.get_indep(), kshape)

            kargs['lo'] = [1] * len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1] * len(kshape)

            if center is None:
                kargs['center'] = [int(dim / 2.) for dim in kshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = kshape
                # update size param to default
                self.size = kargs['size']

        else:
            # Otherwise the kernel must be a callable model.
            if (self.kernel is None) or (not callable(self.kernel)):
                raise PSFErr('nopsf', self._name)
            kshape = data.get_dims()
            # (kargs['lo'], kargs['hi'],
            # kargs['width']) = _get_axis_info(kargs['args'], dshape)

            kargs['lo'] = [1] * len(kshape)
            kargs['hi'] = kshape
            kargs['width'] = [1] * len(kshape)

            if center is None:
                kargs['center'] = [int(dim / 2.) for dim in dshape]
                # update center param to default
                self.center = kargs['center']

            if size is None:
                kargs['size'] = dshape
                # update size param to default
                self.size = kargs['size']

            kargs['is_model'] = True
            if hasattr(self.kernel, 'pars'):
                # freeze all PSF model parameters if not already.
                for par in self.kernel.pars:
                    par.freeze()

            if hasattr(self.kernel, 'thawedpars'):
                kargs['frozen'] = (len(self.kernel.thawedpars) == 0)

        # check size of self.size to ensure <= dshape for 2D
        #        if len(dshape) > 1:
        #            dsize = numpy.asarray(dshape)
        #            ksize = numpy.asarray(self.size)
        #            if True in (ksize>dsize):
        #                raise PSFErr('badsize', ksize, dsize)

        # A 1D unnormalised model kernel gets the plain Kernel class.
        is_kernel = (kargs['is_model'] and not kargs['norm'] and
                     len(kshape) == 1)
        # Handle noticed regions for convolution
        if numpy.iterable(data.mask):
            kargs['do_pad'] = True
            kargs['pad_mask'] = data.mask

        if is_kernel:
            # Drop the arguments the plain Kernel class does not accept.
            for id in ['is_model', 'lo', 'hi', 'width', 'size']:
                kargs.pop(id)
            self.model = Kernel(dshape, kshape, **kargs)
            return

        if radial:
            self.model = RadialProfileKernel(dshape, kshape, **kargs)
            return

        self.model = PSFKernel(dshape, kshape, **kargs)
        return
Ejemplo n.º 36
0
    def est_errors(self, methoddict=None, parlist=None):
        # Define functions to freeze and thaw a parameter before
        # we call fit function -- projection can call fit several
        # times, for each parameter -- that parameter must be frozen
        # while the others freely vary.        
        def freeze_par(pars, parmins, parmaxes, i):
            # Freeze the indicated parameter; return
            # its place in the list of all parameters,
            # and the current values of the parameters,
            # and the hard mins amd maxs of the parameters
            self.model.pars[self.thaw_indices[i]].val = pars[i]
            self.model.pars[self.thaw_indices[i]].frozen = True
            self.current_frozen = self.thaw_indices[i]
            keep_pars = ones_like(pars)
            keep_pars[i] = 0
            current_pars = pars[where(keep_pars)]
            current_parmins = parmins[where(keep_pars)]
            current_parmaxes = parmaxes[where(keep_pars)]
            return (current_pars, current_parmins, current_parmaxes)

        def thaw_par(i):
            if (i < 0):
                pass
            else:
                self.model.pars[self.thaw_indices[i]].frozen = False
                self.current_frozen = -1

        # confidence needs to know which parameter it is working on.
        def get_par_name( ii ):
            return self.model.pars[self.thaw_indices[ii]].fullname
        
        # Call from a parameter estimation method, to report
        # that limits for a given parameter have been found
        def report_progress(i, lower, upper):
            if (i < 0):
                pass
            else:
                name = self.model.pars[self.thaw_indices[i]].fullname
                if isnan(lower) or isinf(lower):
                    info("%s \tlower bound: -----" % name)
                else:
                    info("%s \tlower bound: %g" % (name, lower))
                if isnan(upper) or isinf(upper):
                    info("%s \tupper bound: -----" % name)
                else:
                    info("%s \tupper bound: %g" % (name, upper))


        # If starting fit statistic is chi-squared or C-stat,
        # can calculate reduced fit statistic -- if it is
        # more than 3, don't bother calling method to estimate
        # parameter limits.

        if (type(self.stat) is LeastSq):
            #raise FitError('cannot estimate confidence limits with ' +
            #               type(self.stat).__name__)
            raise EstErr( 'noerr4least2', type(self.stat).__name__)

        
        if (type(self.stat) is not Cash):
            dep, staterror, syserror = self.data.to_fit(
                self.stat.calc_staterror)

            if not iterable(dep) or len(dep) == 0:
                #raise FitError('no noticed bins found in data set')
                raise FitErr( 'nobins' )

            # For chi-squared and C-stat, reduced statistic is
            # statistic value divided by number of degrees of
            # freedom.

            # Degress of freedom are number of data bins included
            # in fit, minus the number of thawed parameters.
            dof = len(dep) - len(self.model.thawedpars)
            if (dof < 1):
                #raise FitError('degrees of freedom are zero or lower')
                raise EstErr( 'nodegfreedom' )
            
            if (hasattr(self.estmethod, "max_rstat") and
                (self.calc_stat() / dof) > self.estmethod.max_rstat):
                #raise FitError('reduced statistic larger than ' +
                #               str(self.estmethod.max_rstat))
                raise EstErr( 'rstat>max', str(self.estmethod.max_rstat) )

        # If statistic is chi-squared, change fitting method to
        # Levenberg-Marquardt; else, switch to NelderMead.  (We
        # will do fitting during projection, and therefore don't
        # want to use LM with a stat other than chi-squared).

        # If current method is not LM or NM, warn it is not a good
        # method for estimating parameter limits.
        if (type(self.estmethod) is not Covariance and
            type(self.method) is not NelderMead and
            type(self.method) is not LevMar):
            warning(self.method.name + " is inappropriate for confidence " +
                    "limit estimation")
        
        oldmethod = self.method
        if (hasattr(self.estmethod, "fast") and
            bool_cast(self.estmethod.fast) is True and
            methoddict is not None):
            if (isinstance(self.stat, Likelihood) ):
                if (type(self.method) is not NelderMead):
                    self.method = methoddict['neldermead']
                    warning("Setting optimization to " + self.method.name
                            + " for confidence limit search")
            else:
                if (type(self.method) is not LevMar):
                    self.method = methoddict['levmar']
                    warning("Setting optimization to " + self.method.name
                            + " for confidence limit search")

        # Now, set up before we call the confidence limit function
        # Keep track of starting values, will need to set parameters
        # back to starting values when we are done.
        startpars = self.model.thawedpars
        startsoftmins = self.model.thawedparmins
        startsoftmaxs = self.model.thawedparmaxes
        starthardmins = self.model.thawedparhardmins
        starthardmaxs = self.model.thawedparhardmaxes

        # If restricted to soft_limits, only send soft limits to
        # method, and do not reset model limits
        if (bool_cast(self.estmethod.soft_limits) is True):
            starthardmins = self.model.thawedparmins
            starthardmaxs = self.model.thawedparmaxes
        else:
            self.model.thawedparmins = starthardmins
            self.model.thawedparmaxes = starthardmaxs
        
        self.current_frozen = -1

        # parnums is the list of indices of the thawed parameters
        # we want to visit.  For example, if there are three thawed
        # parameters, and we want to derive limits for only the first
        # and third, then parnums = [0,2].  We construct the list by
        # comparing each parameter in parlist to the thawed model
        # parameters.  (In the default case, when parlist is None,
        # that means get limits for all thawed parameters, so parnums
        # is [0, ... , numpars - 1], if the number of thawed parameters
        # is numpars.)
        parnums = []
        if parlist is not None:
            allpars = [p for p in self.model.pars if not p.frozen]
            for p in parlist:
                count = 0
                match = False
                for par in allpars:
                    if p is par:
                        parnums.append(count)
                        match = True
                    count = count + 1
                if (match == False):
                    raise EstErr('noparameter', p.fullname)
            parnums = array(parnums)
        else:
            parlist = [p for p in self.model.pars if not p.frozen]
            parnums = arange(len(startpars))
            
        # If we are here, we are ready to try to derive confidence limits.
        # General rule:  if failure because a hard limit was hit, find
        # out which parameter it was so we can tell the user.
        # If a new minimum statistic was found, start over, with parameter
        # values that yielded new lower statistic as the new starting point.
        output = None
        results = None
        oldremin = -1.0
        if (hasattr(self.estmethod, "remin")):
            oldremin = self.estmethod.remin
        try:
            output = self.estmethod.compute(self._iterfit._get_callback(),
                                            self._iterfit.fit,
                                            self.model.thawedpars,
                                            startsoftmins,
                                            startsoftmaxs,
                                            starthardmins,
                                            starthardmaxs,
                                            parnums,
                                            freeze_par, thaw_par,
                                            report_progress, get_par_name)
        except EstNewMin, e:
            # If maximum number of refits has occurred, don't
            # try to reminimize again.
            if (hasattr(self.estmethod, "maxfits") and
                not(self.refits < self.estmethod.maxfits-1)):
                self.refits = 0
                thaw_par(self.current_frozen)
                self.model.thawedpars = startpars
                self.model.thawedparmins = startsoftmins
                self.model.thawedparmaxes = startsoftmaxs
                self.method = oldmethod
                if (hasattr(self.estmethod, "remin")):
                    self.estmethod.remin = -1.0
                warning("Maximum number of reminimizations reached")
            
            # First report results of new fit, then call
            # compute limits for those new best-fit parameters
            for p in parlist:
                p.frozen = False
            self.current_frozen = -1

            if e.args != ():
                self.model.thawedpars = e.args[0]

            self.model.thawedparmins = startsoftmins
            self.model.thawedparmaxes = startsoftmaxs
            results = self.fit()
            self.refits = self.refits + 1
            warning("New minimum statistic found while computing confidence limits")
            warning("New best-fit parameters:\n" + results.format())

            # Now, recompute errors for new best-fit parameters
            results = self.est_errors(methoddict, parlist)
            self.model.thawedparmins = startsoftmins
            self.model.thawedparmaxes = startsoftmaxs
            self.method = oldmethod
            if (hasattr(self.estmethod, "remin")):
                self.estmethod.remin = oldremin
            return results
Ejemplo n.º 37
0
    def calc(self, fit, par0, par1, methoddict=None):
        """Evaluate the fit statistic over a 2D grid of (par0, par1) values.

        Both parameters must be thawed members of the fitted model.  At
        each grid point the two parameters are frozen at the grid values
        and, if any other thawed parameters remain, the fit is
        re-optimized over them (the "projection"); otherwise the statistic
        is simply evaluated.  The resulting statistic values are stored in
        ``self.y``.

        Parameters
        ----------
        fit : Fit
            The fit object supplying the data, model, statistic and
            optimization method.  Its method and thawed parameter values
            are temporarily modified but restored on exit.
        par0, par1 : Parameter
            The two thawed model parameters defining the grid axes.
        methoddict : dict or None, optional
            Maps method names ('levmar', 'neldermead') to optimization
            method instances; used when the ``fast`` option is set to
            temporarily switch to a more suitable optimizer.

        Raises
        ------
        ConfidenceErr
            If either parameter is frozen, or is not one of the model's
            thawed parameters.
        """
        self.title = 'Region-Projection'

        Confidence2D.calc(self, fit, par0, par1)
        if par0.frozen:
            raise ConfidenceErr('frozen', par0.fullname, 'region projection')
        if par1.frozen:
            raise ConfidenceErr('frozen', par1.fullname, 'region projection')

        thawed = [i for i in fit.model.pars if not i.frozen]

        if par0 not in thawed:
            raise ConfidenceErr('thawed', par0.fullname, fit.model.name)
        if par1 not in thawed:
            raise ConfidenceErr('thawed', par1.fullname, fit.model.name)

        # If "fast" option enabled, set fitting method to
        # lmdif if stat is chi-squared,
        # else set to neldermead

        # If current method is not LM or NM, warn it is not a good
        # method for estimating parameter limits.
        if type(fit.method) not in (NelderMead, LevMar):
            warning(fit.method.name + " is inappropriate for confidence " +
                    "limit estimation")

        oldfitmethod = fit.method
        if (bool_cast(self.fast) is True and methoddict is not None):
            if (isinstance(fit.stat, Likelihood)):
                if (type(fit.method) is not NelderMead):
                    fit.method = methoddict['neldermead']
                    warning("Setting optimization to " + fit.method.name
                            + " for region projection plot")
            else:
                if (type(fit.method) is not LevMar):
                    fit.method = methoddict['levmar']
                    warning("Setting optimization to " + fit.method.name
                            + " for region projection plot")

        def eval_proj(pars):
            # Grid values on a log axis are stored as log10; convert back
            # to linear space before assigning to the parameters.
            for ii in [0, 1]:
                if self.log[ii]:
                    pars[ii] = numpy.power(10, pars[ii])
            (par0.val, par1.val) = pars
            # Re-fit over the remaining thawed parameters (projection),
            # or just evaluate the statistic if none remain.
            if len(thawed) > 2:
                r = fit.fit()
                return r.statval
            return fit.calc_stat()

        oldpars = fit.model.thawedpars

        # Capture the real startup/teardown methods BEFORE entering the
        # try block.  Previously they were bound inside the try, so if
        # fit.model.startup() raised, the finally clause hit a NameError
        # that masked the original exception.
        startup = fit.model.startup
        teardown = fit.model.teardown

        try:
            fit.model.startup()

            # Replace startup/teardown with no-ops: fit() would otherwise
            # invoke them at every grid point, and those calls are
            # unnecessary once the model has been set up.
            fit.model.startup = lambda: None
            fit.model.teardown = lambda: None

            grid = self._region_init(fit, par0, par1)

            par0.freeze()
            par1.freeze()

            self.y = numpy.asarray(parallel_map(eval_proj, grid,
                                                self.numcores))

        finally:
            # Restore everything we changed, even on error.
            par0.thaw()
            par1.thaw()

            fit.model.startup = startup
            fit.model.teardown = teardown

            fit.model.teardown()
            fit.model.thawedpars = oldpars
            fit.method = oldfitmethod