Example 1
def _sample_stat(fit, samples, numcores=None, cache=True):
    """Calculate the statistic for each set of samples.

    Parameters
    ----------
    fit : sherpa.fit.Fit instance
        This defines the thawed parameters that are used to generate
        the samples, along with any possible error analysis.
    samples : 2D numpy array
        The samples array, stored as a niter by npar matrix; each
        row is one set of parameter values.
    numcores : int or None, optional
        Should the calculation be done on multiple CPUs?  The default
        (None) is to rely on the parallel.numcores setting of the
        configuration file.
    cache : bool, optional
        Should the model cache be used?

    Returns
    -------
    vals : 2D numpy array
        A copy of the samples input with an extra column added at its
        start, giving the statistic value for each row.

    """

    oldvals = fit.model.thawedpars

    try:
        fit.model.startup(cache=cache)
        stats = numpy.asarray(parallel_map(Evaluate(fit), samples, numcores))
    finally:
        fit.model.teardown()
        fit.model.thawedpars = oldvals

    return numpy.concatenate([stats[:, numpy.newaxis], samples], axis=1)
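
Note: the Evaluate object handed to parallel_map above must be a
callable that turns one row of samples into a statistic value. The
sketch below shows the minimal shape such a helper could take; the
names mirror the call above, but this is an illustration, not
necessarily sherpa's actual implementation.

class Evaluate:
    """Map one row of parameter samples to the fit statistic (sketch)."""

    def __init__(self, fit):
        self.fit = fit

    def __call__(self, sample):
        # Install the proposed parameter values and evaluate the
        # statistic; _sample_stat restores the original values.
        self.fit.model.thawedpars = sample
        return self.fit.calc_stat()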
Example 2
def test_parallel_map(num_tasks, num_segments):
    f = numpy.sum
    iterable = [numpy.arange(1, 2 + 2 * i) for i in range(num_segments)]

    result = list(map(f, iterable))
    result = numpy.asarray(result)

    pararesult = utils.parallel_map(f, iterable, num_tasks)

    assert numpy.asarray(pararesult) == pytest.approx(result)
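
Note: these tests rely on parallel_map(f, sequence, numcores)
behaving like the builtin map while splitting the work across
processes. A minimal standalone check of that contract, assuming a
working sherpa installation (sherpa.utils.parallel_map is the
function exercised throughout this page):

import numpy
from sherpa.utils import parallel_map

iterable = [numpy.arange(1, 2 + 2 * i) for i in range(4)]

serial = [numpy.sum(x) for x in iterable]         # plain, in-order map
parallel = parallel_map(numpy.sum, iterable, 2)   # split over 2 cores

# Results come back in input order, so the two lists agree.
assert list(parallel) == serial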
Example 3
def test_parallel_map(num_tasks, num_segments):
    f = numpy.sum
    iterable = [numpy.arange(1, 2 + 2 * i) for i in range(num_segments)]

    result = list(map(f, iterable))
    result = numpy.asarray(result)

    pararesult = utils.parallel_map(f, iterable, num_tasks)

    assert_equal(result, numpy.asarray(pararesult))
Example 4
def _sample_stat(fit, samples, numcores=None):

    oldvals = fit.model.thawedpars

    try:
        fit.model.startup()
        stats = numpy.asarray(parallel_map(Evaluate(fit), samples, numcores))
    finally:
        fit.model.teardown()
        fit.model.thawedpars = oldvals

    return numpy.concatenate([stats[:, numpy.newaxis], samples], axis=1)
Example 5
    def test_parallel_map(self):
        ncpus = multiprocessing.cpu_count()

        numtasks = 8
        f = numpy.sum
        iterable = [numpy.arange(1, 2+2*i) for i in range(numtasks)]

        result = list(map(f, iterable))
        result = numpy.asarray(result)

        pararesult = utils.parallel_map(f, iterable, ncpus)

        assert_equal(result, numpy.asarray(pararesult))
Example 6
def calc_flux(fit, data, src, samples, method=calc_energy_flux,
              lo=None, hi=None, numcores=None):

    old_model_vals = fit.model.thawedpars
    try:
        fluxes = parallel_map(CalcFluxWorker(fit, method, data, src, lo, hi), samples, numcores)
    finally:
        fit.model.thawedpars = old_model_vals

    return numpy.asarray(fluxes)
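
Note: Example 6 hands parallel_map a CalcFluxWorker instance rather
than a closure. That matters for multiprocessing: an instance of a
module-level class can be pickled and shipped to worker processes,
while a nested closure (like evaluate in the next example) can only
work where processes are forked. A sketch of what such a worker might
look like, mirroring the call above; the real CalcFluxWorker in
sherpa may differ:

class CalcFluxWorker:
    """Picklable per-sample flux evaluator (illustrative sketch)."""

    def __init__(self, fit, method, data, src, lo, hi):
        self.fit = fit
        self.method = method
        self.data = data
        self.src = src
        self.lo = lo
        self.hi = hi

    def __call__(self, sample):
        # Install the sample and return the flux together with the
        # parameter values, matching the closure-based version.
        self.fit.model.thawedpars = sample
        flux = self.method(self.data, self.src, self.lo, self.hi)
        return [flux] + list(sample)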
Example 7
def calc_flux(fit, data, src, samples, method=calc_energy_flux,
              lo=None, hi=None, numcores=None):

    def evaluate(sample):
        fit.model.thawedpars = sample
        flux = method(data, src, lo, hi)
        return [flux] + list(sample)

    old_model_vals = fit.model.thawedpars
    try:
        fluxes = parallel_map(evaluate, samples, numcores)
    finally:
        fit.model.thawedpars = old_model_vals

    return numpy.asarray(fluxes)
Example 8
    def calc(self, fit, par0, par1, methoddict=None):
        self.title = 'Region-Uncertainty'

        Confidence2D.calc(self, fit, par0, par1)
        if par0.frozen:
            raise ConfidenceErr('frozen', par0.fullname, 'region uncertainty')
        if par1.frozen:
            raise ConfidenceErr('frozen', par1.fullname, 'region uncertainty')
        
        thawed = [i for i in fit.model.pars if not i.frozen]
        
        if par0 not in thawed:
            raise ConfidenceErr('thawed', par0.fullname, fit.model.name)
        if par1 not in thawed:
            raise ConfidenceErr('thawed', par1.fullname, fit.model.name)

        def eval_uncert(pars):
            for ii in [0, 1]:
                if self.log[ii]:
                    pars[ii] = numpy.power(10, pars[ii])
            (par0.val, par1.val) = pars
            return fit.calc_stat()

        oldpars = fit.model.thawedpars

        try:
            fit.model.startup()

            grid = self._region_init(fit, par0, par1)

            for i in thawed:
                i.freeze()

            self.y = numpy.asarray(parallel_map(eval_uncert, grid,
                                                self.numcores))

        finally:
            # Set back data after we changed it
            for i in thawed:
                i.thaw()
            fit.model.teardown()
            fit.model.thawedpars = oldpars
Example 9
    def __call__(self, tol, maxnfev, numcores=_ncpus):
        nfev = 0
        random.seed(self.seed)
        mypop = self.polytope
        old_fval = numpy.inf
        while nfev < maxnfev:

            keys = self.calc_key(range(self.npop))
            results = parallel_map(self.all_strategies, keys, numcores)

            for index, result in enumerate(results):
                nfev += int(result[0])
                if result[-1] < mypop[index][-1]:
                    mypop[index] = result[1:]

            self.polytope.sort()
            if self.polytope.check_convergence(tol, 0):
                break

            best = mypop[0]
            best_fval = best[-1]
            if best_fval < old_fval:
                best_par = best[:-1]
                tmp_nfev, tmp_fval, tmp_par = \
                    self.ncores_nm(self.func, best_par, self.xmin, self.xmax,
                                   tol)
                nfev += tmp_nfev
                if tmp_fval < best_fval:
                    best_par = numpy.append(tmp_par, tmp_fval)
                    mypop[1] = best_par[:]
                    self.polytope.sort()
                    old_fval = tmp_fval
                else:
                    old_fval = best_fval

        best_vertex = self.polytope[0]
        best_par = best_vertex[:-1]
        best_fval = best_vertex[-1]
        return nfev, best_fval, best_par
Example 10
    def calc(self, fit, par, methoddict=None):
        self.title = 'Interval-Uncertainty'

        Confidence1D.calc(self, fit, par)
        if par.frozen:
            raise ConfidenceErr('frozen', par.fullname, 'interval uncertainty')
        
        thawed = [i for i in fit.model.pars if not i.frozen]
    
        if par not in thawed:
            raise ConfidenceErr('thawed', par.fullname, fit.model.name)

        oldpars = fit.model.thawedpars

        xvals = self._interval_init(fit, par)

        for i in thawed:
            i.freeze()

        def eval_uncert(val):
            if self.log:
                val = numpy.power(10, val)
            par.val = val
            return fit.calc_stat()

        try:
            fit.model.startup()
            self.y = numpy.asarray(parallel_map(eval_uncert, xvals,
                                                self.numcores))

        finally:
            # Set back data that we changed
            for i in thawed:
                i.thaw()
            fit.model.teardown()
            fit.model.thawedpars = oldpars
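
Note: Examples 8 and 10 share one pattern: freeze every thawed
parameter so that only the scanned value changes, evaluate the
statistic over a grid with parallel_map, and restore everything in a
finally block. The pattern distilled, with hypothetical fit/par
objects standing in for sherpa's:

def scan(fit, par, grid, numcores=None):
    # Freeze everything so calc_stat() responds only to par.val.
    thawed = [p for p in fit.model.pars if not p.frozen]
    oldpars = fit.model.thawedpars

    def eval_stat(val):
        par.val = val              # only this parameter moves
        return fit.calc_stat()

    for p in thawed:
        p.freeze()
    try:
        return numpy.asarray(parallel_map(eval_stat, grid, numcores))
    finally:
        # Always restore, even if a statistic evaluation fails.
        for p in thawed:
            p.thaw()
        fit.model.thawedpars = oldpars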
Example 11
    def run(fit,
            null_comp,
            alt_comp,
            conv_mdl=None,
            stat=None,
            method=None,
            niter=500,
            numcores=None):

        if stat is None: stat = CStat()
        if method is None: method = NelderMead()

        if not isinstance(stat, (Cash, CStat)):
            raise TypeError("Sherpa fit statistic must be Cash or CStat" +
                            " for likelihood ratio test")

        niter = int(niter)

        alt = alt_comp
        null = null_comp

        oldaltvals = numpy.array(alt.thawedpars)
        oldnullvals = numpy.array(null.thawedpars)

        data = fit.data

        if conv_mdl is not None:
            # Copy the PSF
            null_conv_mdl = deepcopy(conv_mdl)

            alt = conv_mdl(alt_comp)
            if hasattr(conv_mdl, 'fold'):
                conv_mdl.fold(data)

            # Convolve the null model
            null = null_conv_mdl(null_comp)
            if hasattr(null_conv_mdl, 'fold'):
                null_conv_mdl.fold(data)

        nullfit = Fit(data, null, stat, method, Covariance())

        # Fit with null model
        nullfit_results = nullfit.fit()
        debug(nullfit_results.format())

        null_stat = nullfit_results.statval
        null_vals = nullfit_results.parvals

        # Calculate niter samples using null best-fit and covariance
        sampler = NormalParameterSampleFromScaleMatrix()
        samples = sampler.get_sample(nullfit, None, niter)

        # Fit with alt model, null component starts at null's best fit params.
        altfit = Fit(data, alt, stat, method, Covariance())
        altfit_results = altfit.fit()
        debug(altfit_results.format())

        alt_stat = altfit_results.statval
        alt_vals = altfit_results.parvals

        LR = -(alt_stat - null_stat)

        def worker(proposal, *args, **kwargs):
            return LikelihoodRatioTest.calculate(nullfit, altfit, proposal,
                                                 null_vals, alt_vals)

        olddep = data.get_dep(filter=False)
        try:
            #statistics = map(worker, samples)
            statistics = parallel_map(worker, samples, numcores)
        finally:
            data.set_dep(olddep)
            alt.thawedpars = list(oldaltvals)
            null.thawedpars = list(oldnullvals)

        debug("statistic null = " + repr(null_stat))
        debug("statistic alt = " + repr(alt_stat))
        debug("LR = " + repr(LR))

        statistics = numpy.asarray(statistics)

        pppvalue = numpy.sum(statistics[:, 2] > LR) / (1.0 * niter)

        debug('ppp value = ' + str(pppvalue))

        return LikelihoodRatioResults(statistics[:, 2], statistics[:, 0:2],
                                      samples, LR, pppvalue, null_stat,
                                      alt_stat)
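
Note: the simulated p-value at the end is just the fraction of
sampled likelihood ratios that exceed the observed LR. A toy numpy
check of that final computation, with made-up numbers:

import numpy

# Column 2 holds the sampled LR values, as in the code above.
statistics = numpy.array([[0.0, 0.0, 0.5],
                          [0.0, 0.0, 1.5],
                          [0.0, 0.0, 2.5]])
LR = 1.0
niter = statistics.shape[0]

pppvalue = numpy.sum(statistics[:, 2] > LR) / (1.0 * niter)
assert pppvalue == 2.0 / 3.0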
Example 12
def calc_flux(data,
              src,
              samples,
              method=calc_energy_flux,
              lo=None,
              hi=None,
              numcores=None,
              subset=None):
    """Calculate model fluxes from a sample of parameter values.

    Given a set of parameter values, calculate the model flux for
    each set.

    .. versionchanged:: 4.12.2
       The subset parameter was added.

    .. versionchanged:: 4.12.1
       The fit parameter was removed.

    Parameters
    ----------
    data : sherpa.data.Data subclass
        The data object to use.
    src : sherpa.models.Arithmetic instance
        The source model (without instrument response for PHA data)
    samples : 2D array
        Each row is one set of samples, and the columns give the
        parameter values to use. If there are n free parameters in
        the model then the array must have a shape of (num, m), where
        num is the number of fluxes to calculate and m >= n. If
        m > n then the subset argument must be set.
    method : function, optional
        How to calculate the flux: assumed to be one of calc_energy_flux
        or calc_photon_flux
    lo : number or None, optional
        The lower edge of the dataspace range for the flux calculation.
        If None then the lower edge of the data grid is used.
    hi : number or None, optional
        The upper edge of the dataspace range for the flux calculation.
        If None then the upper edge of the data grid is used.
    numcores : int or None, optional
        Should the analysis be split across multiple CPU cores?
        When set to None all available cores are used.
    subset : list of ints or None, optional
        This is only used when the samples array has more parameters
        in it than are free in src. In this case the subset array lists
        the column number of the free parameters in src. So, if the
        samples represented 'nh', 'gamma', and 'ampl' values for each
        row, but the src model only contained the 'gamma' and 'ampl'
        parameters then subset would be [1, 2].

    Returns
    -------
    vals : 2D NumPy array
        If the samples array has a shape of (num, nfree) then vals
        has the shape (num, nfree + 1). The first column is the flux
        for the row, and the remaining columns are copies of the input
        samples array.

    See Also
    --------
    sample_flux

    """

    old_vals = src.thawedpars
    worker = CalcFluxWorker(method, data, src, lo, hi, subset)
    try:
        fluxes = parallel_map(worker, samples, numcores)
    finally:
        src.thawedpars = old_vals

    return numpy.asarray(fluxes)
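
Note: when the samples array carries more columns than src has free
parameters, the worker selects the free ones by column index. A short
illustration of how subset is applied, using the docstring's
nh/gamma/ampl example (the selection line is an assumption about the
worker, not code from this listing):

import numpy

sample = numpy.array([0.04, 1.7, 2.3e-4])   # nh, gamma, ampl
subset = [1, 2]                             # src frees only gamma, ampl

pars = sample[subset] if subset is not None else sample
# pars is array([1.7e+00, 2.3e-04]) and can be assigned to
# src.thawedpars before computing the flux.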
Example 13
def fcn_parallel(pars, fvec):
    fd_jac = fdJac(stat_cb1, fvec, pars)
    params = fd_jac.calc_params()
    fjac = parallel_map(fd_jac, params, numcores)
    return numpy.concatenate(fjac)
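
Note: a finite-difference Jacobian is embarrassingly parallel: each
task perturbs the parameters for one column, and the per-task results
are stitched back into the full matrix, which is what fcn_parallel
does via parallel_map and numpy.concatenate. A self-contained
forward-difference sketch of the same idea (hypothetical names, no
sherpa dependency):

import numpy

def fd_jacobian(func, pars, fvec, eps=1.0e-7, map_fn=map):
    """Forward-difference Jacobian, one column per task."""

    def column(i):
        step = numpy.zeros_like(pars)
        step[i] = eps * max(abs(pars[i]), 1.0)
        return (func(pars + step) - fvec) / step[i]

    # map_fn can be swapped for a parallel map to spread the
    # columns across cores, as fcn_parallel does above.
    cols = list(map_fn(column, range(len(pars))))
    return numpy.array(cols).T

# Residuals of a two-parameter linear model as a quick check.
x = numpy.linspace(0.0, 1.0, 5)
model = lambda p: p[0] + p[1] * x
pars = numpy.array([1.0, 2.0])
J = fd_jacobian(model, pars, model(pars))   # columns ~ [1, x]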
Example 14
def grid_search(fcn,
                x0,
                xmin,
                xmax,
                num=16,
                sequence=None,
                numcores=1,
                maxfev=None,
                ftol=EPSILON,
                method=None,
                verbose=0):

    x, xmin, xmax = _check_args(x0, xmin, xmax)

    npar = len(x)

    def func(pars):
        aaa = fcn(pars)[0]
        if verbose:
            print('f%s=%g' % (pars, aaa))
        return aaa

    def make_sequence(ranges, N):
        list_ranges = list(ranges)
        for ii in range(npar):
            list_ranges[ii] = tuple(list_ranges[ii]) + (complex(N), )
            list_ranges[ii] = slice(*list_ranges[ii])
        grid = numpy.mgrid[list_ranges]
        mynfev = pow(N, npar)
        grid = list(map(numpy.ravel, grid))
        sequence = []
        for index in range(mynfev):
            tmp = []
            for xx in range(npar):
                tmp.append(grid[xx][index])
            sequence.append(tmp)
        return sequence

    def eval_stat_func(xxx):
        return numpy.append(func(xxx), xxx)

    if sequence is None:
        ranges = []
        for index in range(npar):
            ranges.append([xmin[index], xmax[index]])
        sequence = make_sequence(ranges, num)
    else:
        if not numpy.iterable(sequence):
            raise TypeError("sequence option must be iterable")
        else:
            for seq in sequence:
                if npar != len(seq):
                    msg = "%s must be of length %d" % (seq, npar)
                    raise TypeError(msg)

    answer = eval_stat_func(x)
    sequence_results = list(parallel_map(eval_stat_func, sequence, numcores))
    for xresult in sequence_results[1:]:
        if xresult[0] < answer[0]:
            answer = xresult

    fval = answer[0]
    x = answer[1:]
    nfev = len(sequence_results) + 1
    ierr = 0
    status, msg = _get_saofit_msg(ierr, ierr)
    rv = (status, x, fval)
    rv += (msg, {'info': ierr, 'nfev': nfev})

    if method in ('NelderMead', 'neldermead', 'Neldermead', 'nelderMead'):
        nm_result = neldermead(fcn,
                               x,
                               xmin,
                               xmax,
                               ftol=ftol,
                               maxfev=maxfev,
                               verbose=verbose)
        tmp_nm_result = list(nm_result)
        tmp_nm_result_4 = tmp_nm_result[4]
        tmp_nm_result_4['nfev'] += nfev
        rv = tuple(tmp_nm_result)

    if method in ('LevMar', 'levmar', 'Levmar', 'levMar'):
        levmar_result = lmdif(fcn,
                              x,
                              xmin,
                              xmax,
                              ftol=ftol,
                              xtol=ftol,
                              gtol=ftol,
                              maxfev=maxfev,
                              verbose=verbose)
        tmp_levmar_result = list(levmar_result)
        tmp_levmar_result_4 = tmp_levmar_result[4]
        tmp_levmar_result_4['nfev'] += nfev
        rv = tuple(tmp_levmar_result)

    return rv
Example 15
def grid_search(fcn, x0, xmin, xmax, num=16, sequence=None, numcores=1,
                maxfev=None, ftol=EPSILON, method=None, verbose=0):
    """Grid Search optimization method.

    This method evaluates the fit statistic for each point in the
    parameter space grid; the best match is the grid point with the
    lowest value of the fit statistic. It is intended for use with
    template models as it is very inefficient for general models.

    Parameters
    ----------
    fcn : function reference
       Returns the current statistic and per-bin statistic value when
       given the model parameters.
    x0, xmin, xmax : sequence of number
       The starting point, minimum, and maximum values for each
       parameter.
    num : int
       The size of the grid for each parameter when `sequence` is
       `None`, so ``num^npar`` fits will be evaluated, where `npar` is
       the number of free parameters. The grid spacing is uniform.
    sequence : sequence of numbers or `None`
       The sets of parameter values to evaluate, each of length
       `npar`. Leave as `None` to use a uniform grid spacing as
       determined by the `num` attribute.
    numcores : int or `None`
       The number of CPU cores to use. The default is `1` and a
       value of `None` will use all the cores on the machine.
    maxfev : int or `None`
       The `maxfev` attribute if `method` is not `None`.
    ftol : number
       The `ftol` attribute if `method` is not `None`.
    method : str or `None`
       The optimization method to use to refine the best-fit
       location found using the grid search. If `None` then
       this step is not run.
    verbose: int
       The amount of information to print during the fit. The default
       is `0`, which means no output.

    Returns
    -------
    retval : tuple
       A boolean indicating whether the optimization succeeded, the
       best-fit parameter values, the best-fit statistic value, a
       string message indicating the status, and a dictionary
       returning information from the optimizer.

    """

    x, xmin, xmax = _check_args(x0, xmin, xmax)

    npar = len(x)

    def func(pars):
        aaa = fcn(pars)[0]
        if verbose:
            print('f%s=%g' % (pars, aaa))
        return aaa

    def make_sequence(ranges, N):
        list_ranges = list(ranges)
        for ii in range(npar):
            list_ranges[ii] = tuple(list_ranges[ii]) + (complex(N),)
            list_ranges[ii] = slice(*list_ranges[ii])

        grid = numpy.mgrid[list_ranges]
        mynfev = pow(N, npar)
        grid = list(map(numpy.ravel, grid))
        sequence = []
        for index in range(mynfev):
            tmp = []
            for xx in range(npar):
                tmp.append(grid[xx][index])
            sequence.append(tmp)
        return sequence

    def eval_stat_func(xxx):
        return numpy.append(func(xxx), xxx)

    if sequence is None:
        ranges = []
        for index in range(npar):
            ranges.append([xmin[index], xmax[index]])
        sequence = make_sequence(ranges, num)
    else:
        if not numpy.iterable(sequence):
            raise TypeError("sequence option must be iterable")
        else:
            for seq in sequence:
                if npar != len(seq):
                    msg = "%s must be of length %d" % (seq, npar)
                    raise TypeError(msg)

    answer = eval_stat_func(x)
    sequence_results = list(parallel_map(eval_stat_func, sequence, numcores))
    for xresult in sequence_results[1:]:
        if xresult[0] < answer[0]:
            answer = xresult

    fval = answer[0]
    x = answer[1:]
    nfev = len(sequence_results) + 1
    ierr = 0
    status, msg = _get_saofit_msg(ierr, ierr)
    rv = (status, x, fval)
    rv += (msg, {'info': ierr, 'nfev': nfev})

    # TODO: should we just use case-insensitive comparison?
    if method in ['NelderMead', 'neldermead', 'Neldermead', 'nelderMead']:
        # re.search( '^[Nn]elder[Mm]ead', method ):
        nm_result = neldermead(fcn, x, xmin, xmax, ftol=ftol, maxfev=maxfev,
                               verbose=verbose)
        tmp_nm_result = list(nm_result)
        tmp_nm_result_4 = tmp_nm_result[4]
        tmp_nm_result_4['nfev'] += nfev
        rv = tuple(tmp_nm_result)

    if method in ['LevMar', 'levmar', 'Levmar', 'levMar']:
        # re.search( '^[Ll]ev[Mm]ar', method ):
        levmar_result = lmdif(fcn, x, xmin, xmax, ftol=ftol, xtol=ftol,
                              gtol=ftol, maxfev=maxfev, verbose=verbose)
        tmp_levmar_result = list(levmar_result)
        tmp_levmar_result_4 = tmp_levmar_result[4]
        tmp_levmar_result_4['nfev'] += nfev
        rv = tuple(tmp_levmar_result)

    return rv
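
Note: make_sequence leans on a numpy.mgrid idiom: a complex "step" of
Nj asks for N points including both endpoints rather than a fixed
spacing, which is why complex(N) is appended to each range. A quick
illustration:

import numpy

grid = numpy.mgrid[0.0:1.0:3j, 10.0:20.0:3j]
print(grid.shape)        # (2, 3, 3): a 3x3 grid over 2 parameters
print(grid[0].ravel())   # [0.  0.  0.  0.5 0.5 0.5 1.  1.  1. ]
# A num=3 search over npar=2 parameters therefore evaluates
# 3**2 = 9 points, matching mynfev = pow(N, npar).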
Example 16
    def calc(self, fit, par0, par1, methoddict=None):
        self.title = 'Region-Projection'

        Confidence2D.calc(self, fit, par0, par1)
        if par0.frozen:
            raise ConfidenceErr('frozen', par0.fullname, 'region projection')
        if par1.frozen:
            raise ConfidenceErr('frozen', par1.fullname, 'region projection')

        thawed = [i for i in fit.model.pars if not i.frozen]
        
        if par0 not in thawed:
            raise ConfidenceErr('thawed', par0.fullname, fit.model.name)
        if par1 not in thawed:
            raise ConfidenceErr('thawed', par1.fullname, fit.model.name)

        # If "fast" option enabled, set fitting method to
        # lmdif if stat is chi-squared,
        # else set to neldermead

        # If current method is not LM or NM, warn it is not a good
        # method for estimating parameter limits.
        if type(fit.method) not in (NelderMead, LevMar):
            warning(fit.method.name + " is inappropriate for confidence " +
                    "limit estimation")

        oldfitmethod = fit.method
        if bool_cast(self.fast) is True and methoddict is not None:
            if isinstance(fit.stat, Likelihood):
                if type(fit.method) is not NelderMead:
                    fit.method = methoddict['neldermead']
                    warning("Setting optimization to " + fit.method.name
                            + " for region projection plot")
            else:
                if type(fit.method) is not LevMar:
                    fit.method = methoddict['levmar']
                    warning("Setting optimization to " + fit.method.name
                            + " for region projection plot")

        def eval_proj(pars):
            for ii in [0, 1]:
                if self.log[ii]:
                    pars[ii] = numpy.power(10, pars[ii])
            (par0.val, par1.val) = pars
            if len(thawed) > 2:
                r = fit.fit()
                return r.statval
            return fit.calc_stat()

        oldpars = fit.model.thawedpars

        # Store the startup and teardown methods up front so they can
        # be stubbed out below (the per-fit calls are unnecessary) and
        # restored even if fit.model.startup() itself raises.
        startup = fit.model.startup
        teardown = fit.model.teardown

        try:
            fit.model.startup()

            # Stub out the per-fit hooks; startup has already been
            # paid once for the whole grid.
            fit.model.startup = lambda: None
            fit.model.teardown = lambda: None

            grid = self._region_init(fit, par0, par1)

            par0.freeze()
            par1.freeze()

            self.y = numpy.asarray(parallel_map(eval_proj, grid,
                                                self.numcores))

        finally:
            # Set back data after we changed it
            par0.thaw()
            par1.thaw()

            fit.model.startup = startup
            fit.model.teardown = teardown

            fit.model.teardown()
            fit.model.thawedpars = oldpars
            fit.method = oldfitmethod
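
Note: stubbing out startup and teardown is a batching trick: the real
startup() is paid once for the whole grid, the hook calls inside each
inner fit() become no-ops, and the real teardown() runs once at the
end. The pattern in isolation, with a hypothetical model object:

# Save the real hooks so they can be restored no matter what.
startup, teardown = model.startup, model.teardown
startup()
model.startup = lambda: None      # inner fits now hit the stubs
model.teardown = lambda: None
try:
    run_many_fits()               # e.g. one fit per grid point
finally:
    model.startup = startup
    model.teardown = teardown
    teardown()                    # the one real teardown for the batch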