Example #1
0
 def preFit(self):
   """Scan the normalization parameters to find a good starting point.

   If the starting values are too far from the data, minuit and the
   other minimizers will have trouble converging; this coarse scan
   moves every free normalization to the point of a log-spaced grid
   where the global -log(likelihood) is smallest.

   Side effects: fills self.freeParameters with the free normalization
   parameters and updates their value and delta. Does nothing if there
   are no free normalizations (maybe they are all fixed?).
   """

   # Get the list of free parameters
   freeParams = self.likelihoodModel.getFreeParameters()

   # Isolate the normalizations: they are the only parameters scanned here.
   # (.items() instead of .iteritems() so this also runs on Python 3)
   self.freeParameters = collections.OrderedDict(
       (k, v) for (k, v) in freeParams.items() if v.isNormalization())

   if len(self.freeParameters) == 0:

     # No free normalization: nothing to scan
     return

   # One log-spaced grid of 50 points per normalization, spanning the
   # whole allowed range of the parameter
   grids = [numpy.logspace(numpy.log10(norm.minValue),
                           numpy.log10(norm.maxValue),
                           50)
            for norm in self.freeParameters.values()]

   # Compute the global likelihood at each point of the Cartesian
   # product of the grids
   globalGrid = cartesian(grids)

   # list() is needed on Python 3, where map() returns a lazy iterator
   # that numpy.argmin cannot index
   logLikes = list(map(self.minusLogLikeProfile, globalGrid))

   idx = numpy.argmin(logLikes)

   # Move each normalization to the best grid point; the delta (the
   # initial step size for the minimizer) is set to 2.5% of the value
   for i, norm in enumerate(self.freeParameters.values()):

     norm.setValue(globalGrid[idx][i])
     norm.setDelta(norm.value / 40)
Example #2
0
    def contours(
        self, src1, param1, p1min, p1max, p1steps, src2, param2, p2min, p2max, p2steps, progress=True, **kwargs
    ):
        """Profile the likelihood over one or two parameters.

        For each point of the grid defined by [p1min, p1max] (x
        [p2min, p2max] when a second parameter is given), the
        parameter(s) under scrutiny are fixed and a new Minuit fit is
        run on the remaining free parameters. Pass src2/param2 = None
        to profile only the first parameter.

        :param src1: name of the source owning the first parameter
        :param param1: name of the first parameter
        :param p1min, p1max, p1steps: range and number of steps for the
            first parameter
        :param src2, param2, p2min, p2max, p2steps: same for the second
            parameter (src2/param2 may be None for a 1-d profile)
        :param progress: whether to display a progress bar
        :param kwargs: 'log' may be a tuple of booleans requesting
            logarithmic stepping per dimension
        :return: (a, b, matrix) where a and b are the step arrays and
            matrix contains the minimized -log(like) at each grid point
        """

        # First create another minimizer, starting from the options of the
        # one used for the fit

        newargs = dict(self.minuit.fitarg)

        # Update the values for the parameters with the best fit ones
        # (.items() instead of .iteritems(): works on Python 3 as well)

        for key, value in self.minuit.values.items():

            newargs[key] = value

        # Fix the parameters under scrutiny

        for s, p in zip([src1, src2], [param1, param2]):

            if s is None:

                # Only one parameter to analyze

                continue

            # Minuit parameter names follow the "<param>_of_<source>" convention
            key = "%s_of_%s" % (p, s)

            if key not in newargs:

                raise ValueError("Parameter %s is not a free parameter for source %s." % (p, s))

            else:

                newargs["fix_%s" % key] = True

        # This is a likelihood, so the 1-sigma error definition is 0.5
        newargs["errordef"] = 0.5

        newargs["print_level"] = 0

        # Now create the new minimizer
        self.contour_minuit = Minuit(self._f, **newargs)

        # Check the keywords: 'log' selects logarithmic stepping per dimension
        p1log = False
        p2log = False

        if "log" in kwargs:

            p1log = bool(kwargs["log"][0])

            if param2 is not None:

                p2log = bool(kwargs["log"][1])

        # Generate the steps

        if p1log:

            a = numpy.logspace(numpy.log10(p1min), numpy.log10(p1max), p1steps)

        else:

            a = numpy.linspace(p1min, p1max, p1steps)

        if param2 is not None:

            if p2log:

                b = numpy.logspace(numpy.log10(p2min), numpy.log10(p2max), p2steps)

            else:

                b = numpy.linspace(p2min, p2max, p2steps)

        else:

            # Only one parameter to step through.
            # Put b as nan so that the worker can realize that it does not
            # have to step through it

            b = numpy.array([numpy.nan])

        # Generate the grid (Cartesian product of the two step arrays)

        grid = cartesian([a, b])

        # Define the parallel worker: computes the profiled likelihood at
        # one grid point (aa, bb)

        def contourWorker(args):

            aa, bb = args

            name1 = "%s_of_%s" % (param1, src1)

            # Will change this if needed
            name2 = None

            # Set the parameters under scrutiny to the current values.
            # Since iminuit does not allow to fix parameters on an existing
            # minimizer, a new one has to be created for every grid point

            newargs = dict(self.minuit.fitarg)

            newargs["fix_%s" % name1] = True
            newargs["%s" % name1] = aa

            if numpy.isfinite(bb):

                name2 = "%s_of_%s" % (param2, src2)

                newargs["fix_%s" % name2] = True
                newargs["%s" % name2] = bb

            else:

                # We are stepping through one param only.
                # Do nothing

                pass

            newargs["errordef"] = 0.5
            newargs["print_level"] = 0

            m = Minuit(self._f, **newargs)

            # High tolerance for speed
            m.tol = 100

            # Handle the corner case where there are no free parameters
            # after fixing the two under scrutiny

            if len(m.list_of_vary_param()) == 0:

                # All parameters are fixed, just return the likelihood function

                if name2 is None:

                    val = self._f(aa)

                else:

                    # This is needed because the user could specify the
                    # variables in reverse order

                    myvars = [0] * 2
                    myvars[self.nameToPos[name1]] = aa
                    myvars[self.nameToPos[name2]] = bb

                    val = self._f(*myvars)

                return val

            try:

                m.migrad()

            # Catch only Exception so that KeyboardInterrupt / SystemExit
            # still propagate and can abort the whole scan
            except Exception:

                # In this context this is not such a big deal,
                # because we might be so far from the minimum that
                # the fit cannot converge

                return FIT_FAILED

            return m.fval

        # Do the computation

        if progress:

            prog = ProgressBar(grid.shape[0])

            def wrap(args):
                prog.increase()
                return contourWorker(args)

            # list() so the results are indexable on Python 3 as well
            r = list(map(wrap, grid))

        else:

            r = list(map(contourWorker, grid))

        return a, b, numpy.array(r).reshape((a.shape[0], b.shape[0]))
Example #3
0
def test_cartesian():
    """Check that cartesian() returns every combination of the inputs."""
    cart = cartesian(([1, 2, 3], [1, 2, 3]))

    # 3 x 3 = 9 combinations, each one a pair.
    # The original test asserted nothing, so it could never fail on a
    # wrong result; compare against the explicit Cartesian product.
    assert len(cart) == 9

    expected = [(a, b) for a in [1, 2, 3] for b in [1, 2, 3]]
    assert sorted(tuple(row) for row in cart) == expected
Example #4
0
    def contours(
        self,
        param_1,
        param_1_minimum,
        param_1_maximum,
        param_1_n_steps,
        param_2=None,
        param_2_minimum=None,
        param_2_maximum=None,
        param_2_n_steps=None,
        progress=True,
        **options
    ):
        """
        Generate confidence contours for the given parameters by stepping for the given number of steps between
        the given boundaries. Call it specifying only param_1, param_1_minimum and param_1_maximum to
        generate the profile of the likelihood for parameter 1. Specify all parameters to obtain instead a 2d
        contour of param_1 vs param_2

        :param param_1: name of the first parameter
        :param param_1_minimum: lower bound for the range for the first parameter
        :param param_1_maximum: upper bound for the range for the first parameter
        :param param_1_n_steps: number of steps for the first parameter
        :param param_2: name of the second parameter
        :param param_2_minimum: lower bound for the range for the second parameter
        :param param_2_maximum: upper bound for the range for the second parameter
        :param param_2_n_steps: number of steps for the second parameter
        :param progress: (True or False) whether to display progress or not
        :param log: by default the steps are taken linearly. With this optional parameter you can provide a tuple of
        booleans which specify whether the steps are to be taken logarithmically. For example,
        'log=(True,False)' specify that the steps for the first parameter are to be taken logarithmically, while they
        are linear for the second parameter. If you are generating the profile for only one parameter, you can specify
         'log=(True,)' or 'log=(False,)' (optional)
        :param parallel: whether to use or not parallel computation (default:False)
        :return: a : an array corresponding to the steps for the first parameter
                 b : an array corresponding to the steps for the second parameter (an array containing a single
                 NaN if stepping only in one direction)
                 contour : a matrix of size param_1_steps x param_2_steps containing the value of the function at the
                 corresponding points in the grid. If param_2_steps is None (only one parameter), then this reduces to
                 an array of size param_1_steps.
        """

        # Figure out if we are making a 1d or a 2d contour

        if param_2 is None:

            n_dimensions = 1

        else:

            n_dimensions = 2

        # Check the options

        p1log = False
        p2log = False
        parallel = False

        if "log" in options:

            assert len(options["log"]) == n_dimensions, (
                "When specifying the 'log' option you have to provide a "
                + "boolean for each dimension you are stepping on."
            )

            p1log = bool(options["log"][0])

            if param_2 is not None:

                p2log = bool(options["log"][1])

        if "parallel" in options:

            parallel = bool(options["parallel"])

        # Generate the steps

        if p1log:

            param_1_steps = numpy.logspace(math.log10(param_1_minimum), math.log10(param_1_maximum), param_1_n_steps)

        else:

            param_1_steps = numpy.linspace(param_1_minimum, param_1_maximum, param_1_n_steps)

        if n_dimensions == 2:

            if p2log:

                param_2_steps = numpy.logspace(
                    math.log10(param_2_minimum), math.log10(param_2_maximum), param_2_n_steps
                )

            else:

                param_2_steps = numpy.linspace(param_2_minimum, param_2_maximum, param_2_n_steps)

        else:

            # Only one parameter to step through
            # Put param_2_steps as nan so that the worker can realize that it does not have
            # to step through it

            param_2_steps = numpy.array([numpy.nan])

        # Generate the grid (Cartesian product of the two step arrays)

        grid = cartesian([param_1_steps, param_2_steps])

        # Define the worker which will compute the value of the function at a given point in the grid

        # Restore best fit

        self._restore_best_fit()

        # Duplicate the options used for the original minimizer

        new_args = dict(self.minuit.fitarg)

        # Get the minuit names for the parameters

        minuit_param_1 = self._parameter_name_to_minuit_name(param_1)

        if param_2 is None:

            minuit_param_2 = None

        else:

            minuit_param_2 = self._parameter_name_to_minuit_name(param_2)

        # Instance the worker

        contour_worker = ContourWorker(
            self._f, self.minuit.values, new_args, minuit_param_1, minuit_param_2, self.name_to_position
        )

        # We are finally ready to do the computation

        # Serial and parallel computation are slightly different, so check whether we are in one case
        # or the other

        if not parallel:

            # Serial computation

            if progress:

                # Computation with progress bar

                progress_bar = ProgressBar(grid.shape[0])

                # Define a wrapper which will increase the progress as well as run the actual computation

                def wrap(args):

                    results = contour_worker(args)

                    progress_bar.increase()

                    return results

                # Do the computation. list() so the results are indexable
                # (and reshape-able) on Python 3, where map() is lazy

                results = list(map(wrap, grid))

            else:

                # Computation without the progress bar

                results = list(map(contour_worker, grid))

        else:

            # Parallel computation

            # Connect to the engines

            client = ParallelClient(**options)

            # Get a balanced view of the engines

            load_balance_view = client.load_balanced_view()

            # Distribute the work among the engines and start it, but return immediately the control
            # to the main thread

            amr = load_balance_view.map_async(contour_worker, grid)

            # Print progress. Use a dedicated name so we do not shadow the
            # boolean 'progress' parameter of this method
            n_points = grid.flatten().shape[0]
            parallel_progress = ProgressBar(n_points)

            # This loop will check from time to time the status of the computation, which is happening on
            # different threads, and update the progress bar

            while not amr.ready():
                # Check and report the status of the computation every second

                time.sleep(1)

                parallel_progress.animate(amr.progress - 1)

            # If there have been problems, here is where they will be raised

            results = amr.get()

            # Always display 100% at the end

            parallel_progress.animate(n_points)

            # Add a new line after the progress bar
            print("\n")

        # Return results

        return (
            param_1_steps,
            param_2_steps,
            numpy.array(results).reshape((param_1_steps.shape[0], param_2_steps.shape[0])),
        )