Example #1
    def generateDictFromNTo_KL_divergenceListAndGammaListIncludingGammaGreaterThanEta(self): #tested
        "Also gives numGammasGreaterThanEta Gammas"

        if self.normalizedKL_divergenceList is None or len(self.normalizedKL_divergenceList) == 0:
            raise Exception("Beta table has no normalized KL_divergnece list")
        if not self.NumberOfGammasGreaterThanEta:
            raise Exception("Beta table has no variable for number of gammas greater than eta")
        self.generateDictFromNTo_KL_divergenceList()
        self.generateDictFromNToGammaList()
        p_eta = pdf.probabilityDistributionFactory(self.k, self.l).get_p_eta(self.eta)

        uniformMarginals = [1.0/self.k,1.0/self.l]
        probabilityDistPathBasedAtUniformMarginals = pdpf.probabilityDistributionPathFactory(uniformMarginals, self.k, self.l).construct()
        t_max = probabilityDistPathBasedAtUniformMarginals.t_max
        distributionAt_t_max_OneUniformBasedPath = probabilityDistPathBasedAtUniformMarginals.distribution_at_t(t_max)
        KLDivergenceFromP_etaToDistributionAtTMaxOnPath = p_eta.KL_divergence_as_base(distributionAt_t_max_OneUniformBasedPath)

        probabilityDistPathBasedAtUniform = pdpf.probabilityDistributionPathFactory([1.0/self.k, 1.0/self.l], self.k, self.l).construct()
        probabilityDistPathBasedAtUniform.markP_eta(self.eta)

        numLgGam = int(self.NumberOfGammasGreaterThanEta)
        rawKLDivergenceListForGammaGreaterThanEta = KLDivergenceFromP_etaToDistributionAtTMaxOnPath * ((1.0 - tolerance)/numLgGam) * np.array(range(numLgGam + 1))


        for N in self.NList:
            self.NToKL_divergenceList[N].extend(rawKLDivergenceListForGammaGreaterThanEta)
            GammaListForGammaGreaterThanEta = [ probabilityDistPathBasedAtUniform.KL_divergence_at_t(
                probabilityDistPathBasedAtUniform.t_at_specifiedDivergenceFromMarkedDistAwayFromBase(KLDivergence)) for KLDivergence in rawKLDivergenceListForGammaGreaterThanEta]
            self.NToGammaList[N] = np.append(self.NToGammaList[N],np.array(GammaListForGammaGreaterThanEta))
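A note on the raw divergence grid built above: it is simply numLgGam + 1 evenly spaced KL values from 0 up to (1 - tolerance) times the divergence at t_max. A minimal standalone sketch of that one step, with made-up stand-ins for the beta-table attributes and the module-level tolerance:

import numpy as np

# Stand-in values; in the method these come from the beta table and a module-level `tolerance`.
divergence_at_t_max = 0.35          # plays the role of KLDivergenceFromP_etaToDistributionAtTMaxOnPath
tolerance = 1e-6
num_gammas_greater_than_eta = 10    # plays the role of int(self.NumberOfGammasGreaterThanEta)

# Same expression as in the method:
raw_grid = divergence_at_t_max * ((1.0 - tolerance) / num_gammas_greater_than_eta) * np.array(
    range(num_gammas_greater_than_eta + 1))

# Equivalent, arguably clearer, formulation:
assert np.allclose(raw_grid, np.linspace(0.0, divergence_at_t_max * (1.0 - tolerance),
                                         num_gammas_greater_than_eta + 1))
print(raw_grid)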
 def setUp(self):
     self.factory = pdpf.probabilityDistributionPathFactory([0.1,0.9], 2, 2)
     self.path = self.factory.construct()
     self.factoryUniform = pdpf.probabilityDistributionPathFactory([0.5,0.5], 2, 2)
     self.pathUniform = self.factoryUniform.construct()
     self.pathUniform.markP_eta(0.01)
     self.distributionFactory = pdf.probabilityDistributionFactory(2,2)
 def setUp(self):
     self.factory = pdpf.probabilityDistributionPathFactory([0.1, 0.9], 2,
                                                            2)
     self.path = self.factory.construct()
     self.factoryUniform = pdpf.probabilityDistributionPathFactory(
         [0.5, 0.5], 2, 2)
     self.pathUniform = self.factoryUniform.construct()
     self.pathUniform.markP_eta(0.01)
     self.distributionFactory = pdf.probabilityDistributionFactory(2, 2)
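For orientation, here is what t_max and t_min would be for the [0.1, 0.9] fixture above if the path is the usual one-parameter family of 2x2 joints with fixed marginals (p11 = a*b + t and so on). That parameterization is an assumption about probabilityDistributionPathFactory, not something taken from its code:

a, b = 0.1, 0.9
t_max = min(a * (1 - b), (1 - a) * b)    # largest t keeping every cell non-negative
t_min = -min(a * b, (1 - a) * (1 - b))   # most negative admissible t
print(t_max, t_min)                      # ~0.01 and ~-0.09 under this assumption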
Example #4
 def generate_N_toMinimumGammaDict(self):
     '''
     :Effect:
     Implements (145) in Section 4.2 of Chapter 3
     
     Generates a dictionary mapping each value of N to the minimum gamma
     and assigns it to the NToMinimumGammaDict data member
     
     :Example:
     testGenerate_N_ToMinimumGammaDict
     '''
     if not self.NList:
         raise ValueError("No NList for betaTable.")
     logGammas = (-1.0 / 10.0) * np.array(range(1, 1000))
     Gammas = np.exp(logGammas)
     uniformMarginalsBasedPath = pdpf.probabilityDistributionPathFactory(
         [1.0 / self.k, 1.0 / self.l], self.k, self.l).construct()
     Ns = [
         int(
             np.ceil(
                 1.0 /
                 (uniformMarginalsBasedPath.
                  lengthOfSegmentofKLDivergenceLessThanSpecified(gamma))))
         for gamma in Gammas
     ]
     NToMinimumGammaDict = {}
     for N in self.NList:
         index = 0
         while Ns[index] <= N:
             index += 1
         NToMinimumGammaDict[N] = Gammas[index]
     self.NToMinimumGammaDict = NToMinimumGammaDict
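A standalone sketch of the lookup logic above. lengthOfSegmentofKLDivergenceLessThanSpecified is replaced by a made-up stand-in (any ell(gamma) that shrinks as gamma shrinks works for illustration); the real method lives on the path object:

import numpy as np

def segment_length(gamma):
    # Stand-in for lengthOfSegmentofKLDivergenceLessThanSpecified, for illustration only.
    return 2.0 * np.sqrt(gamma / 2.0)

gammas = np.exp((-1.0 / 10.0) * np.arange(1, 1000))                       # decreasing gammas
Ns_required = [int(np.ceil(1.0 / segment_length(g))) for g in gammas]     # nondecreasing

def minimum_gamma_for(N):
    # First gamma whose required N exceeds the given N, mirroring the while loop above.
    index = 0
    while Ns_required[index] <= N:
        index += 1
    return gammas[index]

print({N: minimum_gamma_for(N) for N in [100, 1000, 10000]})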
 def RobbinsEstimateOfEmissionProbabilityTimesCharFunctionOfTauMinusGamma(
         self, firstMarginal, secondMarginal, t):
     """
     Evaluate the function that estimates, from above, the probability of emission of a type of size N "close to"
     the probability distribution parameterized by the triple (firstMarginal, secondMarginal, t).
     Before calling the actual evaluation of the probabilityCalculatorObject, this checks that
     the three parameters (firstMarginal, secondMarginal, t) actually parameterize a valid probability distribution.
     Only implemented for the binary/binary case (k = l = 2) so far.
     """
     N = self.N
     # check that the marginals lie strictly inside the probability simplex; if they do not,
     # or if the parameter t is out of bounds, return 0
     if firstMarginal > 1 - max_t_comparison_tolerance or firstMarginal < 0 + max_t_comparison_tolerance:
         return 0
     if secondMarginal > 1 - max_t_comparison_tolerance or secondMarginal < 0 + max_t_comparison_tolerance:
         return 0
     pathBasedAtMarginals = pdpf.probabilityDistributionPathFactory(
         [firstMarginal, secondMarginal], self.k, self.l).construct()
     max_t = pathBasedAtMarginals.t_max
     min_t = pathBasedAtMarginals.t_min
     if t > max_t - max_t_comparison_tolerance or t < min_t + max_t_comparison_tolerance:
         return 0
     KLDivergenceAt_t = pathBasedAtMarginals.KL_divergence_at_t(t)
     if KLDivergenceAt_t > self.gamma:
         return 0
     else:
         p_gammaDistribution = pdf.probabilityDistributionFactory(
             self.k, self.l).distributionWrappingParameters(
                 pathBasedAtMarginals.distribution_at_t(t))
         return self.probabilityCalculatorObject.emissionProbabilityFromP_eta_ofProductLikeTypeSizeN(
             p_gammaDistribution, N)
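The "Robbins estimate" in the name presumably refers to Robbins' 1955 refinement of Stirling's formula, which gives two-sided bounds on n! and hence on multinomial coefficients. For reference, a quick standalone check of those bounds (a standard fact, independent of how probabilityCalculatorObject actually uses it):

import math

def robbins_bounds(n):
    # Robbins (1955): sqrt(2*pi*n) * (n/e)**n * exp(1/(12n + 1)) < n! < sqrt(2*pi*n) * (n/e)**n * exp(1/(12n))
    stirling = math.sqrt(2 * math.pi * n) * (n / math.e) ** n
    return stirling * math.exp(1.0 / (12 * n + 1)), stirling * math.exp(1.0 / (12 * n))

for n in (5, 10, 20):
    lower, upper = robbins_bounds(n)
    assert lower < math.factorial(n) < upper
    print(n, lower, math.factorial(n), upper)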
     """
Example #6
 def generate_N_toMinimumGammaDict(self):
     '''
     :Effect:
     Implements (145) in Section 4.2 of Chapter 3
     
     Generates a dictionary mapping each value of N to the minimum gamma
     and assigns it to the NToMinimumGammaDict data member
     
     :Example:
     testGenerate_N_ToMinimumGammaDict
     '''
     if not self.NList:
         raise ValueError("No NList for betaTable.")
     logGammas = (-1.0/10.0)*np.array(range(1,1000))
     Gammas = np.exp(logGammas)
     uniformMarginalsBasedPath = pdpf.probabilityDistributionPathFactory([1.0/self.k, 1.0/self.l],
                                                                         self.k,self.l).construct()
     Ns = [int(np.ceil(1.0/(uniformMarginalsBasedPath.lengthOfSegmentofKLDivergenceLessThanSpecified(gamma))))  for gamma in Gammas]
     NToMinimumGammaDict = {}
     for N in self.NList:
         index = 0
         while Ns[index] <= N:
             index += 1
         NToMinimumGammaDict[N] = Gammas[index]
     self.NToMinimumGammaDict = NToMinimumGammaDict
 def RobbinsEstimateOfEmissionProbabilityTimesCharFunctionOfTauMinusGamma(self, firstMarginal, secondMarginal, t):
     """
     Evaluate the function that estimates, from above, the probability of emission of a type of size N "close to"
     the probability distribution parameterized by the triple (firstMarginal, secondMarginal, t).
     Before calling the actual evaluation of the probabilityCalculatorObject, this checks that
     the three parameters (firstMarginal, secondMarginal, t) actually parameterize a valid probability distribution.
     Only implemented for the binary/binary case (k = l = 2) so far.
     """
     N = self.N
     # check that the marginals lie strictly inside the probability simplex; if they do not,
     # or if the parameter t is out of bounds, return 0
     if firstMarginal > 1 - max_t_comparison_tolerance or firstMarginal < 0 + max_t_comparison_tolerance:
         return 0
     if secondMarginal > 1 - max_t_comparison_tolerance or secondMarginal < 0 + max_t_comparison_tolerance:
         return 0
     pathBasedAtMarginals = pdpf.probabilityDistributionPathFactory([firstMarginal, secondMarginal], self.k,self.l).construct()
     max_t = pathBasedAtMarginals.t_max
     min_t = pathBasedAtMarginals.t_min
     if t > max_t - max_t_comparison_tolerance or t < min_t + max_t_comparison_tolerance:
         return 0
     KLDivergenceAt_t = pathBasedAtMarginals.KL_divergence_at_t(t)
     if KLDivergenceAt_t > self.gamma:
         return 0
     else:
         p_gammaDistribution = pdf.probabilityDistributionFactory(self.k, self.l).distributionWrappingParameters(pathBasedAtMarginals.distribution_at_t(t))
         return self.probabilityCalculatorObject.emissionProbabilityFromP_eta_ofProductLikeTypeSizeN( p_gammaDistribution, N)  
     
     """
Example #8
    def generateDictFromNTo_KL_divergenceListAndGammaListIncludingGammaGreaterThanEta(
            self):  #tested
        "Also gives numGammasGreaterThanEta Gammas"

        if self.normalizedKL_divergenceList is None or len(
                self.normalizedKL_divergenceList) == 0:
            raise Exception("Beta table has no normalized KL_divergnece list")
        if not self.NumberOfGammasGreaterThanEta:
            raise Exception(
                "Beta table has no variable for number of gammas greater than eta"
            )
        self.generateDictFromNTo_KL_divergenceList()
        self.generateDictFromNToGammaList()
        p_eta = pdf.probabilityDistributionFactory(self.k,
                                                   self.l).get_p_eta(self.eta)

        uniformMarginals = [1.0 / self.k, 1.0 / self.l]
        probabilityDistPathBasedAtUniformMarginals = pdpf.probabilityDistributionPathFactory(
            uniformMarginals, self.k, self.l).construct()
        t_max = probabilityDistPathBasedAtUniformMarginals.t_max
        distributionAt_t_max_OneUniformBasedPath = probabilityDistPathBasedAtUniformMarginals.distribution_at_t(
            t_max)
        KLDivergenceFromP_etaToDistributionAtTMaxOnPath = p_eta.KL_divergence_as_base(
            distributionAt_t_max_OneUniformBasedPath)

        probabilityDistPathBasedAtUniform = pdpf.probabilityDistributionPathFactory(
            [1.0 / self.k, 1.0 / self.l], self.k, self.l).construct()
        probabilityDistPathBasedAtUniform.markP_eta(self.eta)

        numLgGam = int(self.NumberOfGammasGreaterThanEta)
        rawKLDivergenceListForGammaGreaterThanEta = KLDivergenceFromP_etaToDistributionAtTMaxOnPath * (
            (1.0 - tolerance) / numLgGam) * np.array(range(numLgGam + 1))

        for N in self.NList:
            self.NToKL_divergenceList[N].extend(
                rawKLDivergenceListForGammaGreaterThanEta)
            GammaListForGammaGreaterThanEta = [
                probabilityDistPathBasedAtUniform.KL_divergence_at_t(
                    probabilityDistPathBasedAtUniform.
                    t_at_specifiedDivergenceFromMarkedDistAwayFromBase(
                        KLDivergence))
                for KLDivergence in rawKLDivergenceListForGammaGreaterThanEta
            ]
            self.NToGammaList[N] = np.append(
                self.NToGammaList[N],
                np.array(GammaListForGammaGreaterThanEta))
def t_gammaPlusMinus_l_gamma(marginalPair, gamma, k,l):
    """
    Input: k = |Val(A)|, l=|Val(B)|, marginalPair = [firstMarginal, secondMarginal]
    Return t_gamma_plus, t_gamma_minus, and the "length" l_gamma := t_gamma_plus - t_gamma_minus
    """
    probDistPath = pdpf.probabilityDistributionPathFactory(marginalPair, k, l).construct()
    t_gamma_plus = probDistPath.largestPos_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
    t_gamma_minus = probDistPath.smallestNeg_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
    return t_gamma_plus, t_gamma_minus, t_gamma_plus - t_gamma_minus #length of relevant segment
def t_gammaPlusMinus_l_gamma(marginalPair, gamma, k, l):
    """
    Input: k = |Val(A)|, l=|Val(B)|, marginalPair = [firstMarginal, secondMarginal]
    Return t_gamma_plus, t_gamma_minus, and the "length" l_gamma := t_gamma_plus - t_gamma_minus
    """
    probDistPath = pdpf.probabilityDistributionPathFactory(marginalPair, k,
                                                           l).construct()
    t_gamma_plus = probDistPath.largestPos_t_atWhichKLDivergenceFromBaseIsLessThanEta(
        gamma)
    t_gamma_minus = probDistPath.smallestNeg_t_atWhichKLDivergenceFromBaseIsLessThanEta(
        gamma)
    return t_gamma_plus, t_gamma_minus, t_gamma_plus - t_gamma_minus  #length of relevant segment
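A minimal standalone illustration of what t_gamma_plus, t_gamma_minus and l_gamma mean, assuming that the path through marginals (a, b) is the usual one-parameter family of 2x2 joints p = [a*b + t, a*(1-b) - t, (1-a)*b - t, (1-a)*(1-b) + t] and that "divergence from the base" means KL(p_t || p_0). Both are assumptions about the project's path objects, so treat the numbers as illustrative only:

import numpy as np
from scipy.optimize import brentq

def joint_2x2(a, b, t):
    # One-parameter family of 2x2 joints with marginals (a, 1-a) and (b, 1-b); t = 0 is the base.
    return np.array([a * b + t, a * (1 - b) - t, (1 - a) * b - t, (1 - a) * (1 - b) + t])

def kl_from_base(a, b, t):
    p0, pt = joint_2x2(a, b, 0.0), joint_2x2(a, b, t)
    mask = pt > 0
    return float(np.sum(pt[mask] * np.log(pt[mask] / p0[mask])))

def t_gamma_plus_minus_l_gamma(a, b, gamma):
    t_max = min(a * (1 - b), (1 - a) * b)      # endpoints of the admissible t-range
    t_min = -min(a * b, (1 - a) * (1 - b))
    eps = 1e-12
    t_plus = brentq(lambda t: kl_from_base(a, b, t) - gamma, 0.0, t_max - eps)
    t_minus = brentq(lambda t: kl_from_base(a, b, t) - gamma, t_min + eps, 0.0)
    return t_plus, t_minus, t_plus - t_minus

print(t_gamma_plus_minus_l_gamma(0.5, 0.5, 0.001))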
 def GaussianCenteredAttGammaPlusfromMarginals(self, marginals):
     """
     Takes a 2-element list of marginals and returns a frozen scipy.stats normal distribution (rv_frozen)
     """
     #constants
     k,l = self.k, self.l
     
     probabilityDistPathBasedAtMarginals = pdpf.probabilityDistributionPathFactory(marginals, k, l).construct()
     logging.debug("For marginals=%s, gamma=%s, about to compute t_gamma_plus"%(str(marginals), self.gamma))
     t_gamma_plus = probabilityDistPathBasedAtMarginals.t_at_specified_divergence_from_base_pos_t_orMax_t(self.gamma)
     logging.debug('For gamma=%s, marginals =%s, returning normal dist of loc and scale = %s'%(self.gamma, str(marginals), t_gamma_plus))
     return stats.norm(loc=t_gamma_plus, scale=t_gamma_plus)
Example #12
 def generateDictFromNToGammaList(self):
     """
     :Effect:
     Convert NToKL_divergenceList to NToGammaList
     """
     if not self.NToKL_divergenceList:
         raise ValueError("Beta table has no NToKL_divergenceList.")
     self.NToGammaList = {}
     probabilityDistPathBasedAtUniform = pdpf.probabilityDistributionPathFactory([1.0/self.k, 1.0/self.l], self.k, self.l).construct()
     probabilityDistPathBasedAtUniform.markP_eta(self.eta)
     for N in self.NList:
         GammaList = [ probabilityDistPathBasedAtUniform.KL_divergence_at_t(
             probabilityDistPathBasedAtUniform.t_at_specifiedDivergenceFromMarkedDistInDirectionOfBase(KLDivergence))
                       for KLDivergence in self.NToKL_divergenceList[N]]
         self.NToGammaList[N] = np.array(GammaList)
    def GaussianCenteredAttGammaPlusfromMarginals(self, marginals):
        """
        Takes a 2-element list of marginals and returns a frozen scipy.stats normal distribution (rv_frozen)
        """
        #constants
        k, l = self.k, self.l

        probabilityDistPathBasedAtMarginals = pdpf.probabilityDistributionPathFactory(
            marginals, k, l).construct()
        logging.debug(
            "For marginals=%s, gamma=%s, about to compute t_gamma_plus" %
            (str(marginals), self.gamma))
        t_gamma_plus = probabilityDistPathBasedAtMarginals.t_at_specified_divergence_from_base_pos_t_orMax_t(
            self.gamma)
        logging.debug(
            'For gamma=%s, marginals =%s, returning normal dist of loc and scale = %s'
            % (self.gamma, str(marginals), t_gamma_plus))
        return stats.norm(loc=t_gamma_plus, scale=t_gamma_plus)
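For readers unfamiliar with frozen scipy.stats distributions, this is what the returned object supports; the t_gamma_plus value below is made up purely for illustration:

from scipy import stats

t_gamma_plus = 0.05                               # made-up value
frozen = stats.norm(loc=t_gamma_plus, scale=t_gamma_plus)

print(frozen.pdf(t_gamma_plus))                   # density at the centre, 1 / (scale * sqrt(2*pi))
print(frozen.cdf(0.0))                            # mass left of t = 0; about 0.159, one sd below the mean
print(frozen.rvs(size=3, random_state=0))         # reproducible samples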
Example #14
 def generateDictFromNToGammaList(self):
     """
     :Effect:
     Convert NToKL_divergenceList to NToGammaList
     """
     if not self.NToKL_divergenceList:
         raise ValueError("Beta table has no NToKL_divergenceList.")
     self.NToGammaList = {}
     probabilityDistPathBasedAtUniform = pdpf.probabilityDistributionPathFactory(
         [1.0 / self.k, 1.0 / self.l], self.k, self.l).construct()
     probabilityDistPathBasedAtUniform.markP_eta(self.eta)
     for N in self.NList:
         GammaList = [
             probabilityDistPathBasedAtUniform.KL_divergence_at_t(
                 probabilityDistPathBasedAtUniform.
                 t_at_specifiedDivergenceFromMarkedDistInDirectionOfBase(
                     KLDivergence))
             for KLDivergence in self.NToKL_divergenceList[N]
         ]
         self.NToGammaList[N] = np.array(GammaList)
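A standalone sketch of the "raw divergence -> gamma" mapping performed above, under the same illustrative 2x2 assumptions as the t_gammaPlusMinus_l_gamma sketch earlier (uniform marginals; the KL orientations are guesses). For each raw divergence d measured from the marked p_eta in the direction of the base, find the parameter t at that divergence and report the divergence of p_t from the base; that reported value is the gamma:

import numpy as np
from scipy.optimize import brentq

def joint(t):
    # 2x2 joints with uniform marginals; t = 0 is the independent base distribution.
    return np.array([0.25 + t, 0.25 - t, 0.25 - t, 0.25 + t])

def kl(q, p):
    mask = q > 0
    return float(np.sum(q[mask] * np.log(q[mask] / p[mask])))

base = joint(0.0)
eta = 0.01
t_eta = brentq(lambda t: kl(joint(t), base) - eta, 0.0, 0.25 - 1e-12)   # marks p_eta on the path
p_eta = joint(t_eta)

def gamma_for_raw_divergence(d):
    # t at which the path is divergence d away from p_eta, moving toward the base ...
    t = brentq(lambda t: kl(joint(t), p_eta) - d, 0.0, t_eta)
    # ... and the divergence of that distribution from the base is the gamma.
    return kl(joint(t), base)

print([round(gamma_for_raw_divergence(d), 6) for d in (0.001, 0.002, 0.004)])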
Example #15
 def testParallelNonUniformMarginals(self):
     import itertools 
     import numpy as np
     eta=0.01  #for reference distribution
     displacements = np.arange(0.05,0.55,0.05)
     for displacedMarginals in itertools.product(displacements,displacements):
         probabilityDistributionPath = pdpf.probabilityDistributionPathFactory(
           displacedMarginals,2,2).construct()
         p_eta_sub1 = \
           probabilityDistributionPath.\
           distribution_at_speicified_divergence_from_base_pos_t_as_distribution(eta)
         CDF1 = CDF.CDF()
         CDF1.referenceDistribution = p_eta_sub1
         CDF1.setN(30)
         CDF1.setn(4)
         CDF1.accountForAllTypes()
         print "*************************************"
         print displacedMarginals
         print len(CDF1.Dictionary)
         print CDF1.assignCumulativeProbability(eta)
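If CDF1.Dictionary is keyed by all 2x2 types (empirical count tables) of size N, which accountForAllTypes suggests but which is only a guess here, then its length for N = 30 would be the stars-and-bars count C(N+3, 3) = 5456. A quick standalone check of that count:

from itertools import product
from math import comb

N = 30
# All 2x2 types of size N: four non-negative cell counts summing to N.
types_of_size_N = [c for c in product(range(N + 1), repeat=4) if sum(c) == N]
print(len(types_of_size_N), comb(N + 3, 3))   # both 5456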
Example #16
    def testGenerate_N_ToMinimumGammaDict(self):
        eta = 0.01
        k, l = 2, 2
        newBetaTable = bt.betaTable(eta)
        NMaxList = [100, 200, 500, 1000, 10000, 110000]
        stepSizeList = [5, 10, 50, 100, 1000, 10000]
        newBetaTable.generate_N_List(NMaxList, stepSizeList)
        newBetaTable.generate_N_toMinimumGammaDict()
        uniformMarginalsBasedPath = pdpf.probabilityDistributionPathFactory([1.0 / k, 1.0 / l],
                                                                            k, l).construct()

        for index in [1, 39, 50]:
            N = newBetaTable.NList[index]
            Nnext = newBetaTable.NList[index + 1]
            gammaN = newBetaTable.NToMinimumGammaDict[N]
            scriptL_gammaN = uniformMarginalsBasedPath.lengthOfSegmentofKLDivergenceLessThanSpecified(gammaN)
            recoveredN = int(np.ceil(1.0 / scriptL_gammaN))
            print(N, recoveredN, Nnext)
            self.assertTrue(recoveredN > N)
            self.assertTrue(recoveredN < Nnext)
Example #17
 def testParallelNonUniformMarginals(self):
     import itertools
     import numpy as np
     eta = 0.01  #for reference distribution
     displacements = np.arange(0.05, 0.55, 0.05)
     for displacedMarginals in itertools.product(displacements,
                                                 displacements):
         probabilityDistributionPath = pdpf.probabilityDistributionPathFactory(
             displacedMarginals, 2, 2).construct()
         p_eta_sub1 = \
           probabilityDistributionPath.\
           distribution_at_speicified_divergence_from_base_pos_t_as_distribution(eta)
         CDF1 = CDF.CDF()
         CDF1.referenceDistribution = p_eta_sub1
         CDF1.setN(30)
         CDF1.setn(4)
         CDF1.accountForAllTypes()
         print "*************************************"
         print displacedMarginals
         print len(CDF1.Dictionary)
         print CDF1.assignCumulativeProbability(eta)
Example #18
 def testSearchArgWhereIncreasingFunctionTakesProportionOfMaxVal(self):
     theProportion = stats.norm.pdf(1)/stats.norm.pdf(0)
     N_list = list(10*np.array(range(1,10)))
     N_list.extend((100*np.array(range(1,10))))
     k,l=2,2
     eta=0.01
     gamma = 0.001
     logger.debug("Set eta=%s, gamma=%s"%(eta,gamma))
     #firstMarginalsDist = stats.uniform(loc = .4,scale = .2)
     #secondMarginalsDist = stats.uniform(loc = .4,scale = .2)
     for N in N_list:
         for iteration in range(1):
             #firstMarginal, secondMarginal = [firstMarginalsDist.rvs(), secondMarginalsDist.rvs()]
             firstMarginal, secondMarginal = 1.0/l, 1.0/k #0.5, 0.5 for binary-binary
             logger.debug("Randomly chosen marginals: (%s,%s)"%(firstMarginal, secondMarginal))
     
             
             functionFromTwoMarginalsAndParameterToIntegrand = epc.emissionProbabilityCalculator(eta, k, l, N).RobbinsEstimateOfEmissionProbability
             
             
             logger.debug("For marginals (%s,%s), Robbins function takes value %s at t=%s"%(firstMarginal, secondMarginal, functionFromTwoMarginalsAndParameterToIntegrand(firstMarginal, secondMarginal, 0.001), 0.001))
             functionFromParameterToIntegrandObject = fa.functionAlgorithms(functionFromTwoMarginalsAndParameterToIntegrand)
             
             functionFromParameterToIntegrandObject.setFixedArgumentList([firstMarginal, secondMarginal])
             logger.debug("Fixed argument list set to %s"%(str(functionFromParameterToIntegrandObject.fixedArgumentList)))
             functionFromParameterToIntegrand = functionFromParameterToIntegrandObject.functionOfOneVariable
             logger.debug("As func. of one variable, takes value %s at t=%s"%(functionFromParameterToIntegrand(0.001), 0.001))
             
             probDistPath = pdpf.probabilityDistributionPathFactory([firstMarginal, secondMarginal], k, l).construct()
             t_gamma_plus = probDistPath.largestPos_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
             t_gamma_minus = probDistPath.smallestNeg_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
             logger.debug("Robbins function takes value %s at t_gamma_plus=%s"%(functionFromParameterToIntegrandObject.theFunction(firstMarginal, secondMarginal, t_gamma_plus), t_gamma_plus))
             logger.debug("Robbins function takes value %s at t_gamma_minus=%s"%(functionFromParameterToIntegrandObject.theFunction(firstMarginal, secondMarginal, t_gamma_minus), t_gamma_minus))
             
             integrandFunctionObject = fa.functionAlgorithms(functionFromParameterToIntegrand)
             logger.info("Searching for where the function is proportion %s of the max between %s and %s"%(theProportion, t_gamma_minus, t_gamma_plus))
             computedScale = integrandFunctionObject.searchArgWhereIncreasingFunctionTakesProportionOfMaxVal(theProportion, t_gamma_minus, t_gamma_plus)
             logger.info("For marginals (%s,%s), N=%s, computed scale is %s"%(firstMarginal, secondMarginal, N, computedScale))
    def testSearchArgWhereIncreasingFunctionTakesProportionOfMaxVal(self):
        theProportion = stats.norm.pdf(1) / stats.norm.pdf(0)
        N_list = list(10 * np.array(range(1, 10)))
        N_list.extend((100 * np.array(range(1, 10))))
        k, l = 2, 2
        eta = 0.01
        gamma = 0.001
        logger.debug("Set eta=%s, gamma=%s" % (eta, gamma))
        # firstMarginalsDist = stats.uniform(loc = .4,scale = .2)
        # secondMarginalsDist = stats.uniform(loc = .4,scale = .2)
        for N in N_list:
            for iteration in range(1):
                # firstMarginal, secondMarginal = [firstMarginalsDist.rvs(), secondMarginalsDist.rvs()]
                firstMarginal, secondMarginal = 1.0 / l, 1.0 / k  # 0.5, 0.5 for binary-binary
                logger.debug("Randomly chosen marginals: (%s,%s)" % (firstMarginal, secondMarginal))

                functionFromTwoMarginalsAndParameterToIntegrand = epc.emissionProbabilityCalculator(
                    eta, k, l, N
                ).RobbinsEstimateOfEmissionProbability

                logger.debug(
                    "For marginals (%s,%s), Robbins function takes value %s at t=%s"
                    % (
                        firstMarginal,
                        secondMarginal,
                        functionFromTwoMarginalsAndParameterToIntegrand(firstMarginal, secondMarginal, 0.001),
                        0.001,
                    )
                )
                functionFromParameterToIntegrandObject = fa.functionAlgorithms(
                    functionFromTwoMarginalsAndParameterToIntegrand
                )

                functionFromParameterToIntegrandObject.setFixedArgumentList([firstMarginal, secondMarginal])
                logger.debug(
                    "Fixed argument list set to %s" % (str(functionFromParameterToIntegrandObject.fixedArgumentList))
                )
                functionFromParameterToIntegrand = functionFromParameterToIntegrandObject.functionOfOneVariable
                logger.debug(
                    "As func. of one variable, takes value %s at t=%s"
                    % (functionFromParameterToIntegrand(0.001), 0.001)
                )

                probDistPath = pdpf.probabilityDistributionPathFactory(
                    [firstMarginal, secondMarginal], k, l
                ).construct()
                t_gamma_plus = probDistPath.largestPos_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
                t_gamma_minus = probDistPath.smallestNeg_t_atWhichKLDivergenceFromBaseIsLessThanEta(gamma)
                logger.debug(
                    "Robbins function takes value %s at t_gamma_plus=%s"
                    % (
                        functionFromParameterToIntegrandObject.theFunction(firstMarginal, secondMarginal, t_gamma_plus),
                        t_gamma_plus,
                    )
                )
                logger.debug(
                    "Robbins function takes value %s at t_gamma_minus=%s"
                    % (
                        functionFromParameterToIntegrandObject.theFunction(
                            firstMarginal, secondMarginal, t_gamma_minus
                        ),
                        t_gamma_minus,
                    )
                )

                integrandFunctionObject = fa.functionAlgorithms(functionFromParameterToIntegrand)
                logger.info(
                    "Searching for where the function is proportion %s of the max between %s and %s"
                    % (theProportion, t_gamma_minus, t_gamma_plus)
                )
                computedScale = integrandFunctionObject.searchArgWhereIncreasingFunctionTakesProportionOfMaxVal(
                    theProportion, t_gamma_minus, t_gamma_plus
                )
                logger.info(
                    "For marginals (%s,%s), N=%s, computed scale is %s"
                    % (firstMarginal, secondMarginal, N, computedScale)
                )
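What the searched-for quantity means: for an increasing function f on [t_gamma_minus, t_gamma_plus], find the argument where f reaches a given proportion of its maximum f(t_gamma_plus). A minimal standalone bisection sketch of that idea (the project's functionAlgorithms implementation may well differ); with the Gaussian-ratio proportion used in the test, the answer sits one "scale" below the right endpoint:

import math

def arg_where_increasing_function_takes_proportion_of_max(f, proportion, a, b, tol=1e-10):
    # Bisection for x in [a, b] with f(x) ~= proportion * f(b), assuming f is increasing on [a, b].
    target = proportion * f(b)
    lo, hi = a, b
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if f(mid) < target:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

proportion = math.exp(-0.5)                      # = stats.norm.pdf(1) / stats.norm.pdf(0)
f = lambda x: math.exp(-0.5 * (x - 2.0) ** 2)    # Gaussian-shaped, increasing on [0, 2]
print(arg_where_increasing_function_takes_proportion_of_max(f, proportion, 0.0, 2.0))  # ~1.0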