def combine_results(resultXfoil, resultCfd):
    alphaMax = min([max(resultXfoil.alpha), max(resultCfd.alpha)])
    alphaMin = max([min(resultXfoil.alpha), min(resultCfd.alpha)])
    alphaNew = np.arange(alphaMin,alphaMax+1.,1.)
    clAlpha = interp1d(resultCfd.alpha,resultCfd.cl,'cubic')
    cdAlpha = interp1d(resultXfoil.alpha, resultXfoil.cd,'cubic')
    cmAlpha = interp1d(resultXfoil.alpha, resultXfoil.cm,'cubic')
    resultNew = FluentOutput()
    resultNew.alpha = alphaNew
    resultNew.cl = clAlpha(alphaNew)
    resultNew.cd = cdAlpha(alphaNew)
    resultNew.cm = cmAlpha(alphaNew)
    f1 = lambda x: -clAlpha(x)
    f2 = lambda x: -clAlpha(x)/cdAlpha(x)
    f3 = lambda x: -clAlpha(x)**1.5/cdAlpha(x)
    a1 = fminbound(f1,alphaMin, alphaMax, full_output=1)
    a2 = fminbound(f2,alphaMin, alphaMax, full_output=1)
    a3 = fminbound(f3,alphaMin, alphaMax, full_output=1)
    resultNew.alphaClmax = a1[0]
    resultNew.alphaLDmax = a2[0]
    resultNew.alphaLD32max = a3[0]
    resultNew.Clmax = -a1[1]
    resultNew.LDmax = -a2[1]
    resultNew.LD32max = -a3[1]
    resultNew.cdAtLDmax = cdAlpha(a2[0])
    resultNew.cdAtLD32max = cdAlpha(a3[0])
    return resultNew
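
A note on the pattern above: fminbound() only minimizes, so every maximum here is found by minimizing the negated function, and full_output=True returns the minimizer and the (negated) minimum together. A minimal, self-contained sketch of that pattern, with a made-up lift curve standing in for the interpolated data:

import numpy as np
from scipy.optimize import fminbound

cl = lambda alpha: np.sin(np.radians(alpha) * 6.0)  # toy lift curve, peaks at 15 deg
xopt, fval, ierr, numfunc = fminbound(lambda a: -cl(a), 0.0, 20.0, full_output=True)
alpha_clmax, clmax = xopt, -fval  # undo the negation
print(alpha_clmax, clmax)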
Example #2
    def __init__(self, end1=1.0 / 4, end2=4, f=None):
        l = end1
        r = end2
        self._l = end1
        self._r = end2
        self._dim = 2
        if f is None:

            def f(t):
                return np.cos(t)

        def mf(t):
            return -f(t)

        self._f = f

        self._tol = 1e-14
        tol = self._tol

        # Use optimization to find the bounding box
        (topt, fmin, ierr, numfunc) = fminbound(f, l, r, xtol=tol, full_output=True, disp=1)
        (topt, fval, ierr, numfunc) = fminbound(mf, l, r, xtol=tol, full_output=True, disp=1)
        fmax = -fval
        self._bb = [np.array([l, fmin]), np.array([r, fmax])]

        self._hasParam = True
Example #3
    def analyze_geometry(self):
        """
        analyzing airfoil geometry by upper and lower curve points.
        Search maximum thickness and maximum camber using cubic spline 
        interpolation and gradient based optimization. To avoid interpolation 
        errors that can occur at leading edge of several airfoil types 
        (mostly NACA cambered airfols) it is assumed that maximum camber and 
        thickness are located between 5 and 95% of airfoil length.
        
        Result is stored in self.thicknessLoc, self.thickness, self.camber, 
        self.camberLoc
        """
        lb = 0.1
        ub = 0.9
        up = geom.get_pts_in_range(self.upPts, lb, ub)
        lo = geom.get_pts_in_range(self.loPts, lb, ub)
        upCurve = interp1d(up[:, 0], up[:, 1], "cubic")
        loCurve = interp1d(lo[:, 0], lo[:, 1], "cubic")
        lb = up[0, 0]
        ub = up[-1, 0]

        def tc(x):
            return loCurve(x) - upCurve(x)

        def camber(x):
            return -(upCurve(x) + loCurve(x)) / 2.0

        self.thicknessLoc = float(fminbound(tc, lb, ub, xtol=0.001))
        self.camberLoc = float(fminbound(camber, lb, ub, xtol=0.001))
        self.thickness = -float(tc(self.thicknessLoc))
        self.camber = -float(camber(self.camberLoc))
Example #4
def find_min_max(obj, xlo=0.0, xhi=10.0, xtol=1.0e-12):
    """Find Min/Max values over interval xlo to xhi"""

    # do a little brute force to keep away from local minima/maxima
    xArr = linspace(xlo, xhi, 1000)
    yArr = obj.eval_xrange(xArr)
    
    # find index of min and max y value in array over range
    imin = argmin( yArr )
    imax = argmax( yArr )
    print('imin=',imin,'imax=',imax)
    
    dx = old_div((xhi-xlo),990.0)
    
    xlo_min = max(xlo, xArr[imin]-dx)
    xhi_min = min(xhi, xArr[imin]+dx)
    
    xlo_max = max(xlo, xArr[imax]-dx)
    xhi_max = min(xhi, xArr[imax]+dx)

    def f( xval ):
        return -obj.eval_xrange( xval )
        
    x_max = optimize.fminbound(f, xlo_max, xhi_max, xtol=1e-12)
    
    def f2( xval ):
        return obj.eval_xrange( xval )
        
    x_min = optimize.fminbound(f2, xlo_min, xhi_min, xtol=1e-12)
    y_min = f2(x_min)
    y_max = f2(x_max)

    return x_min, y_min, x_max, y_max
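
The snippet above combines a brute-force grid scan with a bounded local refinement. A compact, self-contained sketch of the same grid-then-refine idea on a multimodal function (function and bounds are illustrative):

import numpy as np
from scipy import optimize

def g(x):
    return np.sin(3.0 * x) + 0.1 * x**2  # several local minima on [0, 10]

xArr = np.linspace(0.0, 10.0, 1000)
imin = np.argmin(g(xArr))
dx = (10.0 - 0.0) / 999.0  # grid spacing
# refine within one grid spacing of the best grid point
x_min = optimize.fminbound(g, max(0.0, xArr[imin] - dx), min(10.0, xArr[imin] + dx), xtol=1e-12)
print(x_min, g(x_min))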
Example #5
 def all_GL(self, q, maxpiv=None):
     """return (piv, f_binodal_gas, f_binodal_liquid, f_spinodal_gas, f_spinodal_liquid) at insersion works piv sampled between the critical point and maxpiv (default to 2.2*critical pressure)"""
     fc, pivc = self.critical_point(q)
     Fc = np.log(fc)
     #start sensibly above the critical point
     startp = pivc*1.1
     fm = fminbound(self.mu, fc, self.maxf(), args=(startp, q))
     fM = fminbound(lambda f: -self.pv(f, startp, q), 0, fc)
     initial_guess = np.log([0.5*fM, 0.5*(fm+self.maxf())])
     #construct the top of the GL binodal
     if maxpiv is None:
         maxpiv = startp*2
     topp = 1./np.linspace(1./startp, 1./maxpiv)
     topGL = [initial_guess]
     for piv in topp:
         topGL.append(self.binodalGL(piv, q, topGL[-1]))
     #construct the GL binodal between the starting piv and the critical point
     botp = np.linspace(startp, pivc)[:-1]
     botGL = [initial_guess]
     for piv in botp:
         botGL.append(self.binodalGL(piv, q, botGL[-1]))
     #join the two results and convert back from log
     binodal = np.vstack((
         [[pivc, fc, fc]],
         np.column_stack((botp, np.exp(botGL[1:])))[::-1],
         np.column_stack((topp, np.exp(topGL[1:])))[1:]
         ))
     #spinodal at the same pivs
     spinodal = self.spinodalGL(q, binodal[:,0])
     #join everything
     return np.column_stack((binodal, spinodal[:,1:]))
Example #6
def minimize1D(f, evalgrid = None, nGrid=10, minval=0.0, maxval = 0.99999, verbose=False, brent=True,check_boundaries = True, resultgrid=None):
    '''
    minimize a function f(x) in the grid between minval and maxval.
    The function is first evaluated on a grid; every triplet whose inner value
    is smaller than both outer values is then refined with Brent's algorithm.
    --------------------------------------------------------------------------
    Input:
    f(x)    : callable target function
    evalgrid: 1-D array prespecified grid of x-values
    nGrid   : number of x-grid points to evaluate f(x)
    minval  : minimum x-value for optimization of f(x)
    maxval  : maximum x-value for optimization of f(x)
    brent   : boolean indicator whether to do Brent search or not.
              (default: True)
    --------------------------------------------------------------------------
    Output list:
    [xopt, f(xopt)]
    xopt    : x-value at the optimum
    f(xopt) : function value at the optimum
    --------------------------------------------------------------------------
    '''
    #evaluate the target function on a grid:
    if verbose: print "evaluating target function on a grid"
    if evalgrid is not None and brent:# if brent we need to sort the input values
        i_sort = evalgrid.argsort()
        evalgrid = evalgrid[i_sort]
    if resultgrid is None:
        [evalgrid,resultgrid] = evalgrid1D(f, evalgrid = evalgrid, nGrid=nGrid, minval=minval, maxval = maxval  )
    
    i_currentmin=resultgrid.argmin()
    minglobal = (evalgrid[i_currentmin],resultgrid[i_currentmin])
    if brent:#do Brent search in addition to rest? 
        if check_boundaries:
            if verbose: print "checking grid point boundaries to see if further search is required"
            if resultgrid[0]<resultgrid[1]:#if the outer boundary point is a local optimum expand search bounded between the grid points
                if verbose: print "resultgrid[0]<resultgrid[1]--> outer boundary point is a local optimum expand search bounded between the grid points"
                minlocal = opt.fminbound(f,evalgrid[0],evalgrid[1],full_output=True)
                if minlocal[1]<minglobal[1]:
                    if verbose: print "found a new minimum during grid search"
                    minglobal=minlocal[0:2]
            if resultgrid[-1]<resultgrid[-2]:#if the outer boundary point is a local optimum expand search bounded between the grid points
                if verbose: print "resultgrid[-1]<resultgrid[-2]-->outer boundary point is a local optimum expand search bounded between the grid points"
                minlocal = opt.fminbound(f,evalgrid[-2],evalgrid[-1],full_output=True)
                if minlocal[1]<minglobal[1]:
                    if verbose: print "found a new minimum during grid search"
                    minglobal=minlocal[0:2]
        if verbose: print "exploring triplets with brent search"
        onebrent=False
        for i in xrange(resultgrid.shape[0]-2):#if any triplet is found, where the inner point is a local optimum expand search
            if (resultgrid[i+1]<resultgrid[i+2]) and (resultgrid[i+1]<resultgrid[i]):
                onebrent=True
                if verbose: print "found triplet to explore"
                minlocal = opt.brent(f,brack = (evalgrid[i],evalgrid[i+1],evalgrid[i+2]),full_output=True)
                if minlocal[1]<minglobal[1]:
                    minglobal=minlocal[0:2]
                    if verbose: print "found new minimum from brent search"
    return minglobal
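
The triplet rule that minimize1D relies on can be shown in isolation: whenever a grid value is lower than both of its neighbours, the three points form a valid bracket for scipy.optimize.brent. A small sketch with an illustrative function and grid:

import numpy as np
import scipy.optimize as opt

f = lambda x: np.cos(5.0 * x) + x**2
grid = np.linspace(0.0, 1.0, 10)
vals = f(grid)
for i in range(len(grid) - 2):
    if vals[i + 1] < vals[i] and vals[i + 1] < vals[i + 2]:
        # the triplet brackets a local minimum, so Brent's method applies
        xopt, fopt, _, _ = opt.brent(f, brack=(grid[i], grid[i + 1], grid[i + 2]), full_output=True)
        print(xopt, fopt)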
Example #7
    def test_fminbound_scalar(self):
        try:
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
            self.fail("exception not raised")
        except ValueError as e:
            assert_('must be scalar' in str(e))

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6)
Example #8
 def spinodalGL(self, q, pivs=None):
     """return (piv, f_gas, f_liquid) on the spinodal line at insersion works pivs"""
     fc, pivc = self.critical_point(q)
     if pivs is None:
         pivs = 1./np.linspace(1./pivc, 1./(8*pivc))
     return np.column_stack((pivs, np.vstack([(
         fminbound(lambda f: -self.pv(f, piv, q), 0, fc),
         fminbound(lambda f: self.pv(f, piv, q), fc, self.maxf())
         ) for piv in pivs])))
Example #9
 def test_fminbound(self):
     """Test fminbound
     """
     x = optimize.fminbound(lambda x: (x - 1.5) ** 2 - 0.8, 0, 1)
     assert_(abs(x - 1) < 1e-5)
     x = optimize.fminbound(lambda x: (x - 1.5) ** 2 - 0.8, 1, 5)
     assert_(abs(x - 1.5) < 1e-6)
     x = optimize.fminbound(lambda x: (x - 1.5) ** 2 - 0.8, numpy.array([1]), numpy.array([5]))
     assert_(abs(x - 1.5) < 1e-6)
     assert_raises(ValueError, optimize.fminbound, lambda x: (x - 1.5) ** 2 - 0.8, 5, 1)
Example #10
    def test_fminbound(self):
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
Example #11
        def integral(y, return_x=False):

            x0 = float(fminbound(lambda x: np.abs(self.best.pdf(x) - y),
                       self.lims[0], self.best.MAPP, full_output=1)[0])
            x1 = float(fminbound(lambda x: np.abs(self.best.pdf(x) - y),
                       self.best.MAPP, self.lims[1], full_output=1)[0])
            if not return_x:
                return quad(self.best.pdf, x0, x1)[0]
            else:
                return x0, x1
Example #12
    def buyAll(self, d_bond,d_equity,r_bond,r_equity):
        '''
        #this function solves for the weight of spx in a buy-all index that
        #optimizes a backward-looking sharpe ratio (total carry / vol), with
        #correlation estimated over a rolling window
        # d_equity: %ret of equity
        # d_bond: %ret of bond
        # r_equity - spx carry (dividend yield)
        # r_bond - ty carry (yield)
        # v_equity - spx variance
        # v_bond - ty variance
        # p - spx/ty correlation


        #result
        # x_IR - weight for maximizing IR
        # x_P - weight for minimizing variance assuming -50% constant correlation
        # x - average of the 2 above
        '''
        t=200
        t_s=30

        p=pd.rolling_corr(d_equity,d_bond,t)
        p=pd.ewma(p,halflife=t_s)

        p2 = pd.Series(-0.5, index=p.index)

        v_equity=pd.rolling_var(d_equity,t)
        v_bond=pd.rolling_var(d_bond,t)

        m=len(p)

        x_IR=p.copy()
        x_P=x_IR.copy()

        for i in range(0,m):

            f = lambda x: -(x*r_equity[i]+(1-x)*r_bond[i])/np.sqrt((x**2*v_equity[i]+(1-x)**2*v_bond[i]+2*x*(1-x)*np.sqrt(v_equity[i]*v_bond[i])*p[i])*16)

            #fitting the data with fminbound (bounded search, so no initial value is needed)
            x1 = op.fminbound(f, 0.1, 0.8, maxfun=100)
            x_IR[i]=x1
    
            #portfolio optimisation assuming a constant correlation of -50%
            f = lambda x: -(x*r_equity[i]+(1-x)*r_bond[i])/np.sqrt((x**2*v_equity[i]+(1-x)**2*v_bond[i]+2*x*(1-x)*np.sqrt(v_equity[i]*v_bond[i])*p2[i])*16)

            # fitting the data with fminbound (bounded search, so no initial value is needed)
            x2 = op.fminbound(f, 0.1, 0.8, maxfun=100)
            x_P[i]=x2
    
        w=(x_P+x_IR)/2
    
        return w
Example #13
 def binodalGL(self, piv, q, guess=None):
     """return (log(f_gas), log(f_liquid)) on the binodal line at a given insersion work piv"""
     if guess is None:
         fc = self.critical_point(q)[0]
         fspG = fminbound(lambda f: -self.pv(f, piv,q), 0, fc)
         fspL = fminbound(self.pv, fc, self.maxf(), args=(piv, q))
         guess = np.log([0.5*fspG, fspL+fspG])
     return fsolve(lambda Fs: [
         self.pv_of_log(Fs[0], piv, q) - self.pv_of_log(Fs[1], piv, q), 
         self.mu_of_log(Fs[0], piv, q) - self.mu_of_log(Fs[1], piv, q)
         ], guess)
Example #14
	def bellman_operator(self, w, compute_policy=False):
		"""
		The approximate Bellman operator, which computes and returns the
		updated value function Tw on the grid points.

		Parameters
		----------
		w : array_like(float, ndim=1)
			The value of the input function on different grid points
		compute_policy : Boolean, optional(default=False)
			Whether or not to compute policy function

		"""
		
		Aw = [InterpolatedUnivariateSpline(self.grid, w[i], k=3) for i in range(len(self.shocks))]
		Awx = lambda y : [Aw[i](y[i]) for i in range(len(y))]
		#Awx = lambda y: np.asarray([function(y) for function in Aw]) 

		if compute_policy:
			sigma = np.asarray([np.empty(len(w[0]))]*len(self.shocks))

		Tw = np.asarray([np.empty(len(w[0]))]*len(self.shocks))
		
		if self.functional: 
			# It is necessary to formulate the objective function differently for the functional form
			for i, x in enumerate(self.grid):
				for j in range(len(self.shocks)):
					objective = lambda c: - self.u(c) - self.beta(c) * (np.dot(self.transition[j], Awx(np.add(self.r*(x - c), self.shocks))))
					c_star = fminbound(objective, 1e-6, x-1e-6)
			
					if compute_policy:
						sigma[j][i] = c_star
					Tw[j][i] = - objective(c_star)

		else: 
			for i, x in enumerate(self.grid):
				for j in range(len(self.shocks)):
					objective = lambda c: - self.u(c) - self.beta * (np.dot(self.transition[j], Awx(np.add(self.r*(x - c), self.shocks))))
					#def objective(c):
					#	u = self.u(c)
					#	expectation = np.dot(self.transition[j], Awx(self.r*(x - c) + self.shocks)))
					#	return (- u - self.beta*expectation)

					c_star = fminbound(objective, 1e-6, x-1e-6)

					if compute_policy:
						sigma[j][i] = c_star
					Tw[j][i] = - objective(c_star)

		if compute_policy:
			return Tw, sigma
		else:
			return Tw
Example #15
def leja(order, dist):
    """
After paper by Narayan and Jakeman
    """

    if len(dist) > 1:
        if isinstance(order, int):
            xw = [leja(order, d) for d in dist]
        else:
            xw = [leja(order[i], dist[i]) for i in range(len(dist))]

        x = [_[0][0] for _ in xw]
        w = [_[1] for _ in xw]
        x = cp.utils.combine(x).T
        w = cp.utils.combine(w)
        w = np.prod(w, -1)

        return x, w

    lo, up = dist.range()
    X = [lo, dist.mom(1), up]
    for o in range(order):

        X_ = np.array(X[1:-1])
        obj = lambda x:-np.sqrt(dist.pdf(x))*np.prod(np.abs(X_-x))
        opts, vals = zip(*[fminbound(obj, X[i], X[i+1],
            full_output=1)[:2] for i in range(len(X)-1)])
        index = np.argmin(vals)
        X.insert(index+1, opts[index])

    X = np.asfarray(X).flatten()[1:-1]
    W = weightgen(X, dist)
    X = X.reshape(1, X.size)

    return np.array(X), np.array(W)
Example #16
def infnorm(H):
	"""Find the infinity norm of a z-domain transfer function.
	"""
	# Get a rough idea of the location of the maximum.
	N = 129
	w = np.linspace(0, 2*np.pi, num=N, endpoint=True)
	dw = 2*np.pi/(N-1)
	Hval = evalTF(H, np.exp(1j*w))
	Hinf = np.max(np.abs(Hval))
	wi = np.where(np.abs(Hval) == Hinf)[0]

	# Home in using the scipy "fminbound" function.
	# original MATLAB code:
	#   wmax = fminbnd(nabsH, w(wi)-dw, w(wi)+dw, options, H);
	wmax = fminbound(nabsH, w[wi]-dw, w[wi]+dw, args=(H,), \
	                 xtol=1e-08, maxfun=5000, full_output=0)

	if wmax is None:
		warn('Hinf: Warning. scipy.optimize operation failed.'
		     + ' The result returned may not be very accurate.')
		wmax = w[wi]

	Hinf = -nabsH(wmax, H)
	fmax = wmax/(2*np.pi)
	return Hinf, fmax
Example #17
def single_var_maximize_ln_L(data, ln_L_func, low_param, high_param):
    '''Takes `data` (the data set).
    `ln_L_func` is a function that should take (data, parameter_value)
    and return a log-likelihood.
    `low_param` and `high_param` should be 2 numbers (with high_param > low_param)
        that can be used in the call to find a set of points that bracket the
        optimum.
    '''
    def scipy_ln_likelihood(x):
        '''SciPy minimizes functions. We want to maximize the likelihood. This
        function adapts our ln_likelihood function to the minimization context
        by returning the negative log-likelihood.
    
        We use this function with SciPy's minimization routine (minimizing the
        negative log-likelihood will maximize the log-likelihood).
        '''
        ln_L = ln_L_func(data, x)
        if VERBOSE:
            sys.stderr.write('In wrapper around ln_L_func with param = {m} lnL = {l}\n'.format(m=x, l=ln_L))
        return -ln_L
    # Now that we have wrapped the scipy_ln_likelihood, we can find an 
    #       approximation to the solution
    # Here we use a bounded optimization, because the death prob can't be 1 or
    #       less than 0.0
    mle = optimize.fminbound(scipy_ln_likelihood,
                             0.0,
                             1.0 - EPSILON,
                             xtol=1e-8,
                             full_output=False)
    return mle
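
A hedged usage sketch of the wrapper pattern above, with a simple binomial log-likelihood invented for illustration (the real data and ln_L_func come from the caller):

import math
from scipy import optimize

def binom_ln_L(data, p):
    n, k = data  # n trials, k successes
    return k * math.log(p) + (n - k) * math.log(1.0 - p)

data = (100, 37)
# minimize the negative log-likelihood over the open interval (0, 1)
mle = optimize.fminbound(lambda p: -binom_ln_L(data, p), 1e-9, 1.0 - 1e-9, xtol=1e-8)
print(mle)  # ~0.37, the sample proportion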
Example #18
    def test_var(self, sig2_0, return_weights=False):
        """
        Returns  -2 x log-likelihoog ratio and the p-value for the
        hypothesized variance

        Parameters
        ----------
        sig2_0 : float
            Hypothesized variance to be tested

        return_weights : bool
            If True, returns the weights that maximize the
            likelihood of observing sig2_0. Default is False

        Returns
        --------
        test_results : tuple
            The  log-likelihood ratio and the p_value  of sig2_0

        Examples
        --------
        >>> random_numbers = np.random.standard_normal(1000)*100
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> hyp_test = el_analysis.test_var(9500)
        """
        self.sig2_0 = sig2_0
        mu_max = max(self.endog)
        mu_min = min(self.endog)
        llr = optimize.fminbound(self._opt_var, mu_min, mu_max, \
                                 full_output=1)[1]
        p_val = chi2.sf(llr, 1)
        if return_weights:
            return llr, p_val, self.new_weights.T
        else:
            return  llr, p_val
Example #19
def align_global_phase(data):
    """ Phase retrieval is degenerate to a global phase factor. This function tries to align the global phase rotation
    by minimizing the amount of power in the imag component. Real component could also be minimized with no effect
    on the outcome.
    
    arguments:
        data: 2d or 3d ndarray whose phase is to be aligned. Each frame of data is aligned independently.
        
    returns:
        complex ndarray of same shape as data"""
        
    from scipy.optimize import fminbound
    
    # check types
    assert isinstance(data,numpy.ndarray), "data must be array"
    assert data.ndim in (2,3), "data must be 2d or 3d"
    assert numpy.iscomplexobj(data), "data must be complex"
    was2d = False
    
    if data.ndim == 2:
        was2d = True
        data.shape = (1,data.shape[0],data.shape[1])
        
    for frame in data:
        x = frame.ravel()
        e = lambda p: numpy.sum(abs((x*numpy.exp(complex(0,1)*p)).imag))
        opt, val, conv, num = fminbound(e,0,2*numpy.pi,full_output=1)
        print(opt)
        frame *= numpy.exp(complex(0,1)*opt)
    
    if was2d: data = data[0]
    
    return data     
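
The same idea in isolation: rotate a complex frame by exp(1j*p) and pick the p that minimizes the total |imag|. The test data below is synthetic; after alignment, the rotation angle plus the original twist is a multiple of pi, so the imaginary part vanishes.

import numpy as np
from scipy.optimize import fminbound

rng = np.random.default_rng(0)
frame = rng.random(256) * np.exp(1j * 0.7)  # real-valued data with a 0.7 rad global twist

e = lambda p: np.sum(np.abs((frame * np.exp(1j * p)).imag))
p_opt = fminbound(e, 0, 2 * np.pi)
aligned = frame * np.exp(1j * p_opt)
print(p_opt, np.abs(aligned.imag).sum())  # (0.7 + p_opt) is ~a multiple of pi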
Example #20
 def _calc_clmax(self):
     if self._alphaCl is None:
         self._create_splines()
     f = lambda alpha: -self._alphaCl(alpha)
     alphaClmax = fminbound(f, self.alpha[0], self.alpha[-1])
     self.alphaClmax = alphaClmax
     self.clmax = self._alphaCl(alphaClmax)
Example #21
    def find_offset_and_scale(self):
        '''Tries to find the offset of the vbi data in the raw samples.'''

        # Split into chunks and ensure there is something "interesting" in each
        target = gauss(self.vbi, self.gauss_sd_offset)
        d = [np.std(target[x:x+128]) < 5.0 for x in range(64, 1440, 128)]
        if any(d):
            return False

        low = 64
        high = 256
        target = gauss(self.vbi[low:high], self.gauss_sd_offset)

        def _inner(offset):
            self.g.set_offset(offset)

            self.g.update_cri(low, high)
            guess_scaled = self.g.convolved[low:high]
            mask_scaled = self.g.mask[low:high]

            a = guess_scaled*mask_scaled
            b = np.clip(target*mask_scaled, self.black, 256)

            scale = a.std()/b.std()
            b -= self.black
            b *= scale
            a = np.clip(a, 0, 256*scale)

            return np.sum(np.square(b-a))

        offset = fminbound(_inner, self.offset_low, self.offset_high)

        # call it also to set self.offset and self.scale
        return (_inner(offset) < 10)
Example #22
    def bellman_operator(self, w, compute_policy=False):
        """
        The approximate Bellman operator, which computes and returns the 
        updated value function Tw on the grid points.

        Parameters
        ==========
            w : a flat NumPy array with len(w) = len(grid)

        The vector w represents the value of the input function on the grid
        points.

        """
        # === Apply linear interpolation to w === #
        Aw = lambda x: interp(x, self.grid, w)  

        if compute_policy:
            sigma = np.empty(len(w))

        # === set Tw[i] equal to max_c { u(c) + beta w(f(k_i) - c)} === #
        Tw = np.empty(len(w))
        for i, k in enumerate(self.grid):
            objective = lambda c:  - self.u(c) - self.beta * Aw(self.f(k) - c)
            c_star = fminbound(objective, 1e-6, self.f(k))
            if compute_policy:
                # sigma[i] = argmax_c { u(c) + beta w(f(k_i) - c)} 
                sigma[i] = c_star
            Tw[i] = - objective(c_star)

        if compute_policy:
            return Tw, sigma
        else:
            return Tw
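
Iterating the operator above to convergence is straightforward. A self-contained sketch on the log-utility growth model u(c)=log(c), f(k)=k**alpha, whose known optimal policy is c = (1 - alpha*beta) * k**alpha; the grid size and tolerances here are illustrative:

import numpy as np
from scipy.optimize import fminbound

alpha, beta = 0.4, 0.96
grid = np.linspace(1e-3, 2.0, 120)
f = lambda k: k**alpha

w = np.zeros(len(grid))
for it in range(500):
    Aw = lambda x: np.interp(x, grid, w)  # linear interpolation of the current guess
    Tw = np.empty_like(w)
    for i, k in enumerate(grid):
        objective = lambda c: -np.log(c) - beta * Aw(f(k) - c)
        c_star = fminbound(objective, 1e-6, f(k))
        Tw[i] = -objective(c_star)
    if np.max(np.abs(Tw - w)) < 1e-5:
        break
    w = Tw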
Example #23
def find_global_min(fun, s, args):
    # get arguments for minimization function <fun>
    x,spline_x,spline_y = args
    # first, find the point that minimizes <fun> in the list of grid point s_i
    min_val=6e66
    min_ind=-1
    # loop over all points s_i
    for i in range(0,len(s)):
        # get function value at point s_i
        val = fun(s[i],x,spline_x,spline_y)
        if val<min_val:
            # if smaller than current smallest value, save value and index i
            min_val=val
            min_ind=i
    # determine range for local minimization, if s_i is the smallest grid point
    # then use range (s_i-1,s_i+1)     
    sb=[-1,-1]
    # deter
    if min_ind==0:
        # unless i=0, then use (0,s_i+1)
        sb[0]=s[0]
    else:
        sb[0]=s[min_ind-1]        
    if min_ind==len(s)-1:
        # or unless i is last grid point, then use (s_i-1,s_i)
        sb[1]=s[-1]
    else:
        sb[1]=s[min_ind+1]        
    # then, determine minimizing arc length s within the range (s_i-1,s_i+1)    
    s_min = opt.fminbound(fun, sb[0], sb[1], args=(x,spline_x,spline_y))
    return s_min
Example #24
  def local_solver(self, unset_U, u, set_dofs, xtol):
    ''' Local solver on a triangle. Return new value for u in dof=unset_U. '''
    p = self.dof_to_coordinate[unset_U]
    p0 = self.dof_to_coordinate[set_dofs[0]]
    p1 = self.dof_to_coordinate[set_dofs[1]]
    
    fp = u[unset_U]
    f0 = u[set_dofs[0]]
    f1 = u[set_dofs[1]]

    def value(t):
      P = p0*(1-t) + p1*t
      return f0*(1-t) + f1*t + sqrt(np.dot((P-p).flat, (P-p).flat))

    res, fval, ierr, numfunc = fminbound(value, 0, 1, xtol=xtol, full_output=True)
    
    # set the "records" of eval calls
    if numfunc > self.max_calls:
      self.max_calls = numfunc

    if numfunc < self.min_calls:
      self.min_calls = numfunc
    
    if ierr == 0:
      return min([value(res), fp])
    else:
      # edge length
      a = sqrt(np.dot(p-p0, p-p0)) + f0
      b = sqrt(np.dot(p-p1, p-p1)) + f1

      return min([fp, a, b])
Example #25
def bellman_operator(cp, V, return_policy=False):
    """
    The approximate Bellman operator, which computes and returns the updated
    value function TV (or the V-greedy policy c if return_policy == True).

    Parameters:

        * cp is an instance of class consumerProblem
        * V is a NumPy array of dimension len(cp.asset_grid) x len(cp.z_vals)

    """
    # === simplify names, set up arrays === #
    R, Pi, beta, u, b = cp.R, cp.Pi, cp.beta, cp.u, cp.b  
    asset_grid, z_vals = cp.asset_grid, cp.z_vals        
    new_V = np.empty(V.shape)
    new_c = np.empty(V.shape)
    z_index = range(len(z_vals))  

    # === linear interpolation of V along the asset grid === #
    vf = lambda a, i_z: interp(a, asset_grid, V[:, i_z]) 

    # === solve r.h.s. of Bellman equation === #
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def obj(c):  # objective function to be *minimized*
                y = sum(vf(R * a + z - c, j) * Pi[i_z, j] for j in z_index)
                return - u(c) - beta * y
            c_star = fminbound(obj, np.min(z_vals), R * a + z + b)
            new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

    if return_policy:
        return new_c
    else:
        return new_V
Example #26
    def _get_sigma_c_max( self ):
        def minfunc_sigma( w ):
            self.model.w = w
            stiffness_loss = np.sum( self.model.Kf * self.model.damage ) / np.sum( self.model.Kf )
            if stiffness_loss > 0.90:
                return 1. + w
            # plt.plot(w, self.sigma_c, 'ro')
            return -self.sigma_c
        def residuum_stiffness( w ):
            self.model.w = w
            stiffness_loss = np.sum( self.model.Kf * self.model.damage ) / np.sum( self.model.Kf )
            if stiffness_loss > 0.90:
                return 1. + w
            if stiffness_loss < 0.65 and stiffness_loss > 0.45:
                residuum = 0.0
            else:
                residuum = stiffness_loss - 0.5
            return residuum

        w_max = brentq( residuum_stiffness, 0.0, min( 0.1 * ( self.model.Ll + self.model.Lr ), 20. ) )
        w_points = np.linspace( 0, w_max, 7 )
        w_maxima = []
        sigma_maxima = []
        for i, w in enumerate( w_points[1:] ):
            w_maxima.append( fminbound( minfunc_sigma, w_points[i], w_points[i + 1], maxfun = 5, disp = 0 ) )
            sigma_maxima.append( self.sigma_c )
        return sigma_maxima[np.argmax( np.array( sigma_maxima ) )], w_maxima[np.argmax( np.array( sigma_maxima ) )]
Example #27
def gradient_ascent(r1, r2, theta, gradient_magnitude, 
                    fix_mu=False, fix_sigma=False):
    for j in range(len(theta)):
        if fix_mu and j == 0: continue
        if fix_sigma and j == 1: continue
        
        prev_loss = calc_loss(r1, r2, theta)

        mu, sigma, rho, p = theta
        z1 = compute_pseudo_values(r1, mu, sigma, p)
        z2 = compute_pseudo_values(r2, mu, sigma, p)
        real_grad = calc_pseudo_log_lhd_gradient(theta, z1, z2, False, False)
        
        gradient = numpy.zeros(len(theta))
        gradient[j] = gradient_magnitude
        if real_grad[j] < 0: gradient[j] = -gradient[j]
                
        min_step = 0
        max_step = find_max_step_size(
            theta[j], gradient[j], (False if j in (0,1) else True))

        if max_step < 1e-12: continue

        alpha = fminbound(
            lambda x: calc_loss( r1, r2, theta + x*gradient ),
            min_step, max_step)
                
        loss = calc_loss( r1, r2, theta + alpha*gradient )
        if loss < prev_loss:
            theta += alpha*gradient

    return theta
Example #28
    def bellman_operator(self, w, compute_policy=False):
        """
        The approximate Bellman operator, which computes and returns the
        updated value function Tw on the grid points.

        Parameters
        ----------
        w : array_like(float, ndim=1)
            The value of the input function on different grid points
        compute_policy : Boolean, optional(default=False)
            Whether or not to compute policy function

        """
        # === Apply linear interpolation to w === #
        Aw = lambda x: interp(x, self.grid, w)

        if compute_policy:
            sigma = np.empty(len(w))

        # == set Tw[i] equal to max_c { u(c) + beta w(f(k_i) - c)} == #
        Tw = np.empty(len(w))
        for i, k in enumerate(self.grid):
            objective = lambda c: - self.u(c) - self.beta * Aw(self.f(k) - c)
            c_star = fminbound(objective, 1e-6, self.f(k))
            if compute_policy:
                # sigma[i] = argmax_c { u(c) + beta w(f(k_i) - c)}
                sigma[i] = c_star
            Tw[i] = - objective(c_star)

        if compute_policy:
            return Tw, sigma
        else:
            return Tw
Example #29
def max_deflection(M_1, gamma=1.4):
    """Returns maximum deflection angle and corresponding wave angle for given
    Mach number.

    Parameters
    ----------
    M_1 : float
        Upstream Mach number.
    gamma : float, optional
        Specific heat ratio, default 7 / 5.

    Returns
    -------
    theta : float
        Maximum deflection angle.
    beta : float
        Corresponding wave angle.

    """

    def eq(beta, M_1, gamma):
        os = _ShockClass(M_1, beta, gamma)
        return -os.theta

    mu = mach_angle(M_1)
    beta_theta_max = optimize.fminbound(eq, mu, np.pi / 2, args=(M_1, gamma), disp=0)
    os = _ShockClass(M_1, beta_theta_max, gamma)
    return os.theta, os.beta
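
For reference, the same computation can be made self-contained with the explicit theta-beta-Mach relation in place of _ShockClass (which is not shown here): tan(theta) = 2*cot(beta) * (M1**2*sin(beta)**2 - 1) / (M1**2*(gamma + cos(2*beta)) + 2).

import numpy as np
from scipy import optimize

def deflection(beta, M_1, gamma=1.4):
    num = M_1**2 * np.sin(beta)**2 - 1.0
    den = M_1**2 * (gamma + np.cos(2.0 * beta)) + 2.0
    return np.arctan(2.0 / np.tan(beta) * num / den)

M_1 = 2.0
mu = np.arcsin(1.0 / M_1)  # Mach angle: lower bound for the wave angle
beta_max = optimize.fminbound(lambda b: -deflection(b, M_1), mu, np.pi / 2, disp=0)
print(np.degrees(deflection(beta_max, M_1)))  # ~23 deg for M_1 = 2, gamma = 1.4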
Example #30
	def plotFisherInfo(self, dataContainer, ymax, ymaxsq):
		plt.clf()
		t = np.linspace(0, 1, 500)
		
		minf = lambda x: -1 * self.m(x)
		minx = fminbound(minf, 0, 1)
		fval = self.m(minx)
		
		noisemean = UnivariateSpline(self.m(t)/fval, self.s(t)/fval/fval)
		self.se = noisemean(0)
		
		fi = lambda a, sa, sp: 2*np.power(sa, 2)/ (np.power(a,2)*2*sa+np.power(sp,2))
		fiapp = lambda a, sa, sp: sa / (np.power(a,2))
		plt.xlim(0, 1)
		plt.ylim(0, ymaxsq)
		print('whop whop')
		plt.plot(t, fi(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval))
		plt.plot(t, fiapp(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval), 'r')
		plt.savefig(self.workdir+"/variance.pdf")
		plt.clf()
		plt.ylim(0, ymax)
		plt.plot(t, np.sqrt(fi(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval)))
		plt.plot(t, np.sqrt(fiapp(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval)), 'r')
		plt.savefig(self.workdir+"/stddev.pdf")
		plt.clf()
		plt.plot(t, 1/fi(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval))
		plt.plot(t, 1/fiapp(self.m(t,1)/fval, self.s(t)/fval/fval-self.se, self.s(t, 1)/fval/fval), 'r')
		plt.savefig(self.workdir+"/fisherInfo.pdf")
Example #31
 def u_to_x(self, u,marg,x=None):
   """Transformation from u to x
   """
   if x is None:
     x = np.zeros(len(u))
   for i in range(len(u)):
     q = marg.getP1()
     r = marg.getP2()
     a = marg.getP3()
     b = marg.getP4()
     mean = marg.getMean()
     normal_val = Normal.cdf(u[i],0,1)
     par = opt.fminbound(zero_beta, 0,1, args =(q,r,normal_val),disp=False)
     x01 = par
     x[i] = a+x01*(b-a)
   return x
Example #32
    def get_tau_t(calc_wn: Callable, tau_t: float, tau_T: float,
                  ess: float) -> float:
        """
        Solve optimization problem that yields the constraint coefficient, tau_t for the next
        time step. Optimized in log-scale

        :param calc_wn: Function that calculates wn (used to modify weights)
        :param tau_t: Initial guess of the constraint coefficient
        :param tau_T: Largest allowable value of constraint coefficient
        :param ess: Effective sample size tau_t is chosen to optimize for
        :return: tau: Optimal tau_t
        """
        F = lambda tau_tt: sum(calc_wn(10**tau_tt))**2 / (sum(
            calc_wn(10**tau_tt)**2)) - ess
        tau = optimize.fminbound(F, np.log10(tau_t), np.log10(tau_T))
        return 10**tau
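
A toy sketch of the search above: pick tau so that the effective sample size sum(w)**2 / sum(w**2) hits a target, optimizing over log10(tau). The weight model w_n = exp(-tau * x_n) is invented for illustration, and fminbound is applied to |ESS - target| since the goal here is a root rather than a minimum:

import numpy as np
from scipy import optimize

rng = np.random.default_rng(1)
x = rng.random(500)
calc_wn = lambda tau: np.exp(-tau * x)

target_ess = 250.0
def ess_gap(log_tau):
    w = calc_wn(10**log_tau)
    return abs(w.sum()**2 / (w**2).sum() - target_ess)

log_tau = optimize.fminbound(ess_gap, np.log10(1e-3), np.log10(10.0))
print(10**log_tau)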
Example #33
def penalty_optimize(point, init_func, penalty_param, general_function,
                     cut_function, penalty_func, step_delta, precision):
    def penalty(point):
        return init_func(point) + penalty_func(point, general_function,
                                               cut_function, penalty_param)

    point_next = optimize.fminbound(penalty, -10., 10)
    if check_stop_condition_func(penalty_func, point_next, general_function,
                                 cut_function, penalty_param, precision):
        print('Result function value: ', penalty(point_next))
        return point_next
    else:
        return penalty_optimize(
            derived_penalty_x_search_by_param(penalty_param), init_func,
            (penalty_param * step_delta), general_function, cut_function,
            penalty_func, step_delta, precision)
Example #34
    def _find_estimator_weight(self, y, dv_pre, y_pred):
        """Make line search to determine estimator weights."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            def optimization_function(alpha):
                p_ij = self._estimate_instance_probabilities(dv_pre + alpha * y_pred)
                p_i = self._estimate_bag_probabilites(p_ij)
                return self._negative_log_likelihood(p_i)

            # TODO: Add option to choose optimization method.

            alpha, fval, err, n_func = fminbound(optimization_function, 0.0, 5.0, full_output=True, disp=1)
            if self.learning_rate < 1.0:
                alpha *= self.learning_rate
        return alpha, fval
Example #35
def infnorm(H):
    """Find the infinity norm of a z-domain transfer function.

    **Parameters:**

    H : object
        the LTI description of the DT system, which can be in one of the
        following forms:

        * an LTI object,
        * a zpk tuple,
        * a (num, den) tuple,
        * an ABCD matrix (internally converted to zpk representation),
        * a list-like containing the A, B, C, D matrices (also internally
          converted to zpk representation).

    **Returns:**

    Hinf : float
           The infinity norm of ``H``.
    fmax : float
           The frequency to which Hinf corresponds.
    """
    # Get a rough idea of the location of the maximum.
    N = 129
    w = np.linspace(0, 2 * np.pi, num=N, endpoint=True)
    dw = 2 * np.pi / (N - 1)
    Hval = evalTF(H, np.exp(1j * w))
    Hinf = np.max(np.abs(Hval))
    wi = np.where(np.abs(Hval) == Hinf)[0]

    # Home in using the scipy "fminbound" function.
    # original MATLAB code:
    #   wmax = fminbnd(nabsH, w(wi)-dw, w(wi)+dw, options, H);
    wmax = fminbound(nabsH, w[wi]-dw, w[wi]+dw, args=(H,), \
                     xtol=1e-08, maxfun=5000, full_output=0)

    if wmax is None:
        warn('Hinf: scipy.optimize.fminbound() failed.' +
             ' The result returned may not be very accurate.')
        wmax = w[wi]

    Hinf = -nabsH(wmax, H)
    fmax = wmax / (2 * np.pi)
    # In the original Toolbox, wmax is returned as well (though it seems to be
    # never used); we return fmax instead.
    return Hinf, fmax
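
A self-contained check of the idea on a filter with a known peak: H(z) = 1/(1 - 0.5*z**-1) attains its maximum magnitude 2.0 at w = 0. Since evalTF and nabsH are not shown above, plain lambdas stand in for them here:

import numpy as np
from scipy.optimize import fminbound

absH = lambda w: np.abs(1.0 / (1.0 - 0.5 * np.exp(-1j * w)))

N = 129
w = np.linspace(0, 2 * np.pi, num=N, endpoint=True)
dw = 2 * np.pi / (N - 1)
wi = np.argmax(absH(w))  # coarse location of the peak
wmax = fminbound(lambda x: -absH(x), w[wi] - dw, w[wi] + dw, xtol=1e-8, maxfun=5000)
print(absH(wmax))  # ~2.0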
Example #36
def rsmooth(y):
    if y.ndim < 2:
        y = np.atleast_2d(y)
        one_dim = True
    else:
        one_dim = False

    n1, n2 = y.shape
    n = n1 * n2  # noqa: F841
    N = (np.array([n1, n2]) != 1).sum()
    Lambda = (
        repmat(-2 + 2 * np.cos(np.arange(0, n2) * np.pi / n2), n1, 1)
        + (-2 + 2 * np.cos(np.arange(0, n1) * np.pi / n1))[:, None]
    )
    W = np.ones((n1, n2))
    z = zz = y

    def GCVscore(p):
        """Generalized cross-validation score."""
        # This makes the code more similar to the original
        # and avoids recomputing z after optimizing the penalty
        nonlocal z
        n = y.size
        s = 10 ** p  # Penalty term
        Gamma = 1 / (1 + s * Lambda ** 2)  # See equation 6
        z = idct2(Gamma * DCTy)
        RSS = norm(np.sqrt(W) * (y - z)) ** 2  # Residual sum-of-squares
        TrH = np.sum(Gamma)  # Trace of "hat matrix"
        GCVs = RSS / n / (1 - TrH / n) ** 2
        return GCVs

    for k in range(1, 7):
        tol = np.inf
        while tol > 1e-5:
            DCTy = dct2(W * (y - zz) + zz)
            p = fminbound(GCVscore, -15, 38)
            tol = norm(zz - z) / norm(z)
            zz = z
        s = 10 ** p
        tmp = np.sqrt(1 + 16 * s)
        h = (np.sqrt(1 + tmp) / np.sqrt(2) / tmp) ** N
        W = bisquare(y - z, h)

    if one_dim:
        z = z[0]

    return z
Example #37
    def optimal_t_compressed(self, seq_pair, multiplicity):
        """
        Find the optimal distance between the two sequences
        """

        def _neg_prob(t, seq_pair, multiplicity):
            """
            Probability to observe the child given the parent state, transition
            matrix, and the time of evolution (branch length).

            Args:
             - t(double): branch length (time between sequences)
             - parent (numpy.array): parent sequence
             - child(numpy.array): child sequence
             - tm (GTR): model of evolution

            Returns:
             - prob(double): negative probability of the two given sequences
               to be separated by the time t.
            """
            return -1.0*self.prob_t_compressed(seq_pair, multiplicity,t, return_log=True)

        try:
            from scipy.optimize import minimize_scalar
            opt = minimize_scalar(_neg_prob,
                    bounds=[0,ttconf.MAX_BRANCH_LENGTH],
                    method='Bounded',
                    args=(seq_pair, multiplicity), tol=1e-8)
            new_len = opt["x"]
        except ImportError:
            import scipy
            print('legacy scipy', scipy.__version__)
            from scipy.optimize import fminbound
            new_len = fminbound(_neg_prob,
                    0,ttconf.MAX_BRANCH_LENGTH,
                    args=(seq_pair, multiplicity))
            opt={'success':True}

        if new_len > .9 * ttconf.MAX_BRANCH_LENGTH:
            self.logger("WARNING: GTR.optimal_t_compressed -- The branch length seems to be very long!", 4, warn=True)

        if opt["success"] != True:
            # return hamming distance: number of state pairs where state differs/all pairs
            new_len =  np.sum(multiplicity[seq_pair[:,1]!=seq_pair[:,0]])/np.sum(multiplicity)

        return new_len
Example #38
    def _inhomogeneous_process_wk(t, rate):
        # Work routine that implements the basic inhomogeneous
        # algorithm described above. This is called repeatedly to do
        # the blocking.
        t_rmax = fminbound(lambda x: -rate(x), 0.0, t)
        rmax = rate(t_rmax)

        ut = homogeneous_process(t, rmax)

        if ut.shape[0] == 0:
            return ut

        da = rnd.rand(ut.shape[0])

        ra = np.vectorize(rate)(ut)

        return ut[np.where(da < ra / rmax)]
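
A hedged end-to-end sketch of the thinning scheme above, with an explicit homogeneous sampler (not shown in the snippet): fminbound locates the peak rate rmax, points are drawn at rate rmax, and each is kept with probability rate(t)/rmax.

import numpy as np
from scipy.optimize import fminbound

rng = np.random.default_rng(2)

def homogeneous_process(t, rmax):
    n = rng.poisson(rmax * t)
    return np.sort(rng.uniform(0.0, t, size=n))

rate = lambda x: 5.0 * (1.0 + np.sin(x))  # example intensity function
t = 10.0

t_rmax = fminbound(lambda x: -rate(x), 0.0, t)
rmax = rate(t_rmax)
ut = homogeneous_process(t, rmax)
keep = rng.random(ut.shape[0]) < rate(ut) / rmax
print(ut[keep])  # event times of the inhomogeneous process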
Example #39
def opt_alt_arc(d):

    # Last modified: 18 July 2019
    '''
    Finds the optimal satellite altitude h* for two ground stations 
    separated by an arc-distance of d (in meters), considering a sat at 
    the symmetry point. Optimization w.r.t. transmissivity eta_Tot. Note
    optimal altitude is NOT always a priori the lowest altitude. 
    '''
    def objfunc(h):

        return -eta_Tot(link_distance(d, h), h)

    opt = fminbound(objfunc, 0, 1500000)
    #opt=minimize_scalar(objfunc,bounds=(0,1500000),method='bounded')

    return opt
Example #40
def _adalasso_bic_nu(R, T, eps=1e-3):
    p = len(R) - 1

    def bic(nu):
        B_star, cost_star, lmbda_star, bic = _adalasso_bic(
            R, T, p, nu, eps=eps)
        return B_star, cost_star, lmbda_star, -bic

    nu_star, _bic_star, err, _ = fminbound(
        lambda nu: bic(nu)[-1], x1=0.5, x2=2.0,
        xtol=1e-2, full_output=True)

    if err:
        warnings.warn("fminbound exceeded maximum iterations")

    B_star, cost_star, lmbda_star, neg_bic_star = bic(nu_star)
    return B_star, cost_star, lmbda_star, -neg_bic_star, nu_star
Example #41
def XvT(J, hpj=1):
    Temp = np.linspace(0, 10, 51)

    xmin = []

    for Tpj in Temp:
        fun = lambda x: F(J, hpj * abs(J), Tpj * abs(J), x)
        xmin.append(opt.fminbound(fun, 0, 1, xtol=1e-9))

    plt.plot(Temp, xmin)
    plt.ylabel("Composition")
    plt.xlabel("Temperature (T/J)")

    title = "J=" + str(J) + ", h/J=" + str(hpj)
    plt.title(title)

    plt.show()
Example #42
def v_bellman_objective(values, outputArray, l, w_a, v):
    """
        The maximization task for each worker process at a given wealth stock/bequest <k>.
        Maximize the value function over the saving rate <s>, assuming the value-function
        form from the previous iteration. It returns the new value at <k> in the new
        function form.

        The functional form is represented by the value of the function at each designated
        <k> and linear interpolation is used to get the value of the function for other
        <k>-s.
    """
    i = values[0]
    k = values[1]

    objective = lambda s, k: -np.log(k - s) - BETA * v_integral(s, w_a, v)[0]
    s_star = fminbound(objective, 1e-12, k - 1e-12, args=(k, ))
    outputArray[i] = -objective(s_star, k)
Example #43
def linear_velocity(r, m, l=None):
    '''
    Computes the velocity assuming it is a pulled wave. If l is given, uses the relationship between l and v.
    '''
    if r < 1: return 0  # linear approximation cannot work

    if l is None:

        def func_to_minimize(l):
            return linear_velocity(r, m, l=l)

        #return func_to_minimize(fminbound(func_to_minimize, 0.01, 100))
        l = fminbound(func_to_minimize, 0.01, 100)
        return (l, func_to_minimize(l))

    else:
        return np.log(r * (1 - m + m * (np.exp(l) + np.exp(-l)) / 2)) / l
Example #44
def cosmo_dist_to_z(d,
                    derr=None,
                    disttype=0,
                    inttol=1e-6,
                    normed=False,
                    intkwargs={}):
    """
    Convert a distance to a redshift. See :func:`cosmo_z_to_dist` for meaning of
    parameters. Note that if `d` is None, the maximum distance will be returned.
    """
    from scipy.optimize import brenth
    maxz = 10000.0

    if derr is not None:
        raise NotImplementedError

    if d is None:
        if disttype == 2:
            #find maximum value for angular diam dist
            from scipy.optimize import fminbound
            res = upper = 5
            while abs(res - upper) < inttol:
                #-2 flips sign so that we get a minimum instead of a maximum
                res = fminbound(cosmo_z_to_dist,
                                0,
                                upper, (None, -2, inttol, normed, intkwargs),
                                inttol,
                                full_output=1)
                res = res[0]  # this is the redshift, -res[1] is the distance value
            return res
        else:
            d = cosmo_z_to_dist(None, None, disttype, inttol, normed,
                                intkwargs)

    f = lambda z, dmin: dmin - cosmo_z_to_dist(z, None, disttype, inttol,
                                               normed, intkwargs)
    try:
        while f(maxz, d) > 0:
            maxz = maxz**2
    except OverflowError:
        raise ValueError('input distance %g impossible' % float(d))

    zval = brenth(f, 0, maxz, (d, ), xtol=inttol)

    return zval
Example #45
def calcMI_MC(x, y, constellation):
    """
        Transcribed from Dr. Tobias Fehenberger MATLAB code.
        See: https://www.fehenberger.de/#sourcecode
    """
    if y.shape[0] != 1:
        y = y.T
    if x.shape[0] != 1:
        x = x.T
    if constellation.shape[0] == 1:
        constellation = constellation.T

    M = constellation.size
    N = x.size
    P_X = np.zeros((M, 1))

    x = x / np.sqrt(np.mean(np.abs(x)**2))  # normalize such that var(X)=1
    y = y / np.sqrt(np.mean(np.abs(y)**2))  # normalize such that var(Y)=1

    ## Get X in Integer Representation
    xint = np.argmin(np.abs(x - constellation)**2, axis=0)

    fun = lambda h: np.dot(h * x - y, np.conj(h * x - y).T)
    h = fminbound(fun, 0, 2)
    N0 = np.real((1 - h**2) / h**2)
    y = y / h

    ## Find constellation and empirical input distribution
    for s in np.arange(M):
        P_X[s] = np.sum(xint == s) / N

    ## Monte Carlo estimation of (a lower bound to) the mutual information I(XY)
    qYonX = 1 / (np.pi * N0) * np.exp(
        (-(np.real(y) - np.real(x))**2 - (np.imag(y) - np.imag(x))**2) / N0)

    qY = 0
    for ii in np.arange(M):
        qY = qY + P_X[ii] * (1 / (np.pi * N0) * np.exp(
            (-(np.real(y) - np.real(constellation[ii, 0]))**2 -
             (np.imag(y) - np.imag(constellation[ii, 0]))**2) / N0))

    realmin = np.finfo(float).tiny
    MI = 1 / N * np.sum(
        np.log2(np.maximum(qYonX, realmin) / np.maximum(qY, realmin)))

    return MI
Example #46
    def __init__(self, evolvable_pokemon, total_candies, evolution_cost):
        self._evolvable_pokemon = evolvable_pokemon
        self._total_candies = total_candies
        self._evolution_cost = evolution_cost

        minimization_results_tuple = optimize.fminbound(
            self._min_evolution_function,
            0,
            self._evolvable_pokemon,
            full_output=True)

        # [pokemon_to_transfer, negative pokemon_to_evolve, n/a, n/a]
        # The results array is more of a halfway point
        minimization_results_tuple = self._floor_results_tuple(
            minimization_results_tuple)
        self.pokemon_to_transfer = int(minimization_results_tuple[0])
        self.pokemon_to_evolve = int(minimization_results_tuple[1])
Example #47
    def optevidence(self, sigma=None, **kwargs):
        sigma = sigma if sigma is not None else self.estimatenoise()
        iprint = kwargs.pop('iprint', 0)
        delta = kwargs.pop('delta', 1E-4)
        tol = kwargs.pop('tol', 1E-2)
        bound = kwargs.pop('bound', (0., 1000))
        p0 = self.params

        def minusevd(gamma):
            self.gamma = gamma
            if iprint:
                print("\tgamma={}".format(gamma))
            self.set_params(p0.copy())
            self.fit(iprint=iprint, delta=delta)
            return -self.evidence(sigma)

        return fminbound(minusevd, bound[0], bound[1], xtol=tol, **kwargs)
Example #48
def maximization(image_db, parameters, hidden_variable_estimates):

    exp_h = hidden_variable_estimates[0]
    tp2 = exp_h * image_db
    parameters['mu'] = tp2.sum(axis=0) / np.sum(exp_h)

    parameters['cov'] = np.zeros((image_db.shape[1], image_db.shape[1]))

    for i in range(image_db.shape[0]):
        diff = image_db[i, :] - parameters['mu']
        diff = np.array(diff)[np.newaxis]
        parameters['cov'] += exp_h[i] * np.matmul(diff.T, diff) / np.sum(exp_h)

    parameters['dof'] = fminbound(t_cost_calculation, 0, 999,
                                  args=(hidden_variable_estimates[0], hidden_variable_estimates[1]))

    return parameters
Example #49
        def _alpha_min(estimate):
            # Initialize cost evaluation to zero
            cost = 0
            for ndx in range(n_items):
                # pylint: disable=cell-var-from-loop
                scalar = the_sign[ndx, :] * estimate

                def _beta_min(beta):
                    otpt = np.exp(scalar * (thetas - beta))
                    return np.log1p(otpt).dot(counts)

                # Solves for the difficulty parameter for a given item at
                # a specific discrimination parameter
                betas[ndx] = fminbound(_beta_min, -6, 6)
                cost += _beta_min(betas[ndx])

            return cost
Example #50
    def minimize_obj_given_e(_e):
        objective_fct_fixed_e = lambda d: objective_fct(_e, d)
        domain_fct_fixed_e = lambda d: domain_fct(_e, d)

        d_min = 0.
        d_max = 2 * (sqrt(_e) - _e)
        d_inside_domain = find_d_minimizing_KL_given_e(_e)
        if empirical_disagreement > 0. and False: # Hack
            d_min = optimize.brentq(domain_fct_fixed_e, 1e-9, d_inside_domain)
        if domain_fct_fixed_e(d_max) > 0.:
            d_max = optimize.brentq(domain_fct_fixed_e, d_inside_domain, d_max)
        
        if(d_max<5*10**(-5)): # Small hack
            optimal_d = 0
        else:
            optimal_d = optimize.fminbound( objective_fct_fixed_e, d_min, d_max)
        return objective_fct(_e, optimal_d)
Example #51
def main():
    # load data
    particle = loadtxt('p3 data.txt', delimiter=',')
    x, y = map(array, zip(*particle))
    N = sum(y)

    Y = y * 1. / N

    # distribution fitting
    def fit_error(L_guess):
        # calculate the distance between our guess and the data
        z = poisson.pmf(x, L_guess)
        return norm(Y - z, 1) / norm(Y, 1)  # can be any p-norm

    L_best = fminbound(fit_error, 1e-5, 6 - 1e-5)
    err_best = fit_error(L_best)
    print("Best fit: L = %f,  error = %f" % (L_best, fit_error(L_best)))

    # generate data for animation
    F = [(p, fit_error(p)) for p in linspace(1e-4, 6 - 1e-4, 137)]
    u, v = map(array, zip(*F))

    # draw basic figure
    fig = figure()

    subplot(2, 1, 2)
    plot(u, v, 'b-')
    #plot( L_best, fit_error(L_best), 'ko')
    marker_fit_error = plot(u[2], v[2], 'ro').pop()
    ylim(0, 1)
    xlabel(r'Poisson intensity $\lambda$')

    subplot(2, 1, 1)
    plot(x, y, 'ko')
    z = poisson.pmf(x, L_best)
    width = .3
    xw = x - width * .5
    bar_ob = bar(xw, z * N, width, color='r')
    ttl = r'N = %d observations, $\lambda_{best} = %.3f$, $Err_{best} = %1.2g$'
    title(ttl % (N, L_best, err_best))
    ylabel('Number of times observed')
    xlabel('Number of successes')
    xlim(-0.5, max(x) + .5)
    ylim(0, max(y) * 1.2)

    show(block=False)
Example #52
    def computeAffectedDepth(self, Beff):
        '''Computes affected depth.

            Args:
                :Beff: (float) width of the effective foundation area
                      (see figure 12 in page 44 of reference[2]).
        '''
        Bmin = Beff
        Bmax = 3.0 * Beff
        # define the starting guess
        start_guess = (Bmin + Bmax) / 2.0

        # define the acceptable range for Beff
        my_ranges = (Bmin, Bmax)

        # minimize
        return fminbound(self.getEquivalentPhi, Bmin, Bmax)
Example #53
    def bellman_operator(self, V, return_policy=False):
        """
        The approximate Bellman operator, which computes and returns the
        updated value function TV (or the V-greedy policy c if
        return_policy is True).

        Parameters
        ----------
        V : array_like(float)
            A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
        return_policy : bool, optional(default=False)
            Indicates whether to return the greed policy given V or the
            updated value function TV.  Default is TV.

        Returns
        -------
        array_like(float)
            Returns either the greed policy given V or the updated value
            function TV.

        """
        # === Simplify names, set up arrays === #
        R, Pi, beta, u, b = self.R, self.Pi, self.beta, self.u, self.b
        asset_grid, z_vals = self.asset_grid, self.z_vals
        new_V = np.empty(V.shape)
        new_c = np.empty(V.shape)
        z_idx = list(range(len(z_vals)))

        # === Linear interpolation of V along the asset grid === #
        vf = lambda a, i_z: interp(a, asset_grid, V[:, i_z])

        # === Solve r.h.s. of Bellman equation === #
        for i_a, a in enumerate(asset_grid):
            for i_z, z in enumerate(z_vals):

                def obj(c):  # objective function to be *minimized*
                    y = sum(vf(R * a + z - c, j) * Pi[i_z, j] for j in z_idx)
                    return -u(c) - beta * y

                c_star = fminbound(obj, np.min(z_vals), R * a + z + b)
                new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

        if return_policy:
            return new_c
        else:
            return new_V
Example #54
def min_slope(x, y):
    order = np.argsort(x)
    # print(f'xorder: {x[order]} xshape {x.shape}')
    # print(f'yoder: {y[order]} yshape {y.shape}')
    f = splrep(x[order], y[order])
    e = (np.max(x) - np.min(x)) * 1e-4

    def df2(x, f, e):
        return ((splev(x + e, f) - splev(x - e, f)) / (2 * e))**2

    bw, slope, err, iters = fminbound(df2,
                                      np.min(x),
                                      np.max(x),
                                      args=(f, e),
                                      full_output=True)
    mode = splev(bw, f)
    return bw, mode, slope
Example #55
def get_losses(lossfun, target_values, x_seq, eps_seq):
    all_losses_sq = []
    ERM_loss = []
    for x in x_seq:
        loss_values = lossfun(x, target_values)
        chi_losses = []
        ERM_loss.append(np.mean(loss_values))
        for i in range(len(eps_seq)):
            chi_loss = loss_map_chi_factory(loss_values, eps_seq[i])
            cutpt = optimize.fminbound(chi_loss,
                                       np.min(loss_values) - 1000.0,
                                       np.max(loss_values),
                                       xtol=0.01)
            chi_losses.append(chi_loss(cutpt))
        all_losses_sq.append(chi_losses)
    losses_by_qtl_sq = zip(*all_losses_sq)
    return losses_by_qtl_sq, ERM_loss
Example #56
 def _find_optimal_xmin(cls, xmins, clean_data, discrete, approx, method,
                        solver_opts):
     """Find optimal xmin by minimizing Kolmogorov-Smirnov (KS) distance."""
     if method == 'brute':
         xmin, D = cls._brute_force_minimize(xmins, clean_data, discrete,
                                             approx)
     elif method == 'bounded':
         result = optimize.fminbound(cls._compute_ks_distance,
                                     xmins.min(),
                                     xmins.max(),
                                     args=(clean_data, discrete, approx),
                                     full_output=True,
                                     **solver_opts)
         xmin, D, _, _ = result
     else:
         raise ValueError
     return xmin, D
Example #57
def linear_velocity_DSCT(r, m, cs, K, l=None):
    '''
    Computes the velocity assuming it is a pulled wave. If l is given, uses the relationship between l and v.
    '''
    if r < 1: return 0  # linear approximation cannot work

    if l is None:

        def func_to_minimize(l):
            return linear_velocity_DSCT(r, m, cs, K, l=l)

        #return func_to_minimize(fminbound(func_to_minimize, 0.01, 100))
        l = fminbound(func_to_minimize, 0.01, 100)
        return (l, func_to_minimize(l))

    else:
        return (r / K * cs + m * (1 + np.cosh(l))) / l
Example #58
    def __call__(self, A, Y, rng=None, E=None):
        try:
            from scipy.optimize import fminbound
        except ImportError:
            fminbound = None

        tstart = time.time()

        # m, n = A.shape
        # _, d = Y.shape
        m = A.shape[0]

        Amax = A.max()
        get_lambda = lambda reg: m * (reg * Amax)**2

        AA = np.dot(A.T, A)
        e, Q = np.linalg.eigh(AA)
        P = np.dot(A, Q)
        PY = np.dot(P.T, Y)
        P2 = P**2

        def looe(reg):  # leave-one-out error for given regularization
            lamb = get_lambda(reg)
            c = Y - np.dot(P, PY / (e[:, None] + lamb))
            d = (P2 / (e + lamb)).sum(axis=1)
            return ((c / (1 - d)[:, None])**2).sum()

        # find regularization that minimizes LOOE
        b0, b1 = 10.**(-3), 0.2
        if fminbound is None:  # do a simple grid search
            regs = np.logspace(np.log10(b0), np.log10(b1), 25)
            looes = [looe(reg) for reg in regs]
            reg = regs[np.argmin(looes)]
        else:  # assume unimodal function (only one local minimum) and search
            reg = fminbound(looe, b0, b1, xtol=5e-4, maxfun=50)

        lamb = get_lambda(reg)
        X = np.dot(Q, PY / (e[:, None] + lamb))

        tend = time.time()

        return self.mul_encoders(X, E), {
            'rmses': npext.rms(np.dot(A, X) - Y, axis=1),
            'time': tend - tstart,
            'reg': reg}
Example #59
def maximize_c_bound_under_constraints(empirical_disagreement, empirical_joint_error, right_hand_side, sup_joint_error=0.5):
    """
    maximize F(d,e) such that:
        kl( empirical_disagreement, empirical_joint_error || d,e ) <= right_hand_side
        2*e + d < 1 (i.e., the Gibbs risk is less than 1/2)
        d <= 2*[sqrt(e)-e] (i.e., the margin variance is positive)
        e <= sup_joint_error (default: 0.5; used by PAC-Bound 2 prime)
    """

    # Objective function
    objective_fct = lambda e,d: -1 * c_bound_third_form(e+d/2, d)

    # Domain constraint given by the KL-divergence
    domain_fct = lambda e,d: KL_trinomial(empirical_joint_error, empirical_disagreement, e, d) - right_hand_side

    # If the constraint 2*e + d < 1 crosses the domain, the bound is trivial
    if empirical_disagreement > 0.0:
        if domain_fct( (1.0-empirical_disagreement)/2, empirical_disagreement) < 0.0:
            return 1.0

    # Find max value of joint error inside the domain
    find_d_minimizing_KL_given_e = lambda e: (e-1.0)*empirical_disagreement/(empirical_joint_error-1.0)
    minimize_domain_fct_given_e = lambda e: domain_fct( e, find_d_minimizing_KL_given_e(e) )
    e_max = optimize.brentq(minimize_domain_fct_given_e, empirical_joint_error, .5)
    e_max = min( e_max, sup_joint_error)

    # Given a fixed value of joint error, maximize the objective under the domain constraints
    def minimize_obj_given_e(_e):
        objective_fct_fixed_e = lambda d: objective_fct(_e, d)
        domain_fct_fixed_e = lambda d: domain_fct(_e, d)

        d_min = 0.
        d_max = 2 * (sqrt(_e) - _e)
        d_inside_domain = find_d_minimizing_KL_given_e(_e)
        if empirical_disagreement > 0.:
            d_min = optimize.brentq(domain_fct_fixed_e, 1e-9, d_inside_domain)
        if domain_fct_fixed_e(d_max) > 0.:
            d_max = optimize.brentq(domain_fct_fixed_e, d_inside_domain, d_max)

        optimal_d = optimize.fminbound( objective_fct_fixed_e, d_min, d_max)
        return objective_fct(_e, optimal_d)

    # Solve the optimization problem!
    obj_value = optimize.fminbound( minimize_obj_given_e, empirical_joint_error, e_max, full_output=True)[1]
    return -1 * obj_value
Example #60
def quad_leja(order, dist):
    """
    Generate Leja quadrature node.

    Example:
        >>> abscisas, weights = quad_leja(3, chaospy.Normal(0, 1))
        >>> print(abscisas)
        [[-2.71725575 -1.41421279  0.          1.76349491]]
        >>> print(weights)
        [ 0.02201589  0.16287411  0.65057226  0.16453774]
    """
    assert not dist.dependent()

    if len(dist) > 1:
        if isinstance(order, int):
            out = [quad_leja(order, _) for _ in dist]
        else:
            out = [quad_leja(order[_], dist[_]) for _ in range(len(dist))]

        abscissas = [_[0][0] for _ in out]
        weights = [_[1] for _ in out]
        abscissas = chaospy.quad.combine(abscissas).T
        weights = chaospy.quad.combine(weights)
        weights = numpy.prod(weights, -1)

        return abscissas, weights

    lower, upper = dist.range()
    abscissas = [lower, dist.mom(1), upper]
    for _ in range(order):

        obj = create_objective(dist, abscissas)
        opts, vals = zip(
            *[fminbound(
                obj, abscissas[idx], abscissas[idx+1], full_output=1)[:2]
              for idx in range(len(abscissas)-1)]
        )
        index = numpy.argmin(vals)
        abscissas.insert(index+1, opts[index])

    abscissas = numpy.asfarray(abscissas).flatten()[1:-1]
    weights = create_weights(abscissas, dist)
    abscissas = abscissas.reshape(1, abscissas.size)

    return numpy.array(abscissas), numpy.array(weights)