Example #1
    def mr(self, retAll=False, checkBoxBounds=True):
        # returns the maximum constraint residual; with retAll=True also the
        # name and index of the worst constraint
        
        if not hasattr(self, '_mr'):
            r, fname, ind = 0, None, 0
            if self.isMultiArray: 
                r = zeros(self.x.shape[0])
            ineqs = ['lin_ineq', 'lb', 'ub'] if checkBoxBounds else ['lin_ineq']
            eqs = ['lin_eq']
            if self.p._baseClassName == 'NonLin':
                ineqs.append('c')
                eqs.append('h')
            for field in ineqs:
                fv = array(getattr(self, field)())
                if fv.size > 0:
                    val_max = nanmax(fv, fv.ndim - 1)
                    # record name/index of the worst constraint before r is
                    # updated; afterwards r < val_max could never hold
                    if not self.isMultiArray and not isnan(val_max) and r < val_max:
                        ind, fname = where(fv == val_max)[0][0], field
                    r = where(r < val_max, val_max, r)  # elementwise max, NaN-safe
            for field in eqs:
                fv = array(getattr(self, field)())
                if fv.size > 0:
                    fv = abs(fv)  # equality residual is |h(x)|
                    val_max = nanmax(fv, fv.ndim - 1)
                    if not self.isMultiArray and not isnan(val_max) and r < val_max:
                        # index via value match; argmax could pick a NaN entry
                        ind, fname = where(fv == val_max)[0][0], field
                    r = where(r < val_max, val_max, r)  # elementwise max, NaN-safe
            if not self.isMultiArray:
                self._mr, self._mrName, self._mrInd = r, fname, ind
        if retAll:
            # retAll assumes a single point (isMultiArray False), where the
            # cached scalar residual and index exist
            return asscalar(copy(self._mr)), self._mrName, asscalar(copy(self._mrInd))
        return asscalar(copy(self._mr)) if not self.isMultiArray else r
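The caching and NaN-safe running-maximum pattern above can be shown in isolation. A minimal standalone sketch (the constraint groups and values are made up for illustration; this is not OpenOpt API):

from numpy import array, nanmax, where, isnan, nan

# residual vectors of two hypothetical constraint groups; NaN marks a failed
# evaluation and must not be reported as the maximum
groups = {'lin_ineq': array([0.2, nan, 0.7]), 'c': array([1.5, 0.1])}

r, fname, ind = 0.0, None, 0
for field, fv in groups.items():
    val_max = nanmax(fv)                # ignores NaN entries
    if not isnan(val_max) and r < val_max:
        ind, fname = where(fv == val_max)[0][0], field
    r = where(r < val_max, val_max, r)  # running maximum, NaN-safe
print(r, fname, ind)                    # -> 1.5 c 0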
Example #2
    def linePoint(self, alp, point2, ls=None):
        # returns (1-alp) * point1 + alp * point2,
        # where point1 is self and alp is a real number
        p = self.p
        r = p.point(self.x * (1-alp) + point2.x * alp)
        
        # TODO: optimize this, take ls into account!
        if not (p.iter % 16):
            # every 16th iteration seed the new point's cache with interpolated
            # values; linear functions are exact under affine combination
            # (see the check after this example)
            lin_ineq_predict = self.lin_ineq() * (1-alp) + point2.lin_ineq() * alp
            r._lin_ineq = lin_ineq_predict
            r._lin_eq = self.lin_eq() * (1-alp) + point2.lin_eq() * alp
        
        # for interior points evaluate only those c_i that are violated
        # (or NaN) at either endpoint; the rest are assumed to stay feasible
        if 0 < alp < 1:
            c1, c2 = self.c(), point2.c()
            ind1 = logical_or(c1 > 0,  isnan(c1))
            ind2 = logical_or(c2 > 0,  isnan(c2))
            
            # boolean mask instead of an index array: temporary workaround for PyPy
            ind = logical_or(ind1, ind2)
            
            _c = zeros(p.nc)
            if any(ind):
                _c[ind] = p.c(r.x, where(ind)[0])
            r._c = _c
            r._nNaNs_C = isnan(_c).sum(_c.ndim-1)
        
        # TODO: maybe do the same for h?
        
        return r
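The cache seeding above relies on residuals of linear constraints being affine in x, so values at any point of a segment can be interpolated exactly instead of recomputed. A quick standalone check of that property (A, b and the points are arbitrary illustration values):

from numpy import array, dot, allclose

A = array([[1.0, 2.0], [3.0, -1.0]])
b = array([1.0, 2.0])
x1, x2, alp = array([0.5, 1.5]), array([2.0, -1.0]), 0.3

x_new = x1 * (1 - alp) + x2 * alp
direct = dot(A, x_new) - b            # recompute at the new point
interpolated = (dot(A, x1) - b) * (1 - alp) + (dot(A, x2) - b) * alp
assert allclose(direct, interpolated) # exact up to rounding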
Example #3
    def __decodeIterFcnArgs__(self, p, *args, **kwargs):
        """
        decode and assign x, f, maxConstr
        (and/or other fields) to p.iterValues
        """
        fArg = True

        if len(args) > 0 and isinstance(args[0], Point):
            if len(args) != 1:
                p.err(
                    'incorrect iterfcn args, if you see this contact OO developers'
                )
            point = args[0]
            p.xk, p.fk = point.x, point.f()
            p.rk, p.rtk, p.rik = point.mr(True)
            p.nNaNs = point.nNaNs()
            if p.solver._requiresBestPointDetection and (
                    p.iter == 0 or point.betterThan(p._bestPoint)):
                p._bestPoint = point
        else:
            if len(args) > 0: p.xk = args[0]
            elif 'xk' in kwargs: p.xk = kwargs['xk']
            elif not hasattr(p, 'xk'):
                p.err(
                    'iterfcn must get x value, if you see this inform OO developers'
                )
            if p._baseClassName == 'NonLin':
                C, H = p.c(p.xk), p.h(p.xk)
                p.nNaNs = isnan(C).sum() + isnan(H).sum()
            if p.solver._requiresBestPointDetection:
                currPoint = p.point(p.xk)
                if p.iter == 0 or currPoint.betterThan(p._bestPoint):
                    p._bestPoint = currPoint
            if len(args) > 1: p.fk = args[1]
            elif 'fk' in kwargs: p.fk = kwargs['fk']
            else: fArg = False

            if len(args) > 2:
                # deprecated: residual passed as a positional argument
                p.rk = args[2]
            elif 'rk' in kwargs:
                p.rk = kwargs['rk']
            else:
                p.rk, p.rtk, p.rik = p.getMaxResidual(p.xk, True)

        p.iterValues.r.append(p.rk)
        if p.probType != 'IP':
            # for IP problems the residual is not recalculated
            p.rk, p.rtk, p.rik = p.getMaxResidual(p.xk, True)
            p.iterValues.rt.append(p.rtk)
            p.iterValues.ri.append(p.rik)
        if p._baseClassName == 'NonLin': p.iterValues.nNaNs.append(p.nNaNs)

        # TODO: handle kwargs correctly! (decodeIterFcnArgs)

        p.iterValues.x.append(copy(p.xk))
        if not p.storeIterPoints and len(p.iterValues.x) > 2:
            p.iterValues.x.pop(0)

        if not fArg:
            p.Fk = p.F(p.xk)
            p.fk = copy(p.Fk)
        else:
            if asarray(p.fk).size > 1:
                if p.debug and p.iter <= 1:
                    p.warn(
                        'please fix solver iter output func, objFuncVal should be single number (use p.F)'
                    )
                p.Fk = p.objFuncMultiple2Single(asarray(p.fk))
            else:
                p.Fk = p.fk

        v = ravel(p.Fk)[0]
        if p.invertObjFunc: v = -v

        p.iterValues.f.append(v)

        if not isscalar(p.fk) and p.fk.size == 1:
            p.fk = asscalar(p.fk)
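The best-point bookkeeping above keeps the incumbent p._bestPoint unless the new point is strictly better. The comparison rule below is a hypothetical stand-in, not OpenOpt's actual betterThan (which also accounts for NaN counts and problem settings): feasibility first, then objective value.

class BestPoint:
    # illustrative only; names and semantics are assumptions
    def __init__(self, f, max_residual, contol=1e-6):
        self.f, self.mr, self.contol = f, max_residual, contol

    def betterThan(self, other):
        self_feas, other_feas = self.mr <= self.contol, other.mr <= other.contol
        if self_feas != other_feas:
            return self_feas          # a feasible point beats an infeasible one
        if self_feas:
            return self.f < other.f   # both feasible: compare objectives
        return self.mr < other.mr     # both infeasible: compare violations

best = BestPoint(f=3.0, max_residual=0.0)
for cand in (BestPoint(2.5, 0.1), BestPoint(2.8, 0.0)):
    if cand.betterThan(best):
        best = cand
print(best.f)                         # -> 2.8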
Example #4
    def sum_of_all_active_constraints_gradient(self):
        if not hasattr(self, '_sum_of_all_active_constraints_gradient'):
            p = self.p
            contol = p.contol
            x = self.x
            direction = self.all_lin_gradient()
            if p.solver.__name__ == 'ralg':
                new = 1  # violation-weighted, normalized constraint gradients
            elif p.solver.__name__ == 'gsubg':
                new = 0  # plain sum of raw constraint gradients
            else:
                p.err('unhandled solver in Point.sum_of_all_active_constraints_gradient')
                
            if p.userProvided.c:
                th = 0.0  # activity threshold (contol / 2.0 is used for equalities below)
                C = p.c(x)
                Ind = C > th
                ind = where(Ind)[0]
                activeC = asarray(C[Ind])  # asarray and boolean Ind for PyPy compatibility
                if len(ind) > 0:
                    tmp = p.dc(x, ind)

                    if new:
                        # weight each gradient row by its violation and
                        # normalize (see the sketch after this example)
                        if tmp.ndim == 1 or min(tmp.shape) == 1:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            if activeC.size == prod(tmp.shape):
                                activeC = activeC.reshape(tmp.shape)
                            tmp *= (activeC - th*(1.0-1e-15)) / norm(tmp)
                        else:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            tmp *= ((activeC - th*(1.0-1e-15)) / sqrt((tmp**2).sum(1))).reshape(-1, 1)
                            
                    if tmp.ndim > 1:
                        tmp = tmp.sum(0)  # sum the contributions of all active c_i
                    direction += (tmp.A if type(tmp) != ndarray else tmp).flatten()

            if p.userProvided.h:
                th = contol / 2.0  # equalities count as active within half the tolerance
                H = p.h(x)
                Ind1 = H > th
                ind1 = where(Ind1)[0]
                H1 = asarray(H[Ind1])  # asarray and boolean Ind1 for PyPy compatibility
                if len(ind1) > 0:
                    tmp = p.dh(x, ind1)
                    
                    if new:
                        if tmp.ndim == 1 or min(tmp.shape) == 1:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            if H1.size == prod(tmp.shape):
                                H1 = H1.reshape(tmp.shape)
                            tmp *= (H1 - th*(1.0-1e-15)) / norm(tmp)
                        else:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            tmp *= ((H1 - th*(1.0-1e-15)) / sqrt((tmp**2).sum(1))).reshape(-1, 1)
                    
                    if tmp.ndim > 1: 
                        tmp = tmp.sum(0)
                    direction += (tmp.A if isspmatrix(tmp) or hasattr(tmp, 'toarray') else tmp).flatten()
                ind2 = where(H < -th)[0]  # equality constraints violated from below
                H2 = H[ind2]
                if len(ind2) > 0:
                    tmp = p.dh(x, ind2)
                    if new:
                        if tmp.ndim == 1 or min(tmp.shape) == 1:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            if H2.size == prod(tmp.shape):
                                H2 = H2.reshape(tmp.shape)
                            tmp *= (-H2 - th*(1.0-1e-15)) / norm(tmp)  # -H2 > 0 for these rows
                        else:
                            if hasattr(tmp, 'toarray'):
                                tmp = tmp.toarray()
                            tmp *= ((-H2 - th*(1.0-1e-15)) / sqrt((tmp**2).sum(1))).reshape(-1, 1)
                    
                    if tmp.ndim > 1: 
                        tmp = tmp.sum(0)
                    direction -= (tmp.A if type(tmp) != ndarray else tmp).flatten()
            self._sum_of_all_active_constraints_gradient = direction
        return Copy(self._sum_of_all_active_constraints_gradient)
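When new is set, each active constraint's gradient row is normalized and weighted by its violation, so stronger violations pull the direction harder. A standalone sketch of exactly that row scaling (gradient rows and residuals are illustrative values):

from numpy import array, sqrt

grads = array([[1.0, 0.0], [3.0, 4.0]])    # dc/dx rows of two active constraints
viol = array([0.5, 2.0])                   # positive residuals c_i(x), th = 0
row_norms = sqrt((grads ** 2).sum(1))      # ||grad c_i|| per row
scaled = grads * (viol / row_norms).reshape(-1, 1)
direction = scaled.sum(0)                  # summed contribution, as in tmp.sum(0)
print(direction)                           # -> [1.7 1.6]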
Example #5
    def all_lin_gradient(self):
        if not hasattr(self, '_all_lin_gradient'):
            p = self.p
            n = p.n
            d = zeros(n)


            lb, ub = self.lb(), self.ub()
            lin_ineq = self.lin_ineq()
            lin_eq = self.lin_eq()
            # positive residuals mark violated constraints
            ind_lb, ind_ub = lb > 0.0, ub > 0.0
            ind_lin_ineq = lin_ineq > 0.0
            ind_lin_eq = abs(lin_eq) != 0.0  # any nonzero equality residual counts
            

            USE_SQUARES = 1
            if USE_SQUARES:
                # gradient of sum(residual_i^2) over violated constraints;
                # the common factor 2 is applied once at the end
                if any(ind_lb):
                    d[ind_lb] -= lb[ind_lb]  # 0.5 * d/dx((lb - x)^2) for violated lower bounds
                if any(ind_ub):
                    d[ind_ub] += ub[ind_ub]  # 0.5 * d/dx((x - ub)^2) for violated upper bounds
                if any(ind_lin_ineq):
                    # 0.5 * d/dx(||A x - b||^2) over violated rows
                    Ind_lin_ineq = where(ind_lin_ineq)[0]
                    b = p.b[Ind_lin_ineq]
                    if hasattr(p, '_A'):  # sparse A
                        a = p._A[Ind_lin_ineq]
                        # x as a sparse n-by-1 column; integer column indices
                        x_col = csr_matrix((self.x, (arange(n), zeros(n, dtype=int))), shape=(n, 1))
                        tmp = a._mul_sparse_matrix(x_col).toarray().flatten() - b
                        d += a.T._mul_sparse_matrix(tmp.reshape(tmp.size, 1)).A.flatten()
                    else:
                        a = p.A[ind_lin_ineq]
                        d += dot(a.T, dot(a, self.x) - b)
                if any(ind_lin_eq):
                    if isspmatrix(p.Aeq):
                        p.err('this solver is not adjusted to handle sparse Aeq matrices yet')
                    # all equality rows are used; rows with zero residual contribute nothing
                    aeq, beq = p.Aeq, p.beq
                    d += dot(aeq.T, dot(aeq, self.x) - beq)  # 0.5 * d/dx(||Aeq x - beq||^2)

                # factor 2 from the squared residuals, scaled by the tolerance
                self._all_lin_gradient = 2.0 * d / p.contol

            else:
                # disabled alternative: gradient of the sum of non-squared
                # violated residuals; the linear-equality part was never
                # implemented (for ralg it is handled in the dilation matrix)
                assert 0
        return copy(self._all_lin_gradient)
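The squared-penalty branch uses the identity d/dx ||A x - b||^2 = 2 A^T (A x - b); the factor 2 is the one applied at the end of the method. A quick numerical verification with arbitrary data:

from numpy import array, dot, allclose

A = array([[1.0, 2.0], [0.0, 3.0]])
b = array([1.0, -1.0])
x = array([0.5, 0.25])

def sq_norm_residual(x):
    r = dot(A, x) - b
    return dot(r, r)

analytic = 2.0 * dot(A.T, dot(A, x) - b)
eps = 1e-6
basis = (array([1.0, 0.0]), array([0.0, 1.0]))
numeric = array([(sq_norm_residual(x + eps * e) - sq_norm_residual(x - eps * e)) / (2 * eps)
                 for e in basis])
assert allclose(analytic, numeric, atol=1e-5)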