def VOI(x, a=aVec, grad=False):
    # Coefficients b of the affine pieces a_i + b_i * Z induced by sampling at x.
    bVec = bfunc(x)
    # Sort the lines by slope and drop duplicates; keep maps the survivors
    # back to their original indices.
    a, b, keep = AffineBreakPointsPrep(a, bVec)
    # Lines on the upper envelope and the z-values where the envelope switches.
    keep1, c = AffineBreakPoints(a, b)
    keep1 = keep1.astype(np.int64)
    M = len(keep1)
    # Indices of the envelope lines in the original discretization.
    keep2 = keep[keep1]
    return hvoi(b, c, keep1) + a_0(x)
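# hvoi is used above but not defined in this section. Below is a minimal
# sketch of a consistent implementation, following the standard
# knowledge-gradient formula h = sum_i (b_{i+1} - b_i) * f(-|c_i|) with
# f(z) = z*Phi(z) + phi(z) (Frazier, Powell, and Dayanik, 2009). The layout
# of c (length M+1 with a -inf sentinel at c[0], as produced by the
# AffineBreakPoints sketch further down) is an assumption.
import numpy as np
from scipy.stats import norm

def hvoi(b, c, keep):
    M = len(keep)
    if M == 1:
        return 0.0  # a single undominated line carries no value of information
    z = c[keep + 1][:-1]        # interior breakpoints between kept lines
    z = -np.abs(z)
    f = z * norm.cdf(z) + norm.pdf(z)
    return np.sum(np.diff(b[keep]) * f)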
def VOI(x, a2=aVec, grad=False):
    lsum = np.sum(x)
    # Restrict the discretization to the points whose openings are still
    # feasible given x.
    newIndexes = [(i, j) for (i, j) in possiblePoints
                  if np.sum(j) == totalOpenings - lsum]
    bVec = bfunc(x, [j for (i, j) in newIndexes])
    bVec = bVec + np.sqrt(sigma_0(x, x))
    a2 = np.array([a2[i] for (i, j) in newIndexes])
    a2 = a2 + a_0(x)
    a, b, keep = AffineBreakPointsPrep(a2, bVec)
    keep1, c = AffineBreakPoints(a, b)
    keep1 = keep1.astype(np.int64)
    M = len(keep1)
    keep2 = keep[keep1]
    return hvoi(b, c, keep1) + a_0(x)
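# AffineBreakPointsPrep is likewise assumed rather than defined here. This is
# a sketch consistent with its use above: it must return the reordered a and b
# plus a keep vector mapping the surviving lines back to their original
# indices, since the callers compute keep2 = keep[keep1].
import numpy as np

def AffineBreakPointsPrep(a, b):
    # Sort the lines a_i + b_i * z by increasing slope, breaking ties by intercept.
    order = np.lexsort((a, b))
    a, b = a[order], b[order]
    # Among lines with equal slope only the largest intercept can touch the
    # upper envelope; the tie-break puts it last in each run of equal slopes.
    keep = [0]
    for i in range(1, len(b)):
        if b[i] == b[keep[-1]]:
            keep[-1] = i
        else:
            keep.append(i)
    keep = np.array(keep)
    return a[keep], b[keep], order[keep]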
def VOIfunc(self, n, pointNew, L, data, kern, temp1, temp2, grad, a,
            onlyGrad=False):
    n1 = self.n1
    tempN = n + self._numberTraining
    b, temp5, inner, tempB = self.aANDb(n, self._points, pointNew, L,
                                        data, kern, temp1, temp2)
    a, b, keep = AffineBreakPointsPrep(a, b)
    keep1, c = AffineBreakPoints(a, b)
    keep1 = keep1.astype(np.int64)
    M = len(keep1)
    keep2 = keep[keep1]
    if grad:
        B2temp = np.zeros((M, tempN))
        inv1temp = np.zeros((M, tempN))
        # Gather the rows of temp2 that correspond to the undominated lines.
        for j in xrange(M):
            inv1temp[j, :] = temp2[keep2[j], :]
    # onlyGrad is only meaningful when grad is True, since inv1temp is
    # built in the branch above.
    if onlyGrad:
        return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, L,
                            data.Xhist, kern, tempB, temp5, inner,
                            inv1temp, grad, onlyGrad)
    if not grad:
        return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, L,
                            data.Xhist, kern, tempB, grad=False)
    return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, L,
                        data.Xhist, kern, tempB, temp5, inner, inv1temp,
                        grad)
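# AffineBreakPoints scans the sorted lines and keeps those that appear on the
# upper envelope max_i (a_i + b_i * z), together with the z-values at which
# the envelope switches lines. This sketch follows Algorithm 1 of Frazier,
# Powell, and Dayanik (2009); it assumes b is strictly increasing (guaranteed
# by the Prep step above) and returns c with the -inf sentinel layout the
# hvoi sketch expects.
import numpy as np

def AffineBreakPoints(a, b):
    M = len(a)
    c = np.zeros(M + 1)
    c[0] = -np.inf
    c[1] = np.inf
    A = [0]                      # lines currently on the envelope
    for i in range(1, M):
        c[i + 1] = np.inf
        while True:
            j = A[-1]
            # z at which line i overtakes line j.
            c[j + 1] = (a[j] - a[i]) / (b[i] - b[j])
            if len(A) > 1 and c[j + 1] <= c[A[-2] + 1]:
                A.pop()          # line j is dominated everywhere; discard it
            else:
                break
        A.append(i)
    return np.array(A), c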
def VOIfunc(self, n, pointNew, grad, L, temp2, a, scratch, kern, XW, B,
            onlyGradient=False):
    """
    Output:
        Evaluates the VOI and, optionally, its derivative: when grad and
        onlyGradient are both False it returns only the VOI; when grad is
        True and onlyGradient is False it returns the VOI and its
        derivative; when grad and onlyGradient are both True it returns
        only the gradient.

    Args:
        -n: Iteration of the algorithm.
        -pointNew: The VOI will be evaluated at this point.
        -grad: True if we want to compute the gradient; False otherwise.
        -L: Cholesky decomposition of the matrix A, where A is the
            covariance matrix of the past observations (x,w).
        -temp2: temp2=inv(L)*B.T, where B is a matrix such that B(i,j) is
                \int\Sigma_{0}(x_{i},w,x_{j},w_{j})dp(w),
                where x_{i} is a point of the discretization of the space
                of x, and (x_{j},w_{j}) is a past observation.
        -a: Vector of the means of the GP on g(x)=E[f(x,w,z)]. The means
            are evaluated on the discretization of the space of x.
        -scratch: Matrix where scratch[i,:] is the solution of the linear
                  system Ly=B[i,:].transpose() (see above for the
                  definitions of B and L).
        -kern: Kernel.
        -XW: Past observations.
        -B: Computes B(x,XW)=\int\Sigma_{0}(x,w,XW[0:n1],XW[n1:n1+n2])dp(w).
            Its arguments are:
                -x: Vector of points where B is evaluated.
                -XW: Point (x,w).
                -n1: Dimension of x.
                -n2: Dimension of w.
        -onlyGradient: True if we only want to compute the gradient;
                       False otherwise.
    """
    n1 = self.n1
    b, gamma, BN, temp1, aux4 = self.aANDb(
        n, self._points, pointNew[0, 0:n1], pointNew[0, n1:n1 + self.n2],
        L, temp2=temp2, past=XW, kernel=kern, B=B)
    a, b, keep = AffineBreakPointsPrep(a, b)
    keep1, c = AffineBreakPoints(a, b)
    keep1 = keep1.astype(np.int64)
    M = len(keep1)
    nTraining = self._numberTraining
    tempN = nTraining + n
    keep2 = keep[keep1]
    if grad:
        scratch1 = np.zeros((M, tempN))
        # Keep only the rows of scratch associated with the undominated lines.
        for j in xrange(M):
            scratch1[j, :] = scratch[keep2[j], :]
    if onlyGradient:
        return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, gamma,
                            BN, L, scratch=scratch1, inv=temp1, aux4=aux4,
                            grad=True, onlyGradient=onlyGradient,
                            kern=kern, XW=XW)
    if not grad:
        return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, gamma,
                            BN, L, aux4=aux4, inv=temp1, kern=kern, XW=XW)
    return self.evalVOI(n, pointNew, a, b, c, keep, keep1, M, gamma, BN,
                        L, aux4=aux4, inv=temp1, scratch=scratch1,
                        grad=True, kern=kern, XW=XW)
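# A toy end-to-end run of the envelope pipeline shared by all four variants,
# using the helper sketches above. The intercepts a (posterior means) and
# slopes b (change-in-mean coefficients) are purely illustrative.
import numpy as np

a = np.array([0.3, 1.0, 0.5, 1.0, 0.2])
b = np.array([1.5, 0.2, 0.9, 0.2, 1.1])

a2, b2, keep = AffineBreakPointsPrep(a, b)   # sort by slope, merge duplicates
keep1, c = AffineBreakPoints(a2, b2)         # envelope lines and breakpoints
keep1 = keep1.astype(np.int64)
keep2 = keep[keep1]                          # back to original discretization indices
print(keep2, hvoi(b2, c, keep1))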