Example 1
    def test_single_state_func(self):
        """
        Smoke test: check that cost, gradient and Hessian run at all
        for a single observed state.
        """
        y = self.solution[1::, 2]
        # check that the single-state loss object works
        sir_obj = SquareLoss(self.theta, self.ode, self.x0, self.t[0],
                             self.t[1::], y, 'R')
        sir_obj.cost()
        sir_obj.gradient()
        sir_obj.hessian()
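The fixtures used here (self.theta, self.ode, self.x0, self.t, self.solution) are built in a setUp that is not part of this snippet. A possible version, sketched from the SIR estimation example further down (the class name, import path and fixture layout are assumptions, not the suite's actual code):

import unittest

import numpy
import scipy.integrate
from pygom import SquareLoss, common_models

class TestSquareLossSIR(unittest.TestCase):
    def setUp(self):
        # SIR model with the "true" parameters used to generate the observations
        self.ode = common_models.SIR({'beta': 0.5, 'gamma': 1.0/3.0})
        # initial state, normalized to lie in [0, 1]
        self.x0 = [1, 1.27e-6, 0]
        # observation times
        self.t = numpy.linspace(0, 150, 100)
        # "observed" trajectory generated from the true parameters
        self.solution = scipy.integrate.odeint(self.ode.ode, self.x0, self.t)
        # starting guess for (beta, gamma)
        self.theta = [0.2, 0.2]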
Example 2
    def test_FH_Square_vector_weight(self):
        # the weight is now an array with one entry per time point and observed state
        w = np.random.rand(29, 2)
        obj = SquareLoss(self.theta, self.ode, self.x0, self.t[0], self.t[1::],
                         self.solution[1::, :], ['V', 'R'], w)

        s = ((self.r * np.array(w))**2).sum()

        self.assertTrue(np.allclose(obj.cost(), s))
Example 3
    def test_FH_Square(self):
        # initial values
        x0 = [-1.0, 1.0]
        # params
        paramEval = [('a', 0.2), ('b', 0.2), ('c', 3.0)]
        # the time points for our observations
        t = numpy.linspace(0, 20, 30).astype('float64')
        ode = common_models.FitzHugh().setParameters(
            paramEval).setInitialValue(x0, t[0])
        # Standard. Find the solution, which will be used as the "observations" later
        solution, output = ode.integrate(t[1::], full_output=True)
        # initial guess
        theta = [0.5, 0.5, 0.5]

        #objFH = squareLoss(theta,ode,x0,t0,t,solution[1::,1],'R')
        objFH = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, :],
                           ['V', 'R'])

        r = objFH.residual()

        # weight for each component
        w = [2.0, 3.0]

        s1 = 0
        for i in range(2):
            s1 += ((r[:, i] * w[i])**2).sum()

        objFH1 = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, :],
                            ['V', 'R'], w)

        # the weight is now an array with one entry per time point and observed state
        w = numpy.random.rand(29, 2)
        objFH2 = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, :],
                            ['V', 'R'], w)

        s2 = ((r * numpy.array(w))**2).sum()

        if abs(objFH1.cost() - s1) >= 1e-2:
            raise Exception("Failed!")

        if abs(objFH2.cost() - s2) >= 1e-2:
            raise Exception("Failed!")
Example 4
    def test_FH_Square_scalar_weight(self):
        # weight for each component
        w = [2.0, 3.0]

        s = 0
        for i in range(2):
            s += ((self.r[:, i] * w[i])**2).sum()

        obj = SquareLoss(self.theta, self.ode, self.x0, self.t[0], self.t[1::],
                         self.solution[1::, :], ['V', 'R'], w)

        self.assertTrue(np.allclose(obj.cost(), s))
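The scalar-weight test computes its reference cost with a per-state loop, while the vector-weight test uses a broadcast product over the whole residual matrix; for scalar per-state weights the two forms give the same number. A small self-contained check of that equivalence (synthetic residuals standing in for the residual matrix, not pygom output):

import numpy as np

r = np.random.rand(29, 2)   # stand-in for the (time x state) residual matrix
w = [2.0, 3.0]              # one scalar weight per observed state

loop_sum = sum(((r[:, i] * w[i])**2).sum() for i in range(2))
broadcast_sum = ((r * np.asarray(w))**2).sum()

assert np.allclose(loop_sum, broadcast_sum)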
Example 5
    def test_SIR_Estimate_SquareLoss(self):
        # define the model and parameters
        ode = common_models.SIR({'beta': 0.5, 'gamma': 1.0 / 3.0})

        # the initial state, normalized to zero one
        x0 = [1, 1.27e-6, 0]
        # set the time sequence that we would like to observe
        t = numpy.linspace(0, 150, 100)
        # Standard.  Find the solution.
        solution = scipy.integrate.odeint(ode.ode, x0, t)

        # y = copy.copy(solution[:,1:3])
        # initial value
        theta = [0.2, 0.2]

        # check that the single-state loss object works
        objSIR = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, 2],
                            'R')
        objSIR.cost()
        objSIR.gradient()
        objSIR.hessian()

        # now fit both observed states
        objSIR = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, 1:3],
                            ['I', 'R'])

        # constraints
        EPSILON = numpy.sqrt(numpy.finfo(float).eps)

        boxBounds = [(EPSILON, 5), (EPSILON, 5)]

        resQP = scipy.optimize.minimize(fun=objSIR.cost,
                                        jac=objSIR.sensitivity,
                                        x0=theta,
                                        method='SLSQP',
                                        bounds=boxBounds)

        target = numpy.array([0.5, 1.0 / 3.0])
        if numpy.any(abs(resQP['x'] - target) >= 1e-2):
            raise Exception("Failed!")
Example 6
# ip below is assumed to be an interior-point solver from pygotools; objFH,
# theta, lb and ub come from earlier context that is not shown in this snippet.
xhat, output = ip(objFH.cost,
                  objFH.gradient,
                  x0=theta,
                  lb=lb, ub=ub,
                  G=None, h=None,
                  A=None, b=None,
                  maxiter=200,
                  method='bar',
                  disp=3, full_output=True)


x = numpy.array(theta)
oldFx = objFH.cost(x)
g = objFH.gradient(x)
# damped Gauss-Newton step: solve (J^T J) * deltaX = -g, then halve it
deltaX = 0.5 * numpy.linalg.solve(objFH.jtj(x), -g)
objFH.cost(x + deltaX)

step, fc, gc, fx, old_fval, new_slope = scipy.optimize.line_search(objFH.cost,
                                                                   objFH.gradient,
                                                                   numpy.array(theta),
                                                                   deltaX,
                                                                   g,
                                                                   oldFx
                                                                   )

# candidate point accepted by the line search
xNew = numpy.array(x) + step * deltaX
from pygotools.optutils import lineSearch
lineFunc = lineSearch(1, x, deltaX, objFH.cost)
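The ip and lineSearch calls above rely on pygotools. The same kind of SquareLoss objective can also be handed directly to scipy, as the SIR estimation example does with SLSQP. A minimal, self-contained sketch using L-BFGS-B with illustrative box bounds (not the lb/ub used above), mirroring the FitzHugh setup from the earlier test:

import numpy
import scipy.optimize
from pygom import SquareLoss, common_models

# rebuild the FitzHugh objective exactly as in the test above
x0 = [-1.0, 1.0]
t = numpy.linspace(0, 20, 30).astype('float64')
ode = common_models.FitzHugh().setParameters(
    [('a', 0.2), ('b', 0.2), ('c', 3.0)]).setInitialValue(x0, t[0])
solution, output = ode.integrate(t[1::], full_output=True)
theta = [0.5, 0.5, 0.5]
objFH = SquareLoss(theta, ode, x0, t[0], t[1::], solution[1::, :], ['V', 'R'])

# gradient-based, box-constrained minimization without pygotools
res = scipy.optimize.minimize(fun=objFH.cost,
                              jac=objFH.gradient,
                              x0=numpy.array(theta),
                              method='L-BFGS-B',
                              bounds=[(1e-4, 5.0)] * 3)
print(res.x, res.fun)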