def test_SIR(self):
    '''
    Test the SIR model from the set of pre-defined models
    in common_models
    '''
    # we wish to test another (simpler) model
    ode = common_models.SIR()

    # define the parameters
    paramEval = [('beta', 0.5), ('gamma', 1.0 / 3.0)]
    ode.setParameters(paramEval)

    # the initial state, normalized to zero one
    initialState = [1, 1.27e-6, 0]

    # evaluating the ode
    ode.ode(initialState, 1)
    ode.Jacobian(initialState, 1)
    ode.Grad(initialState, 1)
    # signature: ode.sensitivity(sensitivity, t, state)
    ode.sensitivity(numpy.zeros(6), 1, initialState)

    ode.isOdeLinear()

    # set the time sequence that we would like to observe
    t = numpy.linspace(1, 150, 100)
    # now find the solution
    solution, output = scipy.integrate.odeint(ode.ode,
                                              initialState, t,
                                              full_output=True)

    if output['message'] != 'Integration successful.':
        raise Exception("Failed integration")
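# Illustrative sketch, not part of the original suite: for the closed SIR model
# above the total population S + I + R should stay (numerically) constant along
# the solution returned by odeint, which makes a cheap extra sanity check.  The
# helper name and its use of the `solution` array are assumptions for illustration.
def _population_is_conserved(solution, rtol=1e-6):
    import numpy
    total = solution.sum(axis=1)          # S + I + R at every time point
    return numpy.allclose(total, total[0], rtol=rtol)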
def test_SIR_Estimate_NormalLoss(self):
    # define the model and parameters
    ode = common_models.SIR({'beta': 0.5, 'gamma': 1.0 / 3.0})

    # the initial state, normalized to zero one
    x0 = [1, 1.27e-6, 0]
    # set the time sequence that we would like to observe
    t = numpy.linspace(0, 150, 100)
    # Standard.  Find the solution.
    solution = scipy.integrate.odeint(ode.ode, x0, t)
    y = copy.copy(solution[:, 1:3])

    # initial guess for the parameters
    theta = [0.2, 0.2]
    objSIR = NormalLoss(theta, ode, x0, t[0], t[1::], y[1::, :], ['I', 'R'])

    # box constraints on the parameters
    EPSILON = numpy.sqrt(numpy.finfo(float).eps)
    boxBounds = [(EPSILON, 5), (EPSILON, 5)]

    resQP = scipy.optimize.minimize(fun=objSIR.cost,
                                    jac=objSIR.sensitivity,
                                    x0=theta,
                                    method='SLSQP',
                                    bounds=boxBounds)

    target = numpy.array([0.5, 1.0 / 3.0])
    if numpy.any(abs(resQP['x'] - target) >= 1e-2):
        raise Exception("Failed!")
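# Illustrative sketch, an assumption rather than pygom's implementation: with unit
# observation variance a normal log-likelihood loss is, up to an additive constant,
# half the sum of squared residuals between the observations and the model
# solution, which is the quantity objSIR.cost above is expected to behave like.
def _squared_error_cost(theta, ode, x0, t, y):
    import scipy.integrate
    # note: this sets the parameters on the ode object in place
    ode.setParameters(theta)
    solution = scipy.integrate.odeint(ode.ode, x0, t)
    residual = solution[:, 1:3] - y       # compare only the observed I and R states
    return 0.5 * (residual ** 2).sum()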
def test_HessianJacobian(self):
    '''
    Analytic Jacobian of the forward-forward sensitivity equations,
    i.e. the Hessian of the objective function, against the forward
    differencing numeric Jacobian
    '''
    # initial time
    t0 = 0
    # the initial state, normalized to zero one
    x0 = [1, 1.27e-6, 0]
    # params
    paramEval = [('beta', 0.5), ('gamma', 1.0 / 3.0)]

    ode = common_models.SIR(paramEval).setInitialValue(x0, t0)

    d = ode.getNumState()
    p = ode.getNumParam()

    ff0 = numpy.zeros(d * p * p)
    s0 = numpy.zeros(d * p)
    x0 = numpy.array(x0)
    ffParam = numpy.append(numpy.append(x0, s0), ff0)

    # some small value used as the finite difference step
    h = numpy.sqrt(numpy.finfo(float).eps)

    # time frame
    t = numpy.linspace(0, 150, 100)
    # our integration
    solutionHessian, outputHessian = scipy.integrate.odeint(
        ode.odeAndForwardforward, ffParam, t, full_output=True)

    numFF = len(ffParam)
    J = numpy.zeros((numFF, numFF))
    # define our target
    index = 50  # random.randint(0, 150)
    # get the info
    ff0 = solutionHessian[index, :]
    # evaluate at the target point
    J0 = ode.odeAndForwardforward(ff0, t[index])
    # the analytic solution
    JAnalytic = ode.odeAndForwardforwardJacobian(ff0, t[index])
    # now we find the finite difference Jacobian
    for i in range(0, numFF):
        for j in range(0, numFF):
            ffTemp = copy.deepcopy(ff0)
            ffTemp[j] += h
            J[i, j] = (ode.odeAndForwardforward(ffTemp, t[index])[i] - J0[i]) / h

    print(J - JAnalytic)
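# Illustrative helper, not part of the original test: a central-difference
# approximation of the same Jacobian, which typically has a smaller truncation
# error than the one-sided scheme used in the loop above.  The name and signature
# are assumptions for illustration; f plays the role of ode.odeAndForwardforward.
def _central_difference_jacobian(f, x, t, h):
    import numpy
    x = numpy.asarray(x, dtype=float)
    n = len(x)
    J = numpy.zeros((n, n))
    for j in range(n):
        xPlus = x.copy()
        xMinus = x.copy()
        xPlus[j] += h
        xMinus[j] -= h
        # column j holds the derivative of f with respect to x[j]
        J[:, j] = (numpy.asarray(f(xPlus, t)) -
                   numpy.asarray(f(xMinus, t))) / (2.0 * h)
    return J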
def test_simulateParam2(self):
    '''
    Stochastic ode under the interpretation that the parameters
    follow some sort of distribution.  In this case, a function
    handle which has the same name as the one in R
    '''
    t0 = 0
    # the initial state, normalized to zero one
    x0 = [1, 1.27e-6, 0]
    # set the time sequence that we would like to observe
    t = numpy.linspace(0, 150, 100)
    # Standard.  Find the solution.
    ode = common_models.SIR()
    ode.setParameters([0.5, 1.0 / 3.0])
    ode.setInitialValue(x0, t0)
    solutionReference = ode.integrate(t[1::], full_output=False)

    # now we need to define our ode explicitly
    stateList = ['S', 'I', 'R']
    paramList = ['beta', 'gamma']
    transitionList = [
        Transition(origState='S', destState='I', equation='beta*S*I',
                   transitionType=TransitionType.T),
        Transition(origState='I', destState='R', equation='gamma*I',
                   transitionType=TransitionType.T)
        ]
    # our stochastic version
    odeS = SimulateOdeModel(stateList, paramList,
                            transitionList=transitionList)

    # define our parameters in terms of two gamma distributions
    # where the expected values are the same as before [0.5, 1.0/3.0]
    d = dict()
    d['beta'] = (rgamma, {'shape': 100.0, 'rate': 200.0})
    d['gamma'] = (rgamma, (100.0, 300.0))

    odeS.setParameters(d).setInitialValue(x0, t0)

    # now we generate the solutions
    solutionDiff = odeS.simulateParam(t[1::], 1000) - solutionReference

    # test :)
    if numpy.any(abs(solutionDiff) >= 0.2):
        raise Exception("Possible problem with simulating the parameters")
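# Illustrative check, based on the standard shape/rate parameterization that the
# rgamma handle above is assumed to use: a gamma distribution with shape k and
# rate r has mean k / r, so the two distributions are centred on the reference
# values beta = 100/200 = 0.5 and gamma = 100/300 = 1/3.
def _gamma_mean(shape, rate):
    return shape / rate

assert abs(_gamma_mean(100.0, 200.0) - 0.5) < 1e-12
assert abs(_gamma_mean(100.0, 300.0) - 1.0 / 3.0) < 1e-12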
def test_SensJacobian(self):
    '''
    Analytic Jacobian of the forward sensitivity equations against
    the forward differencing numeric Jacobian
    '''
    # initial time
    t0 = 0
    # the initial state, normalized to zero one
    x0 = [1, 1.27e-6, 0]
    # params
    paramEval = [('beta', 0.5), ('gamma', 1.0 / 3.0)]

    ode = common_models.SIR(paramEval).setInitialValue(x0, t0)

    d = ode.getNumState()
    p = ode.getNumParam()

    s0 = numpy.zeros(d * p)
    x0 = numpy.array(x0)
    ffParam = numpy.append(x0, s0)

    t = numpy.linspace(0, 150, 100)
    # integrate without using the analytic Jacobian
    solutionSens, outputSens = scipy.integrate.odeint(
        ode.odeAndSensitivity, ffParam, t, full_output=True)

    # the finite difference step for the Jacobian of the ode itself
    h = numpy.sqrt(numpy.finfo(float).eps)

    index = 50  # random.randint(0, 150)
    ff0 = solutionSens[index, :]
    J0 = ode.odeAndSensitivity(ff0, t[index])

    J = numpy.zeros((d * (p + 1), d * (p + 1)))
    for i in range(0, d * (p + 1)):
        for j in range(0, d * (p + 1)):
            ffTemp = copy.deepcopy(ff0)
            ffTemp[j] += h
            J[i, j] = (ode.odeAndSensitivity(ffTemp, t[index])[i] - J0[i]) / h

    JAnalytic = ode.odeAndSensitivityJacobian(ff0, t[index])
    if numpy.any(abs(J - JAnalytic) >= 1e-4):
        raise Exception("Test Failed")
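# Note, an observation rather than part of the original test: with the one-sided
# step h = sqrt(eps) ~ 1.5e-8, the forward difference approximation carries a
# combined truncation and round-off error of roughly order sqrt(eps) per entry,
# so the 1e-4 tolerance above leaves a comfortable margin for the comparison
# against the analytic Jacobian.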