Code example #1
0
	def setPrevIteration(self, wArray):
		"""Store the previous iteration's value array and precompute E[V] on the state grid.

		wArray: value-function samples on self.m_stateVarGrid from the previous iteration.
		Side effects: sets m_PrevIterArray, m_PrevIterFn, m_EVArray, m_EVFn.
		"""
		self.m_PrevIterArray = wArray
		self.m_PrevIterFn = linterp.LinInterp1D(self.m_stateVarGrid, self.m_PrevIterArray)
		# Let S be the post-decision state (or the "end-of-period" state), i.e. M-d but before z is realized.
		def calcEV(S):
			# Next-period value: zero when next-period wealth would be negative,
			# otherwise the linear interpolant of the previous iteration.
			def nextV(nextM):
				return 0.0 if nextM < 0.0 else self.m_PrevIterFn(nextM)
			vec_nextV = scipy.vectorize(nextV)
			# Grid-based expectation over the shock z; beyond the right edge the
			# value is held flat at the last grid point's value.
			return myfuncs.calculateEV_grid(self.m_stateVarGrid, vec_nextV, self.m_zRV,
				zOffset=S, leftK=0.0, rightK=self.m_PrevIterFn(self.m_stateVarGrid[-1]))

		# list() is required: under Python 3 map() returns an iterator, and
		# scipy.array(map(...)) would produce a useless 0-d object array.
		self.m_EVArray = scipy.array(list(map(calcEV, self.m_stateVarGrid)))
		self.m_EVFn = linterp.LinInterp1D(self.m_stateVarGrid, self.m_EVArray)
Code example #2
0
	def setPrevIteration(self, wArray):
		"""Store the previous iteration's value array and precompute E[V] on the state grid.

		wArray: value-function samples on self.m_stateVarGrid from the previous iteration.
		Side effects: sets m_PrevIterArray, m_PrevIterFn, m_EVArray, m_EVFn.
		"""
		self.m_PrevIterArray = wArray
		self.m_PrevIterFn = linterp.LinInterp1D(self.m_stateVarGrid, self.m_PrevIterArray)
		# Let S be the post-decision state (or the "end-of-period" state), i.e. M-d but before z is realized.
		def calcEV(S):
			# Discrete expectation over the shock states; next-period value is
			# zero when wealth would go negative.
			EV = 0.0
			for (zState, zProb) in zip(self.m_zStates, self.m_zProbs):
				nextM = S + zState
				EV += zProb * (0.0 if nextM < 0.0 else self.m_PrevIterFn(nextM))
			return EV
		# list() is required: under Python 3 map() returns an iterator, and
		# scipy.array(map(...)) would produce a useless 0-d object array.
		self.m_EVArray = scipy.array(list(map(calcEV, self.m_stateVarGrid)))
		self.m_EVFn = linterp.LinInterp1D(self.m_stateVarGrid, self.m_EVArray)
Code example #3
0
def test_montecarlo():
	"""Compare the two Monte Carlo EV estimators on the identity function under a standard normal shock."""
	shockRV = scipy.stats.norm()
	grid = scipy.linspace(-5, 5, 100)
	identityFn = linterp.LinInterp1D(grid, grid)
	evFromFn = calculateEV_montecarlo(identityFn, shockRV, nDraws=100000)
	evFromGrid = calculateEV_montecarlo2(grid, grid, shockRV, nDraws=100000)
	print(evFromFn)
	print(evFromGrid)
Code example #4
0
def calculateEV_montecarlo2(grid, fArray, zRV, nDraws=10000):
	"""Monte Carlo estimate of E[f(z)], where f is the linear interpolant of (grid, fArray).

	The sorted draws for each (zRV, nDraws) pair are sampled once and cached in the
	module-level g_montecarloDraws dict, so repeated calls reuse the same sample.
	NOTE(review): the result is applySorted(draws)/nDraws — this is a mean only if
	linterp.LinInterp1D.applySorted returns the sum over the draws; confirm.
	"""
	global g_montecarloDraws
	cacheKey = (zRV, nDraws)
	if cacheKey not in g_montecarloDraws:
		g_montecarloDraws[cacheKey] = scipy.sort(zRV.rvs(size=nDraws))
	sortedDraws = g_montecarloDraws[cacheKey]
	interpFn = linterp.LinInterp1D(grid, fArray)
	return interpFn.applySorted(sortedDraws) / nDraws
Code example #5
0
def test_optdiv1(beta=0.9, pHigh=0.75, grid=scipy.arange(21.0), useValueIter=True):
	"""Solve optimal-dividend problem 1, report timing, and plot V and the optimal policy.

	beta: discount factor; pHigh: probability of the +1 income shock;
	grid: state grid for M; useValueIter: value iteration if True, else policy iteration.
	Returns the raw result tuple from the bellman iteration routine.
	"""
	time1 = time.time()
	localvars = {}  # closed over by the callbacks to record the final iteration count

	def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter

	def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter

	initialVArray = grid				# initial guess for V: a linear fn
	initialPolicyArray = grid			# initial guess for d: pay out everything
	utilityFn = lambda x: x				# linear utility
	zStates = [-1.0, 1.0]
	zProbs = [1.0 - pHigh, pHigh]		# income shock
	# Don't use parallel search with this, since it makes a callback to Python.
	params = OptDivParams1(utilityFn, beta, zStates, zProbs, grid)
	if useValueIter:
		result = bellman.grid_valueIteration([grid], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=False)
		(nIter, currentVArray, newVArray, optControls) = result
	else:
		result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=False)
		(nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
		newVArray = currentVArray
		optControls = currentPolicyArrayList
	time2 = time.time()
	nIters = localvars[0]
	print("total time: %f, avg time: %f" % (time2 - time1, (time2 - time1) / nIters))

	print("x_0 == 0: %d" % alwaysPayAll(beta, pHigh))
	n0 = getn0(beta, pHigh)
	optd_fn = linterp.LinInterp1D(grid, optControls[0])
	print("n0: %f, d(floor(n0)): %f" % (n0, optd_fn(scipy.floor(n0))))
	# plot V
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(grid, newVArray)
	ax.set_xlabel("M")
	ax.set_ylabel("V")
	# plot optimal d; the vertical line marks floor(n0)
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(grid, optControls[0])
	ax.axvline(scipy.floor(n0), color='gray')
	ax.set_xlabel("M")
	ax.set_ylabel("optimal d")
	plt.show()
	return result
Code example #6
0
def calculateEV_grid2(fGrid, fVals, pdfGrid, pdfVals, inverseFn):
	"""Compute E[f] by trapezoidal integration of f(inverseFn(x)) * pdf(x) over pdfGrid.

	fGrid, fVals: samples defining the linear interpolant of f.
	pdfGrid, pdfVals: abscissae and density values of the shock distribution.
	inverseFn: maps a pdfGrid point to the argument at which f is evaluated.
	Side effect: plots the integrand f*p in a new figure.
	"""
	fFn = linterp.LinInterp1D(fGrid, fVals)
	integrand = [fFn(inverseFn(x)) * p for (x, p) in zip(pdfGrid, pdfVals)]
	EV = scipy.integrate.trapz(integrand, pdfGrid)

	# debug plot of the integrand
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(pdfGrid, integrand)
	plt.title("f*p")

	return EV
Code example #7
0
def test_optdiv3(beta=0.9, grid=scipy.arange(21.0), zDraws=scipy.array([-1.0]*25 + [1.0]*75), useValueIter=True):
	"""Solve optimal-dividend problem 3, report timing, and plot V (with its numeric derivative) and the optimal policy.

	beta: discount factor; grid: state grid for M;
	zDraws: empirical sample of the income shock (25% at -1, 75% at +1 by default);
	useValueIter: value iteration if True, else policy iteration.
	Returns the raw result tuple from the bellman iteration routine.
	"""
	time1 = time.time()
	localvars = {}  # closed over by the callbacks to record the final iteration count

	def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter

	def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter

	initialVArray = grid				# initial guess for V: a linear fn
	initialPolicyArray = grid			# initial guess for d: pay out everything
	params = OptDivParams3(grid, beta, zDraws)
	if useValueIter:
		result = bellman.grid_valueIteration([grid], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True)
		(nIter, currentVArray, newVArray, optControls) = result
	else:
		result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=False)
		(nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
		newVArray = currentVArray
		optControls = currentPolicyArrayList
	time2 = time.time()
	nIters = localvars[0]
	print("total time: %f, avg time: %f" % (time2 - time1, (time2 - time1) / nIters))

	optd_fn = linterp.LinInterp1D(grid, optControls[0])
	# plot V together with its finite-difference derivative
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(grid, newVArray)
	dx = grid[1] - grid[0]
	deriv = scipy.diff(newVArray) / dx
	ax.plot(grid[:-1], deriv)
	ax.set_xlabel("M")
	ax.set_ylabel("V")
	# plot optimal d
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(grid, optControls[0])
	ax.set_xlabel("M")
	ax.set_ylabel("optimal d")
	plt.show()
	return result
Code example #8
0
File: cake.py  Project: zsh1313/bellman
	def setPrevIteration(self, wArray):
		# Store the previous iteration's value-function samples and build a linear
		# interpolant over the state grid.
		# NOTE(review): this excerpt may be truncated — confirm against the full file.
		self.m_PrevIterArray = wArray
		self.m_PrevIterFn = linterp.LinInterp1D(self.m_stateVarGrid, self.m_PrevIterArray)