def pIteration(world, userMap, maxX, maxY, discount=0.99, MAX_ITERATIONS=100):
    gen = BasicGridWorld(userMap, maxX, maxY)
    domain = gen.generateDomain()
    initialState = gen.getExampleState(domain)
    rf = BasicRewardFunction(maxX, maxY, userMap)
    tf = BasicTerminalFunction(maxX, maxY)
    env = SimulatedEnvironment(domain, rf, tf, initialState)
    visualizeInitialGridWorld(domain, gen, env)
    hashingFactory = SimpleHashableStateFactory()

    timing = defaultdict(list)
    rewards = defaultdict(list)
    steps = defaultdict(list)
    convergence = defaultdict(list)
    policy_converged = defaultdict(list)
    last_policy = []  # policy from the previous iteration, for the convergence check
    allStates = getAllStates(domain, rf, tf, initialState)

    print("*** {} Policy Iteration Analysis".format(world))
    iterations = range(1, MAX_ITERATIONS + 1)

    # args after hashingFactory: maxDelta=-1 (no delta-based cutoff),
    # 1 evaluation sweep and 1 policy-improvement step per planFromState call
    pi = PolicyIteration(domain, rf, tf, discount, hashingFactory, -1, 1, 1)
    pi.setDebugCode(0)

    for nIter in iterations:
        startTime = clock()
        # alternative: rebuild the planner each iteration with nIter policy-iteration steps
        # pi = PolicyIteration(domain, rf, tf, discount, hashingFactory, -1, 1, nIter)
        # pi.setDebugCode(0)

        # run planning from our initial state
        p = pi.planFromState(initialState)
        endTime = clock()
        timing['Policy'].append((endTime - startTime) * 1000)
        convergence['Policy'].append(pi.lastPIDelta)

        # evaluate the policy with one rollout and visualize the trajectory
        runEvals(initialState, p, rewards['Policy'], steps['Policy'], rf, tf, evalTrials=1)
        if nIter == 1 or nIter == 50:
            simpleValueFunctionVis(pi, p, initialState, domain, hashingFactory,
                                   "Policy Iteration {}".format(nIter))

        # record whether the action distribution is unchanged since the previous iteration
        policy = pi.getComputedPolicy()
        allStates = pi.getAllStates()
        current_policy = [[(action.ga, action.pSelection)
                           for action in policy.getActionDistributionForState(state)]
                          for state in allStates]
        policy_converged['Policy'].append(current_policy == last_policy)
        last_policy = current_policy

    simpleValueFunctionVis(pi, p, initialState, domain, hashingFactory,
                           "Policy Iteration {}".format(nIter))
    dumpPolicyMap(MapPrinter.printPolicyMap(allStates, p, gen.getMap()),
                  world + ' Policy Iteration Policy Map.pkl')
    dumpCSVp(iterations, timing['Policy'], rewards['Policy'], steps['Policy'],
             convergence['Policy'], world, 'Policy', policy_converged['Policy'])
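# Example invocation (a minimal sketch, not part of the original experiment):
# the 5x5 layout and the 'Easy' world name are illustrative assumptions, with
# 0 taken to mean an open cell and 1 a wall; whether maxX/maxY should be the
# grid width or width - 1 depends on the BasicGridWorld helper defined
# elsewhere in this script, so adjust the call to match.
easyMap = [[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 0, 0, 0, 0]]
pIteration('Easy', easyMap, len(easyMap[0]), len(easyMap), discount=0.99, MAX_ITERATIONS=50)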
simpleValueFunctionVis(vi, p, initialState, domain, hashingFactory,
                       "Value Iteration {}".format(nIter))
dumpCSV(nIter, timing['Value'][1:], rewards['Value'], steps['Value'],
        convergence['Value'], world, 'Value')

# args after hashingFactory: maxDelta=1e-3, up to 10 evaluation sweeps,
# 1 policy-improvement step per planFromState call
pi = PolicyIteration(domain, rf, tf, discount, hashingFactory, 1e-3, 10, 1)
pi.toggleUseCachedTransitionDynamics(False)

print("//{} Policy Iteration Analysis//".format(world))
flag = True
timing['Policy'].append(0)
for nIter in iterations:
    startTime = clock()
    p = pi.planFromState(initialState)
    # timing['Policy'].append((clock() - startTime) * 1000)
    # cumulative wall-clock time rather than per-iteration time
    timing['Policy'].append(timing['Policy'][-1] + clock() - startTime)

    policy = pi.getComputedPolicy()
    current_policy = {state: policy.getAction(state).toString() for state in allStates}
    convergence['Policy2'].append(pi.lastPIDelta)
    if nIter == 1:
        convergence['Policy'].append(999)
    else:
        convergence['Policy'].append(comparePolicies(last_policy, current_policy))
    last_policy = current_policy

    runEvals(initialState, p, rewards['Policy'], steps['Policy'])
    # if nIter == 1:
    #     simpleValueFunctionVis(pi, p, initialState, domain, hashingFactory,
    #                            "Policy Iteration {}".format(nIter))
    if convergence['Policy2'][-1] < 1e-6 and flag:
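# comparePolicies is called in the loop above but is not defined in this
# excerpt. A minimal sketch of one plausible implementation, assuming both
# arguments are {state: action-name} dicts as built above: it counts how many
# states changed their chosen action between successive policies, so a return
# value of 0 indicates the policy has stopped changing.
def comparePolicies(old_policy, new_policy):
    changes = 0
    for state, action in new_policy.items():
        # a state missing from the older policy also counts as a change
        if old_policy.get(state) != action:
            changes += 1
    return changes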