if __name__ == '__main__':
   random.seed(1)
   height = 4
   width = 4
   initPos = (1,1)
   concreteInitPos = (initPos,(0,0),(1,2),(2,1),(3,3))
   iterations = 10
   # partially observable model
   pomdp = robotPomdp(height, width)
   # initial states
   init = set(map(lambda s: pomdp.inv_states[s], filter(lambda s: s[0] == initPos, pomdp.states.values())))
   #init = filter(lambda s: s[0] == initPos, pomdp.states.values())
   # initial belief
   prior = Distribution(set(pomdp.states.keys()),\
      lambda el,dom: Distribution.restrictedUniformPdf(el,dom,init))
   #prior = Distribution(set(pomdp.states.keys()), Distribution.uniformPdf)
   # prior belief updated to the first observation
   #prior = beliefUpdateObservation(pomdp,prior,pomdp.inv_observations[(False,False,False)])
   
   # generate explicit MDP
   print 'emdp construction'
   emdp = ExplicitMarkovDecisionProcess(pomdp,prior)
   print 'end (emdp empty)'

   # observation classes
   collision = filter(lambda o: o[0], pomdp.observations.values())
   notCollision = filter(lambda o: not o[0], pomdp.observations.values())
   sensing = filter(lambda o: reduce(lambda a,b: a or b, o), pomdp.observations.values())
   anyObs = filter(lambda o: True, pomdp.observations.values())
   sensAndNotCollide = [o for o in pomdp.observations.values() if (o in sensing and o in notCollision)]
		# NOTE(review): fragment of a method body (presumably
		# ExplicitMarkovDecisionProcess.__init__). The opening lines -- including
		# where the state set S and transition map T are created -- are not
		# visible in this chunk; confirm against the full file before editing.
		for (s1,o1),a in it.product(S,pomdp.actions.values()):
			for (s2,o2) in S:
				# Keep only reachable successor pairs: s2 must be a possible
				# successor of (s1, a), and o2 a possible observation in s2.
				if pomdp.inv_states[s2] in pomdp.transitionFunction[(pomdp.inv_states[s1],pomdp.inv_actions[a])] and pomdp.inv_observations[o2] in pomdp.observationFunction[pomdp.inv_states[s2]]:
					if ((s1,o1),a) not in T:
						T[((s1,o1),a)] = dict()
					# Joint probability: transition probability times the
					# probability of observing o2 in the successor state s2.
					T[((s1,o1),a)][(s2,o2)] = pomdp.transitionFunction[(pomdp.inv_states[s1],pomdp.inv_actions[a])][pomdp.inv_states[s2]] * pomdp.observationFunction[pomdp.inv_states[s2]][pomdp.inv_observations[o2]]
		print 'S:',S,'\nA:',pomdp.actions.values(),'\nT:',T

		# Delegate to the generic MDP constructor with the assembled
		# state/action/transition structure.
		MarkovDecisionProcess.__init__(self,S,pomdp.actions.values(),T)

# >>> main test

if __name__ == '__main__':
	pomdp = cs.tigerPomdp()
	print pomdp
	prior = Distribution(set(pomdp.states.keys()),lambda el,dom: Distribution.restrictedUniformPdf(el,dom,{pomdp.inv_states['tl'],pomdp.inv_states['tr']}))
	# generate explicit MDP
	emdp = ExplicitMarkovDecisionProcess(pomdp,prior)
	print emdp
	# export to DOT
	e2d.export2dot(emdp,'emdp','emdp.dot', [], [], [],[])

# >>> authorship information

# Standard module metadata dunders, read by documentation and packaging tools.
__author__ = "Marco Tinacci"
__copyright__ = "Copyright 2016"
__credits__ = ["Marco Tinacci"]
__license__ = "ASF"
__version__ = "2.0"
__maintainer__ = "Marco Tinacci"
__email__ = "*****@*****.**"