Example #1
0
    def addAgent(self, agent, skills=None):
        """Register a deep copy of *agent* with the world.

        Args:
            agent: the agent to add; its index is set to match the stored
                copy so the caller keeps a usable handle.
            skills: optional dict with 'water'/'grass'/'forest'/'mountain'
                skill levels. When omitted, each skill is drawn uniformly
                from [0.5, 1.5).
        """
        newAgent = copy.deepcopy(agent)
        for terrainType in ('water', 'grass', 'forest', 'mountain'):
            if skills:
                newAgent.skillLevels[terrainType] = skills[terrainType]
            else:
                newAgent.skillLevels[terrainType] = random.random() + .5

        # BUG FIX: the original compared strings with `is`, which tests
        # object identity and is interning-dependent; `==` tests the value,
        # so agents can no longer be misrouted into the random bucket.
        if newAgent.type == "adp":
            self.adpAgents.append(newAgent)
            self.adpAgentStates.append(State.state())
            agent.setIndex(self.adpAgentIndex)
            newAgent.setIndex(self.adpAgentIndex)
            self.adpAgentIndex += 1

        elif newAgent.type == "td":
            self.tdAgents.append(newAgent)
            self.tdAgentStates.append(State.state())
            agent.setIndex(self.tdAgentIndex)
            newAgent.setIndex(self.tdAgentIndex)
            self.tdAgentIndex += 1

        else:
            self.randomAgents.append(newAgent)
            self.randomAgentStates.append(State.state())
            agent.setIndex(self.randomAgentIndex)
            newAgent.setIndex(self.randomAgentIndex)
            self.randomAgentIndex += 1
Example #2
0
	def addAgent(self, agent, skills=None):
		"""Add a deep copy of *agent*, seeding its terrain skill levels.

		When *skills* is falsy each level is drawn uniformly from [0.5, 1.5).
		The original agent's index is updated to match the stored copy.
		"""
		newAgent = copy.deepcopy(agent)
		if skills:
			newAgent.skillLevels['water'   ] = skills['water'   ]
			newAgent.skillLevels['grass'   ] = skills['grass'   ]
			newAgent.skillLevels['forest'  ] = skills['forest'  ]
			newAgent.skillLevels['mountain'] = skills['mountain']
		else:
			newAgent.skillLevels['water'] = random.random() + .5
			newAgent.skillLevels['grass'] = random.random() + .5
			newAgent.skillLevels['forest'] = random.random() + .5
			newAgent.skillLevels['mountain'] = random.random() + .5

		# BUG FIX: `is "adp"` compares identity, not value; use `==` so
		# dynamically-built strings still match.
		if newAgent.type == "adp":
			self.adpAgents.append(newAgent)
			self.adpAgentStates.append(State.state())
			agent.setIndex(self.adpAgentIndex)
			newAgent.setIndex(self.adpAgentIndex)
			self.adpAgentIndex += 1

		elif newAgent.type == "td":
			self.tdAgents.append(newAgent)
			self.tdAgentStates.append(State.state())
			agent.setIndex(self.tdAgentIndex)
			newAgent.setIndex(self.tdAgentIndex)
			self.tdAgentIndex += 1

		else:
			self.randomAgents.append(newAgent)
			self.randomAgentStates.append(State.state())
			agent.setIndex(self.randomAgentIndex)
			newAgent.setIndex(self.randomAgentIndex)
			self.randomAgentIndex += 1
Example #3
0
 def __init__(self):
     """Set up the three-world race: terrains, rewards and empty agent pools."""
     # One terrain grid per world.
     self.terrains = [Terrain.terrain() for _ in range(3)]
     self.livingReward = -1
     self.discount = .95
     self.noise = 0
     self.startState = State.state()
     # Reaching (9, 0) in worlds 0 and 1 transitions to the next world;
     # (9, 0) in world 2 ends the race.
     self.transitionalStates = [State.state((9, 0), world) for world in (0, 1)]
     self.terminalState = State.state((9, 0), 2)
     # Per-strategy agent pools, per-agent state lists, and next free index.
     for kind in ('adp', 'td', 'random'):
         setattr(self, kind + 'Agents', [])
         setattr(self, kind + 'AgentIndex', 0)
         setattr(self, kind + 'AgentStates', [])
     self.transitionalReward = 1000
     self.terminalReward = 2000
Example #4
0
	def moveAgent(self, agent, state, action):
		"""Advance *agent* from *state* under *action*, with stochastic slips.

		Hazards are rolled in order: slide down, slide left, fall; if none
		fires, the deterministic successor of (state, action) is used.
		NOTE(review): each comparison draws its own random.random(), so the
		chances are independent rolls, not exclusive probabilities — confirm
		this is intended.
		"""
		newAgent = self.getWorldAgent(agent)
		x, y = state.getPosition()
		# The terrain cell's repr() is its terrain-type name.
		terrainElement = repr(self.terrains[state.getWorld()].terrainWorld[x][y])
		chanceToFall = None
		chanceToSlideDown = None
		chanceToSlideLeft = None
		if state in self.transitionalStates or state == self.terminalState:
			# Transitional/terminal states move deterministically.
			self.setAgentState(newAgent, self.generateNextStates(state, action))
		else:
			# Fall risk shrinks as skill approaches 1; mountains are riskier (/2 vs /4).
			if terrainElement == 'grass':
				chanceToFall = abs(newAgent.skillLevels['grass'] - 1) / 4
			elif terrainElement == 'water':
				chanceToFall = abs(newAgent.skillLevels['water'] - 1) / 4
			elif terrainElement == 'forest':
				chanceToFall = abs(newAgent.skillLevels['forest'] - 1) / 4
			else:
				chanceToFall = abs(newAgent.skillLevels['mountain'] - 1) / 2
			x, y = state.getPosition()
			# Slip chances taper linearly with distance from the risky edge.
			chanceToSlideDown = 0.1 - ((0.1 / 10) * (abs(y -  0)))
			chanceToSlideLeft = 0.1 - ((0.1 / 10) * (abs(x - 9)))
			if random.random() <= chanceToSlideDown:
				self.setAgentState(newAgent, State.state((x, min([9, y + 1])), state.getWorld()))
			elif random.random() <= chanceToSlideLeft:
				self.setAgentState(newAgent, State.state((max([x - 1, 0]), y), state.getWorld()))
			elif random.random() <= chanceToFall:
				self.setAgentState(newAgent, State.state((max([x - 1, 0]), min([9, y + 1])), state.getWorld()))
			else:
				self.setAgentState(newAgent, self.generateNextStates(state, action))
Example #5
0
    def getTransitionStatesAndProbs(self, state, action):
        """Return (nextState, probability) pairs for taking *action* in *state*.

        Models three slip hazards (slide left, slide down, fall) whose
        leftover probability mass goes to the intended move. Probabilities
        are asserted to sum to 1.

        Raises:
            ValueError: if *action* is not legal in *state*.
        """
        if action not in self.getPossibleActions(state):
            # BUG FIX: raising a plain string is a TypeError on modern
            # Python — raise a real exception instead.
            raise ValueError("Illegal action!")

        x, y = state.getPosition()
        t = state.terrain

        if action == 'finish':
            return [(state, 1)]

        # Store mapping from state to likelihood
        possibles = defaultdict(lambda: 0)

        # Slip chance tapers linearly with distance from the relevant edge.
        chanceToSlideLeft = 0.1 - (0.01 * (abs(x - 9)))
        if x != 9:
            possibles[State.state((x+1,y),t)]   += chanceToSlideLeft
        else:
            possibles[state]                    += chanceToSlideLeft

        chanceToSlideDown = 0.1 - (0.01 * (abs(y - 0)))
        if y != 0:
            possibles[State.state((x,y-1),t)]   += chanceToSlideDown
        else:
            possibles[state]                    += chanceToSlideDown

        terrainElement = state.getTerrainType()
        # NOTE(review): both branches are identical (/2); moveAgent elsewhere
        # uses /4 for non-mountain terrain — confirm which is intended before
        # collapsing or changing the divisor.
        if terrainElement == 'mountain':
            chanceToFall = abs(self.skills[terrainElement] - 1) / 2
        else:
            chanceToFall = abs(self.skills[terrainElement] - 1) / 2

        if x != 9 and y != 0:
            possibles[State.state((x+1,y-1),t)] += chanceToFall
        elif x != 9:
            possibles[State.state((x+1,y  ),t)] += chanceToFall
        elif y != 0:
            possibles[State.state((x  ,y-1),t)] += chanceToFall
        elif x == 9 and y == 0:
            possibles[State.state((x  ,y  ),t)] += chanceToFall
        else:
            # BUG FIX: string raise replaced with a real exception.
            raise AssertionError('didnt account for this')

        # Remaining mass goes to the move the agent actually chose.
        if action == 'north':
            newState = State.state((x  ,y-1),t)
        if action == 'east':
            newState = State.state((x+1,y  ),t)
        if action == 'west':
            newState = State.state((x-1,y  ),t)
        if action == 'south':
            newState = State.state((x  ,y+1),t)
        possibles[newState] += 1 - (chanceToFall + chanceToSlideLeft + chanceToSlideDown)

        # Probabilities must sum to 1
        assert abs(sum(possibles.values()) - 1) < .001

        return possibles.items()
Example #6
0
def one_or_more():
    """One-or-more ('+') operator.

    Rebuild the NFA on top of the stack so it must match its language at
    least once: a new start state enters the old machine, and the old accept
    state either exits to the new accept state or loops back to repeat.
    """
    inner = nfastack.pop()
    start = state()
    end = state()
    start.edge1 = inner.initial
    # After one match: exit, or loop back for another repetition.
    inner.accept.edge1 = end
    inner.accept.edge2 = inner.initial
    nfastack.append(nfa(start, end))
def make_state_tree(dictionary):
    """Build a trie of `state` nodes for the given word collection.

    A node is marked final when the empty word belongs to its
    sub-dictionary; children exist only for letters that actually start
    some word (keeps the machine sparse).
    """
    root = state()
    if not dictionary:
        return root
    root.final = '' in dictionary
    for letter in alphabet:
        # Words beginning with `letter`, with that first letter stripped.
        suffixes = {word[1:] for word in dictionary if word.startswith(letter)}
        if suffixes:  # delete this validation for a more saturated machine
            root.gotos[letter] = make_state_tree(suffixes)
    return root
Example #8
0
def run(numRuns, log, sol=solSettings, name='', progConf=None):
    """Run a (mu + lambda) evolutionary search *numRuns* times.

    Per-iteration populations are recorded into *log*; the best-so-far is
    tracked in a state object and flushed per run. Relies on module-level
    operators (kTourn, uniRecomb, mutate, evaluate, trunc) and
    bbsaSettings. Python 2 code (xrange / print statement).
    """
    for i in xrange(numRuns):
        A = []
        evals = 0
        mu = 50        # parent population size
        lamb = 20      # offspring per generation
        k = 15         # tournament size
        survive = 20   # truncation-survival parameter
        last = [solution.solution(sol) for j in xrange(mu)]

        bestLog = state.state()  # best-so-far tracker for this run
        while evals < bbsaSettings['maxEvals']:
            children = []
            for x in xrange(lamb):
                # Two parents chosen by k-tournament.
                parents = kTourn([last], {
                    'count': {
                        'value': 2
                    },
                    'k': {
                        'value': k
                    }
                })
                # Uniform recombination, then mutation at rate .05.
                new = mutate([uniRecomb([parents], {'num': {
                    'value': 1
                }})], {'rate': {
                    'value': .05
                }})
                children.extend(evaluate([new], {'state': bestLog}))
                evals += len(new)
            # Survivor selection: truncate parents+children back to mu.
            last = trunc([children + last], {
                'count': {
                    'value': mu
                },
                'k': {
                    'value': survive
                }
            })

            st = state.state()
            st.last = last
            st.curEval = evals
            log.nextIter(st)
        # Re-evaluate survivors and log the final generation.
        for ind in last:
            ind.evaluate()
        st = state.state()
        st.last = last
        st.curEval = evals
        log.nextIter(st)
        print i
        log.nextRun()
        bestLog.logBestSoFar(i, name, progConf)
        bestLog.reset()
    log.nextProbConf()
    return log
Example #9
0
def kleene():
    """Kleene star ('*') operator.

    Wrap the NFA on top of the stack so it accepts zero or more
    repetitions of its language, then push the result.
    """
    inner = nfastack.pop()
    start = state()
    end = state()
    # New start: enter the old machine, or skip straight to accept (zero case).
    start.edge1 = inner.initial
    start.edge2 = end
    # Old accept: loop back for another repetition, or exit.
    inner.accept.edge1 = inner.initial
    inner.accept.edge2 = end
    nfastack.append(nfa(start, end))
Example #10
0
def zero_or_one():
    """Zero-or-one ('?') operator.

    Pop an operand NFA from the stack. A new initial state's edge1 goes to
    the operand's initial state (the "one" path) and edge2 straight to the
    new accept state (the "zero" path); the popped operand's accept state
    is connected to the new accept state. Push the resulting NFA.
    """
    nfa1 = nfastack.pop()
    initial, accept = state(), state()
    initial.edge1, initial.edge2 = nfa1.initial, accept
    nfa1.accept.edge1 = accept
    nfastack.append(nfa(initial, accept))
Example #11
0
    def push_stack(self, char):
        """Push a two-state NFA fragment that accepts exactly *char*."""
        src = state.state(self.state_id)
        self.state_id += 1

        dst = state.state(self.state_id)
        self.state_id += 1

        src.add_transition(dst, char)
        # Fragments are stored as lists: [entry, ..., accept].
        self.nfastack.append([src, dst])
Example #12
0
def start_anchor():
    """String start anchor ('^') function.

    Initial state to nfa1 initial. Nfa1 accept edge1 to accept, nfa1 accept
    edge2 to itself. Because the nfa accept state points to itself it causes
    a RecursionError when a string is matched against the regex; the error
    is dealt with in the match module — not the cleanest or smartest way,
    but it works.
    """
    nfa1 = nfastack.pop()
    initial, accept = state(), state()
    initial.edge1 = nfa1.initial
    nfa1.accept.edge1, nfa1.accept.edge2 = accept, nfa1.accept
    nfastack.append(nfa(initial, accept))
Example #13
0
def alternate():
    """Alternation ('|') operator.

    Pop two NFAs and build a machine accepting either language: a new start
    state branches into both, and both accept states feed one new accept
    state. Push the result.
    """
    branch_a = nfastack.pop()
    branch_b = nfastack.pop()
    start = state()
    end = state()
    start.edge1 = branch_a.initial
    start.edge2 = branch_b.initial
    branch_a.accept.edge1 = end
    branch_b.accept.edge1 = end
    nfastack.append(nfa(start, end))
Example #14
0
    def star(self):
        """Apply Kleene star to the NFA fragment on top of the stack."""
        fragment = self.nfastack.pop()
        entry = state.state(self.state_id)
        self.state_id += 1

        accept = state.state(self.state_id)
        self.state_id += 1

        # From the new entry: skip the fragment (zero case) or enter it.
        entry.add_transition(fragment[0], "$")
        entry.add_transition(accept, "$")
        # From the fragment's accept: exit, or loop back for another pass.
        fragment[-1].add_transition(accept, "$")
        fragment[-1].add_transition(fragment[0], "$")

        fragment.insert(0, entry)
        fragment.append(accept)
        self.nfastack.append(fragment)
Example #15
0
	def generateNextStates(self, state, action):
		"""Return the deterministic successor of *state* under *action*.

		'finish' from a terminal/transitional state maps to the (inf, inf)
		sentinel; the four compass moves shift the position one cell.

		Raises:
			ValueError: for any unrecognised action.
		"""
		# BUG FIX: string comparisons use ==, not `is` (identity is
		# interning-dependent and can silently fail).
		if (self.isTerminalState(state) or self.isTransitionalState(state)) and action == 'finish':
			return State.state((float("inf"), float("inf")), state.getWorld())
		x, y = state.getPosition()
		world = state.getWorld()
		if action == 'east':
			return State.state((x + 1, y), world)
		if action == 'west':
			return State.state((x - 1, y), world)
		if action == 'north':
			return State.state((x, y - 1), world)
		if action == 'south':
			return State.state((x, y + 1), world)
		# BUG FIX: raising a plain string is a TypeError on modern Python.
		raise ValueError("Error, invalid action")
Example #16
0
File: core.py Project: alxlhr/FLUOR
    def __init__(self, param):
        """Build the core model from *param*: state container, speeds,
        derived variables, then a resolution sanity check."""

        self.state = state.state(param)
        self.params = param
        self.init_speed()
        self.init_var(param)
        self.check_res()
Example #17
0
    def reset(self, initState=None):
        """
        resets the environment to starting conditions, i.e. starts a new game

        Args:
            initState: optional pre-built environment state; when None a
                fresh dinner is generated and courses are assigned.
        """
        if initState is None:
            # Generate a fresh dinner and assign courses deterministically.
            rawData = self.rawGen.generateDinner()
            assigner = assignDinnerCourses(rawData[0], rawData[1])
            dinnerAssigned = assigner.assignDinnerCourses(random=False)
            self.env = state(data=dinnerAssigned,
                             dinnerTime=self.dinnerTime,
                             travelMode=self.travelMode,
                             padSize=self.padSize,
                             shuffleTeams=self.shuffleTeams)
            self.env.initNormalState()
        else:
            self.env = initState
        # Mask of currently valid actions (1 = allowed); zeroed then refilled.
        self.validActions = 0 * self.validActions
        self.validActions[self.env.getValidActions()] = 1
        if not self.restrictValidActions:
            self.validActions[:] = 1
        self.state = self.env.getState()
        self.netState = [self.raw2singleNetState()] * len(self.netState)
        self.is_done = False
        self.is_partDone = False  # is set to True if all non-rescue teams have been assigned.
        self.score = 0
        # Sentinel so any first real reward counts as an improvement.
        self.lastReward = -np.Inf
Example #18
0
	def __init__(self):
		"""Build the race: game world, score tracking, the full transition
		list for model-based learners, and three teams with equal skills."""
		self.world = Gameworld.gameWorld()
		self.highScores = defaultdict(lambda: 0)
		self.tdRaceOrder = list()
		self.adpRaceOrder = list()
		self.randomRaceOrder = list()

		# Every possible (state,action,nextState) tuple
		transitions = []
		for j in range(3):
			for k in range(10):
				for l in range(10):
					curState = State.state((k, l), j)
					curState.setTerrainType(self.world.getTerrainType(curState))
					for action in self.world.getActions(curState):
						for nextState in self.world.getAllPossibleSuccessors(curState, action):
							# (inf, inf) is the finished-race sentinel and has no terrain.
							if nextState.getPosition() != (float("inf"), float("inf")):
								nextState.setTerrainType(self.world.getTerrainType(nextState))
							transitions.append( (curState, action, nextState) )

		skills = { k:(random.random() + .5)  for  k   in  ['water','grass','forest','mountain'] }
		for i in range(3):
			# Each team has equal skills (apples to apples comparison)
			self.world.addAgent(Agent.adpAgent(self.world, transitions, discount=self.world.discount), skills)
			self.world.addAgent(Agent.tdAgent((9,0)                   ,      gam=self.world.discount), skills)
			self.world.addAgent(Agent.randomAgent()                    , skills)
Example #19
0
    def __init__(self, suffix, name, main, num_episodes):
        """Worker around a gym environment sharing the *main* network.

        Args:
            suffix: name suffix for this worker's network variable scope.
            name: gym environment id to instantiate.
            main: shared model supplying sizes, params and summary writer.
            num_episodes: number of episodes this worker should run.
        """
        self.total_steps = 0
        self.batch_size = 64
        self.gamma = 0.99  # reward discount factor

        # Cadence: apply gradients every step, sync with main every 2 steps.
        self.gradient_update_step = 1
        self.main_update_step = 2

        self.grads = {}

        self.env = gym.make(name)

        oshape = self.env.observation_space.shape
        # The shared network must match this environment's spaces.
        assert main.output_size == self.env.action_space.n
        assert oshape[0] == main.input_shape

        self.steps = history.history(self.batch_size)

        self.main = main
        self.num_episodes = num_episodes

        self.current_state = state.state(oshape[0], main.state_size)

        # Thread-local network initialised from the main network's params.
        self.network = ann.ann('thread_' + suffix, main.input_size,
                               main.output_size, main.swriter)
        self.network.import_params(main.network.export_params())

        self.thread = threading.Thread(target=self.run)
Example #20
0
	def __init__(self, addr, config_file):
		"""Set up this server's replication state and peer connections.

		Args:
			addr: this server's own address (must appear in config_file).
			config_file: newline-separated list of all server addresses.
		"""
		self.S = state()
		self.S.state = 'Normal'

		self.check_servers_greenlet = None

		self.addr = addr

		#The sort function here arranges the addresses in an ascending order like a dictionary.
		#The first address in this list is least priority and last entry is highest priority
		self.servers = sorted([line for line in open(config_file).read().strip().split('\n')])
		print '%sMy addr: %s %s' % (fg(3), self.addr, attr(0))
		print '%sServer list: %s%s' % (fg(3), str(self.servers), attr(0))

		self.serverListBackup = [];

		self.n = len(self.servers)

		self.connections = []

		#this is the place where we can say we are assigning priority variable, according to the order in the list
		for i, server in enumerate(self.servers):
			if server == self.addr:
				# Our own slot: priority = position; self stands in for a
				# connection to ourselves.
				self.priority = i
				self.connections.append(self)
			else:
				c = zerorpc.Client(timeout=5)
				c.connect('tcp://' + server)
				self.connections.append(c)
Example #21
0
 def NoRisk(self, speed, p, q):
     """Simulate the fixed-*speed* policy that halts (speed 0) whenever
     weather and traffic are both adverse, then print its objective.

     Args:
         speed: cruising speed used whenever conditions allow movement.
         p: weight on accumulated risk cost.
         q: weight on total travel time.
     """
     currentState = state(0.5, 0, 0, 1.0)
     currentState.setCost(0)
     distance = 0
     time = 0
     while distance < self.distance:
         time += 1
         # Locate the (distance, time) cell in the condition-parameter grid.
         rowCount = int(floor(distance / self.distance_block))
         columnCount = int(
             floor(time * self.time_interval / self.time_block))
         weatherWeight = self.parameter[rowCount][columnCount][0]
         trafficWeight = self.parameter[rowCount][columnCount][1]
         if (weatherWeight == 1 and trafficWeight == 1):
             # Both hazards active: wait in place this interval.
             currentState = self.transition(currentState, 0)
         else:
             currentState = self.transition(currentState, speed)
         distance = currentState.getx()
     # Total time includes the fractional final stage.
     per = currentState.getActionPercent()
     t = currentState.getStage()
     t = (t - 1 + per) * self.time_interval
     r = currentState.getCost()
     currentObj = p * r + t * q
     best_action = currentState.getActionHistory()
     print("No Risk at speed ", speed)
     print("p value", p, ", q value ", q, ", total time ", t,
           ", total risk cost (before times p)", r, ", optimal value ",
           currentObj, ", best action ", best_action)
Example #22
0
    def ObjectiveOptimization(self, p, q):
        """Exhaustive forward search over action sequences minimising
        p * riskCost + q * totalTime; prints the best plan found.

        NOTE(review): if no expanded state ever reaches self.distance,
        endState stays None and the final getActionHistory()/print fails
        (total_time/best_r also unbound) — confirm inputs guarantee
        reachability.
        """
        initialState = state(0.5, 0, 0, 1.0)
        initialState.setCost(0)
        stageList = [initialState]  # FIFO frontier (breadth-first expansion)
        endState = None
        bestObj = 10000             # sentinel "infinity"
        stageCount = 0
        while stageList:
            currentState = stageList.pop(0)
            x = currentState.getx()
            t = currentState.getStage()
            stageCount += 1
            if x == self.distance:
                # Complete route candidate: score it against the best so far.
                per = currentState.getActionPercent()
                r = currentState.getCost()
                currentObj = p * r + (t - 1 + per) * self.time_interval * q
                if currentObj < bestObj:
                    endState = currentState
                    bestObj = currentObj
                    total_time = (t - 1 + per) * self.time_interval
                    best_r = r

            # Expand only states that can still arrive on time.
            if ((t < self.stage) and (x < self.distance)
                    and self.checkArrivalOnTime(x, t)):
                for a in self.action:
                    newState = self.transition(currentState, a)
                    stageList.append(newState)

        best_action = endState.getActionHistory()
        print("p value", p, ", q value ", q, ", total time ", total_time,
              ", total risk cost (before times p)", best_r, ", best action ",
              best_action, ", optimal value ", bestObj,
              ", total stage considered ", stageCount)
Example #23
0
 def __init__(self,
              dinnerTable,
              finalPartyLocation,
              dinnerTime,
              travelMode='simple',
              shuffleTeams=False,
              padSize=50,
              tableAssigner=randomAgent,
              **kwargs):
     """
     Args:
         dinnerTable (pandas dataframe): info about all the teams in defined format
         finalPartyLocation (array): geocoordinates of the final party location
         dinnerTime (datetime): time of the dinner
         travelMode (string): see state.__init__ documentation for details.
                              'simple' is safe and easy
         shuffleTeams (bool): If True, the choice, which team is seated next is random. 
                              Otherwise, always the subsequent team will be seated.
         padSize (int): see state.py __init__. Must be at least as high as the number of participating teams.
         tableAssigner (class with a chooseAction method): the logic to assign tables
         **kwargs: additional arguments passed to the tableAssigner
     """
     # Assigns each team a course before table assignment begins.
     self.courseAssigner = assignDinnerCourses(dinnerTable,
                                               finalPartyLocation)
     self.state = state(dinnerTable, dinnerTime, travelMode, shuffleTeams,
                        padSize)
     # Instantiate the table-assignment policy with any extra configuration.
     self.tableAssigner = tableAssigner(**kwargs)
     self.validation = validation()
Example #24
0
	def getAllPossibleSuccessors(self, state, action):
		"""Return every state reachable from *state* under *action*:
		slip outcomes (left / down / diagonal / stay-on-edge) plus the
		deterministic successor.
		"""
		x, y = state.getPosition()
		successors = list()
		# BUG FIX: `is 'finish'` compares identity, not value; use `==`.
		if action == 'finish':
			return [State.state((float("inf"), float("inf")), state.getWorld())]
		else:
			if x >= 1:  # slide-left outcome
				successors.append(State.state((x - 1, y), state.getWorld()))
			if y < 9:  # slide-down outcome
				successors.append(State.state((x, y + 1), state.getWorld()))
			if x >= 1 and y < 9:  # fall (diagonal) outcome
				successors.append(State.state((x - 1, y + 1), state.getWorld()))
			if x == 0 or y == 9:  # slipping off-grid keeps the agent in place
				successors.append(State.state((x, y), state.getWorld()))
			successors.append(self.generateNextStates(state, action))
			return successors
Example #25
0
 def generateNextStates(self, state, action):
     """Return the deterministic successor of *state* under *action*.

     'finish' from a terminal/transitional state maps to the (inf, inf)
     sentinel; compass moves shift the position one cell.

     Raises:
         ValueError: for any unrecognised action.
     """
     # BUG FIX: string comparisons use ==, not `is` (identity).
     if (self.isTerminalState(state)
             or self.isTransitionalState(state)) and action == 'finish':
         return State.state((float("inf"), float("inf")), state.getWorld())
     x, y = state.getPosition()
     world = state.getWorld()
     if action == 'east':
         return State.state((x + 1, y), world)
     if action == 'west':
         return State.state((x - 1, y), world)
     if action == 'north':
         return State.state((x, y - 1), world)
     if action == 'south':
         return State.state((x, y + 1), world)
     # BUG FIX: raising a plain string is a TypeError on modern Python.
     raise ValueError("Error, invalid action")
Example #26
0
File: score.py Project: geier/pyedf
	def load(self, score_file_name):

		score_file = open(score_file_name, 'r') 
		states = []

		for line in score_file:

			try:
				if self.isComment(line):
					continue

				x = line.split(self.lineSeparator)
				start = x[0].strip(' ')
				duration = x[1].strip(' ')
				annot = x[2].strip('\n').strip('\r').strip(' ')

				if duration == '':
					duration = '-1'
	
				states.append( st.state(start=start, duration=duration, annot=annot) )

			except:
				if self.verbose > 0: print "# line not readable:", line


		score_file.close()

		return states
Example #27
0
 def successors(self, id):
     """
     :param id: state class object
     :return: list of successors for given state + info(acc,aux,cost)
     :raises ValueError: not entering a valid state for given json

     NOTE(review): an invalid state triggers sys.exit(1) instead of letting
     the ValueError propagate — harsh for library code; confirm intended.
     """
     try:
         successors = []
         if self.belongNode(id._current):
             # Edges whose source endpoint matches the current node.
             adjacents = [
                 key for key in self._edges.keys() if id._current in key[0]
             ]
             for data in adjacents:
                 # Human-readable trace of the move and its edge label.
                 acc = "I'm in %s and I go to %s c/%s" % (data[0].zfill(
                     10), data[1].zfill(10), self._edges[data][0])
                 aux = state(data[1],
                             id.visited(data[1],
                                        id._nodes))  #creates new ._md5
                 cost = self._edges[data][1]
                 successors.append((acc, aux, cost))
             return successors
         else:
             raise ValueError
     except ValueError:
         print("error. the state does not belong to given json")
         sys.exit(1)
Example #28
0
 def __init__(self, numHands, numFingers):
     """Build the game-tree root: two players with *numHands* hands of
     *numFingers* fingers each; the trailing 0 selects the first mover."""
     self.numHands = numHands
     self.numFingers = numFingers
     self.root = state(player(numHands, numFingers),
                       player(numHands, numFingers), 0)
     self.root.score = 0
     # All states discovered so far, seeded with the root.
     self.allStates = set()
     self.allStates.add(self.root)
Example #29
0
    def __getFeatures(self, state, action):
        """Return the feature dict for a (state, action) pair.

        Identity feature only: maps the (State, action) key to 1.
        """
        x, y = state.getPosition()
        # Dead code removed: the original also computed the successor cell
        # (dx/dy via __dirToVect, next_x/next_y) but never used any of it
        # in the returned feature.
        feat = {(State.state((x, y), state.getWorld()), action): 1}
        return feat
Example #30
0
 def getAllPossibleSuccessors(self, state, action):
     """Return every state reachable from *state* under *action*:
     slip outcomes (left / down / diagonal / stay-on-edge) plus the
     deterministic successor.
     """
     x, y = state.getPosition()
     successors = list()
     # BUG FIX: `is 'finish'` compares identity, not value; use `==`.
     if action == 'finish':
         return [
             State.state((float("inf"), float("inf")), state.getWorld())
         ]
     else:
         if x >= 1:  # slide-left outcome
             successors.append(State.state((x - 1, y), state.getWorld()))
         if y < 9:  # slide-down outcome
             successors.append(State.state((x, y + 1), state.getWorld()))
         if x >= 1 and y < 9:  # fall (diagonal) outcome
             successors.append(State.state((x - 1, y + 1),
                                           state.getWorld()))
         if x == 0 or y == 9:  # slipping off-grid keeps the agent in place
             successors.append(State.state((x, y), state.getWorld()))
         successors.append(self.generateNextStates(state, action))
         return successors
Example #31
0
 def __init__(self, json):
     """Load the problem from the *json* file path: initial state, the
     state space built from the referenced XML, and an empty visited cache."""
     self._json = json
     self._file = self._readJson()
     self._init_state = state(self._file["IntSt"]["node"],
                              self._file["IntSt"]["listNodes"],
                              self._file["IntSt"]["id"])
     xml = "/".join(self._file["graphlmfile"].strip("/").split('/')
                    [1:]) + ".xml"  #not getting town folder and adding .xml
     self._state_space = stateSpace(xml)
     self._visitedList = {}
Example #32
0
 def __init__(self):
     """Build a 10x10 grid of randomly-typed terrain cells."""
     self.terrainWorld = list()
     for i in range(10):
         section = list()
         for j in range(10):
             # Random terrain type; every cell shares this terrain's
             # class-level index.
             obj = random.choice([grass(), water(), mountain(), forest()])
             obj.index = terrain.index
             section.append(state((i,j), obj))
         self.terrainWorld.append(section)
     # Bump the class-wide counter so the next terrain gets a fresh index.
     terrain.index += 1
Example #33
0
	def __init__(self):
		"""Initialise the game world: three terrains, rewards, agent registries."""
		# One terrain grid per world.
		self.terrains = [Terrain.terrain() for _ in range(3)]
		self.livingReward = -1   # per-step cost
		self.discount = .95      # future-reward discount factor
		self.noise = 0
		self.startState = State.state()
		# (9, 0) in worlds 0 and 1 hands the agent to the next world.
		self.transitionalStates = [
			State.state((9, 0), 0),
			State.state((9, 0), 1),
		]
		# (9, 0) in world 2 finishes the race.
		self.terminalState = State.state((9, 0), 2)
		# ADP / TD / random agent pools with their state lists and counters.
		self.adpAgents = []
		self.adpAgentIndex = 0
		self.adpAgentStates = []
		self.tdAgents = []
		self.tdAgentIndex = 0
		self.tdAgentStates = []
		self.randomAgents = []
		self.randomAgentIndex = 0
		self.randomAgentStates = []
		self.transitionalReward = 1000
		self.terminalReward = 2000
Example #34
0
    def execute_aux(self, node):
        """Recursively evaluate *node*'s subtree (post-order): combine the
        children's states, then apply this node's operator to the result."""
        cur_state = state("R")
        for child in node.get_next():
            child_state = self.execute_aux(child)
            cur_state.add_state(child_state)

        # Leaf: no child contributed a matrix, so take this node's matrix.
        if cur_state.get_matrix() is None:
            cur_state.set_state(node.get_matrix())
        else:
            cur_state.apply_operator(node)
        return cur_state
Example #35
0
 def moveAgent(self, agent, state, action):
     """Advance *agent* from *state* under *action*, with stochastic slips
     (slide down / slide left / fall) rolled independently, in that order.
     """
     newAgent = self.getWorldAgent(agent)
     x, y = state.getPosition()
     # The terrain cell's repr() is its terrain-type name.
     terrainElement = repr(
         self.terrains[state.getWorld()].terrainWorld[x][y])
     chanceToFall = None
     chanceToSlideDown = None
     chanceToSlideLeft = None
     if state in self.transitionalStates or state == self.terminalState:
         # Transitional/terminal states move deterministically.
         self.setAgentState(newAgent,
                            self.generateNextStates(state, action))
     else:
         # Fall risk shrinks as skill approaches 1; mountains riskier (/2 vs /4).
         if terrainElement == 'grass':
             chanceToFall = abs(newAgent.skillLevels['grass'] - 1) / 4
         elif terrainElement == 'water':
             chanceToFall = abs(newAgent.skillLevels['water'] - 1) / 4
         elif terrainElement == 'forest':
             chanceToFall = abs(newAgent.skillLevels['forest'] - 1) / 4
         else:
             chanceToFall = abs(newAgent.skillLevels['mountain'] - 1) / 2
         x, y = state.getPosition()
         # Slip chances taper linearly with distance from the risky edge.
         chanceToSlideDown = 0.1 - ((0.1 / 10) * (abs(y - 0)))
         chanceToSlideLeft = 0.1 - ((0.1 / 10) * (abs(x - 9)))
         if random.random() <= chanceToSlideDown:
             self.setAgentState(
                 newAgent,
                 State.state((x, min([9, y + 1])), state.getWorld()))
         elif random.random() <= chanceToSlideLeft:
             self.setAgentState(
                 newAgent,
                 State.state((max([x - 1, 0]), y), state.getWorld()))
         elif random.random() <= chanceToFall:
             self.setAgentState(
                 newAgent,
                 State.state((max([x - 1, 0]), min([9, y + 1])),
                             state.getWorld()))
         else:
             self.setAgentState(newAgent,
                                self.generateNextStates(state, action))
Example #36
0
    def union(self):
        """Pop two NFA fragments and push their union (alternation)."""
        second = self.nfastack.pop()
        first = self.nfastack.pop()

        entry = state.state(self.state_id)
        self.state_id += 1

        exit_state = state.state(self.state_id)
        self.state_id += 1

        # Epsilon-branch from the new entry into both alternatives.
        entry.add_transition(first[0], "$")
        entry.add_transition(second[0], "$")

        # Both alternatives epsilon-exit into the shared accept state.
        first[-1].add_transition(exit_state, "$")
        second[-1].add_transition(exit_state, "$")

        first.insert(0, entry)
        second.append(exit_state)

        # Concatenate the two fragments into a single state list.
        first.extend(second)

        self.nfastack.append(first)
Example #37
0
 def execute_circuit(self):  #returns state
     """Evaluate every root node (a node that is no other node's child)
     and combine their results into a single accumulated state.
     """
     # PERF FIX: precompute the set of child names once. The original
     # re-scanned every parent's children for each node, an accidental
     # O(n^2) over the node list.
     child_names = set()
     for parent in self.nodes:
         for child in parent.get_next():
             child_names.add(child.get_name())

     cur_state = state("result")
     for node in self.nodes:
         # Only roots (nodes never appearing as a child) are executed.
         if node.get_name() not in child_names:
             new_state = self.execute_aux(node)
             cur_state.add_state(new_state)
     return cur_state
Example #38
0
    def __init__(self, num_episodes, output_path):
        """Q-learning driver for gym's MountainCar-v0.

        Args:
            num_episodes: number of training episodes to run.
            output_path: destination passed to the Q-learner.
        """
        self.num_episodes = num_episodes
        self.step = 0

        self.env = gym.make('MountainCar-v0')
        # Monitor wrapper records episodes under 'mc0_wrappers'.
        self.env = gym.wrappers.Monitor(self.env, 'mc0_wrappers')

        ospace = self.env.observation_space.shape

        # Number of recent observations stacked into one network input.
        self.obs_size = 2
        self.current_state = state.state(ospace[0], self.obs_size)

        self.q = qlearn.qlearn((ospace[0] * self.obs_size, ),
                               self.env.action_space.n, output_path)
Example #39
0
def compile(postfix):
    """Main Thompson's-construction function: turn a postfix regular
    expression into an NFA and return it."""
    # Operator symbol -> fragment-combining handler.
    operators = {
        '.': concat,
        '|': alternate,
        '*': kleene,
        '?': zero_or_one,
        '+': one_or_more,
        '^': start_anchor,
    }
    for symbol in postfix:
        handler = operators.get(symbol)
        if handler is not None:
            handler()
        else:
            # Operand: two-state fragment accepting exactly this character.
            accept = state()
            initial = state()
            initial.label = symbol
            initial.edge1 = accept
            nfastack.append(nfa(initial, accept))

    return nfastack.pop()
Example #40
0
def run(numRuns,log,sol=solSettings,name = '',progConf =None):
    """Run an evolutionary search numRuns times, logging every generation.

    Each run: mu=50 individuals, lamb=20 children per generation via
    k-tournament (k=15) parent selection, uniform recombination and mutation,
    then truncation survival back to mu; loops until bbsaSettings['maxEvals']
    evaluations. Returns the (mutated) log object. (Python 2: xrange/print.)
    """
    for i in xrange(numRuns):
        A = []  # NOTE(review): never read below — dead variable?
        evals = 0
        mu = 50        # population size kept after truncation
        lamb = 20      # children generated per generation
        k = 15         # tournament size for parent selection
        survive = 20   # 'k' parameter handed to the truncation operator
        last = [solution.solution(sol) for j in xrange(mu)]

        bestLog =state.state()
        while evals< bbsaSettings['maxEvals']:
            children =[]
            for x in xrange(lamb):
                # Pick 2 parents, recombine into 1 child, mutate at rate .05,
                # then evaluate (evaluate also feeds the best-so-far state).
                parents = kTourn([last],{'count':{'value':2},'k':{'value':k}})
                new = mutate([uniRecomb([parents],{'num':{'value':1}})],{'rate':{'value':.05}})
                children.extend(evaluate([new],{'state':bestLog}))
                evals+=len(new)
            last = trunc([children+last],{'count':{'value':mu},'k':{'value':survive}})

            # Snapshot this generation for the logger.
            st = state.state()
            st.last = last
            st.curEval = evals
            log.nextIter(st)
        # Re-evaluate the survivors and log the final generation.
        for ind in last:
            ind.evaluate()
        st = state.state()
        st.last = last
        st.curEval = evals
        log.nextIter(st)
        print i
        log.nextRun()
        bestLog.logBestSoFar(i,name,progConf)
        bestLog.reset()
    log.nextProbConf()
    return log
Example #41
0
	def load(self, filename):
		"""Read a score file and return a list of st.state objects.

		Each non-comment line is split on self.lineSeparator into
		start [, duration][, annotation]; a missing duration becomes '-1'.
		Unparseable lines are skipped (reported when self.verbose > 0).
		"""
		states = []

		# `with` guarantees the file is closed even if parsing raises.
		with open(filename, 'r') as score_file:

			for line in score_file:

				try:
					if self.isComment(line):
						continue

					line = line.strip('\n').strip('\r').strip(' ')
					x = line.split(self.lineSeparator)

					# split() always yields at least one field: the start value.
					start = x[0].strip(' ')

					if len(x) == 1:				# start only
						annot	 = ''
						duration = ''
					elif len(x) == 2:			# start + annotation
						annot	 = x[1]
						duration = ''
					else:						# start + duration + annotation
						duration = x[1].strip(' ')
						annot	 = x[2]

					if duration == '':
						duration = '-1'

					states.append( st.state(start=start, duration=duration, annot=annot) )

				except Exception:
					# Narrowed from a bare `except:` so KeyboardInterrupt and
					# SystemExit still propagate; bad lines are only reported.
					if self.verbose > 0: print("# line not readable:", line)

		return states
Example #42
0
 def __init__(self, server=False, dispatch_proj=None, id=False):
     """Create a player: physics state, movement helper, collision rect,
     colour and input protocol.

     NOTE(review): `id` shadows the builtin and doubles as a truthy flag;
     when set it also attaches a WeaponsManager. Confirm callers pass either
     a real id or False.
     """
     super(Player, self).__init__()
     # Spawn at (100, 130) with zero velocity and 100 (presumably health) — TODO confirm.
     self.state = state(vec2(100, 130), vec2(0, 0), 100)
     self.move = Movement(*self.state.pos)
     self.dispatch_proj = dispatch_proj
  # spawning player at 0,0, width 32 = 1280 / 40. and height 72 = 720/10.
     # Clients use the drawable Rect; the server only needs a plain AABB.
     if not server:
         self.Rect = Rect
     else:
         self.Rect = AABB
     if id:
         self.id = id
         self.weapons = WeaponsManager(self.dispatch_proj, self.id)
     #self.color = Options()['color']  #(0, 204, 255)
     self.set_color(Options()['color'])
     self.rect = self.Rect(0, 0, 32, 72, self.color, isplayer=True)
     #input will be assigned by windowmanager class
     self.input = proto.Input()
     self.listeners = {}
     self.ready = False
Example #43
0
    def __init__(self):
        """Build the three-leg relay world: terrain grids, rewards, and
        per-agent-type rosters with their index counters."""
        # One terrain grid per relay leg.
        self.states = [Terrain.terrain() for _ in range(3)]
        self.finishPos = (float('inf'), float('inf'))
        self.finalStates = []
        self.livingReward = -1.
        self.discount = 0.95
        self.noise = 0
        # Cell [9][0] of legs 0 and 1 hands off; the same cell of leg 2 ends the race.
        self.transitionalStates = [self.states[0][9][0], self.states[1][9][0]]
        self.terminalState = self.states[2][9][0]
        self.adpAgents = []
        self.adpAgentIndex = 0
        self.tdAgents = []
        self.tdAgentIndex = 0
        self.randomAgents = []
        self.randomAgentIndex = 0
        self.transitionalReward = 1000.
        self.terminalReward = 2000

        # One sentinel "finished" state per leg, tagged with its leg index.
        for leg in range(3):
            final = State.state(self.finishPos, Terrain.terrainObject())
            final.terrain.index = leg
            self.finalStates.append(final)
Example #44
0
def genAllPossMoves(posStates):
  '''Recursively generate all possible moves given a game-state'''
  if (len(posStates) > 10000):
    print "Runaway recursion :( - game exited"
    overflowMovesList = open('overflowMovesList.txt', 'w')
    for item in posStates:
      overflowMovesList.write(str(item))
    overflowMovesList.close()
    print len(posStates)
    exit(1)
  givenState = posStates[0]

  if (givenState.existValidMoves() == False):
    test = False
    for st in posStates:
      if (st.existValidMoves() == True):
        test = True
        break

    if (test == True):
      posStates.remove(givenState)
      genAllPossMoves(posStates)

    else:
      return posStates


  else: #There exists at least one valid move
    # CURRENT PLAYER HAS PIECE IN JAIL
    if ((givenState.turn == 0 and givenState.board[26] > 0 or \
      givenState.turn == 1 and givenState.board[27] < 0)):
      #print "piece in jail"

      for x in range(0, len(givenState.roll)):
        cpy_state = state.state(givenState)
        if (cpy_state.turn == 0): #White
          space_to_valid = cpy_state.checkSpaceTo(26, 25 - cpy_state.roll[x])
        else: # Black
          space_to_valid = cpy_state.checkSpaceTo(27, cpy_state.roll[x])
        #### IF VALID MOVE, THEN EXECUTE
        if (space_to_valid[0] == True):

          #UPDATE values
          space_from = space_to_valid[1]
          space_to = space_to_valid[2]
          move_dist = space_to_valid[3]

          # REMOVE piece being moved
          if (cpy_state.turn): #Black
            cpy_state.board[27] = cpy_state.board[27] + 1
          else: #White
            cpy_state.board[26] = cpy_state.board[26] - 1


          # CAPTURE opponent piece and put it in jail
          if ((cpy_state.board[space_to] < 0 and cpy_state.turn == False) or \
            (cpy_state.board[space_to] > 0 and cpy_state.turn == True)):
            if (int(math.fabs(cpy_state.board[space_to])) == 1):
              if (cpy_state.turn): #Black
                cpy_state.board[26] = cpy_state.board[26] + 1
              else: #White
                cpy_state.board[27] = cpy_state.board[27] - 1
              cpy_state.board[space_to] = 0

          # ADD piece to new space
          if (cpy_state.turn): #Black
            cpy_state.board[space_to] = cpy_state.board[space_to] - 1
          else: #White
            cpy_state.board[space_to] = cpy_state.board[space_to] + 1

          cpy_state.roll.remove(cpy_state.roll[x])
          cpy_state.updatePipCount()

          if (cpy_state.compareStateToList(posStates) == False):
            #print "not getting here?"
            posStates.append(cpy_state)


    # CURRENT PLAYER HAS NO PIECES IN JAIL
    else:
      for x in range(1, 25):

        # No one to move
        if (givenState.board[x] == 0):
          continue

        # Current space owned by other player
        elif ((givenState.board[x] < 0 and givenState.turn == 0) \
          or (givenState.board[x] > 0 and givenState.turn == 1)):
          #print "wrong color"
          continue

        # Current space a valid space_from
        else:
          for y in range(0, len(givenState.roll)):
            cpy_state = state.state(givenState)
            if (cpy_state.turn == 0): #White
              space_to_valid = cpy_state.checkSpaceTo(x, x - cpy_state.roll[y])
            else: #Black
              space_to_valid = cpy_state.checkSpaceTo(x, x + cpy_state.roll[y])
            #print space_to_valid
            if (space_to_valid[0] == True):
              #print "Exist valid move?"
              space_from = space_to_valid[1]
              space_to = space_to_valid[2]
              move_dist = space_to_valid[3]

              # Execute move
              if (cpy_state.turn == 1): #Black
                cpy_state.board[space_from] = cpy_state.board[space_from] + 1
              elif (cpy_state.turn == 0): #White
                cpy_state.board[space_from] = cpy_state.board[space_from] - 1


              # Capture opponent piece and put it in jail
              if ((cpy_state.board[space_to] < 0 and cpy_state.turn == 0) or \
                (cpy_state.board[space_to] > 0 and cpy_state.turn == 1)):
                if (int(math.fabs(cpy_state.board[space_to])) == 1):
                  if (cpy_state.turn): #Black
                    cpy_state.board[26] = cpy_state.board[26] + 1
                  else: #White
                    cpy_state.board[27] = cpy_state.board[27] - 1
                  cpy_state.board[space_to] = 0

              if (cpy_state.turn): #Black
                cpy_state.board[space_to] = cpy_state.board[space_to] - 1
              else: #White
                cpy_state.board[space_to] = cpy_state.board[space_to] + 1

              cpy_state.roll.remove(cpy_state.roll[y])
              cpy_state.updatePipCount()

              if (cpy_state.compareStateToList(posStates) == False):
                posStates.append(cpy_state)
                #cpy_state.printState()
          #print x
          #print len(posStates)

    #print len(posStates)
    posStates.remove(givenState)
    #print len(posStates)
    genAllPossMoves(posStates)
Example #45
0
trend_aveDegree = [0 for i in xrange(args.size)]
trend_edges = [0 for i in xrange(args.size)]
trend_edgecut = [0 for i in xrange(args.size)]
trend_connectivity = [0 for i in xrange(args.size)]
trend_resilNode = [0 for i in xrange(args.size)]
trend_resilEdge = [0 for i in xrange(args.size)]
trend_fitness = [0.0 for i in xrange(args.size)]

degreeData= []
aveEdgeCut = 0.0
edges = 0.0
fitness = 0.0

s = None
for i in xrange(args.runs):
    s = state.state()

    for j in xrange(args.size):
        add = plug.selectNodes(s)
        s.addNode(add)
        if not j:
            continue
        trend_aveDegree[j]+=sum(s.calcDegree())/float(j)
        trend_edges[j]+=analysis.edges(s)
        trend_edgecut[j]+=analysis.eccentricity(s)
        trend_connectivity[j]+=analysis.connected(s)
        trend_resilNode[j] += analysis.resilNode(s)
        trend_resilEdge[j] += analysis.resilEdge(s)

        val = fitnessFunction.funcs[args.evaluator](s)*trend_connectivity[j]
        if val >-1000000:
Example #46
0
# Train all agents, arrange the team, then inspect each ADP agent's policy.
# (Python 2 script: print statements.)
print "TRAINING AGENTS..."
print ""
a.trainAgents(50)
a.arrangeTeam()


#'''
for i,ind in enumerate(a.adpRaceOrder):
    agent = a.world.adpAgents[ind]
    agent.endTraining()
    agent.epsilon = 0.0  # zero exploration (presumably epsilon-greedy) — confirm
    print agent.epsilon, agent.discount, agent.alpha
    bad = False
    # Scan the 10x10 grid: flag the policy if it ever points west or south.
    for j in range(10):
        for k in range(10):
            state = State.state((k,j),i)
            action = agent.solver.policy[state]
            if action == 'west' or action == 'south':
                bad = True

    # NOTE(review): this unconditionally overrides the scan above so the dump
    # below always runs — looks like a debugging leftover; confirm intent.
    bad = True
    if bad:
        # Print the full policy grid, upper-casing the suspect actions.
        for j in range(10):
            for k in range(10):
                state = State.state((k,j),i)
                action = agent.solver.policy[state]
                if action == 'south' or action == 'west':
                    print '%7s' % action.upper(),
                else:
                    print '%7s' % action,
            print
0
	def getStartState(self, terrainNum = 0):
		"""Return the initial State at grid position (0, 9) on the given terrain."""
		origin = (0, 9)
		return State.state(origin, terrainNum)
Example #48
0
    def __init__(self):
        """Create fresh shared state plus an empty message list, a counter
        and a lock (presumably guarding the fields above — confirm)."""

        self.s = state.state()
        self.count = 0
        self.mlst = []
        self.lock = threading.Lock()
Example #49
0
 def get_state(self, starter=None):
     """Get a new state in this basis, optionally seeded with *starter*."""
     new_state = state(self)
     # `is not None` replaces `!= None`: identity check per PEP 8; `!=`
     # invoked __eq__ and could misfire on objects with custom equality.
     if starter is not None:
         new_state.add(1, starter)
     return new_state
Example #50
0
File: score.py Project: geier/pyedf
	def append(self, new_state=None, start=None, duration=None, annot=None):
		"""Append *new_state* to self.states, building one from
		start/duration/annot when no state object is supplied."""

		# `is None` replaces `== None` (identity check per PEP 8).
		if new_state is None:
			new_state = st.state(start=start, duration=duration, annot=annot)

		self.states.append(new_state)
Example #51
0
def drawEnvironment(windowSurface, race, worldNum, results):
    """Animate one leg of the relay race in the pygame window.

    Draws the terrain grid, platforms and the three agents stepping through
    their recorded state traces in *results*; shows scores as agents finish;
    appends to the module-level headStart / winnerHistory / victors /
    victorPositions lists; on the final leg (worldNum == 2) shows the
    closing screen.

    Fixes: `worldNum is 2` relied on CPython small-int caching — replaced
    with `== 2`; removed the unused local `numWinners`.
    """
    tdAgent = getAgentImage(race.tdRaceOrder[worldNum])
    randomAgent = getAgentImage(race.randomRaceOrder[worldNum])
    adpAgent = getAgentImage(race.adpRaceOrder[worldNum])
    tdHeadStart = headStart[worldNum][2]
    randomHeadStart = headStart[worldNum][0]
    adpHeadStart = headStart[worldNum][1]
    tdNewHeadStart, randomNewHeadStart, adpNewHeadStart = 0, 0, 0
    tdAgentScore = results[5][worldNum]
    randomAgentScore = results[3][worldNum]
    adpAgentScore = results[4][worldNum]
    grass = pygame.image.load("grass.png")
    grass = pygame.transform.scale(grass, (144, 70))
    water = pygame.image.load("water.png")
    water = pygame.transform.scale(water, (144, 70))
    mountain = pygame.image.load("mountain.jpg")
    mountain = pygame.transform.scale(mountain, (144, 70))
    forest = pygame.image.load("forest.png")
    forest = pygame.transform.scale(forest, (144, 70))
    sky = pygame.image.load("background.jpg")
    sky = pygame.transform.scale(sky, (1440, 850))
    platform = pygame.image.load("platform.tiff")
    platform = pygame.transform.scale(platform, (100, 50))
    font = pygame.font.SysFont("monospace", 25, True, False)
    start = font.render("START", 1, (0,0,0), (255, 255, 255))
    finish = font.render("FINISH", 1, (0,0,0), (255, 255, 255))
    pygame.display.set_caption("Relay Race: Leg " + str(worldNum + 1))
    move = 0
    # Rank the three agents by score: 0 = best, 2 = worst, 1 = middle.
    tdWinner, adpWinner, randomWinner = -1, -1, -1
    if tdAgentScore == max([randomAgentScore, tdAgentScore, adpAgentScore]):
        tdWinner = 0
    elif adpAgentScore == max([randomAgentScore, tdAgentScore, adpAgentScore]):
        adpWinner = 0
    else:
        randomWinner = 0

    if tdAgentScore == min([randomAgentScore, tdAgentScore, adpAgentScore]):
        tdWinner = 2
    elif adpAgentScore == min([randomAgentScore, tdAgentScore, adpAgentScore]):
        adpWinner = 2
    else:
        randomWinner = 2

    if tdWinner == -1:
        tdWinner = 1
    elif adpWinner == -1:
        adpWinner = 1
    else:
        randomWinner = 1
    # Step the animation until every agent's trace (plus head start) is exhausted.
    while not(move + randomHeadStart > len(results[0][worldNum]) and move + adpHeadStart > len(results[1][worldNum]) and move + tdHeadStart > len(results[2][worldNum])):
        randomAgentState = results[0][worldNum][min([move + randomHeadStart, len(results[0][worldNum]) - 1])]
        adpAgentState = results[1][worldNum][min([move + adpHeadStart, len(results[1][worldNum]) - 1])]
        tdAgentState = results[2][worldNum][min([move + tdHeadStart, len(results[2][worldNum]) - 1])]
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            windowSurface.blit(sky, (0,0))
            if event.type == USEREVENT + 1:
                # Redraw the terrain grid for this leg.
                for i in range(len(race.world.states[worldNum].terrainWorld)):
                    for j in range(len(race.world.states[worldNum].terrainWorld)):
                        position = ((144 * i), ((70 * j) + 150))
                        terrainType = race.world.states[worldNum][i][j].getTerrainType()
                        if terrainType == 'grass':
                            windowSurface.blit(grass, position)
                        elif terrainType == 'water':
                            windowSurface.blit(water, position)
                        elif terrainType == 'forest':
                            windowSurface.blit(forest, position)
                        else:
                            windowSurface.blit(mountain, position)
                windowSurface.blit(platform, (0, 60))
                windowSurface.blit(platform, (100, 60))
                windowSurface.blit(platform, (200, 60))
                windowSurface.blit(platform, (570, 60))
                windowSurface.blit(platform, (670, 60))
                windowSurface.blit(platform, (770, 60))
                windowSurface.blit(platform, (1140, 60))
                windowSurface.blit(platform, (1240, 60))
                windowSurface.blit(platform, (1340, 60))
                # Draw each agent; once at the finish sentinel, show its score.
                windowSurface.blit(tdAgent, stateToCoordinates(tdAgentState ,'td', worldNum, tdWinner))
                if tdAgentState.getPosition() == (float("inf"), float("inf")):
                    drawAgentScore(results[5][worldNum], tdWinner, worldNum, windowSurface)
                windowSurface.blit(adpAgent, stateToCoordinates(adpAgentState ,'adp', worldNum, adpWinner))
                if adpAgentState.getPosition() == (float("inf"), float("inf")):
                    drawAgentScore(results[4][worldNum], adpWinner, worldNum, windowSurface)
                windowSurface.blit(randomAgent, stateToCoordinates(randomAgentState ,'random', worldNum, randomWinner))
                if randomAgentState.getPosition() == (float("inf"), float("inf")):
                    drawAgentScore(results[3][worldNum], randomWinner, worldNum, windowSurface)
                windowSurface.blit(start, stateToCoordinates(State.state((0,9), worldNum), None, worldNum))
                windowSurface.blit(finish, stateToCoordinates(State.state((9,0), worldNum), None, worldNum))
                # Show results of any earlier legs.
                if worldNum > 0:
                    for i, winner in enumerate(victors[0]):
                        windowSurface.blit(winner, victorPositions[0][i])
                    for j in range(3):
                        drawAgentScore(winnerHistory[0][2-j], j, 0, windowSurface)

                    if worldNum == 2:
                        for i, winner in enumerate(victors[1]):
                            windowSurface.blit(winner, victorPositions[1][i])
                        for j in range(3):
                            drawAgentScore(winnerHistory[1][2 - j], j, 1, windowSurface)

                pygame.display.flip()
                # Agents that have finished accrue head start for the next leg.
                if len(results[0][worldNum]) <= move + randomHeadStart:
                    randomNewHeadStart += 1
                if len(results[1][worldNum]) <= move + adpHeadStart:
                    adpNewHeadStart += 1
                if len(results[2][worldNum]) <= move + tdHeadStart:
                    tdNewHeadStart += 1
                move += 1
    headStart.append([randomNewHeadStart, adpNewHeadStart, tdNewHeadStart])
    scoreList = [tdAgentScore, randomAgentScore, adpAgentScore]
    scoreList.sort()
    winnerHistory.append(scoreList)
    victors.append([tdAgent, adpAgent, randomAgent])
    victorPositions.append([stateToCoordinates(State.state((float("inf"), float("inf")), worldNum), 'td', worldNum, tdWinner), 
        stateToCoordinates(State.state((float("inf"), float("inf")), worldNum), 'adp', worldNum, adpWinner), 
        stateToCoordinates(State.state((float("inf"), float("inf")), worldNum), 'random', worldNum, randomWinner)])
    if worldNum == 2:
        drawClosingScreen(windowSurface, race, results)
Example #52
0
 def get_state(self, starter=None):
     """Get an empty state to start building with, seeded when *starter* is given."""
     fresh = state(self)
     # `is not None` replaces `!= None` (identity check per PEP 8).
     if starter is not None:
         fresh.add(1, starter)
     return fresh
from Queue import Queue
from state import state

def contains(argList, argState):
    """
    Return True when argList already holds a state equal to argState,
    comparing field-wise on (m, c, b): missionaries, cannibals, boat side.
    """
    # any() short-circuits exactly like the original early-return loop.
    return any(item.m == argState.m and
               item.c == argState.c and
               item.b == argState.b
               for item in argList)

visited = []#visited states
path = []#complete path to the solution
q = Queue()
start = state() #in the beginning there were 3 missionaries and 3 cannibals on left bank of the river

#bredth first solution
q.put(start)
while(q.qsize()): #while queue is non empty go in loop
    next = q.get() #get the head state from the queue
    if (next.m == 0 and next.c == 0 and next.b == False):
        #done! we have found solution print the complete path by traversing q
        x = next        
        count =0
        while(x != None):
            #x.display()
            path.append(x)
            x = x.predecessor_state            
            count = count+1
        print "total river crossings:"+ str(count-1)+"\n"+"Miss(on L) Cann(on L) \t\tBoat At"
Example #54
0



# NOTE(review): the trend_* arrays, plug1/plug2, runs and size are defined
# earlier in this script (outside this excerpt). This loop grows two graphs
# side by side and accumulates per-step metrics for later averaging.
aveEdgecut1 = 0.0
aveEdgecut2 = 0.0


edges1 = 0.0
edges2 = 0.0
fitness1 = 0.0
fitness2 = 0.0
s1 = None
s2 = None
for i in xrange(runs):
    # Fresh graph states for each run.
    s1= state.state()
    s2 = state.state()

    for j in xrange(size):
        # Each plugin picks the next node(s) for its own graph.
        add1 = plug1.selectNodes(s1)
        s1.addNode(add1)
        add2 = plug2.selectNodes(s2)
        s2.addNode(add2)
        if not j:
            continue
        trend_aveDegree1[j]+=sum(s1.calcDegree())/float(j)
        trend_edges1[j]+=analysis.edges(s1)
        trend_edgecut1[j]+=analysis.edgeCut(s1)
        trend_connectivity1[j]+=analysis.connected(s1)
        # +1 guards against division by zero when the graph has no edges yet.
        trend_edgecut_edge1[j]+=trend_edgecut1[j]/float(trend_edges1[j]+1)
        
Example #55
0
        node2 = tree.treenode(state2)
        node.subnodes.append(node1)
        node.subnodes.append(node2)
    else:
        return

    for n in node.subnodes:
        shakeit(n)


# Running count of finished-game outcomes; incremented by printdata() via `global`.
possibility = 0


def printdata(data):
    """Print one finished game's score history and bump the outcome tally.
    (Python 2 script: print statements.)"""
    global possibility
    possibility = possibility + 1
    print "a_points:", data.a_points
    print "b_points:", data.b_points
    # Side a wins iff its final score reached 10; otherwise b wins.
    winner = "a" if data.a_points[-1] == 10 else "b"
    print "winner is:", winner


if __name__ == "__main__":
    global possibility
    initstate = state.state()
    rootnode = tree.treenode(initstate)
    shakeit(rootnode)
    tree.traverse(rootnode, printdata)
    print "total possibility:", possibility
    print "total instances:", state.state.instancecounter
Example #56
0
 def state(self,init=None):
     """Build and return a state for this model; shorthand for state(thismodel, init)."""
     new_state = state(self, init)
     return new_state
Example #57
0
	def __init__(self):
		"""Wire up the command scope, speech recognizer and state.
		(Python 2: concatenating dict.items() lists.)"""
		# Merge the base and common scopes into a single lookup dict.
		self.scope=dict(scopes.base.items()+scopes.common.items())
		self.recog=recognizer('corpus')
		# Invoke self.parse whenever the recognizer signals a finished utterance.
		self.recog.connect('finished', self.parse)
		self.state=state()
Example #58
0
 def singlestate(self,init=None):
     """Return a state built over self._singlemodel (presumably the model's
     single-model variant — confirm against the class definition)."""
     return state(self._singlemodel,init)