Beispiel #1
0
    def leaf_iter(self, node=None, node_list=None):
        """ yields the descendant leaves which cover a node (or nodes)

        Exactly one of ``node`` and ``node_list`` must be given.

        Args:
            node (int): node in self
            node_list (list): nodes in self

        Yields:
            leaf (int): node in self which is a descendant leaf of node (or
                        of some node in node_list)
        """
        assert (node is None) != (node_list is None), 'node xor node_list'

        if node_list is None:
            # bug fix: only validate `node` when it was actually passed --
            # previously this assert also ran (with node=None) when callers
            # supplied node_list, so every node_list call failed spuriously.
            assert node in self.nodes, 'node not found'
            node_list = [node]

        # SortedSet so pop() always removes the largest remaining node
        node_list = SortedSet(node_list)
        while node_list:
            node = node_list.pop()
            neighbor_list = list(self.neighbors(node))
            if not neighbor_list:
                # no children: node is a leaf
                yield node
            else:
                # interior node: descend into its children instead
                node_list |= set(neighbor_list)
Beispiel #2
0
    def build_mask(self, node_list):
        """ builds a mask labeling voxels with the node that covers them

        Nodes are visited from largest to smallest; once a node is stamped,
        all of its descendants are discarded so they can never overwrite it.

        Args:
            node_list (list): nodes in self

        Returns:
            mask (Mask): integer array of node labels, aligned with self.ref
        """
        assert self.ref.shape is not None, 'ref must have shape'

        # start from an all-zero integer mask with the reference geometry
        mask = Mask(np.zeros(self.ref.shape), ref=self.ref).astype(int)

        remaining = SortedSet(node_list)
        while remaining:
            # take the biggest node still pending ...
            node = remaining.pop()

            # ... and drop everything it already covers
            remaining -= set(nx.descendants(self, node))

            # stamp the node's label onto each voxel it occupies
            for ijk in self.get_pc(node=node):
                mask[ijk] = node

        return mask
Beispiel #3
0
class Actor():
	"""Search agent that plans a route to a target through a grid world.

	Keeps a frontier of candidate states ordered by estimated total path
	cost (step cost + Manhattan heuristic) and a set of explored position
	tuples.  ``think`` runs the search; ``act`` replays the recovered
	action sequence one step at a time.
	"""

	# class-level declarations; instances assign real values in __init__
	state = None        # current State (position + target)
	environment = None  # grid world providing width/height/obstacles
	heuristic = None    # callable estimating remaining cost of a state

	frontier = None     # SortedSet of candidate states (cheapest last)
	explored = None     # set of already-visited position tuples

	c = 0               # states expanded during the last search
	f = 0               # states generated during the last search

	actions = []        # replaced per-instance in reset()

	def __init__(self, position, environment):
		self.state = State(position, position)
		self.environment = environment
		self.heuristic = manhattan_distance

		# SortedSet pops its largest element, so ordering by negated path
		# cost makes pop() yield the *cheapest* frontier state.
		self.frontier = SortedSet(key=self.frontier_order)
		self.explored = set()

		self.reset()

	def update(self, action):
		"""Adopt the next planned state as the current one."""
		self.state = action
		self.state.parent = None

	def move(self, position):
		"""Retarget the actor without replanning."""
		self.state.target = position

	def can_act(self):
		"""Return a truthy value iff a planned action is still queued."""
		return len(self.actions)

	def reset(self):
		"""Clear the current plan and all search bookkeeping."""
		self.actions = []
		self.frontier.clear()
		self.explored.clear()
		# NOTE(review): this stores a State object while think() stores
		# position tuples -- the two representations never compare equal,
		# so this entry can never match an expand_frontier lookup; confirm
		# whether self.state.as_tuple() was intended here.
		self.explored.add(self.state)
		self.c = 0
		self.f = 0

	def frontier_order(self, state):
		# negate so the SortedSet's largest element is the cheapest state
		return -state.path_cost

	def act(self):
		"""Return the next action toward the target, planning if needed.

		Returns None when already at the goal or when no route exists.
		"""
		# No action is needed if we are at the target
		if self.state.goal():
			return None

		result = True
		if not self.can_act():
			# No queued plan: restart the search from the current state
			self.reset()
			result = self.think(self.state)

		# If a route is already calculated, return next action
		if result:
			return self.actions.pop()
		else:
			print("No solution found")
			return None

	def think(self, state):
		"""Run best-first search from ``state``.

		Returns True (and fills self.actions) when the goal is reached,
		False when the frontier empties without finding it.
		"""
		# Define the initial frontier
		self.expand_frontier(state)

		# Robustness fix: the old `frontier_size = 1` priming forced one
		# iteration even when the initial expansion produced nothing,
		# which popped from an empty frontier and raised.
		while len(self.frontier):
			self.c += 1

			# Get lowest valued frontier state
			state = self.frontier.pop()

			# Check for goal
			if state.goal():
				self.recreate_actions(state)
				return True

			# Add current state to explored
			self.explored.add(state.as_tuple())

			# Expand frontier
			self.expand_frontier(state)

		return False

	def expand_frontier(self, state):
		"""Queue every unexplored, unblocked neighbour of ``state``."""
		for row in (-1, 0, 1):
			for col in (-1, 0, 1):
				# Get the neighbouring position
				position = Position(state.row() + row, state.column() + col)

				# Skip positions outside the grid.  (Bug fix: this used to
				# `return`, silently dropping every neighbour still pending
				# whenever one candidate fell off the board -- border cells
				# therefore never expanded their remaining neighbours.)
				if position.row() < 0 or position.column() < 0 or \
				   position.row() >= self.environment.height or \
				   position.column() >= self.environment.width:
					continue

				p = position.as_tuple()

				# If not an obstacle and not explored, then add to frontier
				if p not in self.environment.obstacles and p not in self.explored:
					self.f += 1

					# Create the new state, one step deeper than its parent
					new_state = State(position, state.target, state.cost + 1, state)

					# path_cost = g + h: cost so far plus heuristic estimate
					new_state.path_cost = new_state.cost + self.heuristic(new_state)

					# Add to frontier
					self.frontier.add(new_state)

	def recreate_actions(self, state):
		"""Rebuild the action list by walking parent links back to the start."""
		while state is not None:
			self.actions.append(state)
			state = state.parent