Example #1
def IDAsearch(state, goalBoard, N):

	Q = PriorityQueue()
	initial_cost = Heuristic(state[1], N)
	# put() takes a single item; store (cost, state) so the queue orders by
	# cost (the original passed the cost as put()'s block argument)
	Q.put((initial_cost, state))
	current_threshold = initial_cost
	next_threshold    = initial_cost
	visited = []
	while not Q.empty():

		current_threshold = next_threshold
		next_threshold    = maxint  # assumes: from sys import maxint
		cost, node = Q.get()
		print (cost, node)
		skip = False
		if check(node[1], goalBoard, N) == 0:
			# goal found
			return node[2]
		elif cost <= current_threshold:
			skip = node[1] in visited
		if not skip:
			children = getNextState(node, N)
			for child in children:
				h = Heuristic(child[1], N) + 1
				if h < next_threshold:
					next_threshold = h
				if child[1] not in visited:
					Q.put((h, child))

		visited.append(node[1])
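
Several examples on this page pass a priority as a second argument to put(), but PriorityQueue.put(item, block=True, timeout=None) treats that argument as the blocking flag. The fix used above, shown here in isolation, is to encode the priority as the first element of the queued tuple:

from Queue import PriorityQueue

pq = PriorityQueue()
pq.put((2, "expand second"))
pq.put((1, "expand first"))
print pq.get()   # (1, 'expand first') -- the lowest first element wins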
Example #2
class Astar:
    def __init__(self, start, goal, n):
        self.path = []
        self.visited = []
        self.priority_queue = PriorityQueue()
        self.start = start
        self.goal = goal
        self.n = n

    def solve(self):
        start_state = State(value=self.start, start=self.start, goal=self.goal, n=self.n)

        self.priority_queue.put((0, start_state))
        while(not self.path and self.priority_queue.qsize()):
            closest_child = self.priority_queue.get()[1]
            closest_child.create_children()
            self.visited.append(closest_child.value)
            for child in closest_child.children:
                if child.value not in self.visited:
                    # If child's value is equal to goal, solution is found
                    if child.value == self.goal:
                        self.path = child.path
                        break
                    self.priority_queue.put((child.cost, child))
        if not self.path:
            print "Goal is unreachable. Terminating program."
        return self.path
Example #3
def ucs(source, target, graph):
    """ Uniform-cost graph search """
    queue = PriorityQueue() # fringe
    queue.put((0, source))

    parent = {source:None}
    visited = {}

    while not queue.empty():
        (d, v_in) = queue.get()

        if v_in not in visited or d < visited[v_in]:

            if v_in == target:
                return (d, build_path(parent, target))

            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                if v_out not in visited:
                    queue.put((cost, v_out))
                    parent[v_out] = v_in

            visited[v_in] = d  # record the settled distance (not the last neighbor's cost)

    return None
Example #4
	def dijkstra(self, adjacencies, start_point, end_point):
		seen_so_far = defaultdict(float)
		for k in adjacencies:
			seen_so_far[k] = float('inf')

		q = PriorityQueue()
		start = Vertex(start_point, [], 0.0)
		q.put(start)
		seen_so_far[start_point] = 0

		while not q.empty():
			v = q.get()
			if v.coords == end_point:
				new_path = v.path
				new_path.append(end_point)

				# write to file
				with open("output", "w") as out_file:
					for point in new_path:
						out_file.write('{} {}\n'.format(point[0], point[1]))
				self.best_path = [(new_path[i], new_path[i+1]) for i in range(len(new_path)-1)]
				return
			for point in adjacencies[v.coords]:
				new_cost = v.cost + distance(v.coords, point)
				if seen_so_far[point] < new_cost:
					continue
				seen_so_far[point] = new_cost
				new_path = deepcopy(v.path)
				new_path.append(v.coords)
				q.put(Vertex(point, new_path, new_cost))
Example #5
    def __call__(self, graph, start_node, target_node):
        frontier = PriorityQueue()

        current_node = start_node
        distance_dict = defaultdict(lambda: infinity)
        distance_dict[current_node] = 0
        ancestors_dict = {}
        visited_set = set()

        while True:
            neighbors = graph.get_neighbors(current_node)

            current_distance = distance_dict[current_node]

            for neighbor in neighbors:
                if neighbor not in visited_set and (current_distance + 1) < distance_dict[neighbor]:
                    distance_dict[neighbor] = current_distance + 1
                    ancestors_dict[neighbor] = current_node
                    frontier.put((self.cost_function(distance_dict[neighbor], neighbor, target_node), neighbor))
                    self.nodes_expanded += 1

            visited_set.add(current_node)
            self.nodes_visited += 1

            if current_node == target_node:
                return list(reversed(find_ancestors(ancestors_dict, current_node, start_node)))
            else:
                try:
                    current_node = frontier.get_nowait()[1]
                except Empty:
                    break
Example #6
class EventQueue(object):
    def __init__(self):
        """Event queue for executing events at 
        specific timepoints.

	In current form it is NOT thread safe."""
        self.q = PriorityQueue()
    
    def schedule(self, f, ts):
        """Schedule f to be execute at time ts"""
        self.q.put(EqItem(ts, f))
        
    def schedule_recurring(self, f, interval):
        """Schedule f to be run every interval seconds.

	It will be run for the first time interval seconds
        from now"""
        def recuring_f():
            f()
            self.schedule(recuring_f, time.time() + interval)
        self.schedule(recuring_f, time.time() + interval)
        
        
    def run(self):
        """Execute events in the queue as timely as possible."""
        while True:
            event = self.q.get()
            now = time.time()
            if now < event.ts:
                time.sleep(event.ts - now)
            event.f()
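
A brief usage sketch for EventQueue (the callbacks are hypothetical, and EqItem is assumed to order by its ts field, since PriorityQueue compares the queued items):

import time

eq = EventQueue()
eq.schedule(lambda: do_backup(), time.time() + 5)   # do_backup is hypothetical
eq.schedule_recurring(poll_sensors, 1.0)            # poll_sensors is hypothetical
eq.run()   # blocks forever, firing each event as its timestamp comes due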
Example #7
def prepPriorityQueue(game):
    global sudokuQueue 
    sudokuQueue = PriorityQueue()
    for i in range(N):
        for j in range(N):
            if game[i][j].domain is not None and len(game[i][j].domain) != 0:
                sudokuQueue.put((len(game[i][j].domain), game[i][j]))
Example #8
def solve(N, L, start, goal):
    """search for minimum bit switches to make start == goal
    NB: treat goal and start as sets of currents -- that is, order doesn't
    matter in comparing start to goal and all currents in start/goal
    are mutually unique"""
    # BFS w/ priority queue; queue orders by min bits flipped;
    # we don't use an 'explored' set because we may need to revisit the same
    # configuration of currents several times as index i increments;
    # instead, to constrain the search space, we use the is_consistent filter
    # before placing a state in the queue in order to check that the
    # configuration of currents in that state is consistent with goal at least
    # up to bit i-1
    start_state = (0, start, 0)  # ( n_bits_flipped, state_of_currents, index )
    queue = PriorityQueue()
    queue.put(start_state)

    while not queue.empty():
        nchanges, state, index = queue.get()

        if is_goal(state, goal):
            return nchanges

        for nch,s,i in successors(nchanges, state, index):
            if is_consistent(s, goal, i):
                # when i = len(goal)+1, s will be added to queue
                # only if s == goal
                queue.put((nch, s, i))

    return 'NOT POSSIBLE'
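
The docstring above pins down what is_goal must mean: order-insensitive equality over mutually unique currents. A minimal sketch consistent with that contract (the project's actual helper may differ):

def is_goal(state, goal):
    # order doesn't matter and all currents are mutually unique,
    # so set equality is exactly the comparison the docstring describes
    return set(state) == set(goal)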
Example #9
class PoolStreamData(StreamData):
    def __init__(self, feat_path, feature_maker, label_path=None, fold_in_cv=None):
        self.pool = PriorityQueue(100000)
        super(PoolStreamData, self).__init__(feat_path, feature_maker, label_path, fold_in_cv)
        self.fill_pool()

    def fill_pool(self):
        while not self.pool.full():
            try:
                ins = super(PoolStreamData,self).next()
                self.pool.put((random.random(),ins))    # insert ins with random priority --> kind of random shuffle
            except StopIteration:
                break

    def rewind(self):
        self.pool = PriorityQueue(100000)
        super(PoolStreamData,self).rewind()
        self.fill_pool()

    def next(self):
        try:
            (_,ins) = self.pool.get(False)
        except Empty:
            raise StopIteration
        self.fill_pool()
        return ins
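
The random-priority trick in fill_pool generalizes to an approximate streaming shuffle with bounded memory; a standalone sketch of the same idea:

import random
from Queue import PriorityQueue

def pool_shuffle(stream, pool_size=1000):
    # keep a bounded pool keyed by random priorities; once the pool is full,
    # each new arrival pushes out one element in roughly random order
    pool = PriorityQueue(pool_size)
    for item in stream:
        if pool.full():
            yield pool.get()[1]
        pool.put((random.random(), item))
    while not pool.empty():
        yield pool.get()[1]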
Example #10
def solve():
    with open("matrix.txt", "r") as f:
        matrix = [[int(x) for x in line.split(",")] for line in f.readlines()]
    pq = PriorityQueue()
    min_path = (0,0)
    start_value = matrix[0][0]

    pq.put((start_value, min_path))
    side_length = len(matrix) - 1

    scores = {min_path: start_value}
    # seed min_path in the (total, point) form that pq.get() returns below
    min_path = (start_value, min_path)
    while min_path[1] != (side_length, side_length):
        min_path = pq.get()
        x_val = min_path[1][0]
        y_val = min_path[1][1]
        total = min_path[0]
        if x_val == side_length and y_val == side_length:
            return total
        next_points = []
        if x_val < side_length : next_points.append((x_val+1,y_val))
        if y_val < side_length : next_points.append((x_val,y_val+1))
        if y_val > 0 : next_points.append((x_val, y_val-1))
        if x_val > 0 : next_points.append((x_val-1, y_val))
        for next_point in next_points:
            next_value = matrix[next_point[0]][next_point[1]]
            next_total = total + next_value
            potential_best_path = next_total
            previous_best_path = scores.get(next_point, 0)
            if (not previous_best_path) or potential_best_path < previous_best_path:
                scores[next_point] = next_total
                pq.put((potential_best_path,next_point))
Example #11
def schedule(tasks, num_processors):
	_cpy_list = [t for t in tasks]
	total_slice = 0
	for i in tasks:
		total_slice += i
	_cpy = total_slice
	pq = PriorityQueue()
	temp_out_list = [0]*len(tasks)
	for i in range(len(tasks)):
		pq.put((1.0 / tasks[i], i))
	iterations = 0
	misses = 0
	while not pq.empty():
		temp_out_c = 0
		for i in range(num_processors):
			if pq.empty():
				break
			else:
				temp_out_list[temp_out_c] = pq.get()[1]
				temp_out_c += 1
		for i in range(temp_out_c):
			tasks[temp_out_list[i]] -= 1
			if tasks[temp_out_list[i]] != 0:
				pq.put(((_cpy_list[temp_out_list[i]] - tasks[temp_out_list[i]]) * _cpy / _cpy_list[temp_out_list[i]], temp_out_list[i]))
		iterations += 1
		for i in range(len(tasks)):
			# lag: proportional-share ideal progress minus units actually completed
			lag = 1.0 * _cpy_list[i] * iterations / _cpy - (_cpy_list[i] - tasks[i])
			if lag > 0:
				misses += math.floor(lag)
	return misses
Example #12
class MinimumWeightMatchingWithEdges:
    """Find a minimum weight matching using a greedy method.
    
    Attributes
    ----------
    graph : input undirected graph
    mate : dict with nodes (values are edges or None)
    cardinality : number
    """
    # Will be needed for the Chinese postman problem.

    def __init__(self, graph):
        """The algorithm initialization."""
        if graph.is_directed():
            raise ValueError("the graph is directed")
        self.graph = graph
        self.mate = dict((node, None) for node in self.graph.iternodes())
        self.cardinality = 0
        self._pq = PriorityQueue()

    def run(self):
        """Executable pseudocode."""
        for edge in self.graph.iteredges():
            self._pq.put((edge.weight, edge))
        while not self._pq.empty():
            _, edge = self._pq.get()
            if (self.mate[edge.source] is None and
                    self.mate[edge.target] is None):
                self.mate[edge.source] = edge
                self.mate[edge.target] = ~edge
                self.cardinality += 1
Example #13
def conflicts(board):

    conlist = PriorityQueue()
    clist = []
    global conflictval, n, m, k
    # consistency check along row, column and grid
    for row in range(int(n)):
        for col in range(int(n)):
            recurval = 0
            for c in range(int(n)):
                if board[row][c] == board[row][col]:
                    recurval += 1
            for r in range(int(n)):
                if board[r][col] == board[row][col]:
                    recurval += 1
            for r1 in range(int(m)):
                for c1 in range(int(k)):
                    if board[r1 + row - (row % int(m))][c1 + col - (col % int(k))] == board[row][col]:
                        recurval += 1
            # each cell matches itself once in each of the three scans,
            # so recurval > 3 means a real conflict exists
            if recurval > 3:
                clist.append((row, col))
                conlist.put((recurval, (row, col)))

    return clist
Example #14
class AStar_Solver:
	def __init__(self, start, goal):
		''' Store the start and goal of program, and set up vars '''
		self.path = []
		self.visitedQueue = []
		self.priorityQueue = PriorityQueue()
		self.start = start
		self.goal = goal

	def Solve(self):
		''' Create start state, then organize children 
		based on value and check children of highest value child '''
		startState = State_String(self.start, 
									0, 
									self.start, 
									self.goal)
		count = 0
		self.priorityQueue.put((0, count, startState))
		while(not self.path and self.priorityQueue.qsize()):
			closestChild = self.priorityQueue.get()[2]
			closestChild.CreateChildren()
			self.visitedQueue.append(closestChild.value)
			for child in closestChild.children:
				if child.value not in self.visitedQueue:
					count += 1
					if not child.dist:
						self.path = child.path
						break
					self.priorityQueue.put((child.dist, count, child))
		if not self.path:
			print ("Goal of " + self.goal + " is not possible!")
		return self.path
Example #15
def Astar(start, goal, cost_matrix, strategy='minimize'):
    mindist = min([min(dists.values()) for c, dists in cost_matrix.items()])
    maxdist = max([max(dists.values()) for c, dists in cost_matrix.items()])
    def cost_fn(path):
        if strategy == 'minimize':
            return path_cost(path, cost_matrix) + (len(cost_matrix.keys()) - len(path)) * 2 * mindist
        else:
            return -(path_cost(path, cost_matrix) + (len(cost_matrix.keys()) - len(path)) * 2 * maxdist)
    abort_cost = 1 << 32
    q = PriorityQueue()
    q.put((cost_fn([start]), [start]))
    best_path = []
    while not q.empty():
        total_cost, partial_path = q.get()
        if total_cost > abort_cost:
            break
        if partial_path[-1] == goal and len(partial_path) == len(cost_matrix.keys()):
            if total_cost < abort_cost:
                abort_cost = total_cost
                best_path = partial_path
        else:
            for c in cost_matrix[partial_path[-1]]:
                if c not in partial_path:
                    new_path = partial_path + [c]
                    total_cost = cost_fn(new_path)
                    if total_cost < abort_cost:
                        q.put((total_cost, new_path))
    return best_path
Example #16
class AStarSolver(object):
    """docstring for AStarSolver"""
    def __init__(self, start, goal):
        self.path = []
        self.visited_queue = []
        self.priority_queue = PriorityQueue()
        self.start = start
        self.goal = goal

    def solve(self):
        start_state = StateString(
                        self.start,
                        0,
                        self.start,
                        self.goal
                        )
        self.priority_queue.put((0,start_state))
        while not self.path and self.priority_queue.qsize():
            closest_child = self.priority_queue.get()[1]
            closest_child.create_children()
            self.visited_queue.append(closest_child.value)
            for child in closest_child.children:
                if child.value not in self.visited_queue:
                    if not child.dist:
                        self.path = child.path
                        break
                    self.priority_queue.put((child.dist, child))
        if not self.path:
            print "Could not reach the goal: " + self.goal
        return self.path
Example #17
    def __buildFromDB(self):
        try:
            dbConnection = self.pool.connection()
            cursor = dbConnection.cursor()
            cursor.execute("""select request_spec, id from priority_queue""")
            moreDBElement = True
            while moreDBElement:
                item = cursor.fetchone()
                if item:
                    request_spec = json.loads(item[0])
                    idRecord = item[1]
                    instance_properties = request_spec.get("instance_properties")
                    userId = instance_properties.get("user_id")
                    projectId = instance_properties.get("project_id")
                    timestamp = instance_properties.get("created_at")

                    requestPQ = {
                        "priority": 0,
                        "userId": userId,
                        "projectId": projectId,
                        "timestamp": timestamp,
                        "retryCount": 0,
                        "idRecord": idRecord,
                    }
                    PriorityQueue.put(self, (0, requestPQ))
                else:
                    moreDBElement = False

        except MySQLdb.Error as e:
            if dbConnection:
                dbConnection.rollback()
                LOG.info("Error %d: %s" % (e.args[0], e.args[1]))
Example #18
    def find_corners_for_room(self, room_center):
        """
        returns the 4 corners of given room center
        :param room_center:
        :return: [right_up, left_up, left_down, right_down]
        """
        [x, y] = room_center
        prio_q = PriorityQueue()
        [right_up, left_up, left_down, right_down] = [None, None, None, None]
        # find the 4 closest corners
        for corner in self.found_corners:
            dist = Calc.get_dist_from_point_to_point(room_center, corner)
            prio_q.put([dist, corner])
        for i in xrange(4):
            [x_c, y_c] = prio_q.get()[1]
            if x_c < x and y_c > y:
                left_up = [x_c, y_c]
            elif x_c < x and y_c < y:
                left_down = [x_c, y_c]
            elif x_c > x and y_c > y:
                right_up = [x_c, y_c]
            elif x_c > x and y_c < y:
                right_down = [x_c, y_c]

        return [right_up, left_up, left_down, right_down]
Example #19
def run(graph,start,goal):

    frontier = PriorityQueue()
    frontier.put((0,start))
    came_from = {}
    came_from[start] = None
    cost_so_far = {}
    cost_so_far[start] = 0

    while not frontier.empty():
        current = frontier.get()[1]

        if current == goal:
            break

        for nextNode in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current,nextNode)
            if nextNode not in cost_so_far or new_cost < cost_so_far[nextNode]:
                frontier.put((new_cost,nextNode))
                cost_so_far[nextNode] = new_cost
                came_from[nextNode] = current

    print len(came_from)

    current = goal
    path = [current]
    while current != start:
        current = came_from[current]
        path.append(current)
    print path
Example #20
class LazyQueue:

    def __init__(self):
        self._queue = PriorityQueue()

    def enq(self, item):
        self._queue.put(item)

    def deq(self):
        if self._queue.empty():
            return None
        else:
            return self._queue.get()

    def __len__(self):
        return self._queue.qsize()

    def mget(self, n):
        if n > self._queue.qsize():
            n = self._queue.qsize()
        lst = []
        for _ in xrange(n):
            lst.append(self._queue.get())

        return lst

    def extend(self, iterable):
        for ele in iterable:
            self.enq(ele)

    def append(self, item):
        self.enq(item)
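
Usage of the LazyQueue wrapper above (queued items must be mutually comparable, as with any PriorityQueue):

lq = LazyQueue()
lq.extend([(3, 'c'), (1, 'a'), (2, 'b')])
print lq.mget(2)   # [(1, 'a'), (2, 'b')] -- the two smallest items
print lq.deq()     # (3, 'c')
print lq.deq()     # None, since the queue is now empty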
Example #21
def run(boardS, populationS, maxKills, v):
# finds one optimal placement for queens
	generation = PriorityQueue()
	# initial population
	for g in range(populationS):
		n = getRandomBoard(boardS)
		generation.put((fitness(n), n))

	best = generation.get()
	count = 1
	bestL = best
	vari = 0
	while best[0] > maxKills:
		if v: print str(best)
		if best == bestL:
			vari += 1
			if vari > 5:
				change = mutate(best[1], 1)
				best = (fitness(change), change)
		else:
			vari = 1
			bestL = best
		generation.put(best)
		generation = newGeneration(generation)
		best = generation.get()
		count += 1
	if v:
		print "Solution: " + str(best[1]) + " in " + str(count) + " generations"
		print
	return count
Example #22
	def run(self):
		"""
		basically calculate the shortest path with A* search algorithm
		"""

		origin = Node(self.startPose)
		
		#PriorityQueue of nodes
		pq = PriorityQueue()
		currentNode = origin
		
		# Compute the AStar search algorithm
		while not currentNode.pos.equals(self.goalPose) and not rospy.is_shutdown():
			children = self.expandNode(currentNode)
			
			for node in children:
				pq.put((node.cost(), node)) #it is ranked by the cost
			
			if pq.empty():  # no more nodes to be expanded
				print "didn't find a path"
				currentNode = origin
				break
				
			# pop the "closest" node to the goal
			currentNode = pq.get()[1]
		
		self.calcPathWayPoints(currentNode) #lastNode
Example #23
def find_path(startkey, goalkey, world):

    frontier = PriorityQueue()
    # put() takes a single item; store (priority, node) tuples so the queue
    # actually orders by cost (the original passed the priority as put()'s
    # block argument)
    frontier.put((0, {"key": startkey, "cost": 0}))
    came_from = {startkey: None}
    cost_so_far = {startkey: 0}
    step = 0

    while not frontier.empty() and step < 100000:
        step += 1  # just safety
        current = frontier.get()[1]
        if current["key"] == goalkey:
            break

        for neighbor in get_neighbors(current["key"], world):
            new_cost = cost_so_far[current["key"]] + neighbor["cost"]
            if new_cost < cost_so_far.get(neighbor["key"], 10000000):
                cost_so_far[neighbor["key"]] = new_cost
                priority = new_cost + heuristic(neighbor["key"], goalkey)
                frontier.put((priority, neighbor))
                came_from[neighbor["key"]] = current["key"]

    if current["key"] != goalkey:
        return None

    # walk back from the goal to the start
    key = goalkey
    path = [goalkey]
    while came_from[key] is not None:
        key = came_from[key]
        path.insert(0, key)
    return path
Example #24
def run(graph,start,goal):
    frontier = PriorityQueue()
    frontier.put((0,start))
    came_from = {}
    came_from[start] = None

    while not frontier.empty():
        current = frontier.get()[1]
        if current == goal:
            break
        for nxt in graph.neighbors(current):  # nxt avoids shadowing builtin next
            # orders the frontier purely by gcost to the goal (greedy best-first)
            new_cost = graph.gcost(nxt, goal)
            if nxt not in came_from:
                frontier.put((new_cost, nxt))
                came_from[nxt] = current

    print len(came_from)

    current = goal
    path = [current]
    while current != start:
        current = came_from[current]
        path.append(current)
    print path
Example #25
File: event.py Project: nyc/netsim
class EventManager(object):
    """ Keeps track of and triggers events in our network simulation

    Instance Properties:
        Q - A priority queue of events ordered by time
    """

    def __init__(self):
        self._Q = PriorityQueue()
        self.t = 0

    def add_event(self, t, event, args):
        """ Add an event onto the event queue

        :param t: The time of the event
        :param event: The event to add
        :param args: A list of args for the event
        """
        self._Q.put((t, event, args))

    def pop_event(self):
        """ Getting the first event off of the event queue.
        
        :return: Returns a tuple consisting of the time, event, and the args
            for the event
        """
        return self._Q.get()

    def has_events(self):
        """ Whether or not the manager has any more events on its queue

        :return: True or False based on whether it's empty
        """
        return not self._Q.empty()
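
A typical driving loop for the EventManager above might look like this (the event names and dispatch function are illustrative, not from the netsim project):

manager = EventManager()
manager.add_event(0.10, "timeout", ["flow_3"])
manager.add_event(0.05, "packet_arrival", ["link_1"])

while manager.has_events():
    t, event, args = manager.pop_event()   # popped in increasing time order
    manager.t = t
    dispatch(event, args)                  # dispatch() is hypothetical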
Example #26
class AStar_Solver:
    def __init__(self,start,goal):
        self.path = []
        self.visitedQueue = []
        self.priorityQueue = PriorityQueue()
        self.start = start
        self.goal = goal

    def Solve(self):
        startState = State_String(self.start,0,self.start,self.goal)
        count = 0
        self.priorityQueue.put((0,count,startState))
        while(not self.path and self.priorityQueue.qsize()):
            closestChild = self.priorityQueue.get()[2]
            closestChild.CreateChildren()
            self.visitedQueue.append(closestChild.value)
            for child in closestChild.children:
                if child.value not in self.visitedQueue:
                    count +=1
                    if child.value == self.goal:
                        self.path = child.path
                        break
                    self.priorityQueue.put((child.dist,count,child))

        if not self.path:
            print "Goal of ", self.goal, " is not possible for this starting genome!"
        return self.path
Example #27
 def travel(self):
     visitedNodes = 0
     optimalPath = []
     priorityQueue = PriorityQueue()
     optimalPathLen = -1
     priorityQueue.put(self.addNode(0, []))
     startTime = time.time()
     while not priorityQueue.empty():
         currentPath = priorityQueue.get()[1]
         if optimalPath:
             pathLen = self.graph.getPathLength(optimalPath)
             upperBound = self.graph.upperBound(self.graph.getPathLength(currentPath), currentPath)
             optimalPathLen = self.graph.getPathLength(optimalPath)
         # If we have no path yet, or it is worse than the upper bound (UB)
         if not optimalPath or upperBound < pathLen:
             visitedNodes += 1
             # If all cities have been visited
             if len(currentPath) >= self.graph.size:
                 # If we can return to the starting city from the last point of the path
                 if self.graph.distance(currentPath[len(currentPath) - 1], 0) > 0:
                     currentPath = self.addNode(0, currentPath)[1]
                     # If we have no path yet, or the modified one is better than the best so far
                     if not optimalPath or optimalPathLen > self.graph.getPathLength(currentPath):
                         optimalPath = currentPath
             else:
                 for i in range(self.graph.size):
                     # If the candidate city has not been visited and a road leads to it
                     if i not in currentPath and self.graph.distance(currentPath[len(currentPath) - 1], i) > 0:
                         newPath = copy.deepcopy(currentPath)
                         bound, newPath = self.addNode(i, newPath)
                         if not optimalPath or bound < optimalPathLen:
                             priorityQueue.put((bound, newPath))
     return [optimalPath, self.graph.getPathLength(optimalPath), visitedNodes, time.time() - startTime]
Example #28
def aStar(initialState, heuristic):
    #stores a list of visited coords, the direction and whether the tile was affected by demolish
    visited = []
    fringe = PriorityQueue()

    #add the start node to the fringe
    fringe.put((-(initialState.score - heuristic(initialState)), initialState))

    while True:

        if fringe.empty():
            print "No Solution"
            return None

        nextState = fringe.get()[1]

        # check if the tile has been affected by demolish; default to "N" so
        # tileDemolished is always bound before it is used below
        tileDemolished = "N"
        if len(nextState.actionList) > 0 and nextState.actionList[-1] is nextState.act_demolish:
            if (nextState.posX, nextState.posY, "s") in nextState.demolishedTiles:
                tileDemolished = "S"
        elif (nextState.posX, nextState.posY) in nextState.demolishedTiles:
            tileDemolished = "D"

        if nextState.isGoalState():
            #we have reached the goal. Return the relevant stats
            return (nextState.actionList, nextState.score, len(visited))

        elif (nextState.posX, nextState.posY, nextState.direction, tileDemolished) not in visited:
            visited.append((nextState.posX, nextState.posY, nextState.direction, tileDemolished))
            for successorState in nextState.getSuccessors():
                fringe.put((-(successorState.score - heuristic(successorState)), successorState))
Example #29
    def getMRV(self):
        
        q = PriorityQueue()
        for blank in self.blanks:
            possible = self.getPossibleValues(blank, True)
            q.put((len(possible), blank))
        blanks = []
        blanks.append(q.get())
        minVal = blanks[0][0]

        while not q.empty():  # get all equally-prioritized blanks
            nxt = q.get()  # nxt avoids shadowing the builtin next()
            if nxt[0] == minVal:
                blanks.append(nxt)
            else:
                break
            
        maxDeg = len(self.getNeighborBlanks(blanks[0][1]))
        maxDegBlank = blanks[0]

        for blank in blanks:
            degree = len(self.getNeighborBlanks(blank[1]))
            if degree > maxDeg:
                maxDegBlank = blank
                maxDeg = degree
        return maxDegBlank[1]
Example #30
    def a_star(self, start, goal):
        """A* search from start to goal."""
        # Init priority queue. put() takes a single item and the queue orders
        # by comparing items, so store (priority, point) tuples.
        q = PriorityQueue()
        q.put((0, start))

        came_from = {start: None}
        # The known cost needed to travel from start to a point.
        visited = {start: 0}

        while not q.empty():
            # pop the point with the lowest priority (cost);
            # such points tend to be closer to the goal.
            p = q.get()[1]

            if p == goal:
                # Reach the goal.
                self.reconstruct_path(came_from, goal)
                return

            for neighbor in self.neighbors(p):
                tentative = visited[p] + self.heuristic(p, neighbor)

                if neighbor not in visited or tentative < visited[neighbor]:
                    came_from[neighbor] = p
                    visited[neighbor] = tentative

                    # Priority is the estimated cost that it took to travel from
                    # neighbor to goal. In this case, it's just the Manhattan
                    # distance assuming that there are no obstacles between them.
                    priority = tentative + self.heuristic(neighbor, goal)
                    q.put((priority, neighbor))
Example #31
    node_count = int(Decimal(argv[1]))
    thread_count = int(Decimal(argv[2]))
    coord_file = argv[3]

    # read input
    readCoordinateFile(coord_file)

    # generate and read nidlist
    generateNidList(node_count)
    parseNidList()

    for m in monomers:
        description = "{m1}".format(m1=m.description)
        priority = 1
        calc = GamessCalculation(priority, description, nodes=4, ppn=8, qm=[m])
        q.put(calc)

    for dimer in itertools.combinations(monomers, 2):
        description = "{m1}-{m2}".format(m1=dimer[0].description,
                                         m2=dimer[1].description)
        td = 0
        td += distanceBetween(dimer[0], dimer[1])
        priority = int(td)
        calc = GamessCalculation(priority,
                                 description,
                                 nodes=4,
                                 ppn=8,
                                 qm=dimer)
        q.put(calc)

    for trimer in itertools.combinations(monomers, 3):
Example #32
 def put(self, item, priority):
     PriorityQueue.put(self, (priority, self.counter, item))
     self.counter += 1
     self.put_counter += 1
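
Example #32 is a fragment of a common pattern: a monotonically increasing counter breaks ties between equal priorities, keeps insertion order stable, and guarantees the payload itself is never compared. A self-contained sketch of the same idea:

from Queue import PriorityQueue

class StablePriorityQueue(PriorityQueue):
    def __init__(self, maxsize=0):
        PriorityQueue.__init__(self, maxsize)
        self.counter = 0
        self.put_counter = 0

    def put(self, item, priority):
        # (priority, counter) decides the order; item is never compared
        PriorityQueue.put(self, (priority, self.counter, item))
        self.counter += 1
        self.put_counter += 1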
Example #33
class ConnectionPool(object):
    """
    Container holding the :class:`~elasticsearch.Connection` instances,
    managing the selection process (via a
    :class:`~elasticsearch.ConnectionSelector`) and dead connections.

    Its only interactions are with the :class:`~elasticsearch.Transport` class
    that drives all the actions within `ConnectionPool`.

    Initially connections are stored on the class as a list and, along with the
    connection options, get passed to the `ConnectionSelector` instance for
    future reference.

    Upon each request the `Transport` will ask for a `Connection` via the
    `get_connection` method. If the connection fails (its `perform_request`
    raises a `ConnectionError`) it will be marked as dead (via `mark_dead`) and
    put on a timeout (if it fails N times in a row the timeout is exponentially
    longer - the formula is `default_timeout * 2 ** (fail_count - 1)`). When
    the timeout is over the connection will be resurrected and returned to the
    live pool. A connection that has been previously marked as dead and
    succeeds will be marked as live (its fail count will be deleted).
    """
    def __init__(self,
                 connections,
                 dead_timeout=60,
                 timeout_cutoff=5,
                 selector_class=RoundRobinSelector,
                 randomize_hosts=True,
                 **kwargs):
        """
        :arg connections: list of tuples containing the
            :class:`~elasticsearch.Connection` instance and its options
        :arg dead_timeout: number of seconds a connection should be retired for
            after a failure, increases on consecutive failures
        :arg timeout_cutoff: number of consecutive failures after which the
            timeout doesn't increase
        :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
            subclass to use if more than one connection is live
        :arg randomize_hosts: shuffle the list of connections upon arrival to
            avoid dog piling effect across processes
        """
        if not connections:
            raise ImproperlyConfigured("No defined connections, you need to "
                                       "specify at least one host.")
        self.connection_opts = connections
        self.connections = [c for (c, opts) in connections]
        # remember original connection list for resurrect(force=True)
        self.orig_connections = tuple(self.connections)
        # PriorityQueue for thread safety and ease of timeout management
        self.dead = PriorityQueue(len(self.connections))
        self.dead_count = {}

        if randomize_hosts:
            # randomize the connection list to avoid all clients hitting same node
            # after startup/restart
            random.shuffle(self.connections)

        # default timeout after which to try resurrecting a connection
        self.dead_timeout = dead_timeout
        self.timeout_cutoff = timeout_cutoff

        self.selector = selector_class(dict(connections))

    def mark_dead(self, connection, now=None):
        """
        Mark the connection as dead (failed). Remove it from the live pool and
        put it on a timeout.

        :arg connection: the failed instance
        """
        # allow inject for testing purposes
        now = now if now else time.time()
        try:
            self.connections.remove(connection)
        except ValueError:
            # connection not alive or another thread marked it already, ignore
            return
        else:
            dead_count = self.dead_count.get(connection, 0) + 1
            self.dead_count[connection] = dead_count
            timeout = self.dead_timeout * 2**min(dead_count - 1,
                                                 self.timeout_cutoff)
            self.dead.put((now + timeout, connection))
            logger.warning(
                "Connection %r has failed for %i times in a row, putting on %i second timeout.",
                connection,
                dead_count,
                timeout,
            )

    def mark_live(self, connection):
        """
        Mark connection as healthy after a resurrection. Resets the fail
        counter for the connection.

        :arg connection: the connection to redeem
        """
        try:
            del self.dead_count[connection]
        except KeyError:
            # race condition, safe to ignore
            pass

    def resurrect(self, force=False):
        """
        Attempt to resurrect a connection from the dead pool. It will try to
        locate one (not all) eligible (its timeout is over) connection to
        return to the live pool. Any resurrected connection is also returned.

        :arg force: resurrect a connection even if there is none eligible (used
            when we have no live connections). If force is specified resurrect
            always returns a connection.

        """
        # no dead connections
        if self.dead.empty():
            # we are forced to return a connection, take one from the original
            # list. This is to avoid a race condition where get_connection can
            # see no live connections but when it calls resurrect self.dead is
        # also empty. We assume that another thread has resurrected all
            # available connections so we can safely return one at random.
            if force:
                return random.choice(self.orig_connections)
            return

        try:
            # retrieve a connection to check
            timeout, connection = self.dead.get(block=False)
        except Empty:
            # other thread has been faster and the queue is now empty. If we
            # are forced, return a connection at random again.
            if force:
                return random.choice(self.orig_connections)
            return

        if not force and timeout > time.time():
            # return it back if not eligible and not forced
            self.dead.put((timeout, connection))
            return

        # either we were forced or the connection is eligible to be retried
        self.connections.append(connection)
        logger.info("Resurrecting connection %r (force=%s).", connection,
                    force)
        return connection

    def get_connection(self):
        """
        Return a connection from the pool using the `ConnectionSelector`
        instance.

        It tries to resurrect eligible connections, forces a resurrection when
        no connections are available and passes the list of live connections to
        the selector instance to choose from.

        Returns a connection instance and its current fail count.
        """
        self.resurrect()
        connections = self.connections[:]

        # no live nodes, resurrect one by force and return it
        if not connections:
            return self.resurrect(True)

        # only call selector if we have a selection
        if len(connections) > 1:
            return self.selector.select(connections)

        # only one connection, no need for a selector
        return connections[0]

    def close(self):
        """
        Explicitly closes connections
        """
        for conn in self.orig_connections:
            conn.close()
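
The dead-timeout growth described in the class docstring is easy to check by hand; with the defaults dead_timeout=60 and timeout_cutoff=5, the retirement period doubles per consecutive failure and then plateaus:

dead_timeout, timeout_cutoff = 60, 5
for fail_count in range(1, 8):
    timeout = dead_timeout * 2 ** min(fail_count - 1, timeout_cutoff)
    print("fail #%d -> retired for %d seconds" % (fail_count, timeout))
# 60, 120, 240, 480, 960, 1920, then capped at 1920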
Example #34
    colorArray = map(lambda x: x.strip(), f.readline().strip().split(","))
    # Initial State Assignment
    initAssignment = f.readline().strip().split(",")
    for item in initAssignment:
        nodeName = item.split(":")[0].strip()
        color = item.split(":")[1].split("-")[0].strip()
        player = item.split(":")[1].split("-")[1].strip()
        # colored nodeNames list
        colored.append(nodeName)
        if (player == "1"):
            myNode = Node(nodeName, color, 0, inf, -inf, inf, player)
        if (player == "2"):
            myNode = Node(nodeName, color, 0, -inf, -inf, inf, player)

        initNodes.put(myNode)

    root = colored[-1]
    colorRoot = " "
    colored.remove(root)
    initNodesList = []
    while not initNodes.empty():
        a = initNodes.get()
        initNodesList.append(a)

    for item in initNodesList[:]:  # iterate over a copy; the list is mutated
        if root == item.name:
            colorRoot = item.color
            initNodesList.remove(item)

Example #35
class AssetManager(object):
    """Base class for an Asset Manager.

    Args:
        machine: The main ``MachineController`` object.
        config_section: String of the name of the section in the config file
            for the asset settings that this Asset Manager will machine. e.g.
            'image'.
        path_string: The setting in the paths section of the config file that
            specifies what path these asset files are in. e.g. 'images'.
        asset_class: A class object of the base class for the assets that this
            Asset Manager will manage. e.g. Image.
        asset_attribute: The string name that you want to refer to this asset
            collection as. e.g. a value of 'images' means that assets will be
            accessible via ``self.machine.images``.
        file_extensions: A tuple of strings of valid file extensions that files
            for this asset will use. e.g. ``('png', 'jpg', 'jpeg', 'bmp')``

    There will be one Asset Manager for each different type of asset. (e.g. one
    for images, one for movies, one for sounds, etc.)
    """

    def __init__(self, machine, config_section, path_string, asset_class,
                 asset_attribute, file_extensions):

        self.log = logging.getLogger(config_section + ' Asset Manager')
        self.log.info("Initializing...")

        self.machine = machine
        self.max_memory = None
        self.registered_assets = set()
        self.path_string = path_string
        self.config_section = config_section
        self.asset_class = asset_class
        self.file_extensions = file_extensions
        self.loader_queue = PriorityQueue()
        self.loader_thread = None

        self.machine.asset_managers[config_section] = self

        if not hasattr(self.machine, asset_attribute):
            setattr(self.machine, asset_attribute, CaseInsensitiveDict())

        self.asset_list = getattr(self.machine, asset_attribute)

        self.create_loader_thread()

        self.machine.modes.register_load_method(self.load_assets,
                                                self.config_section,
                                                load_key='preload')

        self.machine.modes.register_start_method(self.load_assets,
                                                 self.config_section,
                                                 load_key='mode_start')

        # register & load systemwide assets
        self.machine.events.add_handler('init_phase_4',
                                        self.register_and_load_machine_assets)

        self.defaults = self.setup_defaults(self.machine.config)

    def process_assets_from_disk(self, config, path=None):
        """Looks at a path and finds all the assets in the folder.
        Looks in a subfolder based on the asset's path string.
        Crawls subfolders too. The first subfolder it finds is used for the
        asset's default config section.
        If an asset has a related entry in the config file, it will create
        the asset with that config. Otherwise it uses the default

        Args:
            config: A dictionary which contains a list of asset names with
                settings that will be used for the specific asset. (Note this
                is not needed for all assets, as any asset file found that is
                not in the config dictionary will be set up with the
                assetdefaults settings of the folder it was found in.)
            path: A full system path to the root folder that will be searched
                for assets. This should *not* include the asset-specific path
                string. If omitted, only the machine's root folder will be
                searched.
        """

        if not path:
            path = self.machine.machine_path

        if not config:
            config = dict()

        root_path = os.path.join(path, self.path_string)

        self.log.info("Processing assets from base folder: %s", root_path)

        for path, _, files in os.walk(root_path, followlinks=True):

            valid_files = [f for f in files if f.endswith(self.file_extensions)]

            for file_name in valid_files:
                folder = os.path.basename(path)
                name = os.path.splitext(file_name)[0].lower()
                full_file_path = os.path.join(path, file_name)

                if folder == self.path_string or folder not in self.defaults:
                    default_string = 'default'
                else:
                    default_string = folder

                #print "------"
                #print "path:", path
                #print "full_path", full_file_path
                #print "file:", file_name
                #print "name:", name
                #print "folder:", folder
                #print "default settings name:", default_string
                #print "default settings:", self.defaults[default_string]

                built_up_config = copy.deepcopy(self.defaults[default_string])

                for k, v in config.iteritems():

                    if ('file' in v and v['file'] == file_name) or name == k:
                        if name != k:
                            name = k
                            #print "NEW NAME:", name
                        built_up_config.update(config[k])
                        break

                built_up_config['file'] = full_file_path

                config[name] = built_up_config

                self.log.debug("Registering Asset: %s, File: %s, Default Group:"
                               " %s, Final Config: %s", name, file_name,
                               default_string, built_up_config)

        return config

    def register_and_load_machine_assets(self):
        """Called on MPF boot to register any assets found in the machine-wide
        configuration files. (i.e. any assets not specified in mode config
        files.)

        If an asset is set with the load type of 'preload', this method will
        also load the asset file into memory.
        """

        self.log.debug("Registering machine-wide %s", self.config_section)

        if self.config_section in self.machine.config:
            config = self.machine.config[self.config_section]
        else:
            config = None

        self.machine.config[self.config_section] = self.register_assets(
            config=config)

        self.log.debug("Loading machine-wide 'preload' %s", self.config_section)

        # Load preload systemwide assets
        self.load_assets(self.machine.config[self.config_section],
                         load_key='preload')

    def setup_defaults(self, config):
        """Processed the ``assetdefaults`` section of the machine config
        files.

        """

        default_config_dict = dict()

        if 'assetdefaults' in config and config['assetdefaults']:

            if (self.config_section in config['assetdefaults'] and
                    config['assetdefaults'][self.config_section]):

                this_config = config['assetdefaults'][self.config_section]

                # set the default
                default_config_dict['default'] = this_config.pop('default')

                for default_section_name in this_config:

                    # first get a copy of the default for this section
                    default_config_dict[default_section_name] = (
                        copy.deepcopy(default_config_dict['default']))

                    # then merge in this section's specific settings
                    default_config_dict[default_section_name].update(
                        this_config[default_section_name])

        return default_config_dict

    def create_loader_thread(self):
        """Creates a loader thread which will handle the actual reading from
        disk and loading into memory for assets of this class. Note that one
        loader thread is created for each class of assets used in your game.

        Note that this asset loader runs as a separate *thread*, not a separate
        *process*. It will run on the same core as your main MPF Python
        instance.

        Note that it's possible to call this method multiple times to create
        multiple loader threads, but that will not make things load any faster
        since this process is limited by CPU and disk I/O. In fact, on a
        magnetic disk, multiple threads would likely make it slower.
        """

        self.loader_thread = AssetLoader(name=self.config_section,
                                         queue=self.loader_queue,
                                         machine=self.machine)
        self.loader_thread.daemon = True
        self.loader_thread.start()

    def register_assets(self, config, mode_path=None):
        """Scans a config dictionary and registers any asset entries it finds.

            Args:
                config: A dictionary of asset entries. This dictionary needs to
                    be "localized" to just the section for this particular
                    asset type. e.g. if you're loading "Images" the keys of this
                    dictionary should be image_1, image_2, etc., not "Images".
                mode_path: The full path to the base folder that will be
                    searched for the asset file on disk. This folder should
                    *not* include the asset-specific folder. If omitted, the
                    base machine folder will be searched.

        Note that this method merely registers the assets so they can be
        referenced in MPF. It does not actually load the asset files into
        memory.
        """

        # config here is already localized

        config = self.process_assets_from_disk(config=config, path=mode_path)

        for asset in config:

            if not os.path.isfile(config[asset]['file']):
                config[asset]['file'] = self.locate_asset_file(
                    file_name=config[asset]['file'],
                    path=mode_path)

            self.register_asset(asset=asset.lower(),
                                config=config[asset])

        return config

    def load_assets(self, config, mode=None, load_key=None, callback=None,
                    **kwargs):
        """Loads the assets from a config dictionary.

        Args:
            config: Dictionary that holds the assets to load.
            mode: Not used. Included here since this method is registered as a
                mode start handler.
            load_key: String name of the load key which specifies which assets
                should be loaded.
            callback: Callback method which is called by each asset once it's
                loaded.
            **kwargs: Not used. Included to allow this method to be used as an
                event handler.

        The assets must already be registered in order for this method to work.

        """
        # actually loads assets from a config file. Assumes that they've
        # already been registered.

        asset_set = set()

        for asset in config:
            if self.asset_list[asset].config['load'] == load_key:
                self.asset_list[asset].load(callback=callback)
                asset_set.add(self.asset_list[asset])

        return self.unload_assets, asset_set

    def register_asset(self, asset, config):
        """Registers an asset with the Asset Manager.

        Args:
            asset: String name of the asset to register.
            config: Dictionary which contains settings for this asset.

        Registering an asset is what makes it available to be used in the game.
        Note that registering an asset is separate from loading an asset. All
        assets will be registered on MPF boot, but they can be loaded and
        unloaded as needed to save on memory.

        """


        self.asset_list[asset] = self.asset_class(self.machine, config,
                                                  config['file'], self)

    def unload_assets(self, asset_set):
        """Unloads assets from memory.

        Args:
            asset_set: A set (or any iterable) of Asset objects which will be
                unloaded.

        Unloading an asset does not de-register it. It's still available to be
        used, but it's just unloaded from memory to save on memory.

        """
        for asset in asset_set:
            self.log.debug("Unloading asset: %s", asset.file_name)
            asset.unload()

    def load_asset(self, asset, callback, priority=10):
        """Loads an asset into memory.

        Args:
            asset: The Asset object to load.
            callback: The callback that will be called once the asset has been
                loaded by the loader thread.
            priority: The relative loading priority of the asset. If there's a
                queue of assets waiting to be loaded, this load request will be
                inserted into the queue in a position based on its priority.

        """
        self.loader_queue.put((-priority, asset, callback))
        # the priority is negated because PriorityQueue pops the smallest value
        # first, so higher-priority load requests come out first
        self.log.debug("Adding %s to loader queue at priority %s. New queue "
                       "size: %s", asset, priority, self.loader_queue.qsize())
        self.machine.num_assets_to_load += 1

    def locate_asset_file(self, file_name, path=None):
        """Takes a file name and a root path and returns a link to the absolute
        path of the file

        Args:
            file_name: String of the file name
            path: root of the path to check (without the specific asset path
                string)

        Returns: String of the full path (path + file name) of the asset.

        Note this method will add the path string between the path you pass and
        the file. Also if it can't find the file in the path you pass, it will
        look for the file in the machine root plus the path string location.

        """

        if path:
            path_list = [path]
        else:
            path_list = list()

        path_list.append(self.machine.machine_path)

        for path in path_list:

            full_path = os.path.join(path, self.path_string, file_name)
            if os.path.isfile(full_path):
                return full_path

        self.log.critical("Could not locate asset file '%s'. Quitting...",
                          file_name)
        raise Exception()
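
The negated priority used by load_asset above exploits the fact that PriorityQueue always pops the smallest item, turning it into a highest-priority-first queue. A minimal demonstration (the asset names are made up):

from Queue import PriorityQueue

loader_queue = PriorityQueue()
for priority, asset in [(10, "logo"), (50, "attract_video"), (20, "font")]:
    loader_queue.put((-priority, asset))

while not loader_queue.empty():
    print loader_queue.get()
# (-50, 'attract_video'), then (-20, 'font'), then (-10, 'logo')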

Example #36
# defines a basic node class
class Node:
    def __init__(self, x_in, y_in, theta_in, id_in, parentid_in, pathcost_in):
        self.x = x_in
        self.y = y_in
        self.theta = theta_in
        self.id = id_in
        self.parentid = parentid_in
        self.pathcost = pathcost_in

    def printme(self):
        print "\tNode id", self.id, ":", "x =", self.x, "y =", self.y, "theta =", self.theta, "parentid:", self.parentid


#initialize the priority queue
q = PriorityQueue()

#insert objects of form (priority, Node)
q.put((1.46, Node(2, 3, 0.3, 1, 0, 0)))  #similar to push
q.put((2.6, Node(5, 2, 0.1, 2, 1, 0)))
q.put((5.6, Node(2, 3, 0.3, 3, 2, 0)))
q.put((0.6, Node(4, 3, 0.2, 4, 1, 0)))

print "Pop elements of queue in order of increasing priority:"
while not q.empty():
    next_item = q.get()  # like pop
    print "Priority:", next_item[0]
    next_item[1].printme()
Example #37
class EventHandler(object):
    """ The eventhandler class handles events and timers

    It is responsible for actually running the simulation.
    """
    def __init__(self, network, initialEvents=None):
        """ Constructor for an EventHandler.

        :param network: network.Network object that models the network
        :param initialEvents: List of initial events
        :return:
        """
        self._network = network
        self._queue = PriorityQueue()
        self.time = 0
        if initialEvents:
            for e in initialEvents:
                self._queue.put(e)
        elif network.events:
            for e in network.events:
                self._queue.put(e)
        else:
            print "No events queued"

    def step(self):
        """ Processes one Event from the queue, corresponding to one 'tick'.

        If the queue is empty, this will raise an Empty error.
        :return: The Event that was just processed.
        """
        # When we get an object from the queue, do not block if empty.
        # Simply raise an Empty exception. This may be changed later.
        event = self._queue.get(block=False)
        simtimer.simtime = event.timestamp

        # Log each event
        logger.log('[%10.3f][%15s] %s' %
                   (event.timestamp, event.__class__, event.logMessage))

        # enqueue new events
        newevents = event.eventObject.processEvent(event)
        for e in newevents:
            self._queue.put(e)
        self.time = event.timestamp
        return event

    def completed(self):
        """ Check whether simulation is completed
        :return: true if all flows are done
        """
        for flow_id in self._network.flows:
            if not self._network.flows[flow_id].done:
                return False
        return True

    def run(self, steps=0):
        """
        :param steps: [optional] Maximum number of steps to take.
            If 0, the simulation runs until completion.
        """

        # If steps is 0, run until the queue is empty or the simulation completes
        if steps == 0:
            while not self._queue.empty():
                self.step()
                if self.completed():
                    break
        else:
            for _ in trange(steps):
                self.step()
                if self.completed():
                    break
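
# A hedged sketch (an addition, not from the original simulator): since
# EventHandler puts Event objects directly onto the PriorityQueue, the events
# themselves must be orderable; defining __lt__ on the timestamp keeps the
# queue sorted by simulation time. The attribute names mirror those used in
# step() above.
class TimestampedEvent(object):
    def __init__(self, timestamp, eventObject, logMessage=""):
        self.timestamp = timestamp      # simulation time at which the event fires
        self.eventObject = eventObject  # object whose processEvent() is called
        self.logMessage = logMessage

    def __lt__(self, other):
        # Earlier timestamps are retrieved first by the PriorityQueue
        return self.timestamp < other.timestamp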
Example #38
0
    print("---")

    # Besides processing elements in insertion order, sometimes the order should
    # depend on properties of the elements themselves. For example, the boss's
    # print job may outrank the developers'. PriorityQueue uses the sort order
    # of its contents to decide which element is retrieved next.
    class Job(object):
        def __init__(self, priority, description):
            self.priority = priority
            self.description = description
            print('New job:', description)
            return

        def __lt__(self, other):
            return self.priority < other.priority

    q2 = PriorityQueue()
    q2.put(Job(5, 'Mid-level job'))
    q2.put(Job(10, 'Low-level job'))
    q2.put(Job(1, 'Important job'))  # the smaller the number, the higher the priority

    while not q2.empty():
        next_job = q2.get()  # retrieved in priority order
        print('Processing job', next_job.description)


4. Cross-process communication queues
  1. multiprocessing.Process (multiple processes)
    '''
    1. from queue import Queue  # an in-process queue
    This is the ordinary queue mode, similar to a plain list: first in, first
    out. get() blocks until an item is available.

    2. from multiprocessing import Queue  # a cross-process communication queue (shared by all child processes)
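    '''
    # A minimal sketch (an addition, not from the original notes): the same
    # producer/consumer pattern across process boundaries with
    # multiprocessing.Queue, which the in-process queue.Queue cannot do.
    from multiprocessing import Process, Queue as MPQueue

    def worker(mq):
        mq.put('hello from the child process')

    if __name__ == '__main__':
        mq = MPQueue()
        p = Process(target=worker, args=(mq,))
        p.start()
        print(mq.get())  # blocks until the child process has put an item
        p.join()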
Example #39
0
from Queue import PriorityQueue
from collections import defaultdict
from itertools import permutations
import re


graph = defaultdict(dict)
pq = PriorityQueue()

# parse data
with open('09.txt') as f:
    for line in f.readlines():
        edge = tuple(re.findall(r'[A-Z][A-Za-z]+', line))
        cost = int(re.search(r'\d+', line).group())
        graph[edge[0]][edge[1]] = cost
        graph[edge[1]][edge[0]] = cost



for perm in permutations(graph):
    cost = 0
    for edge in zip(perm, perm[1::]):
        cost += graph[edge[0]][edge[1]]
    pq.put((cost, tuple(perm)))  # the (cost, route) pair must be a single tuple

print pq.get()

while not pq.empty():
    biggest = pq.get()

print biggest
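
# A simpler alternative sketch (not from the original solution): since every
# permutation's cost is computed anyway, min() and max() over the costs give
# both answers without draining a PriorityQueue:
#
#     costs = [sum(graph[a][b] for a, b in zip(p, p[1:])) for p in permutations(graph)]
#     print min(costs), max(costs)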
Example #40
0
emptyblockPos2 = Coordinate(x, y)
#====================================================================================

envObj = Environment(n, Mat, emptyblockPos1,
                     emptyblockPos2)  #Initialize environment
agent = Agent()  #Initialize Agent
print("Board = ", envObj.mat.M)
print("======================================")
actionList = ['left', 'right', 'up', 'down']  #Action Space

#---------------------Push Initial States-----------------------------
pq = PriorityQueue()  # open list, ordered by path cost + heuristic
state = State(copy.copy(envObj.mat), copy.copy(envObj.emptyblockPos1),
              copy.copy(envObj.emptyblockPos2))  # rows and columns from 0 to n-1
h = manhattanHeuristic(envObj, state)
curr = QueueNode(state, "None", None, 0, h)
pq.put(curr)
#--------------------------------------------------------------------

#===================================A*===============================================
while (not pq.empty()):
    curr = pq.get()
    # update environment matrix with new matrix of state <curr>
    envObj.mat = curr.state.mat.getCopy()
    if (agent.isExplored(curr.state)):
        continue
    # if requried state is found, then break
    if (agent.getPerception(envObj)):  # If agent has reached the goal, break the loop
        print("Reached")
        destination = curr
        break
Example #41
0
class Spider(Req):
    def __init__(self, site, timeout, delay, cookie, depth, threads):
        super(Spider, self).__init__(site, timeout, delay, cookie, threads)
        self.depth = depth
        self.visited = []
        self.found = []
        self.tasks_queue = PriorityQueue()

    def get_page_content(self, url):
        '''
        Fetch the page source. Do not read the body if the content-type is not
        html or the content-length is too large.
        '''
        r = 0
        r = self.send_http(url)
        if r != 0 and r.status == 200 and r.getheader('content-length') != self.not_found_page_length:
            self.pool.threadLock.acquire()
            print '[!]' + url.encode('utf-8')
            self.pool.threadLock.release()
            self.found.append(url)
            if r.getheader('content-type') and r.getheader('content-type').find('html') != -1:
                return r.read()
            elif r.getheader('content-length') and int(r.getheader('content-length')) < 102400:
                return r.read()
            else:
                logger.info('%s content-length %s is too long' % (url, r.getheader('content-length')))

    def get_all_links(self, content):
        a_tag = self.get_a_links(content)
        all_links = a_tag
        return all_links

    def get_a_links(self, content):
        """
        获取href属性中的url
        """
        links = []
        soup = BeautifulSoup(content, 'html.parser')
        a = soup.find_all('a', attrs={'href': re.compile('.*')})
        for link in a:
            links.append(link['href'])
        return links

    def filter_links(self, url, links):
        """
        过滤链接包括非本域下、相对路径、非http
        :param url: 当前访问url
        :param links: 页面中全部链接
        :return: 过滤后有效链接
        """
        filtered_links = []
        for link in links:
            o = urlparse(link)
            if (self.site_parse[1].find(o[1]) > 0 or (o[0] == '' and o[1] == '' and o[2] != '')):
                ret = urljoin(url, link)
                filtered_links.append(ret)
        return filtered_links

    def crawl_page(self, url, depth):
        """
        爬取页面,获取链接
        :param url: 爬取目标url
        :param depth: 深度
        :return: 有效链接
        """
        if depth < 1:
            print '[-]%s' % url.encode('utf-8')
            return
        result = self.get_page_content(url)
        if not result:
            return
        links = self.get_all_links(result)
        links = self.filter_links(url, links)
        for link in links:
            self.tasks_queue.put((random.randint(1, 500), (link, depth-1)), True, 5)

    def get_visited(self):
        return self.visited

    def start(self):
        print '[%s] Start Spider...' % (time.strftime('%H:%M:%S'))
        self.tasks_queue.put((1, (self.site, self.depth)))
        while True:
            try:
                p, (url, depth) = self.tasks_queue.get(True, 1)
            except Empty, e:
                if self.pool.undone_tasks():
                    continue
                else:
                    break
            if url not in self.visited:
                self.pool.spawn(self.crawl_page, *(url, depth))
                self.visited.append(url)
                fuzz_urls.put(url)
        print '[%s] Stop Spider' % time.strftime('%H:%M:%S')
        print '[%s] %s Founded' % (time.strftime('%H:%M:%S'), len(self.found))
        result.spider = self.visited
Example #42
0
def main():
    global lucene_vm_init
    if not lucene_vm_init:
        lucene.initVM(vmargs=['-Djava.awt.headless=true'])
        lucene_vm_init = True

    is_index_Exist = os.path.exists(LUCENE_INDEX_DIR)
    # specify index path
    index_mm = MMapDirectory(Paths.get(LUCENE_INDEX_DIR))

    # configure search engine
    analyzer = StandardAnalyzer()
    config = IndexWriterConfig(analyzer)

    # load index to search engine
    reader = DirectoryReader.open(index_mm)
    searcher = IndexSearcher(reader)

    # read query
    read_query()

    # initialize mongodb client
    mongoObj = Mongo_Object('localhost', 27017)

    # initialize word2vec
    print 'load word2vec model'
    w2vmodel = gensim.models.Word2Vec.load_word2vec_format(
        "F:\\modified_w2v\\w2v_wiki_trigram_phrase_20170101\\wiki.en.text.vector.binary",
        binary=True)
    print 'finish loading word2vec model'

    # search
    global hitsPerPage
    fields = ['name', 'value']
    #parser=MultiFieldQueryParser(fields,analyzer)
    #parser.setDefaultOperator(QueryParserBase.AND_OPERATOR)
    rec_result = open('pylucene.runs', 'w')

    for i in range(len(queries)):
        query = queries[i]
        print 'processing query ' + str(i) + ':' + query[0]
        querystr = remove_duplicate(stemSentence(query[1]))
        #q_lucene=MultiFieldQueryParser.parse(parser,querystr)
        q_lucene = QueryParser("all_text", analyzer).parse(querystr)
        print "q_lucene: " + q_lucene.toString()
        collector = TopScoreDocCollector.create(hitsPerPage)
        searcher.search(q_lucene, collector)
        hits = collector.topDocs().scoreDocs

        # build query object for computeScore
        queryObj = Query_Object(query, mongoObj, w2vmodel)

        # initialize duplicate remover
        docDup = set()

        # find candidate results after 1st round filter
        candidates = PriorityQueue()
        for j in range(len(hits)):
            docID = hits[j].doc
            d = searcher.doc(docID)
            name = cleanSentence(d['title'].strip())
            if name in docDup:
                continue
            docDup.add(name)
            # build entity object
            entityObj = Entity_Object(d, mongoObj, w2vmodel)
            score = computeScore(queryObj, entityObj, mongoObj, w2vmodel)
            #score=hits[j].score
            candidates.put((-score, j))

        # output results from priority queue larger score first
        rank = 0
        while not candidates.empty() and rank < 100:
            rank = rank + 1
            item = candidates.get()
            score = -item[0]
            j = item[1]  # index of hits[]
            docID = hits[j].doc
            d = searcher.doc(docID)
            title = '<dbpedia:' + d.get('title') + '>'
            res_line = query[0] + '\t' + 'Q0' + '\t' + title + '\t' + str(
                rank) + '\t' + str(score) + '\t' + 'pylucene_multifield'
            rec_result.writelines(res_line + '\n')
    rec_result.close()
Example #43
0
from Queue import PriorityQueue

t = int(raw_input())  # read a line with a single integer
for x in xrange(1, t + 1):
    N, K = [int(s) for s in raw_input().split(" ")
            ]  # read a list of integers, 2 in this case

    pq = PriorityQueue()
    pq.put((-1 * long(N), 0))

    for i in range(0, K - 1):
        #print "Person:", i
        gap_tuple = pq.get()
        gap = -1 * gap_tuple[0]
        start = gap_tuple[1]
        #print type(gap)
        #print "Gap:", gap, "Starting after:", start
        g1 = (-1 * ((gap - 1) / 2), start)
        g2 = (-1 * (gap - 1 - ((gap - 1) / 2)), start + ((gap - 1) / 2) + 1)
        pq.put(g1)
        pq.put(g2)
        #if (g1[0]!=0) :
        #if (g2[0]!=0) :

    fg = pq.get()
    gap = -1 * fg[0]
    a = (gap - 1) / 2
    b = (gap - 1 - ((gap - 1) / 2))
    print "Case #{}: {} {}".format(x, max(a, b), min(a, b))
    #print max(a,b), min(a,b)
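
# A worked trace (an addition) for N=4, K=2: the queue starts with (-4, 0);
# person 1 takes the gap of 4 and splits it into gaps of (4-1)/2 = 1 and
# 4-1-1 = 2. Person 2 (the one we answer for) then takes the largest gap, 2,
# leaving a = (2-1)/2 = 0 and b = 1, so the output is "Case #x: 1 0".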
Example #44
0
class ThreadedExecutor(Executor):
    """\
    This executor provides a method of executing callables in a threaded worker
    pool. The number of outstanding requests can be limited by the ``maxsize``
    parameter, which has the same behavior as the parameter of the same name
    for the ``PriorityQueue`` constructor.

    All threads are daemon threads and will remain alive until the main thread
    exits. Any items remaining in the queue at this point may not be executed!
    """

    def __init__(self, worker_count=1, maxsize=0):
        self.__worker_count = worker_count
        self.__workers = set([])
        self.__started = False
        self.__queue = PriorityQueue(maxsize)
        self.__lock = threading.Lock()

    def __worker(self):
        queue = self.__queue
        while True:
            priority, (function, future) = queue.get(True)
            if not future.set_running_or_notify_cancel():
                continue
            try:
                result = function()
            except Exception:
                future.set_exception_info(*sys.exc_info()[1:])
            else:
                future.set_result(result)
            queue.task_done()

    def start(self):
        with self.__lock:
            if self.__started:
                return

            for i in xrange(self.__worker_count):
                t = threading.Thread(target=self.__worker)
                t.daemon = True
                t.start()
                self.__workers.add(t)

            self.__started = True

    def submit(self, callable, priority=0, block=True, timeout=None):
        """\
        Enqueue a task to be executed, returning a ``TimedFuture``.

        Tasks can be prioritized by providing a value for the ``priority``
        argument, which follows the same specification as the standard library
        ``Queue.PriorityQueue`` (lowest valued entries are retrieved first.)

        If the worker pool has not already been started, calling this method
        will cause all of the worker threads to start running.
        """
        if not self.__started:
            self.start()

        future = self.Future()
        task = (priority, (callable, future))
        try:
            self.__queue.put(task, block=block, timeout=timeout)
        except Full as error:
            if future.set_running_or_notify_cancel():
                future.set_exception(error)
        return future
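
# A hypothetical usage sketch (do_work and its arguments are assumptions, the
# submit/priority API is from the class above):
#
#     executor = ThreadedExecutor(worker_count=4)
#     urgent = executor.submit(lambda: do_work('urgent'), priority=0)
#     routine = executor.submit(lambda: do_work('routine'), priority=10)
#     # Lower priority values are retrieved first, so the urgent task is
#     # picked up by a worker before the routine one when the pool is busy.
#     urgent.result()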
Example #45
0
            if token not in token_count_dict:
                token_count_dict[token] = 1
            else:
                token_count_dict[token] += 1

        if line_count % 1000 == 0:
            print "building dictionary: " + str(line_count)

        line_count += 1

#pick the most frequent tokens from all tokens
from Queue import PriorityQueue

q = PriorityQueue()
for t in token_count_dict:
    q.put([-token_count_dict[t], t])

token_dict = {}
#add special token
token_dict[zero_token] = 0
token_dict[unknown_token] = 1
token_dict[start_token] = 2
token_dict[end_token] = 3
token_index = 4

token_count_dict = {}

#priority queue
while (not q.empty()):
    get = q.get_nowait()
    if (token_index == max_dict_size):
Example #46
0
class Pool:
    """Represents a thread pool"""

    def __init__(self, workers = max_workers, rate_limit = 1000):
        self.max_workers = workers
        self.mutex       = Semaphore()
        self.results     = {}
        self.retries     = defaultdict(int)
        self.queue       = PriorityQueue()
        self.threads     = []
        self.rate_limit  = rate_limit

    def _tick(self):
        time.sleep(1.0/self.rate_limit)
        # clean up finished threads
        self.threads = [t for t in self.threads if t.isAlive()]
        return (not self.queue.empty()) or (len(self.threads) > 0)


    def _loop(self):
        """Handle task submissions"""

        def run_task(priority, f, uuid, retries, args, kwargs):
            """Run a single task"""
            try:
                t.name = getattr(f, '__name__', None)
                result = f(*args, **kwargs)
            except Exception as e:
                # Retry the task if applicable
                if log:
                    log.error(traceback.format_exc())
                if retries > 0:
                    with self.mutex:
                        self.retries[uuid] += 1
                    # re-queue the task with a lower (i.e., higher-valued) priority
                    self.queue.put((priority+1, dumps((f, uuid, retries - 1, args, kwargs))))
                    self.queue.task_done()
                    return
                result = e
            with self.mutex:
                self.results[uuid] = dumps(result)
                self.retries[uuid] += 1
            self.queue.task_done()

        while self._tick():
            # spawn more threads to fill free slots
            log.warn("Running %d/%d threads" % (len(self.threads),self.max_workers))
            if len(self.threads) < self.max_workers:
                log.debug("Queue Length: %d" % self.queue.qsize())
                try:
                    priority, data = self.queue.get(True, 1.0/self.rate_limit)
                except Empty:
                    continue
                f, uuid, retries, args, kwargs = loads(data)
                t = Thread(target=run_task, args=[priority, f, uuid, retries, args, kwargs])
                t.setDaemon(True)
                self.threads.append(t)
                t.start()
        log.debug("Exited loop.")
        for t in self.threads:
            t.join()


    def stop(self):
        """Flush the job queue"""
        self.queue = PriorityQueue()


    def start(self, daemonize=False):
        """Pool entry point"""

        self.results = {}
        self.retries = defaultdict(int)

        if daemonize:
            t = Thread(target=self._loop)  # _loop is bound; passing self again would raise TypeError
            t.setDaemon(True)
            t.start()
            return
        else:
            self._loop()
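
# A hedged sketch of how a task would be enqueued (the excerpt above omits the
# submit side): _loop() expects a priority paired with a pickled
# (f, uuid, retries, args, kwargs) tuple, so a submit helper might look like:
#
#     def submit(self, f, priority=0, retries=0, *args, **kwargs):
#         uuid = uuid4().hex  # assumes uuid4 from the uuid module
#         self.queue.put((priority, dumps((f, uuid, retries, args, kwargs))))
#         return uuid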
Example #47
0
class KafkaStimuliConsumer(Thread):

    def __init__(self, cpstw):
        Thread.__init__(self)
        self.cpstw = cpstw
        self._kafka_consumer = None
        self.running = False
        self._pq = PriorityQueue()
        self._stimulus_issuer = None
        self._sleeper = Sleep()

    def __start_stimulus_issuer(self):
        self._stimulus_issuer = StimulusIssuer(self.cpstw, self._pq, 2, self._sleeper)
        self._stimulus_issuer.start()

    def __stop_stimulus_issuer(self):
        if self._stimulus_issuer is not None:
            self._stimulus_issuer.stop()

    def run(self):
        self.running = True
        self._kafka_consumer = KafkaConsumer(bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
                                             auto_offset_reset='latest',
                                             consumer_timeout_ms=1000)
        self._kafka_consumer.subscribe([KAFKA_STIMULI_TOPIC])

        self.__start_stimulus_issuer()

        while self.running:
            for message in self._kafka_consumer:
                # Unmarshalling
                json_msg = json.loads(message.value)
                timestamp = json_msg["timestamp"]
                twin_name = json_msg["twin_name"]
                if twin_name in self.cpstw:
                    twin = self.cpstw[twin_name]
                    stimulus = None
                    if isinstance(twin, Plc) or isinstance(twin, Hmi):
                        tag_name = json_msg["tag_name"]
                        value = json_msg.get("value")
                        stimulus = TagStimulus(timestamp, twin_name, tag_name, value)
                    elif isinstance(twin, RfidReaderMqttWiFi):
                        value = json_msg["value"]
                        stimulus = RfidStimulus(timestamp, twin_name, value)
                    else:
                        logger.error(
                            "Could not replicate state, because [type=%s] of [twin=%s] is not supported.",
                            type(twin), twin_name)
                    # Add incoming stimulus to queue
                    if stimulus is not None:
                        self._pq.put((stimulus.timestamp, stimulus))
                        # Wake up stimuli issuer from sleeping
                        self._sleeper.wake()
                    else:
                        logger.error(
                            "Could not replicate state, [twin=%s] is unknown.", twin_name)

        self._kafka_consumer.close()
        logger.info("Kafka stimuli consumer terminated.")

    def stop(self):
        if self.running:
            logger.info("Stopping Kafka stimuli consumer...")
            if self._kafka_consumer is not None:
                self.__stop_stimulus_issuer()
                self.running = False
        else:
            logger.info("Kafka stimuli consumer not started. Nothing to stop!")
Example #48
0
class DijkstraPlanner(CellBasedForwardSearch):
    def __init__(self, title, occupancyGrid):
        CellBasedForwardSearch.__init__(self, title, occupancyGrid)
        self.priorityQueue = PriorityQueue()
        self.frontier = None
        self.frontierGot = False

    # This method determines if a cell is a frontier cell or not. A
    # frontier cell is open and has at least one neighbour which is
    # unknown.
    def isFrontierCell(self, x, y):

        # Check the cell to see if it's open
        if self.occupancyGrid.getCell(x, y) != 0:
            return False

        # Check the neighbouring cells; if at least one of them is unknown, it's a frontier
        return self.checkIfCellIsUnknown(x, y, -1, -1) or self.checkIfCellIsUnknown(x, y, 0, -1) \
            or self.checkIfCellIsUnknown(x, y, 1, -1) or self.checkIfCellIsUnknown(x, y, 1, 0) \
            or self.checkIfCellIsUnknown(x, y, 1, 1) or self.checkIfCellIsUnknown(x, y, 0, 1) \
            or self.checkIfCellIsUnknown(x, y, -1, 1) or self.checkIfCellIsUnknown(x, y, -1, 0)

    def checkIfCellIsUnknown(self, x, y, offsetX, offsetY):
        newX = x + offsetX
        newY = y + offsetY
        # 'and' short-circuits, so getCell is never called with out-of-range indices
        return (newX >= 0) and (newX < self.occupancyGrid.getWidthInCells()) \
            and (newY >= 0) and (newY < self.occupancyGrid.getHeightInCells()) \
            and (self.occupancyGrid.getCell(newX, newY) == 0.5)

    # Put the cell on the queue, using the path cost as the key to
    # determine the search order
    def pushCellOntoQueue(self, cell):

        if (cell.parent is not None):
            # Work out the cost of the action from the parent to self cell
            d = self.computeLStageAdditiveCost(cell.parent, cell)
            cell.pathCost = cell.parent.pathCost + d
        else:
            cell.pathCost = 0

        self.priorityQueue.put((cell.pathCost, cell))

    # Check the queue size is zero
    def isQueueEmpty(self):
        return self.priorityQueue.empty()

    # Simply pull from the front of the list
    def popCellFromQueue(self):
        entry = self.priorityQueue.get()
        return entry[1]

    def resolveDuplicate(self, cell, parentCell):

        # See if the cost from the parent cell to this cell is shorter
        # than the existing path. If so, use it instead.
        dX = cell.coords[0] - parentCell.coords[0]
        dY = cell.coords[1] - parentCell.coords[1]
        d = math.sqrt(dX * dX + dY * dY)
        pathCostThroughNewParent = parentCell.pathCost + d
        if (pathCostThroughNewParent < cell.pathCost):
            cell.parent = parentCell
            cell.pathCost = pathCostThroughNewParent
            self.reorderPriorityQueue()

    # Reorder the queue. I don't see another way to do this, other than
    # create a new queue and copy over tuple-by-tuple. This rebuilds
    # the heap trees.
    def reorderPriorityQueue(self):
        newQueue = PriorityQueue()

        while not self.priorityQueue.empty():
            entry = self.priorityQueue.get()
            newQueue.put(entry)

        self.priorityQueue = newQueue

    # search the closest frontier
    def searchFrontier(self, start, blackList):

        # change the pose to the coords
        startCoords = self.occupancyGrid.getCellCoordinatesFromWorldCoordinates(
            [start.x, start.y])

        self.handleChangeToOccupancyGrid()

        # Make sure the queue is empty. We do this so that we can keep calling
        # the same method multiple times and have it work.
        while not self.isQueueEmpty():
            self.popCellFromQueue()

        # Check the start and end are not occupied. Note that "0.5" means
        # "don't know" which is why it is used as the threshold for detection.
        if (self.occupancyGrid.getCell(startCoords[0], startCoords[1]) > 0.5):
            return False

        # Get the start cell object and label it as such. Also set its
        # path cost to 0.
        self.start = self.searchGrid.getCellFromCoords(startCoords)

        #if self.start.label is CellLabel.OBSTRUCTED:
        #    return False

        self.start.pathCost = 0

        #if self.goal.label is CellLabel.OBSTRUCTED:
        #    return False

        if rospy.is_shutdown():
            return False

        # Insert the start on the queue to start the process going.
        self.markCellAsVisitedAndRecordParent(self.start, None)
        self.pushCellOntoQueue(self.start)

        # Reset the count
        self.numberOfCellsVisited = 0

        # initialize return variable
        self.frontierGot = False
        self.frontier = None

        # Iterate until we have run out of live cells to try or we reached the goal
        while not self.isQueueEmpty():

            # Check if ROS is shutting down; if so, abort. This stops the
            # planner from hanging
            if rospy.is_shutdown():
                return False

            cell = self.popCellFromQueue()
            # check if the cell is frontier and not the start
            if (self.isFrontierCell(cell.coords[0], cell.coords[1])
                    and cell.coords != startCoords):
                flag = False
                # if the cell is not in the blacklist, it is the new destination
                for k in range(0, len(blackList)):
                    if blackList[k] == cell.coords:
                        flag = True
                        break
                if not flag:
                    self.frontier = cell.coords
                    self.frontierGot = True
                    break

            cells = self.getNextSetOfCellsToBeVisited(cell)
            for nextCell in cells:
                if not self.hasCellBeenVisitedAlready(nextCell):
                    self.markCellAsVisitedAndRecordParent(nextCell, cell)
                    self.pushCellOntoQueue(nextCell)
                    self.numberOfCellsVisited = self.numberOfCellsVisited + 1
                else:
                    self.resolveDuplicate(nextCell, cell)

            # Now that we've checked all the actions for this cell,
            # mark it as dead
            self.markCellAsDead(cell)

        return self.frontierGot, self.frontier
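
# An alternative sketch for the reorderPriorityQueue step above (an addition,
# not part of the original planner): Queue.PriorityQueue keeps its entries in
# a plain list heap, so re-heapifying in place with heapq avoids the
# drain-and-refill copy. It touches the queue's internals, which is why the
# safer drain-and-refill version is used above.
import heapq

def reheapify(priority_queue):
    # priority_queue.queue is the underlying heap list of (pathCost, cell)
    # entries; hold the queue's mutex in case other threads touch it.
    with priority_queue.mutex:
        heapq.heapify(priority_queue.queue)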
Example #49
0
class SteelBranchBound():
    def __init__(self,
                 steel_rtn,
                 opt_model,
                 branch_option=LEAD_FOLLOWER_ALL_STAGES):
        # todo reduce lp-solve-num, look at lp infeasibility
        # todo lp num, consider lead task for all stages ...
        self.opt_model = opt_model
        self.steel_rtn = steel_rtn
        self.cplex_lp_prob = solve_schedule.convert_to_cplex(opt_model)

        self.q_relax = PriorityQueue()
        self.q_integer = PriorityQueue()
        self.best_int_obj = sys.maxint
        self.branch_option = branch_option
        if branch_option == LEAD_FOLLOWER_EACH_STAGE:
            self.follower_offset = self.get_follower_offset_each_stage(
                steel_rtn)
        elif branch_option == LEAD_FOLLOWER_ALL_STAGES:
            self.follower_offset = self.get_follower_offset_all_stages(
                steel_rtn)
        self.group2caster = self._assign_caster()
        self.lead2follower = dict()

        self.count_lp = 0
        self.count_bb_node = 0
        self.obj_int_iter = dict()
        self.obj_relax_iter = dict()

    def solve_bb(self):
        t1 = time.time()

        # time range [) for task to start
        task_start_rng = dict()
        for task in range(1, self.steel_rtn.num_tasks + 1):
            task_start_rng[task] = (-1, -1)
        initial_result = self.solve_lp(task_start_rng)
        self.check_solution(initial_result['obj'], initial_result['xx'],
                            initial_result['yy'], task_start_rng)

        schedule_guess = util_search_schedule.find_feasible_task_rng(
            self.steel_rtn)
        result_guess = self.solve_lp(schedule_guess)
        if result_guess is not None:
            self.check_solution(result_guess['obj'], result_guess['xx'],
                                result_guess['yy'], schedule_guess)
        else:
            log.error('No relaxation solution.')
            # return None

        while not self.q_relax.empty():
            if time.time() - t1 > BB_MAX_CPU:
                break
            best_relax = self.q_relax.get()
            node = {'obj': best_relax[0], 'start_rng': best_relax[2]}
            self.obj_relax_iter[self.count_lp] = best_relax[0]
            if self.best_int_obj - node['obj'] < BB_OBJ_INTEGER_EPSILON:
                log.info(
                    'B&B terminates as the upper and lower bounds are close: %.3f vs. %.3f'
                    % (self.best_int_obj, node['obj']))
                break
            else:
                for node_i in self.branch(node):
                    result = self.solve_lp(node_i['start_rng'])
                    if result is None:
                        log.debug('Infeasible LP start_rng %s' %
                                  node_i['start_rng'])
                        continue
                    self.check_solution(result['obj'], result['xx'],
                                        result['yy'], node_i['start_rng'])

        cpu_time = time.time() - t1
        log.info('best integer objective %.6f' % self.best_int_obj)
        log.info('compute time: %.3f s' % cpu_time)
        log.info('lp solve num = %d' % self.count_lp)
        log.info('generated bb node num = %d' % self.count_bb_node)
        log.info('q_relax remained node num = %d' % self.q_relax.qsize())
        self.draw_iter()
        # with open(case_name+'_iter.json','w+') as f:
        #     json.dump({'q1':self.debug_q, 'q2_int':self.debug_q_int},f, indent=2)
        return self.q_integer.get()[2]

    def check_solution(self, obj, xx, yy, start_rng):
        compare = (np.absolute(xx) < BB_VAR_INTEGER_EPSILON) | (np.absolute(
            [x - 1 for x in xx]) < BB_VAR_INTEGER_EPSILON)
        is_integer = compare.all()
        if is_integer:
            self.q_integer.put((obj, self.count_lp, {
                'xx': sparse.coo_matrix(xx),
                'yy': sparse.coo_matrix(yy),
                'start_rng': start_rng,
                'obj': obj
            }))
            self.best_int_obj = min(obj, self.best_int_obj)
            self.obj_int_iter[self.count_lp] = self.best_int_obj
        else:
            self.q_relax.put((obj, self.count_lp, start_rng))

        # todo add a better rounding method
        if self.count_bb_node > 500 and self.count_bb_node % 500 == 1:
            log.warn('count_bb_node = %d' % self.count_bb_node)
            log.warn('q_relax.qsize = %d' % self.q_relax.qsize())
        if self.count_bb_node > 10**10:
            log.warn('count_bb_node = %d' % self.count_bb_node)
            log.warn('too long : queue size %d' % self.q_relax.qsize())
            self.q_relax = PriorityQueue()
            return True

        log.debug('%d int:%s obj %.1f start_rnt: %s' %
                  (self.count_lp, is_integer, obj, json.dumps(start_rng)))

        return is_integer

    def solve_lp(self, task_start_range):
        x_up = self.get_x_up(task_start_range, self.opt_model, self.steel_rtn)
        self.cplex_lp_prob.variables.set_upper_bounds(
            zip(range(len(x_up)), x_up))
        self.cplex_lp_prob.solve()
        self.count_lp += 1
        status = self.cplex_lp_prob.solution.get_status()
        if status == self.cplex_lp_prob.solution.status.optimal:
            return {
                'obj':
                self.cplex_lp_prob.solution.get_objective_value(),
                'xx':
                self.cplex_lp_prob.solution.get_values()
                [0:self.opt_model.num_x],
                'yy':
                self.cplex_lp_prob.solution.get_values()[self.opt_model.num_x:]
            }
        else:
            # log.error('LP solve status %s' % status)
            return None

    def branch(self, node):
        # todo add more branch options
        return self._branch_by_lead_task(node)

    def _branch_by_lead_task(self, node):
        """Branch by lead tasks.
        Consider the start time ranges for lead task and its followers all-together in one bb node.
        Lead task: the first heat in each group.
        """
        start_rng = node['start_rng']
        # if any task hasn't been considered, then consider this task
        if self.branch_option == LEAD_FOLLOWER_EACH_STAGE:
            for stage in [1, 2, 3]:
                task_cat = self.steel_rtn.stage2units[str(stage)].keys()[0]
                for group, heats in self.steel_rtn.group2heats.items():
                    task1 = self.steel_rtn.tasks[task_cat][heats[0] - 1]
                    if start_rng[task1][0] < 0:
                        split_task = [task1]
                        for idx in range(1, len(heats)):
                            task_i = self.steel_rtn.tasks[task_cat][heats[idx]
                                                                    - 1]
                            split_task.append(task_i)
                        self.lead2follower[task1] = split_task
                        return self._branch_node_batch_tasks(node, split_task)
        elif self.branch_option == LEAD_FOLLOWER_ALL_STAGES:
            task_type = 'EAF'
            for group_, heats in self.steel_rtn.group2heats.items():
                task_lead = self.steel_rtn.tasks[task_type][heats[0] - 1]
                if start_rng[task_lead][0] < 0:
                    task_array = []
                    for stage in [1, 2, 3]:
                        task_type = self.steel_rtn.stage2units[str(
                            stage)].keys()[0]
                        for idx in range(len(heats)):
                            task_i = self.steel_rtn.tasks[task_type][heats[idx]
                                                                     - 1]
                            task_array.append(task_i)
                    self.lead2follower[task_lead] = task_array
                    return self._branch_node_batch_tasks(node, task_array)
        # caster has been assigned ahead
        for group in range(self.steel_rtn.num_groups):
            caster = self.group2caster[group + 1]
            task = self.opt_model.steel_rtn.tasks[caster][group]
            if start_rng[task][0] < 0:
                self.lead2follower[task] = task
                return self._branch_node_batch_tasks(node, task)
        # all leading tasks have been considered, then narrow their start time range
        [split_task, rng_max] = self._widest_range(self.lead2follower.keys(),
                                                   start_rng)
        # narrow the leader task or narrow all tasks
        if rng_max > BB_BRANCH_SWITCH_THRESHOLD:
            return self._branch_node_batch_tasks(
                node, self.lead2follower[split_task])
        else:
            [split_task, rng_max
             ] = self._widest_range(range(1, self.steel_rtn.num_tasks + 1),
                                    start_rng)
            if rng_max > 1:
                return self._branch_node_batch_tasks(node, split_task)
            else:
                log.error('No freedom left to branch: ' + str(start_rng))
                return []

    def _branch_node_batch_tasks(self, node, split_task):
        # todo delete isinstance checking
        if isinstance(split_task, list):
            task1 = split_task[0]
        else:
            task1 = split_task

        start_rng = node['start_rng']
        if start_rng[task1][0] >= 0 and start_rng[task1][
                1] <= self.opt_model.num_t:
            time_pair = start_rng[task1]
        else:
            time_pair = (0, self.opt_model.num_t)
        # branch into two nodes
        start_rng_1 = start_rng.copy()
        start_rng_2 = start_rng.copy()
        middle = int(math.floor(sum(time_pair) / 2))
        start_rng_1[task1] = (time_pair[0], middle)
        start_rng_2[task1] = (middle, time_pair[1])
        # also restrict the start time ranges for the follower tasks
        if isinstance(split_task, list):
            for task_i in split_task[1:]:
                offset = self.follower_offset[task_i]
                start_rng_1[task_i] = (start_rng_1[task1][0] + offset[0],
                                       start_rng_1[task1][1] + offset[1])
                start_rng_2[task_i] = (start_rng_2[task1][0] + offset[0],
                                       start_rng_2[task1][1] + offset[1])

        self.count_bb_node += 1
        node_1 = {
            'obj': node['obj'],
            'count': self.count_bb_node,
            'start_rng': start_rng_1
        }
        self.count_bb_node += 1
        node_2 = {
            'obj': node['obj'],
            'count': self.count_bb_node,
            'start_rng': start_rng_2
        }
        return [node_1, node_2]

    def draw_iter(self):
        plt.figure()
        obj_int = [0] * (self.count_lp + 1)
        obj_relax = [0] * (self.count_lp + 1)
        best_int = max(self.obj_int_iter.values())
        best_relax = min(self.obj_relax_iter.values())
        for i in range(self.count_lp + 1):
            if i in self.obj_int_iter:
                best_int = self.obj_int_iter[i]
            if i in self.obj_relax_iter:
                best_relax = self.obj_relax_iter[i]
            obj_int[i] = best_int
            obj_relax[i] = best_relax
        plt.plot(range(self.count_lp + 1), obj_int)
        plt.plot(range(self.count_lp + 1), obj_relax)
        plt.show()

    @staticmethod
    def get_follower_offset_each_stage(steel_rtn):
        """Get the time offsets between lead task and its followers.
        Lead task: the first heat in each group.
        Follower task: the other heats in each group.

        Lead and follower tasks are in the same process stage.
        For RTN1.
        """
        offset = dict()
        for stage in [1, 2, 3]:
            for group, heats in steel_rtn.group2heats.items():
                task_cat = steel_rtn.stage2units[str(stage)].keys()[0]
                num_equip = steel_rtn.stage2units[str(stage)].values()[0]
                equip_offset = [0] * num_equip
                delays = 0
                for idx in range(len(heats)):
                    heat = heats[idx]
                    task = steel_rtn.tasks[task_cat][heat - 1]
                    equip_id = idx % num_equip
                    if idx == 0:
                        offset[task] = (0, 0)
                        delays += steel_rtn.task_duration[task]
                    elif idx < num_equip:
                        offset[task] = (0, delays)
                        delays += steel_rtn.task_duration[task]
                    elif idx >= num_equip:
                        task_pre = steel_rtn.tasks[task_cat][heat - 1 -
                                                             num_equip]
                        task_pre_len = steel_rtn.task_duration[task_pre]
                        start = offset[task_pre][0] + task_pre_len
                        end = offset[task_pre][1] + task_pre_len
                        offset[task] = (start, end)
                    equip_offset[equip_id] += steel_rtn.task_duration[task]
        return offset

    @staticmethod
    def get_follower_offset_all_stages(steel_rtn):
        """Get the time offsets between lead task and its followers.
        Lead task: the first heat in each group.
        Follower task: the other heats in each group.

        Lead task is the first heat in the first process stage.
        The other heats in all the first three stages are the followers.
        For RTN1.
        """
        offset = SteelBranchBound.get_follower_offset_each_stage(steel_rtn)
        for stage in [2, 3]:
            task_cat = steel_rtn.stage2units[str(stage)].keys()[0]
            for group, heats in steel_rtn.group2heats.items():
                for idx in range(len(heats)):
                    heat = heats[idx]
                    task_process = steel_rtn.tasks[task_cat][heat - 1]
                    if task_process in offset:
                        del offset[task_process]
                    task_trans = task_process - steel_rtn.num_heats
                    min_trans_time = steel_rtn.task_duration[task_trans]
                    max_trans_time = int(
                        math.ceil(steel_rtn.time_trans_max['TR_S%d' %
                                                           (stage - 1)] /
                                  steel_rtn.rtn_t0))
                    # todo a smaller max_trans_time reduces the branches
                    max_trans_time = min_trans_time
                    task_pre_stage = steel_rtn.tasks[steel_rtn.stage2units[str(
                        stage - 1)].keys()[0]][heat - 1]
                    offset[task_process] = (offset[task_pre_stage][0] +
                                            min_trans_time,
                                            offset[task_pre_stage][1] +
                                            max_trans_time)
        return offset

    @staticmethod
    def get_x_up(start_rng, opt_model, steel_rtn):
        x_up = [1] * opt_model.num_x
        for task_cat, task_list in steel_rtn.tasks.items():
            for task in task_list:
                if task not in start_rng or start_rng[task] == (-1, -1):
                    continue
                for t in range(opt_model.num_t):
                    if t not in range(start_rng[task][0], start_rng[task][1]):
                        x_up[opt_model.pos_x_task_t(task, t)] = 0
                # restrict its parallel tasks
                if 'CC' in task_cat:
                    g_idx = task - steel_rtn.tasks[task_cat][0]
                    for caster in steel_rtn.stage2units['4'].keys():
                        if caster == task_cat:
                            continue
                        cast_task = steel_rtn.tasks[caster][g_idx]
                        for t_ in range(steel_rtn.num_t):
                            x_up[opt_model.pos_x_task_t(cast_task, t_)] = 0
        return x_up

    @staticmethod
    def _widest_range(considered_tasks, task_start_rng):
        split_task = -1
        width_max = -1
        for task in considered_tasks:
            if task_start_rng[task][0] < 0:
                continue
            width = task_start_rng[task][1] - task_start_rng[task][0]
            if width_max < width:
                width_max = width
                split_task = task
        return [split_task, width_max]

    @staticmethod
    def _assign_caster():
        # todo a better caster-group assignment method
        group2caster = {
            1: 'CC2',
            2: 'CC2',
            3: 'CC2',
            4: 'CC1',
            5: 'CC2',
            6: 'CC1'
        }
        return group2caster
Example #50
0
def aborted_dijkstra(origin_node, boundary_nodes, this_region_only=False,
                     on_forward_graph=True):
    # maintain the set of boundary nodes that have been visited by this search
    visited_boundary_nodes = set()
    visited_nodes = set()
    i = origin_node.boundary_node_id

    # Initialize Dijkstra queue with the origin node
    nodes_to_search = PriorityQueue()
    nodes_to_search.put((0, origin_node))

    expanded_count = 0
    max_pq_size = 0

    while(not nodes_to_search.empty()):
        # Get the nearest node from the priority queue
        max_pq_size = max(nodes_to_search.qsize(), max_pq_size)
        (_, node) = nodes_to_search.get()
        expanded_count += 1

        if boundary_nodes is not None:
            visited_nodes.add(node)
        # If this is a boundary node for this region, mark it as visited
        if(boundary_nodes is not None and node.is_boundary_node and
           node.region_id == origin_node.region_id):
            visited_boundary_nodes.add(node)
            # If we have now visited all boundary nodes, stop early
            if len(visited_boundary_nodes) == len(boundary_nodes):
                break
        connecting_links = None
        if on_forward_graph:
            connecting_links = node.backward_links
        else:
            connecting_links = node.forward_links
        # Propagate to neighbors on the forward graph using the backward links
        for connecting_link in connecting_links:
            neighbor = None
            if on_forward_graph:
                neighbor = connecting_link.origin_node
            else:
                neighbor = connecting_link.connecting_node
            # if this_region_only is set, then skip nodes from other regions
            if(this_region_only and
               neighbor.region_id != origin_node.region_id):
                continue
            time_from_boundary_node = None
            neighbor_time = None
            if on_forward_graph:
                time_from_boundary_node = node.forward_boundary_time
                neighbor_time = neighbor.forward_boundary_time
            else:
                time_from_boundary_node = node.backward_boundary_time
                neighbor_time = neighbor.backward_boundary_time
            # Compute the distance if we were to travel to the neighbor from
            # the current node
            proposed_distance = (time_from_boundary_node[i] +
                                 connecting_link.time)
            # If this is better than the current best path to the neighbor,
            # update it (relaxation)
            if(proposed_distance < neighbor_time[i]):
                neighbor_time[i] = proposed_distance
                # since the distance was updated, this node needs to be
                # re-added to the PQ
                nodes_to_search.put((proposed_distance, neighbor))

    # Now, all origin nodes (and some other nodes) all know their distance from
    # the given origin_node
    return visited_nodes, expanded_count, max_pq_size
def ep_lindyn_mg(env,
                 theta,
                 F,
                 b,
                 nb_day,
                 nb_ep_per_day,
                 pqueue_in=None,
                 step=0,
                 log=None):
    """
    Does a linear dyna variation from Sutton et al. (2012) with replay.

    env - The environment to use
    theta - The weight vector to compute V(Phi)
    F - The transition tables from Phi to Phi', one per action
    b - A reward matrix which gives for each values of phi and an action the
        expected reward. For instance, if the 32 place cell is at the center of
        the environment, and action 8 is "going south", then because it's
        forbidden to go south at the center of the enviroment, b[32][8] will
        converge to -10. b is somewhat the Q(s, a) matrix.
    nb_day - number of "days" before ending the training. Days can also be
             understood as the number of replay sessions.
    nb_ep_per_day - number of time to do the task before going into "sleep mode".
                    The task is done `nb_day` * `nb_ep_per_day` in total.
    pqueue_in - [optional] priority queue carried over from a previous call so
                that replay can resume across sessions. The number of feature
                activations replayed per step is bounded by env.p.
    step - [optional] global step counter, used to anneal the learning rate.
    log - A list in which every place cells activation is recorded, along with
          the position of the agent and the position of the goal. While the
          agent sleeps, only the feature which is reactivated is logged.

    See Sutton et al. 2012 Dyna-Style Planning with Linear Function
    Approximation and Prioritized Sweeping for details about the algorithm (it
    is the algorithm 3 in the article).
    """
    if pqueue_in:
        pqueue = pqueue_in
    else:
        pqueue = PriorityQueue()
    for day in range(nb_day):
        for episode in range(nb_ep_per_day):
            print("day", day, ", episode", episode)
            if log is not None:
                log.append("session_begin")
            env.reinit()
            while not env.end:
                step += 1
                alpha = alpha_0 * (N_0 + 1) / (N_0 + step)
                phi = env.get_features()
                #print("theta")
                #print(theta)
                q = np.array([-np.inf for i in env.action])  # Q of Q-learning
                for a in env.possible_actions(
                ):  # The impossible actions stay to -inf
                    q[a] = np.inner(b[a], phi) + gamma * np.inner(
                        theta.T, np.dot(F[a], phi))
                a = softmax(q, 20, straight_bias=False)
                phi_n, r = env.do_action(a)
                delta = r + gamma * np.inner(theta, phi_n) - np.inner(
                    theta, phi)
                theta = theta + alpha * delta * phi
                F[a] = F[a] + alpha * np.outer(
                    (phi_n - np.dot(F[a], phi)), phi)
                b[a] = b[a] + alpha * (r - np.inner(b[a], phi)) * phi
                for i in range(len(phi)):
                    if phi[i] != 0:
                        pqueue.put((-np.abs(delta * phi[i]), i))

                has_replayed = False
                if log is not None:
                    log.append([
                        env.get_features(),
                        np.copy(env.pos),
                        np.copy(env.goals[0]), theta
                    ])
                    if env.p > 0:
                        log.append("sleep")
                        has_replayed = True
                # Replay
                p = env.p  # Number of replay max
                while not pqueue.empty() and p > 0:
                    unused_prio, i = pqueue.get()
                    if log is not None:
                        activation = np.zeros(env.pc.nb_place_cells)
                        activation[i] = 1
                        log.append([
                            activation,
                            np.copy(env.pos),
                            np.copy(env.goals[0]), theta
                        ])
                    for j in range(F.shape[2]):

                        if np.any(F[:, i, j] != 0) or np.any(
                                F[:, j, i] != 0
                        ):  # Uses possible future locations but also possible past locations; should not hinder convergence
                            #raw_input()
                            delta = -np.inf
                            for a in range(len(env.action)):
                                cur = b[a][j] + gamma * np.inner(
                                    theta, F[a, j, :]) - theta[j]
                                if cur > delta:
                                    delta = cur
                            theta[j] = theta[j] + alpha * delta
                            pqueue.put((-np.abs(delta), j))
                    p -= 1
                if log is not None and has_replayed:
                    log.append("end")
    return theta, b, F, pqueue, step
Example #52
0
#Incomplete
from Queue import PriorityQueue


def makeDict(string):
    char_dict = {}
    for i in string:
        if i in char_dict:
            char_dict[i] += 1
        else:
            char_dict[i] = 1
    return char_dict


filename = "test_huffmanCoding.txt"

with open(filename) as f:
    for test in f:
        freq = makeDict(test.strip())
        tree = PriorityQueue(len(freq))
        print freq
        for key in freq:
            tree.put((freq[key], key))  # priority = frequency, so the rarest symbols pop first
        while not tree.empty():
            print tree.get()
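
# A minimal sketch of how the tree-building step might continue (the example
# is marked incomplete): repeatedly pop the two least frequent entries and
# push back a merged node whose weight is their sum, until one root remains.
def build_huffman_tree(freq):
    pq = PriorityQueue()
    for symbol, weight in freq.items():
        pq.put((weight, symbol))  # leaf: (weight, symbol)
    while pq.qsize() > 1:
        w1, left = pq.get()
        w2, right = pq.get()
        pq.put((w1 + w2, (left, right)))  # internal node as a nested tuple
    return pq.get()  # (total weight, nested tuple tree)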
Example #53
0
    def beam_search_decode(self, src, src_len, sos_idx, beam_width=3):
        trg = torch.zeros(
            (100, src.shape[1])).long().fill_(sos_idx).to(self.device)

        batch_size = src.shape[1]
        max_len = trg.shape[0]
        topk = 1
        trg_vocab_size = self.decoder.output_dim

        encoder_outputs, hidden = self.encoder(src, src_len)

        output = trg[0, :]
        mask = self.create_mask(src)

        # beam search part
        endnodes = []
        number_required = min((topk + 1), topk - len(endnodes))

        # starting node - hidden vector, previous node, word id, logp, len
        node = BeamSearchNode(hidden, None, output, 0, 1)
        nodes = PriorityQueue()

        nodes.put((-node.eval(), node))
        qsize = 1

        while True:
            # give up when decoding takes too long
            if qsize > 2000: break

            # fetch the best node
            score, cur = nodes.get()
            decoder_input = cur.word_idx
            decoder_hidden = cur.hidden
            # print "word idx: %s\tscore: %.4f" % (cur.word_idx.item(), score)

            if cur.word_idx.item() == EOS_IDX and cur.pre_node is not None:
                endnodes.append((score, cur))
                if len(endnodes) >= number_required: break
                else: continue

            decoder_output, decoder_hidden, attention = self.decoder(
                decoder_input, decoder_hidden, encoder_outputs, mask)

            log_prob, indexes = torch.topk(
                F.log_softmax(decoder_output, dim=1), beam_width)
            # print 'log_prob: %s\nindexes: %s' % (log_prob, indexes)

            for new_k in range(beam_width):
                decoded_t = indexes[0][new_k].view(-1)
                log_p = log_prob[0][new_k].item()

                node = BeamSearchNode(decoder_hidden, cur, decoded_t,
                                      cur.logp + log_p, cur.len + 1)

                score = -node.eval()

                # put new nodes into the queue
                nodes.put((score, node))
            qsize += beam_width - 1

        if len(endnodes) == 0:
            endnodes = [nodes.get() for _ in range(topk)]

        utterances = []
        decoded_batch = []
        for score, n in sorted(endnodes, key=operator.itemgetter(0)):
            utterance = []
            utterance.append(n.word_idx)

            # back tracking
            while n.pre_node is not None:
                n = n.pre_node
                utterance.append(n.word_idx)

            utterance = utterance[::-1]
            utterance = [t.item() for t in utterance]
            utterances.append(utterance)

        decoded_batch.append(utterances)

        return decoded_batch
Example #54
0
            expanded_maarray.mask[index] = 1
            if currentMap.data[index] < 50 and currentMap.data[index] != -1:
                unexplored_maarray.mask[index] = 0
            else:
                unexplored_maarray.mask[index] = 1

    addToFrontier(start_x, start_y)

    print "Creating Priority Queues"
    frontier = PriorityQueue()
    expanded = PriorityQueue()

    # add start node
    print "Creating Start Node"
    current = Node(0, start_x, start_y, None)
    frontier.put((0, current))

    while not rospy.is_shutdown():
        try:
            current = frontier.get(False)[1]  # non-blocking get so Empty can actually be raised
        except Empty, e:
            print "Goal position is unreachable"
            return AstarResponse(None)

        print "i_x: %s" % current.i_x
        print "i_y: %s" % current.i_y
        print "cost: %s" % current.cost
        print "dist_cost: %s" % current.dist_cost

        if current.i_x == goal_x and current.i_y == goal_y:
            print "Goal node reached at (%s, %s)" % (current.i_x, current.i_y)
Example #55
0
class TaskManager:
    """
    Listens for new tasks and broadcasts highest priority job to executer
    """
    def __init__(self):
        print("new TaskManager")
        global pub

        rospy.init_node('Manager', anonymous=True)
        rospy.Subscriber("task_m", Task, self.subscriber)
        if pub is None:
            pub = rospy.Publisher('task_e', Task, queue_size=1)

        self.current_tasks = PriorityQueue()
        self.current_task = None

        self.publish_next_task()

    def __str__(self):
        o = "\nTask Manager:\n|\tCurrent Task -\n|\t\t" + str(
            self.current_task) + "\n|\tOther Tasks -"
        for task in self.current_tasks.queue:
            o = o + "\n|\t\t" + str(task)

        return o + "\n"

    def add_task(self, task):
        print("tm | add_task\t" + str(task) + "\n")
        self.current_tasks.put(task)
        print(str(self))

    def update_priorities(self):
        updated_priorities_queue = PriorityQueue()

        for task in self.current_tasks.queue:
            if task is not None:
                task.update_priority()
                updated_priorities_queue.put(task)
        self.current_tasks = updated_priorities_queue

    def publish_next_task(self):
        # if self.current_task is not None:
        #    self.add_task(self.current_task)

        if self.current_tasks.empty():
            self.current_task = tt.Wander()
        else:
            self.current_task = self.current_tasks.get()

        print(self.current_task.priority)
        if self.current_task.priority > 0:
            self.current_tasks.put(self.current_task)
            self.current_task = tt.Wander()
            rospy.logwarn("positive priority, replacing with wander")

        t = self.current_task.to_msg()
        r = rospy.Rate(0.2)
        r.sleep()
        pub.publish(t)
        print(self)

    def subscriber(self, task_msg):
        priority_task = tt.from_msg(task_msg)
        if task_msg.finished:
            print("tm | subscriber\tfinished")
            if priority_task == self.current_task:
                self.current_task = None
            self.update_priorities()
            self.publish_next_task()
        else:
            self.add_task(priority_task)
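
A note on the example above: tasks are put onto the PriorityQueue directly, so their ordering depends on how the task objects compare. A minimal sketch of one way to make bare objects order correctly, assuming a hypothetical Task with a numeric priority (the real tt task types above may define their own comparison):

from Queue import PriorityQueue

class Task(object):
    def __init__(self, name, priority):
        self.name = name
        self.priority = priority

    def __lt__(self, other):
        # A smaller priority value pops first.
        return self.priority < other.priority

q = PriorityQueue()
q.put(Task('dock', 2))
q.put(Task('avoid_obstacle', -5))
print q.get().name  # avoid_obstacle
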
Example #56
0
class Device(object):
    """
    Class that represents a device.
    """
    def __init__(self, device_id, sensor_data, supervisor):
        """
        Constructor.

        @type device_id: Integer
        @param device_id: the unique id of this node; between 0 and N-1

        @type sensor_data: List of (Integer, Float)
        @param sensor_data: a list containing (location, data) as measured by this device

        @type supervisor: Supervisor
        @param supervisor: the testing infrastructure's control and validation component
        """
        self.device_id = device_id
        self.sensor_data = sensor_data
        self.supervisor = supervisor
        self.threads_no = 8
        self.scripts = PriorityQueue()  # current-timepoint scripts + End-of-Timepoint scripts
        self.future_scripts = PriorityQueue()  # scripts already executed in the current timepoint
        self.neighbours = []
        # Structures which ensure that only one thread will get the
        # neighbours per device
        self.neighbours_lock = Lock()
        self.no_neighbours = True
        self.timepoint_barr = None  # guarantees synchronization between threads
                                    # at the end of a timepoint

        # Structures which ensure that only one thread will reset the
        # properties of a device for the next timepoint
        self.timepoint_lock = Lock()
        self.next_timepoint = False
        # Guarantees that only one thread will have access to all the data
        # of a certain location
        self.location_locks = {}
        self.setup_barr = None  # guarantees synchronization of threads
                                # once the devices are set up
        self.threads = []
        for _ in xrange(self.threads_no):
            self.threads.append(DeviceThread(self))

    def __str__(self):
        """
        Pretty prints this device.

        @rtype: String
        @return: a string containing the id of this device
        """
        return "Device %d" % self.device_id

    def setup_devices(self, devices):
        """
        Set up the devices before simulation begins.
        Ensures device communication - devices will share the same instances of the
        location_locks, timepoint_barr and setup_barr objects.

        @type devices: List of Device
        @param devices: list containing all devices
        """

        if self.device_id != 0:
            return

        devices_no = len(devices)
        timepoint_barr = ReusableBarrier(self.threads_no * devices_no)
        setup_barr = SimpleBarrier(self.threads_no * devices_no)
        location_locks = {}

        for device in devices:
            for location in device.sensor_data:
                location_locks[location] = RLock()

        for device in devices:
            device.timepoint_barr = timepoint_barr
            device.setup_barr = setup_barr
            device.location_locks = location_locks

            for i in xrange(self.threads_no):
                device.threads[i].start()

    def assign_script(self, script, location):
        """
        Provide a script for the device to execute.
        Regular scripts have to be executed first in a timepoint, even if the device
        has already received an End-of-Timepoint script. Thus, regular scripts have
        priority 0 and End-of-Timepoint scripts have priority 1.
        An End-of-Timepoint script is sent per device. This method sends one End-of-Timepoint
        script per thread.

        @type script: Script
        @param script: the script to execute from now on at each timepoint; None if the
            current timepoint has ended

        @type location: Integer
        @param location: the location for which the script is interested in
        """

        if script is not None:
            self.scripts.put((0, script, location))
        else:
            for _ in xrange(self.threads_no):
                self.scripts.put((1, None, None))

    def get_data(self, location):
        """
        Returns the pollution value this device has for the given location.
        The thread which calls this method will acquire the lock for the
        given location (or will wait for it to be released), so that
        no other thread from any device will be able to concurrently access
        this data.

        @type location: Integer
        @param location: a location for which obtain the data

        @rtype: Float
        @return: the pollution value
        """
        if location in self.sensor_data:
            self.location_locks[location].acquire()
            return self.sensor_data[location]
        return None

    def set_data(self, location, data):
        """
        Sets the pollution value stored by this device for the given location.
        This method is called by a thread once the processing of data for a certain
        location is done. The thread will have to release the lock it has, allowing
        other waiting threads to acquire it.

        @type location: Integer
        @param location: a location for which to set the data

        @type data: Float
        @param data: the pollution value
        """
        if location in self.sensor_data:
            self.sensor_data[location] = data
            self.location_locks[location].release()

    def get_neighbours(self):
        """
        Allows only one thread per device to get the neighbours of a device
        at a certain timepoint.
        """
        with self.neighbours_lock:
            if self.no_neighbours:
                self.next_timepoint = True
                self.no_neighbours = False
                self.neighbours = self.supervisor.get_neighbours()

    def advance_timepoint(self):
        """
        Allows only one thread per device to reset properties of a device
        at the end of a timepoint.
        """
        with self.timepoint_lock:
            if self.next_timepoint:
                self.no_neighbours = True
                self.next_timepoint = False

                # Put processed scripts back in the queue for future runs
                for q_elem in self.future_scripts.queue:
                    self.scripts.put(q_elem)
                self.future_scripts = PriorityQueue()

    def shutdown(self):
        """
        Instructs the device to shutdown (terminate all threads). This method
        is invoked by the tester. This method must block until all the threads
        started by this device terminate.
        """
        for i in xrange(self.threads_no):
            self.threads[i].join()
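
A note on the example above: assign_script encodes the two-level scheme from its docstring directly in the queue entries, so regular scripts (priority 0) are always handed out before End-of-Timepoint markers (priority 1). A minimal sketch of that scheme, with plain strings standing in for Script objects:

from Queue import PriorityQueue

scripts = PriorityQueue()
scripts.put((1, None, None))     # End-of-Timepoint marker arrives first...
scripts.put((0, 'script_a', 7))  # ...but regular work is still popped before it
scripts.put((0, 'script_b', 3))

while True:
    priority, script, location = scripts.get()
    if script is None:
        print "end of timepoint"
        break
    print "running %s at location %s" % (script, location)
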
Example #57
0
class ExplorerNode(ExplorerNodeBase):

    def __init__(self):
        ExplorerNodeBase.__init__(self)

        self.blackList = []
        self.checkForDuplicateFrontiers = []
        self.listOfFrontiers = PriorityQueue()
        self.weightPriorityQueue = PriorityQueue()

        self.transferQueue = PriorityQueue()

        self.numberOfWaypoints = 0

        self.currentOdometrySubscriber = rospy.Subscriber('/robot0/odom', Odometry, self.odometryCallback)
        self.currentRobotPose = Pose2D()

    def updateFrontiers(self):
        pass

    def popFrontierFromQueue(self):
        frontier = self.listOfFrontiers.get()
        return frontier[1]

    def popWeightedFrontierFromQueue(self):
        weightedFrontier = self.weightPriorityQueue.get()
        return weightedFrontier[1]

    def pushFrontierOnToQueue(self, frontierLength, frontierList):
        # Negated so the min-first queue pops the longest frontier first.
        self.listOfFrontiers.put((frontierLength * (-1), frontierList))

    def pushWeightedFrontierOnToQueue(self, weight, frontierList):
        self.weightPriorityQueue.put((weight, frontierList))

    def pushTransfer(self, frontierLength, frontierList):
        self.transferQueue.put((frontierLength * (-1), frontierList))

    def popTransfer(self):
        frontier = self.transferQueue.get()
        return frontier[1]
    
    def findingMiddleCellOfAFrontier(self, frontierList):
        print('The initial plan was to go to that cell:')
        print(frontierList[0])
        xTotal = 0
        yTotal = 0

        thresholdPythagoras = float('inf')

        totalNumberOfElementsInFrontierList = len(frontierList)
        for cell in frontierList:
            xTotal += cell[0]
            yTotal += cell[1]
        averageX = xTotal / totalNumberOfElementsInFrontierList
        averageY = yTotal / totalNumberOfElementsInFrontierList

        # Pick the frontier cell closest to the frontier's centroid.
        for cellInList in frontierList:
            distanceToCentroid = sqrt((cellInList[0] - averageX)**2 + (cellInList[1] - averageY)**2)
            if distanceToCentroid < thresholdPythagoras:
                theNewCellIs = (cellInList[0], cellInList[1])
                thresholdPythagoras = distanceToCentroid
        print('Now it is choosing that one:')
        print(theNewCellIs)
        return theNewCellIs

    def angleToTurnWeight(self, robotAngle, robotX, robotY, cellToGo):
        deltaX = cellToGo[0] - robotX
        deltaY = cellToGo[1] - robotY
        angleToCellToGo = atan2(deltaY, deltaX)

        if (deltaX < 0 and deltaY > 0) or (deltaX < 0 and deltaY < 0):
            angleToCellToGo += math.pi

        # Normalise the turn angle to [-pi, pi].
        delta = angleToCellToGo - robotAngle
        if delta < -math.pi:
            delta += 2.0 * math.pi
        elif delta > math.pi:
            delta -= 2.0 * math.pi

        print('The difference in angle is:')
        print(math.degrees(delta))
        #dx1 = currentCell[0] - cellToGo[0]
        #dy1 = currentCell[0] - cellToGo[0]
        #dx2 = previousCell[0] - currentCell[0]
        #dy2 = previousCell[0] - currentCell[0]
        #angle = abs(dx1 * dx2 - dx2 * dy1)
        return math.degrees(delta)

    def distanceToFrontierWeight(self, robotPose, cellToGo):
        dX = cellToGo[0] - robotPose[0]
        dY = cellToGo[1] - robotPose[1]
        distance = sqrt(dX * dX + dY * dY)
        print('The distance is:')
        print(distance)
        return distance

    def updateTheCheckFrontierList(self, deletedFrontier):
        # Intentionally a no-op: the commented-out logic below would remove
        # the deleted frontier from checkForDuplicateFrontiers.
        # for alreadyCheckedFrontierCells in self.checkForDuplicateFrontiers:
        #     if deletedFrontier == alreadyCheckedFrontierCells:
        #         self.checkForDuplicateFrontiers.remove(alreadyCheckedFrontierCells)
        pass

    def checkForDuplicate(self, frontierToCheck):
        return any(frontierToCheck in sublist for sublist in self.checkForDuplicateFrontiers)
   
    def odometryCallback(self, odometry):
        odometryPose = odometry.pose.pose

        pose = Pose2D()

        position = odometryPose.position
        orientation = odometryPose.orientation
        
        pose.x = position.x
        pose.y = position.y
        pose.theta = 2 * atan2(orientation.z, orientation.w)
        self.currentRobotPose = pose


    def updateFrontiersCell(self, nextCandidate):
        # Flood-fill outwards from the candidate cell, collecting every
        # connected frontier cell into a single frontier.
        self.neighbouringFrontierCellsList = [nextCandidate]
        self.alreadyCheckedFrontiersList = []

        while len(self.neighbouringFrontierCellsList) != 0:
            nextFrontierCandidate = self.neighbouringFrontierCellsList.pop(0)
            self.alreadyCheckedFrontiersList.append(nextFrontierCandidate)

            for cell in self.getNextSetOfCellsToBeVisited(nextFrontierCandidate):
                if self.isFrontierCell(cell[0], cell[1]) and cell not in self.alreadyCheckedFrontiersList and cell not in self.neighbouringFrontierCellsList:
                    self.neighbouringFrontierCellsList.append(cell)

        if not self.checkForDuplicate(self.alreadyCheckedFrontiersList[0]):
            self.pushFrontierOnToQueue(len(self.alreadyCheckedFrontiersList), self.alreadyCheckedFrontiersList)
            self.checkForDuplicateFrontiers.append(self.alreadyCheckedFrontiersList)

    def chooseNewDestination(self):
        self.temporaryTransfer = []

        self.coverageNumerator = 0
        self.totalCellsToCover = 100 * 75

        robotX = self.currentRobotPose.x
        robotY = self.currentRobotPose.y
        robotAngle = self.currentRobotPose.theta
        robotPose = (robotX, robotY)

        candidateGood = False
        destination = None

        # Scan the whole grid: track coverage, blacklist occupied cells and
        # group every non-blacklisted frontier cell into a frontier.
        for x in range(0, self.occupancyGrid.getWidthInCells()):
            for y in range(0, self.occupancyGrid.getHeightInCells()):
                candidate = (x, y)

                if self.occupancyGrid.getCell(x, y) == 1 or self.occupancyGrid.getCell(x, y) == 0:
                    self.coverageNumerator += 1

                if self.occupancyGrid.getCell(x, y) == 1 and candidate not in self.blackList:
                    self.blackList.append(candidate)

                if self.isFrontierCell(x, y):
                    candidateGood = candidate not in self.blackList
                    if candidateGood is True:
                        self.updateFrontiersCell(candidate)

        # Re-rank each frontier by a weighted combination of the turn angle,
        # the distance to its middle cell and its length; the weight is
        # negated so the min-first queue pops the heaviest frontier first.
        while not self.listOfFrontiers.empty():
            candidateFrontier = self.popFrontierFromQueue()
            nextCellToGo = self.findingMiddleCellOfAFrontier(candidateFrontier)
            length = len(candidateFrontier)
            angle = self.angleToTurnWeight(robotAngle, robotX, robotY, nextCellToGo)
            distance = self.distanceToFrontierWeight(robotPose, nextCellToGo)
            weight = -1 * angle + 6 * distance + 4 * length
            self.pushWeightedFrontierOnToQueue(weight * (-1), candidateFrontier)

        # Take the highest-weighted frontier as the destination and push the
        # rest back onto the frontier queue.
        i = 0
        while not self.weightPriorityQueue.empty():
            isItTheHighestWeightedFrontier = self.popWeightedFrontierFromQueue()
            if i == 0:
                candidateGood = True
                destination = self.findingMiddleCellOfAFrontier(isItTheHighestWeightedFrontier)
                i += 1
                self.numberOfWaypoints += 1
            elif destination == (3, 38):
                destination = isItTheHighestWeightedFrontier[0]
            else:
                self.pushFrontierOnToQueue(len(isItTheHighestWeightedFrontier), isItTheHighestWeightedFrontier)

        print('The number of waypoints visited is:')
        print(self.numberOfWaypoints)

        coverage = (float(self.coverageNumerator) / self.totalCellsToCover) * 100
        print("The current coverage is: {}%\n".format(coverage))

        # Frontiers are rebuilt from scratch on the next call, so drain the
        # queue and reset the duplicate check.
        self.checkForDuplicateFrontiers = []
        while not self.listOfFrontiers.empty():
            self.popFrontierFromQueue()

        return candidateGood, destination

    def destinationReached(self, goal, goalReached):
        if goalReached is False:
#             print 'Adding ' + str(goal) + ' to the naughty step'
            self.blackList.append(goal)

    def getNextSetOfCellsToBeVisited(self, cell):

        # This stores the set of valid actions / cells
        cells = []

        # Go through all the neighbours and add the cells if they
        # don't fall outside the grid and they aren't the cell we
        # started with. The order has been manually written down to
        # create a spiral.
        self.pushBackCandidateCellIfValid(cell, cells, 0, -1)
        self.pushBackCandidateCellIfValid(cell, cells, 1, -1)
        self.pushBackCandidateCellIfValid(cell, cells, 1, 0)
        self.pushBackCandidateCellIfValid(cell, cells, 1, 1)
        self.pushBackCandidateCellIfValid(cell, cells, 0, 1)
        self.pushBackCandidateCellIfValid(cell, cells, -1, 1)
        self.pushBackCandidateCellIfValid(cell, cells, -1, 0)
        self.pushBackCandidateCellIfValid(cell, cells, -1, -1)

        return cells

    # This helper method checks if the robot, at cell.coords, can move
    # to cell.coords+(offsetX, offsetY). Reasons why it can't do this
    # include falling off the edge of the map or running into an
    # obstacle.
    def pushBackCandidateCellIfValid(self, cell, cells, offsetX, offsetY):
        newX = cell[0] + offsetX
        newY = cell[1] + offsetY
        extent = self.occupancyGrid.getExtentInCells()
        if ((newX >= 0) and (newX < extent[0])
                and (newY >= 0) and (newY < extent[1])):
            newCell = (newX, newY)
            cells.append(newCell)
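
A note on the example above: pushFrontierOnToQueue and the weighted push both negate their key (frontierLength * (-1), weight * (-1)) because PriorityQueue pops the smallest key first, so negating makes the longest or heaviest frontier come out first. A minimal standalone sketch of the trick:

from Queue import PriorityQueue

q = PriorityQueue()
for frontier in [[(0, 0)], [(1, 1), (1, 2), (1, 3)], [(5, 5), (5, 6)]]:
    q.put((-len(frontier), frontier))

print q.get()[1]  # the 3-cell frontier is popped first
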
Example #58
0
def symbolic_generation(dataset, sensitive_param, model_path, cluster_num,
                        limit):
    """
    The implementation of symbolic generation
    :param dataset: the name of dataset
    :param sensitive_param: the index of sensitive feature
    :param model_path: the path of testing model
    :param cluster_num: the number of clusters to form as well as the number of
            centroids to generate
    :param limit: the maximum number of test case
    """
    data = {"census": census_data, "credit": credit_data, "bank": bank_data}
    data_config = {"census": census, "credit": credit, "bank": bank}

    # the rank for priority queue, rank1 is for seed inputs, rank2 for local, rank3 for global
    rank1 = 5
    rank2 = 1
    rank3 = 10
    T1 = 0.3

    # prepare the testing data and model
    X, Y, input_shape, nb_classes = data[dataset]()
    arguments = gen_arguments(data_config[dataset])
    model = dnn(input_shape, nb_classes)
    x = tf.placeholder(tf.float32, shape=input_shape)
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    preds = model(x)
    tf.set_random_seed(1234)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    model_path = model_path + dataset + "/test.model"
    saver.restore(sess, model_path)

    # store the result of fairness testing
    global_disc_inputs = set()
    global_disc_inputs_list = []
    local_disc_inputs = set()
    local_disc_inputs_list = []
    tot_inputs = set()

    # select the seed input for fairness testing
    inputs = seed_test_input(dataset, cluster_num, limit)
    q = PriorityQueue()  # the lowest rank is popped first
    for inp in inputs[::-1]:
        q.put((rank1, X[inp].tolist()))

    visited_path = []
    l_count = 0
    g_count = 0
    while len(tot_inputs) < limit and q.qsize() != 0:
        t = q.get()
        t_rank = t[0]
        t = np.array(t[1])
        found = check_for_error_condition(data_config[dataset], sess, x, preds,
                                          t, sensitive_param)
        p = getPath(X, sess, x, preds, t, data_config[dataset])
        temp = copy.deepcopy(t.tolist())
        temp = temp[:sensitive_param - 1] + temp[sensitive_param:]

        tot_inputs.add(tuple(temp))
        if found:
            if (tuple(temp)
                    not in global_disc_inputs) and (tuple(temp)
                                                    not in local_disc_inputs):
                if t_rank > 2:
                    global_disc_inputs.add(tuple(temp))
                    global_disc_inputs_list.append(temp)
                else:
                    local_disc_inputs.add(tuple(temp))
                    local_disc_inputs_list.append(temp)
                if len(tot_inputs) == limit:
                    break

            # local search
            for i in range(len(p)):
                path_constraint = copy.deepcopy(p)
                c = path_constraint[i]
                if c[0] == sensitive_param - 1:
                    continue

                if c[1] == "<=":
                    c[1] = ">"
                    c[3] = 1.0 - c[3]
                else:
                    c[1] = "<="
                    c[3] = 1.0 - c[3]

                if path_constraint not in visited_path:
                    visited_path.append(path_constraint)
                    input = local_solve(path_constraint, arguments, t, i,
                                        data_config[dataset])
                    l_count += 1
                    if input is not None:
                        r = average_confidence(path_constraint)
                        q.put((rank2 + r, input))

        # global search
        prefix_pred = []
        for c in p:
            if c[0] == sensitive_param - 1:
                continue
            if c[3] < T1:
                break

            n_c = copy.deepcopy(c)
            if n_c[1] == "<=":
                n_c[1] = ">"
                n_c[3] = 1.0 - c[3]
            else:
                n_c[1] = "<="
                n_c[3] = 1.0 - c[3]
            path_constraint = prefix_pred + [n_c]

            # filter out the path_constraint already solved before
            if path_constraint not in visited_path:
                visited_path.append(path_constraint)
                input = global_solve(path_constraint, arguments, t,
                                     data_config[dataset])
                g_count += 1
                if input is not None:
                    r = average_confidence(path_constraint)
                    q.put((rank3 - r, input))

            prefix_pred = prefix_pred + [c]

    # create the folder for storing the fairness testing result
    if not os.path.exists('../results/'):
        os.makedirs('../results/')
    if not os.path.exists('../results/' + dataset + '/'):
        os.makedirs('../results/' + dataset + '/')
    if not os.path.exists('../results/' + dataset + '/' +
                          str(sensitive_param) + '/'):
        os.makedirs('../results/' + dataset + '/' + str(sensitive_param) + '/')

    # storing the fairness testing result
    np.save(
        '../results/' + dataset + '/' + str(sensitive_param) +
        '/global_samples_symbolic.npy', np.array(global_disc_inputs_list))
    np.save(
        '../results/' + dataset + '/' + str(sensitive_param) +
        '/local_samples_symbolic.npy', np.array(local_disc_inputs_list))

    # print the overview information of result
    print("Total Inputs are " + str(len(tot_inputs)))
    print("Total discriminatory inputs of global search: %s (global solve attempts: %s)"
          % (len(global_disc_inputs), g_count))
    print("Total discriminatory inputs of local search: %s (local solve attempts: %s)"
          % (len(local_disc_inputs), l_count))
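
A note on the example above: because the queue pops the lowest rank first, local-search inputs (base rank 1, plus a confidence term) are explored before seed inputs (rank 5), which in turn are explored before global-search inputs (base rank 10, minus a confidence term). A minimal sketch of that ordering, with made-up confidence values:

from Queue import PriorityQueue

q = PriorityQueue()
q.put((5, 'seed input'))
q.put((10 - 0.9, 'global input, confidence 0.9'))
q.put((1 + 0.4, 'local input, confidence 0.4'))

while not q.empty():
    print q.get()  # local first, then the seed, then global
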
Example #59
0
class Learner:


	def __init__ (self, num_replicas, majority_numb, idnum):
		self.chat_log = []
		self.num_replicas = num_replicas
		self.majority_numb = majority_numb
		self.seq_dict = defaultdict(set) # maps sequence number -> set of acceptor ids seen for that value
		self.commands_to_execute = PriorityQueue()
		self.last_executed_seq_number = -1 # We haven't executed any commands yet
		self.idnum = idnum
		self.client_mapping = dict()
		self.connections_list = None
		self.prev_leader_nums = defaultdict(dict) #D2 # dict of dict: seq_num -> req_id -> set(acceptor_id)
		self.acceptor = None
		self.proposer = None
		self.accepted_seq_numbs = dict()
		self.exec_req_set = set() #D2
		self.catchup_requests_count = dict() # serves as timeout
		self.hasher = hashlib.md5()
		self.hash_count = 0
		self.exec_req_history = set() # set of req_ids that have been executed (excludes NOP)

	def acceptValue (self, leaderNum, idnum, req_id, seq_number, value):
		seq = int(seq_number)

		if seq > self.last_executed_seq_number:# or req_id == "NOP": # Else we should ignore

			self.seq_dict[seq].add(int(idnum)) # We've now seen one of these values

			if len(self.seq_dict[seq]) == self.majority_numb:
				printd(str(self.idnum) + " has majority for value at {} of {} (last exec seq num = {})".format(seq_number,str(value),self.last_executed_seq_number))
				self.reply_to_client(req_id, value)				   # so we can go ahead and reply to the client
				self.add_and_execute_seq_command(seq, value, req_id)
				del self.seq_dict[seq]
				return True
			else:
				printd("{} cannot execute for {},{} because we've only seen messages from{}.".format(self.idnum, seq_number, value, self.seq_dict[seq]))
				return False


	def set_acceptor(self, acceptor):
		self.acceptor = acceptor


	def set_proposer(self, proposer):
		self.proposer = proposer


	def try_to_execute_commands (self):
		if not self.commands_to_execute.empty() and self.last_executed_seq_number + 1 < int(self.commands_to_execute.queue[0][0]):
			printd("Replica {} sending catchup because it's missing {}.".format(self.idnum, self.last_executed_seq_number + 1).upper())
			(seq_number_found, missing_req_id, missing_value) = self.acceptor.get_value_at_seq_number(self.last_executed_seq_number + 1)
			self.fill_missing_value(seq_number_found, self.idnum, missing_req_id, self.last_executed_seq_number + 1, missing_value)
			if self.proposer:
				self.proposer.note_missing_value(seq_number_found, self.idnum, self.last_executed_seq_number + 1)
			else:
				printd("NO PROPOSER FOR " + str(self.idnum))

			msg = "{}:{}".format(MessageType.CATCHUP.value, self.last_executed_seq_number + 1)
			Messenger.broadcast_message (self.connections_list, msg)

			# keep track of how many times you request catchup. After so many, timeout
			if self.last_executed_seq_number + 1 in self.catchup_requests_count:
				self.catchup_requests_count[self.last_executed_seq_number + 1] += 1
			else:
				self.catchup_requests_count[self.last_executed_seq_number + 1] = 1
			printd("CATCHUP ATTEMPT COUNT: {}".format(self.catchup_requests_count[self.last_executed_seq_number + 1]))
			return

		# Convoluted way to peek at PriorityQueue
		while not self.commands_to_execute.empty() and int(self.commands_to_execute.queue[0][0]) == self.last_executed_seq_number + 1:
			command = self.commands_to_execute.get()
			self.execute_command(command)


	def execute_command (self, command):
		seq_number = command[0]
		value = command[1]
		req_id = command[2]

		self.add_msg_to_chat_log(seq_number, value, req_id)
		self.last_executed_seq_number = max(self.last_executed_seq_number,int(seq_number))

		printd(str(self.idnum) + " EXECUTES COMMAND " + str(command))


	# command successfully executed
	def reply_to_client (self, req_id, value):
		if req_id == "NOP" or req_id == "NONE":
			return # no client to reply to
		client_name, client_seq_number = req_id.split('-')

		if client_name in self.client_mapping: # This client name must be in the client mapping
			clientsock = self.client_mapping[client_name]
			printd("Responding to client {} with client_seq_number {}.".format(client_name, client_seq_number))
			Messenger.send_message(clientsock, req_id)
		else:
			raise RuntimeError("This client name: {}, is not in our mapping for replica {}.".format(client_name, self.idnum))


	def add_client (self, clientname, clientsock):
		self.client_mapping[clientname] = clientsock


	def add_and_execute_seq_command (self, seq_number, value, req_id):
		if seq_number not in self.accepted_seq_numbs and seq_number != -1:
			if req_id not in self.exec_req_set: # if you have not already executed for this req_id
				self.commands_to_execute.put((seq_number, value, req_id)) #D2
				self.exec_req_set.add(req_id)
			else: # if you have executed for this req_id, but have majority again, execute NOP
				alt_req = pop_req_id_from_pq(self.commands_to_execute, req_id) # remove and return the item with matching req_id in the pq
				if alt_req is None: # req_id has already been executed
					if req_id in self.exec_req_history and req_id != "NOP":
						self.commands_to_execute.put((seq_number, value, req_id))
					else:
						self.commands_to_execute.put((seq_number, "NOP2", "NOP"))

				elif alt_req[0] > seq_number: # put the request with lowest seq_num onto the pq. The other will be a NOP
					self.commands_to_execute.put((seq_number, value, req_id))
					self.commands_to_execute.put((alt_req[0], "NOP3", "NOP"))
				else:
					self.commands_to_execute.put(alt_req)
					self.commands_to_execute.put((seq_number, "NOP4", "NOP"))
			self.accepted_seq_numbs[seq_number] = True
			self.try_to_execute_commands() # Now try to process commands again


	def fill_missing_value (self, seq_number_found, acceptor_id, missing_req_id, missing_seq_number, missing_value):
		#if missing_seq_number in self.missing_vals_of_learners: # if we have not already resolved this issue
		missing_seq_number = int(missing_seq_number)
		if missing_req_id not in self.prev_leader_nums[missing_seq_number]:
			self.prev_leader_nums[missing_seq_number][missing_req_id] = set()

		self.prev_leader_nums[missing_seq_number][missing_req_id].add(acceptor_id)
		printd("{} IN MISSING VALUE, NUM UNIQUE REQ_IDS = {}".format(self.idnum,len(self.prev_leader_nums[missing_seq_number])))

		if missing_seq_number in self.prev_leader_nums:

					# do not need to iterate through req_ids. In this call, only the current req_id could have attained majority
			#for i in range(len(self.prev_leader_nums[missing_seq_number])): # iterate through each unique req_id
			#	if i not in self.prev_leader_nums[missing_seq_number]:
			#		continue
			sum_of_votes = 0
			for i in self.prev_leader_nums[missing_seq_number].values():
				sum_of_votes += len(i)
			printd("{} has {} votes for req_id: {} at seq_num: {} (total num votes = {})".format(self.idnum,len(self.prev_leader_nums[missing_seq_number][missing_req_id]),missing_req_id,missing_seq_number,sum_of_votes))
			if len(self.prev_leader_nums[missing_seq_number][missing_req_id]) == self.majority_numb: # if this seq_num, req_id combo has majority
				value = missing_value #max(self.prev_leader_nums[missing_seq_number], key=itemgetter(0))[1]
				del self.prev_leader_nums[missing_seq_number]

				if missing_seq_number > self.last_executed_seq_number and self.commands_to_execute.queue and missing_seq_number < int(self.commands_to_execute.queue[0][0]): # ignore previous messages
					#self.chat_log[missing_seq_number] = missing_value
					# DREW: why is this the case? The last executed command shouldn't change, right? #
					#self.last_executed_seq_number = missing_seq_number # + 1

					#del self.missing_vals_of_learners[missing_seq_number]
					printd("A different learner had the missing value. Fixing internal to the learners")
					if seq_number_found == "True":
						self.add_and_execute_seq_command(missing_seq_number, value, missing_req_id)
					else:
						self.add_and_execute_seq_command(missing_seq_number, "NOP5", "NOP")
						#print("Defaulted to NOP when seq_num = {}, val = {}, and req_id ={}".format(missing_seq_number, value, missing_req_id))
					#self.commands_to_execute.put((missing_seq_number, value, "NONE"))
					#self.try_to_execute_commands() # Now try to process commands again

			else: # need to check if majority impossible. If so, take NOP
				sum_of_votes = 0
				for i in self.prev_leader_nums[missing_seq_number].values():
					sum_of_votes += len(i)
					# if this is true, it is unable to achieve majority and all learners should execute NOP (catchup_requests_count acts as timeout)
					if sum_of_votes >= self.majority_numb and self.catchup_requests_count[missing_seq_number] > 20:
						#print("Catchup attempts: {}".format(self.catchup_requests_count[missing_seq_number]))
						self.add_and_execute_seq_command(missing_seq_number, "NOP6", "NOP")

			'''
			else: # check if this req-id is already in the to-execute queue. If so, this is a NOP
				for i in self.commands_to_execute.queue:
					if i[2] == missing_req_id:
						self.add_and_execute_seq_command(missing_seq_number, "NOP", "NOP")
						printd("REQ_ID COMING LATER. DO NOP NOW")
			'''



		#else:
			#print("{}, {}.".format(missing_seq_number, self.commands_to_execute.queue))
			# DREW: decided to move this logic to proposer. Will delete...
			#self.missing_vals_of_learners[missing_seq_number] += 1
			# if a majority of learners are also missing this value, let the proposer know
			#if self.missing_vals_of_learners[missing_seq_number] >= self.majority_numb:

		#else:
		#	raise RuntimeError("Error: invalid seq_number_found arg for MISSING_VALUE command")
		#else:
		#	return # already resolved. no action required.


	def set_socket_list (self, connections_list):
		self.connections_list = connections_list


	def add_msg_to_chat_log (self, seq_number, msg, req_id):
		self.chat_log.append(req_id.split('-')[0] + ": " + msg)

		# point of EXECUTION
		# if NOP, add to chat log but do not print ('execute'). Also check seq_num for hashing
		if str(req_id) != "NOP": # do nothing for NO OP
			self.file_log = open("replica_" + str(self.idnum) + ".log", "a")
			self.file_log.write(self.chat_log[seq_number] + '\n')
			self.file_log.close()
			self.exec_req_history.add(req_id)
		#else:
		#	print("{} executing NOP with value {} at seq_num {}".format(self.idnum,self.chat_log[seq_number],seq_number))
		if seq_number % 49 == 0 and seq_number != 0: # print the hash at every nonzero multiple of 49 commands
			self.hash_count += 1
			with open("replica_" + str(self.idnum) + ".log", 'rb') as afile:
				buf = afile.read()
				self.hasher.update(buf)
			print("Replica #{}, hash #{} | hash: {}".format(self.idnum,self.hash_count,self.hasher.hexdigest()))


	def get_chat_log (self):
		#chat_log_list = []
		#for i in range(0, self.last_executed_seq_number+1):
		#	if i in self.chat_log:
		#		chat_log_list.append(self.chat_log[i])
		#return ("Chat log for " + str(self.idnum) + ":\n\t" + '\n\t'.join(self.chat_log))
		return "\n\t" + '\n\t'.join(self.chat_log)
Example #60
0
# A priority queue is a container data structure that manages a set of records
# with totally-ordered keys (for example, a numeric weight value) to provide
# quick access to the record with the smallest or largest key in the set.
# Instead of retrieving the next element by insertion time, it retrieves the
# highest-priority element.

import heapq
from Queue import PriorityQueue

n_list = []

heapq.heappush(n_list, 2)
heapq.heappush(n_list, 1)
heapq.heappush(n_list, 3)

while n_list:
    print(heapq.heappop(n_list))

q = PriorityQueue()

q.put((2, 'code'))
q.put((1, 'eat'))
q.put((3, 'sleep'))

while not q.empty():
    next_item = q.get()
    print(next_item)
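
The heapq half of the example above can carry payloads the same way PriorityQueue does, by pushing (priority, item) tuples; heapq compares the tuples element by element, so the smallest priority pops first. A short sketch along those lines:

import heapq

heap = []
heapq.heappush(heap, (2, 'code'))
heapq.heappush(heap, (1, 'eat'))
heapq.heappush(heap, (3, 'sleep'))

while heap:
    print(heapq.heappop(heap))  # (1, 'eat'), (2, 'code'), (3, 'sleep')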