Example #1
File: maze.py Project: andrewyang96/CS440
 def a_star(self):
     # like BFS, but puts coords with lowest heuristic (path length + manhattan dist to goal) up front
     pq = PriorityQueue(maxsize=0)
     pq.put_nowait((self.manhattan_distance(self.currPos, self.goalPos), (self.currPos, [])))
     visited = set()
     bestPath = None
     bestHeur = None
     numNodes = 0
     while not pq.empty():
         priority, curr = pq.get_nowait()
         coord, path = curr
         visited.add(coord)
         if bestPath is not None and priority >= bestHeur:
             pass
         elif self.getChar(coord) == '%': # wall
             pass
         else: # recursive case
             if self.getChar(coord) == '.': # goal
                 print "Found a path:", path
                 if bestPath is None or len(path) < len(bestPath):
                     print "Is best path"
                     bestPath = path[:]
                     bestHeur = priority
             for adj, direction in self.adjacent(coord):
                 if adj not in visited and self.getChar(adj) != '%':
                     numNodes += 1
                     heur = len(path + direction) + self.manhattan_distance(adj, self.goalPos)
                     if bestPath is None or heur < bestHeur: # preselect based on heuristic
                         pq.put_nowait((heur, (adj, path + direction)))
     print "Num Nodes:", numNodes
     print self.debug(bestPath) # debug
     return bestPath
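The pattern above relies on PriorityQueue ordering its entries by the first tuple element, so the coordinate with the lowest heuristic always pops first. A standalone sketch of that ordering (the coordinates and values are made up):

from Queue import PriorityQueue

pq = PriorityQueue()
pq.put_nowait((7, ((1, 1), ['R'])))   # heuristic 7
pq.put_nowait((3, ((0, 1), ['D'])))   # heuristic 3
pq.put_nowait((5, ((1, 0), ['L'])))   # heuristic 5
while not pq.empty():
    print pq.get_nowait()             # pops the 3-entry, then 5, then 7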
Example #2
class MultiQueue(object):
    """
    Simple priority queue interface to push/pull tasks
    Priority queue maintain the order by first element of the tuple, no futher ordering is guarantied
    """
    def __init__(self):
        self.queue = PriorityQueue()

    def empty(self):
        return self.queue.empty()

    def pull_nowait(self):
        try:
            (EnterTime, User, Task) = self.queue.get_nowait()
        except Empty:
            # get_nowait() raises Empty on an empty queue instead of
            # returning a falsy value
            return None
        self.queue.task_done()
        return (User, Task)

    def pull_wait(self, wait):
        try:
            task_data = self.queue.get(block=True, timeout=wait)
            (EnterTime, User, Task) = task_data
            self.queue.task_done()
            return (User, Task)
        except Empty:
            return None

    def push(self, User, Tasks):
        EnterTime = time()
        for task in Tasks:
            self.queue.put_nowait((EnterTime, User, task))
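A minimal usage sketch for MultiQueue, assuming `from Queue import PriorityQueue, Empty` and `from time import time` are in scope (the user and task names are illustrative):

mq = MultiQueue()
mq.push('alice', ['task-1', 'task-2'])
mq.push('bob', ['task-3'])
while not mq.empty():
    print mq.pull_nowait()   # ('alice', 'task-1'), ('alice', 'task-2'), ('bob', 'task-3')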
Example #3
def a_star(graph, start, goal):
    extendedSet = set()
    if start == goal:
        return [start]  # list(start) would split a multi-character node name

    paths = PriorityQueue()
    for node in graph.get_connected_nodes(start):
        paths.put_nowait((path_length(graph, [start, node]) +
                          graph.get_heuristic(node, goal), [start, node]))
    extendedSet.add(start)

    while not paths.empty():
        path = paths.get_nowait()
        if path[1][-1] == goal:
            return path[1]
        elif path[1][-1] not in extendedSet:
            extendedSet.add(path[1][-1])
            cnodes = graph.get_connected_nodes(path[1][-1])
            for node in cnodes:
                if path[1].count(node) == 0:
                    epath = list(path[1])
                    epath.append(node)
                    paths.put_nowait((path_length(graph, epath) +
                                      graph.get_heuristic(node, goal), epath))
    return []
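The graph interface and the path_length helper are assumed by this example rather than shown. A plausible sketch of path_length, assuming graph.get_edge(a, b) returns an edge object with a .length attribute (both names are assumptions, not part of the example above):

def path_length(graph, path):
    # sum the edge lengths along consecutive node pairs of the path
    total = 0
    for a, b in zip(path[:-1], path[1:]):
        total += graph.get_edge(a, b).length
    return total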
Example #4
File: maze.py Project: andrewyang96/CS440
 def greedy(self):
     # like DFS, but puts coords closest to goal up front
     pq = PriorityQueue(maxsize=0)
     pq.put_nowait((self.manhattan_distance(self.currPos, self.goalPos), (self.currPos, [])))
     visited = set()
     bestPath = None
     numNodes = 0
     while not pq.empty():
         priority, curr = pq.get_nowait()
         coord, path = curr
         visited.add(coord)
         if bestPath is not None and len(path) >= len(bestPath):
             pass
         elif self.getChar(coord) == '%': # wall
             pass
         else: # recursive case
             if self.getChar(coord) == '.': # goal
                 print "Num Nodes:", numNodes
                 print self.debug(path)
                 return path # return on first path found
             for adj, direction in self.adjacent(coord):
                 if adj not in visited and self.getChar(adj) != '%':
                     numNodes += 1
                     heur = self.manhattan_distance(adj, self.goalPos)
                     if bestPath is None: # preselect based on heuristic
                         pq.put_nowait((heur, (adj, path + direction)))
     return [] # impossible
Example #5
File: maze.py Project: andrewyang96/CS440
 def a_star_penalize(self, forwardPenalty, turnPenalty):
     # part 1.2
     # manhattan-distance heuristic, scaled by the forward penalty
     pq = PriorityQueue(maxsize=0)
     pq.put_nowait((self.manhattan_distance(self.currPos, self.goalPos), (self.currPos, [])))
     visited = set()
     bestPath = None
     bestHeur = None
     numNodes = 0
     while not pq.empty():
         priority, curr = pq.get_nowait()
         coord, path = curr
         visited.add(coord)
         if bestPath is not None and priority >= bestHeur:
             pass
         elif self.getChar(coord) == '%': # wall
             pass
         else: # recursive case
             if self.getChar(coord) == '.': # goal
                 print "Found a path:", path
                 if bestPath is None or len(path) < len(bestPath):
                     bestPath = path[:]
                     bestHeur = priority
             for adj, direction in self.adjacent(coord):
                 if adj not in visited and self.getChar(adj) != '%':
                     numNodes += 1
                     heur = self.calculate_penalty(path + direction, forwardPenalty, turnPenalty) + self.manhattan_distance(adj, self.goalPos) * forwardPenalty
                     if bestPath is None or heur < bestHeur: # preselect based on heuristic
                         pq.put_nowait((heur, (adj, path + direction)))
     print "Num Nodes:", numNodes
     print self.debug(bestPath) # debug
     return bestPath
Example #6
def getAlphaBeta( game, player, other, reward=AlphaBeta(-100,100), depth=10, tab=0 ):
    '''
    alpha: minimum bound of the outcome -- currently, the evaluation of the best move possible by other
    beta: maximum bound of the outcome -- currently, the evaluation of the best move possible by player
    
    returns: (alpha, beta, evaluation, move)
    '''
    # evaluation is the current value of board, assuming no more moves in future
    # alpha == beta == finalvalue if we figure out the outcome.
    reward.evaluation = game.evaluate_cached( player )
    move = Move(-1,-1)
    # base case: can't play further, lost
    if (reward.evaluation==reward.beta):
#         print 'Player', player, 'won!!!'
        return AlphaBetaOfMove(AlphaBeta(reward.beta, reward.beta, reward.beta), move)
    # base case: can't evaluate further
    if (depth == 0):
        return AlphaBetaOfMove(reward, move )
    q = PriorityQueue()
    for m in game.next_moves( ):
        # g2 = copy.deepcopy( game )
        assert game.move( player, m)
        try:
            opponent_reward = AlphaBeta(-reward.beta, -reward.alpha, -reward.evaluation)
            opponent_reward = getAlphaBeta(game, other, player, reward=opponent_reward, depth=depth-1, tab=tab+1 ).alphabeta
            player_reward = AlphaBeta(-opponent_reward.beta, -opponent_reward.alpha, -opponent_reward.evaluation)
            q.put_nowait( AlphaBetaOfMove( player_reward, m ) )
        finally:
            assert game.unmove(player, m)
#             if( tab < 1 ):
#                 print ("\t"*tab), (player,row,col), (other,other_r,other_c), (other_alpha,other_beta,other_evaluation), (next_alpha,next_beta,evaluation), update
    if( q.empty() ):
        return AlphaBetaOfMove( reward, Move(-1,-1) )
    else:
        return q.get_nowait()
Example #7
 def req_proxy(self, url):
     from urlparse import urlparse
     netloc = urlparse(url).netloc
     busy_queue = PriorityQueue()
     lazy_queue = PriorityQueue()
     index = 0
     now = datetime.utcnow()
     while index < self.proxy_in_queue_count():
         index += 1
         proxy_url = self.get_proxy_from_queue()
         if not proxy_url:
             break
         if proxy_url in self.proxy_meta_map:
             proxy_meta = self.proxy_meta_map[proxy_url]
             if proxy_meta.last_used_time and (now - proxy_meta.last_used_time).total_seconds() < self.settings["interval_second"]:
                 busy_queue.put_nowait((proxy_meta.last_used_time, proxy_url))
                 continue
             if netloc in proxy_meta.latency and proxy_meta.latency[netloc][0] >= self.settings["max_unavailable_count"]:
                 import random
                 if random.randint(1, 10) > 1:
                     lazy_queue.put_nowait((proxy_meta.latency[netloc], proxy_url))
                     continue
             proxy_meta.last_used_time = now
             proxy_meta.master = netloc
             return proxy_meta.proxy
     while not lazy_queue.empty():
         _, proxy_url = lazy_queue.get_nowait()
         if proxy_url in self.proxy_meta_map:
             proxy_meta = self.proxy_meta_map[proxy_url]
             proxy_meta.last_used_time = now
             proxy_meta.master = netloc
             return proxy_meta.proxy
     return None
Example #8
def __main__():
    # check arguments
    if len(sys.argv) == 1:
        details()
        sys.exit()
    
    if len(sys.argv) < 2:
        usage()
    
    try: 
        opts, args = getopt.getopt(sys.argv[2:],"l:")
    except getopt.GetoptError:
        usage()
    
    for opt, arg in opts:
        if opt == "-l":
            global _log 
            _log = str(arg)
        else:
            print ("Unrecognized option: "+opt+"\n")
            usage()
    
    annot = open(sys.argv[1],"r")
    
    if annot is None:
        print ("Bad annotation name: "+sys.argv[1])
        sys.exit()
        
        
    logfile = open(_log,"w")
    
    if logfile is None:
        print ("Bad logfile name: "+_log)
        sys.exit()
        
    #reg ex
    annotpat = re.compile(r"\w+_(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)(.*)")
    
    negQueue = PriorityQueue()
    
    #read the vcf file...
    for line in annot:
        line = line.rstrip()
        m = annotpat.match(line)
        
        if m is None:  # if we find a weird line...
            sys.stderr.write("Non-standard line (annot):\n\t"+line+"\n")
            continue
        else:
            scaf = int(m.group(1))
            start = int(m.group(2))
            direction = m.group(5)
            
            if direction == "-":
                negQueue.put_nowait((start, line))
                continue
            else:
                while not negQueue.empty():
                    print(negQueue.get()[1])
                print(line)
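The minus-strand handling above boils down to: buffer lines in a min-queue keyed by start coordinate, then flush them in sorted order once a plus-strand line is reached. Stripped of the parsing, the pattern looks like this (the annotation strings are made up):

from Queue import PriorityQueue

negQueue = PriorityQueue()
negQueue.put_nowait((300, 'scaffold_1 300 ... -'))
negQueue.put_nowait((100, 'scaffold_1 100 ... -'))
while not negQueue.empty():
    print(negQueue.get()[1])   # the 100-line first, then the 300-line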
Example #9
File: taskmgr.py Project: taebow/PyPPL
	def put_nowait(self, item, where = 0):
		"""
		Put item to the queue, just like `PriorityQueue.put_nowait` but with an extra argument
		@params:
			`where`: Which batch to put the item
		"""
		with self.lock:
			PriorityQueue.put_nowait(self, item + where * self.batch_len)
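The extra `where` argument offsets an item's priority by whole batches, so everything in batch 0 drains before anything in batch 1, and so on. A standalone sketch of the arithmetic (batch_len = 100 is a made-up value):

from Queue import PriorityQueue

q = PriorityQueue()
batch_len = 100
for item, where in [(3, 2), (5, 0), (1, 1)]:
    q.put_nowait(item + where * batch_len)   # stores 203, 5, 101
while not q.empty():
    print q.get_nowait()                     # 5, 101, 203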
Example #10
    def getQueue(self):
        queue = PriorityQueue(len(Employees.objects.all()))  # capacity: number of employees
        for day in self.sched.keys():
            for hour in self.sched[day].keys():
                queue.put_nowait(
                    (len(self.sched[day][hour]), self.sched[day][hour], day, hour)
                )  # (num emps, emps[], day, hour)

        return queue
Example #11
def outlierRejection(graph, K, percent=5.0, max_dist=5.0):
    """ 
    Examine graph and remove some top percentage of outliers 
    and those outside a certain radius. 
    """

    # iterate through all points
    pq = PriorityQueue()
    marked_keys = []
    for key, entry in graph["3Dmatches"].iteritems():

        X = entry["3Dlocs"]

        # mark and continue if too far away from the origin
        if np.linalg.norm(X) > max_dist:
            marked_keys.append(key)
            continue

        # project into each frame
        errors = []
        for frame, x in zip(entry["frames"], entry["2Dlocs"]):
            frame -= graph["frameOffset"]
            Rt = graph["motion"][frame]

            proj = fromHomogenous(K * Rt * toHomogenous(X))
            diff = proj - x

            err = np.sqrt(np.multiply(diff, diff).sum())
            #print (frame, err)

            errors.append(err)

        # get mean error and add to priority queue
        # (priority is reciprocal of error since this is a MinPQ)
        mean_error = np.array(errors).mean()
        pq.put_nowait((1.0 / mean_error, key))

    # remove worst keys
    N = max(
        0,
        int((percent / 100.0) * len(graph["3Dmatches"].keys())) -
        len(marked_keys))
    for i in range(N):
        score, key = pq.get_nowait()
        del graph["3Dmatches"][key]
        pq.task_done()

    # remove keys out of range
    for key in marked_keys:
        del graph["3Dmatches"][key]

    print "Removed %d outliers." % (N + len(marked_keys))
Example #12
class AsyncDatabaseManager(Thread):
    def __init__(self, directory):
        super(AsyncDatabaseManager, self).__init__()
        self.directory = directory
        if not os.path.exists(self.directory):
            open(self.directory, 'w').close()
        self.queue = PriorityQueue()
        self.event = Event()
        self.start()  # Threading module start

    def run(self):
        super(AsyncDatabaseManager, self).run()
        db = sqlite3.connect(self.directory)
        cursor = db.cursor()
        while True:
            if self.queue.empty():
                time.sleep(0.1)
                continue
            job, sql, arg, res = self.queue.get_nowait()
            if sql == '__close__':
                break
            cursor.execute(sql, arg or ())  # sqlite3 rejects None as parameters
            time.sleep(0.01)
            db.commit()
            if res:
                for rec in cursor:
                    res.put(rec)
                res.put('__last__')
        db.close()
        self.event.set()  # TODO: Question: Do I want the database to finish or end it when the app ends?

    def execute(self, sql, args=None, res=None, priority=2):
        self.queue.put_nowait((priority, sql, args, res))

    def select(self, sql, args=None, priority=2):
        '''
        :param: sql - command to execute
        :param: args - sql arguments
        :param: priority - 2 for system and 1 for user
        '''
        res = Queue()
        self.execute(sql, args, res, priority)
        while True:
            rec = res.get()
            if rec == '__last__':
                break
            yield rec

    def close(self):
        self.execute('__close__')
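A minimal usage sketch (the table and column names are illustrative, and the class needs the os, sqlite3, time, threading, and Queue imports it references):

db = AsyncDatabaseManager('example.db')
db.execute('CREATE TABLE IF NOT EXISTS users (name TEXT)')
db.execute('INSERT INTO users VALUES (?)', ('alice',))
for row in db.select('SELECT name FROM users'):
    print row            # rows stream back through the internal res Queue
db.close()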
Example #15
class JobsQueue(object):
    """ Holds the jobs planned for execution in memory.

    The Jobs are sorted, the higher the priority is,
    the earlier the jobs are dequeued.
    """
    def __init__(self):
        self._queue = PriorityQueue()

    def enqueue(self, job):
        self._queue.put_nowait(job)

    def dequeue(self):
        """ Take the first job according to its priority
        and return it"""
        return self._queue.get()
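PriorityQueue is min-oriented, so the "higher priority first" behavior in the docstring only holds if Job objects compare accordingly. A hypothetical Job sketch (not shown in the source) that would give that ordering:

class Job(object):
    def __init__(self, name, priority):
        self.name = name
        self.priority = priority

    def __lt__(self, other):
        # invert the comparison: a higher-priority job sorts "smaller",
        # so the min-queue dequeues it first
        return self.priority > other.priority

q = JobsQueue()
q.enqueue(Job('backup', priority=1))
q.enqueue(Job('alert', priority=9))
print q.dequeue().name   # 'alert'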
Example #16
def ShortestPath(startNode, destinationNodes):
   # Dijkstra w/ priority queue.
   Infinity = 999999999999
   distance = defaultdict(lambda: Infinity)
   predecessor = defaultdict(lambda: None)
   queued = defaultdict(lambda: False)

   nextNodes = PriorityQueue()
   nextNodes.put_nowait((0, startNode))
   queued[startNode] = True
   distance[startNode] = startNode.weight

   while True:
      try:
         priority, node = nextNodes.get_nowait()
         queued[node] = False
      except Empty:
         break

      for neighbor in node.edges:
         alternate = distance[node] + neighbor.weight

         if alternate < distance[neighbor]:
            distance[neighbor] = alternate
            predecessor[neighbor] = node

            if not queued[neighbor]:
               nextNodes.put_nowait((alternate, neighbor))
               queued[neighbor] = True

   destinationDistances = [
    (distance[node], node) for node in destinationNodes]

   bestCost, bestDestination = sorted(destinationDistances)[0]

   # For the best destination node, construct the path taken to get there.

   path = [bestDestination]
   node = bestDestination

   while True:
      node = predecessor[node]
      if node is None:
         break
      path.insert(0, node)

   return bestCost, path
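The Node type is assumed by this example; a minimal stand-in matching the attributes used above (.weight and .edges), plus a tiny worked graph. Assumes `from Queue import PriorityQueue, Empty` and `from collections import defaultdict` are in scope:

class Node(object):
    def __init__(self, name, weight=0):
        self.name = name
        self.weight = weight   # cost of entering this node
        self.edges = []        # neighboring Node objects

a, b, c = Node('a'), Node('b', 2), Node('c', 5)
a.edges = [b, c]
b.edges = [c]
cost, path = ShortestPath(a, [c])
print cost, [n.name for n in path]   # 5 ['a', 'c']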
Example #18
    def distance(self, i, j):
        lon = self.pixel_lons[i]
        lat = self.pixel_lats[j]

        point = Point(lat, lon)

        elements = PriorityQueue()
        elements.put_nowait((self.grid.distance(point), self.grid))
        # We iterate over the priority queue until the nearest element is a point; while it isn't, we add its children to the queue.
        while True:
            (distance, elem) = elements.get_nowait()
            #print "Iterating (%d, %d) distance: %f" % (i, j, distance)
            if isinstance(elem, Point):
                return distance
            else:
                for child in elem.children:
                    elements.put_nowait((child.distance(point), child))
Example #20
def ShortestPath(startNode, endNode):
   # Dijkstra w/ priority queue.
   Infinity = 999999999999
   distance = defaultdict(lambda: Infinity)
   predecessor = defaultdict(lambda: None)
   queued = defaultdict(lambda: False)

   nextNodes = PriorityQueue()
   nextNodes.put_nowait((0, startNode))
   queued[startNode] = True
   distance[startNode] = startNode.weight

   while True:
      try:
         priority, node = nextNodes.get_nowait()
         queued[node] = False
      except Empty:
         break

      for neighbor in node.edges:
         alternate = distance[node] + neighbor.weight

         if alternate < distance[neighbor]:
            distance[neighbor] = alternate
            predecessor[neighbor] = node

            if not queued[neighbor]:
               nextNodes.put_nowait((alternate, neighbor))
               queued[neighbor] = True

   # Dijkstra done here. Now we process the results.

   cost = distance[endNode]

   # Construct the path taken to get there.

   path = [endNode]
   node = endNode

   while True:
      node = predecessor[node]
      if node is None:
         break
      path.insert(0, node)

   return cost, path
Example #21
File: bot.py Project: JnBrymn/minglbot
class JobQueue(object):
    def __init__(self):
        self._priorityQueue = PriorityQueue()
    
    def put(self,job,priority_metric):
        self._priorityQueue.put_nowait((-priority_metric,job))
        
    def get(self):
        try:
            return self._priorityQueue.get_nowait()[1]
        except Exception:
            return None
    
    def __iter__(self):
        while True:
            job = self.get()
            if job is None:
                break
            yield job
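A short usage sketch: because the metric is negated on the way in, the job with the highest priority_metric comes out first (the job values are illustrative):

jq = JobQueue()
jq.put('low-priority-job', 1)
jq.put('high-priority-job', 10)
for job in jq:
    print job   # 'high-priority-job', then 'low-priority-job'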
Example #22
def branch_and_bound(graph, start, goal):
    if start == goal:
        return [start]

    paths = PriorityQueue()
    for node in graph.get_connected_nodes(start):
        paths.put_nowait((path_length(graph, [start, node]), [start, node]))

    while not paths.empty():
        path = paths.get_nowait()
        if path[1][-1] == goal:
            return path[1]
        else:
            cnodes = graph.get_connected_nodes(path[1][-1])
            for node in cnodes:
                if path[1].count(node) == 0:
                    epath = list(path[1])
                    epath.append(node)
                    paths.put_nowait((path_length(graph, epath), epath))
    return []
Example #23
File: tic.py Project: aqiu384/cs440
    def possible_moves(self, color):
        anti_color = -1 * color
        moves = PriorityQueue()

        for y in xrange(1, self.size + 1):
            for x in xrange(1, self.size + 1):
                if not self.colors[y, x]:
                    y_cross = YERS + y
                    x_cross = XERS + x

                    my_cols = self.colors[y_cross, x_cross]
                    my_inds = np.array([0, 0, 0, 0, color], dtype=np.int8)

                    if any(my_cols == color):
                        my_inds += (my_cols == anti_color) * 2 * color

                    priority = anti_color * np.dot(my_inds, self.values[y_cross, x_cross])
                    moves.put_nowait((priority, y, x, my_inds))

        return moves
Example #24
    def getTwoBestClusters(self):
        best_clusters = PriorityQueue()
        cnt = 0

        for centroid, stats in self.clusters.iteritems():
            avg_mass = stats["total_mass"] / stats["size"]
            best_clusters.put_nowait((avg_mass, centroid))
            cnt += 1

        if cnt < 2:
            print "Warning. Only found %d clusters." % cnt
            return [], []

        avg_mass, cluster1 = best_clusters.get_nowait()
        best_clusters.task_done()
        avg_mass, cluster2 = best_clusters.get_nowait()
        best_clusters.task_done()

        centroids = [cluster1, cluster2]
        scores = [self.clusters[cluster1]["total_mass"],
                  self.clusters[cluster2]["total_mass"]]

        return centroids, scores
Example #25
def queue_sort(queue, pathTuple, opened_nodes, method):
	q = queue
	if method != "depth_limited" or pathTuple[0] != 0:
		for path in opened_nodes:
			value = calculateValue(path, method)
			path.fnValue = value
			pathTuple = (value, path)
			q.put_nowait(pathTuple)
	printLine(q, method)
	if method == "hill_climbing":
		if not q.empty():
			first = q.get_nowait()
			q = PriorityQueue()
			q.put_nowait(first)
	if method == "beam":
		first = False
		second = False
		if not q.empty():
			first = q.get_nowait()
		if not q.empty():
			second = q.get_nowait()
		q = PriorityQueue()
		if first != False:
			q.put_nowait(first)
		if second != False:
			q.put_nowait(second)
	return q
Example #26
def from_dict_to_prioqueue_urlset(dict, starting_url):
    prioq = PriorityQueue()
    urlset = set()
    # first key is going to be the start url -> initialize with 0 distance (key)
    is_start_url = True
    for key in dict:
        urlset.add(key.lower())
        if key.lower() == starting_url.lower() and is_start_url:
            key_tnode = TreeNode(0, key.lower())
            prioq.put_nowait(key_tnode)
            prioq.queue.sort(key=attrgetter("key"))
            is_start_url = False
        else:
            key_tnode = TreeNode(float("inf"), key.lower())
            prioq.put_nowait(key_tnode)
        # for each of the urls in its val, check if tnode already exists in circnode ring
        for val_url in dict[key]:
            urlset.add(val_url.lower())
            # if it doesn't exist, add the tnode
            val_tnode = TreeNode(float("inf"), val_url.lower())
            if not pq_member(prioq, val_url.lower()) :
                prioq.put_nowait(val_tnode)
                prioq.queue.sort(key=attrgetter("key"))
            key_tnode.neighbors.append(val_tnode)
    print "\tmin key: %g, url: %s" % (
        prioq.queue[0].key, prioq.queue[0].self_url)
    print "\tsize of prioqueue: %d" % prioq.qsize()
    print "\tsize of dict: %d" % len(dict)
    return prioq, urlset
Example #27
    def predict(self, image):
        result_priority_queue = PriorityQueue()
        results = []

        bbs = self.align.getAllFaceBoundingBoxes(image)

        for bb_index, bb in enumerate(bbs):
            alignedFace = self.align.alignImg("affine", 96, image, bb)
            if alignedFace is None:
                continue

            phash = str(imagehash.phash(Image.fromarray(alignedFace)))
            if phash in self.trained_images:
                identity = self.trained_images[phash].identity
                result_priority_queue.put_nowait((-1.0, identity, bb_index))
            else:
                rep = self.net.forwardImage(alignedFace)
                if self.svm is not None:
                    result_proba_list = self.svm.predict_proba(rep)
                    identity = np.argmax(result_proba_list[0])
                    print str(result_proba_list[0]) + " " + str(bb)
                    for index, prob in enumerate(result_proba_list[0]):
                        result_priority_queue.put_nowait((prob * -1.0, self.identities[index], bb_index))
                else:
                    result_priority_queue.put_nowait((0.0, -1, bb_index))

        matched_identities = []
        matched_bb_indices = []
        threshold = 0.6

        while len(matched_identities) != len(bbs) and not result_priority_queue.empty():
            detectedFaceInfo = result_priority_queue.get_nowait()

            identity = detectedFaceInfo[1]
            probability = detectedFaceInfo[0] * -1.0
            bb_index = detectedFaceInfo[2]
            # print detectedFaceInfo

            if identity in matched_identities:
                # print "matched_bbs : " + str(matched_identities)
                continue

            matched_bb_indices.append(bb_index)
            matched_identities.append(identity)

            if probability < threshold:
                results.append((-1, bbs[bb_index], 0.0))
            else:
                results.append((identity, bbs[bb_index], probability))

                # print '+' + str(results[len(results) - 1])

        for bb_index, bb in enumerate(bbs):
            if bb_index in matched_bb_indices:
                continue

            results.append((-1, bb, 0.0))

        return results
Example #28
File: queue.py Project: guewen/connectors
class JobsQueue(object):
    """ Implementation """

    job_cls = Job
    instance = None

    def __init__(self):
        self._queue = PriorityQueue()

    def enqueue_resolve_args(self, session, func, *args, **kwargs):
        """Create a Job and enqueue it in the queue"""
        priority = kwargs.pop('priority', None)
        only_after = kwargs.pop('only_after', None)

        return self.enqueue(session, func, args=args,
                            kwargs=kwargs, priority=priority,
                            only_after=only_after)

    def enqueue_job(self, session, job):
        job.state = QUEUED
        job.date_enqueued = datetime.now()
        job.user_id = session.uid
        job.store(session)

        self._queue.put_nowait(job)
        _logger.debug('%s enqueued', job)

    def enqueue(self, session, func, args=None, kwargs=None,
                priority=None, only_after=None):
        job = self.job_cls(func=func, args=args, kwargs=kwargs,
                           priority=priority, only_after=only_after)
        self.enqueue_job(session, job)

    def dequeue(self):
        """ Take the first job from the queue and return it """
        job = self._queue.get()
        _logger.debug('Fetched job %s', job)
        return job
Example #29
class TaskChain(object):

    def __init__(self):
        self.task_chain = PriorityQueue()
        self.task_num = 0

    def put(self, task):
        try:
            self.task_chain.put_nowait((self.task_num, task))
            self.task_num += 1
        except Queue.Full as e:
            raise e

    def get(self):
        try:
            priority, task = self.task_chain.get_nowait()
            self.task_num -= 1
            return task
        except Queue.Empty as e:
            raise e

    def size(self):
        return self.task_num
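Usage sketch (assumes `import Queue` and `from Queue import PriorityQueue`). Note that task_num does double duty as size counter and as priority, and get() decrements it, so strict FIFO order is only guaranteed while puts are not interleaved with gets:

chain = TaskChain()
chain.put('first')
chain.put('second')
print chain.get()    # 'first'
print chain.get()    # 'second'
print chain.size()   # 0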
Example #30
File: maze.py Project: andrewyang96/CS440
 def a_star_ghost(self):
     pq = PriorityQueue(maxsize=0)
     pq.put_nowait((self.manhattan_distance(self.currPos, self.goalPos), (self.currPos, [])))
     visited = set()
     bestPath = None
     bestHeur = None
     numNodes = 0
     backwardsPenalty = len(self.maze) * len(self.maze[0]) / 2 # backwards penalty to allow loitering
     while not pq.empty():
         priority, curr = pq.get_nowait()
         coord, path = curr
         visited.add(coord)
         if bestPath is not None and priority >= bestHeur:
             pass
         elif self.getChar(coord) == '%': # wall
             pass
         else: # recursive case
             if self.getChar(coord) == '.': # goal
                 print "Found a path:", path
                 if bestPath is None or len(path) < len(bestPath):
                     print "Is best path"
                     bestPath = path[:]
                     bestHeur = priority
             for adj, direction in self.adjacent(coord):
                 if self.getChar(adj) != '%':
                     numNodes += 1
                     heur = len(path + direction) + self.manhattan_distance(adj, self.goalPos)
                     if adj in visited:
                         heur += backwardsPenalty
                     if bestPath is None or heur < bestHeur: # preselect based on heuristic
                         if adj != self.getGhostPos(path + direction) and (adj != self.getGhostPos(path) and coord != self.getGhostPos(path + direction)):
                             # check that next step won't put pacman on same square as ghost, or won't cross paths with ghost
                             pq.put_nowait((heur, (adj, path + direction)))
     print "Num Nodes:", numNodes
     print self.debug(bestPath) # debug
     return bestPath
Example #31
class ThreadPool(object):

    def __init__(self, workersLimit, queueLimit=-1):
        self._jobs = PriorityQueue(queueLimit)
        self._running = False
        self._workers = []
        self._workersLimit = workersLimit

    def start(self):
        for _ in xrange(self._workersLimit):
            worker = self._createNewWorker()
            try:
                worker.start()
            except Exception:
                _logger.error('Worker has not been started properly: %r', worker)
            else:
                self._workers.append(worker)

        self._running = True

    def stop(self):
        self._running = False
        try:
            while True:
                self._jobs.get_nowait()

        except QueueEmptyError:
            pass

        for _ in self._workers:
            self._jobs.put_nowait((_LOW_PRIORITY, TerminateJob()))

        self._workers = []

    def _createNewWorker(self):
        return Worker(self._jobs)

    def putLowPriorityJob(self, job):
        if not self._running:
            _logger.error('Thread pool is not running. Trying to put new job: %r', job)
            return
        self._jobs.put_nowait((_LOW_PRIORITY, job))

    def putJob(self, job):
        if not self._running:
            _logger.error('Thread pool is not running. Trying to put new job: %r', job)
            return
        self._jobs.put_nowait((_DEFAULT_PRIORITY, job))

    def __repr__(self):
        return '%s(workers = %d; jobs = %d)' % (self.__class__.__name__, len(self._workers), self._jobs.qsize())
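Worker, TerminateJob, and the priority constants are defined elsewhere in the project; a plausible minimal set, purely illustrative (lower values pop first on the min-queue):

from Queue import PriorityQueue, Empty as QueueEmptyError

_DEFAULT_PRIORITY = 0   # normal jobs drain before low-priority ones
_LOW_PRIORITY = 1

class TerminateJob(object):
    """Sentinel job telling a worker to exit its loop."""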
Example #32
from Queue import PriorityQueue
from fractions import Fraction

nums = {}

seen = set()

pq = PriorityQueue()
frac = Fraction(1, 2), Fraction(1, 2)
pq.put_nowait(((frac[0] + frac[1]).denominator, (frac[0], frac[1])))

while not pq.empty():
    weight, (frac1, frac2) = pq.get_nowait()
    hashing = frac1.denominator, frac2.denominator
    if hashing not in seen:
        seen.add(hashing)
        if weight not in nums:
            nums[weight] = 0

        nums[weight] += 1
        print '{} + {} = {}'.format(frac1, frac2, frac1 + frac2)
        if nums[weight] > 5:
            print weight
            print nums
            break
        fracp = Fraction(1, frac1.denominator + 1), frac2
        fracpp = frac1, Fraction(1, frac2.denominator + 1)
        pq.put_nowait(
            (((fracp[0] + fracp[1]).denominator), (fracp[0], fracp[1])))
        pq.put_nowait(
            (((fracpp[0] + fracpp[1]).denominator), (fracpp[0], fracpp[1])))
Example #33
class AsyncoreReactor(object):
    _thread = None
    _is_live = False
    logger = logging.getLogger("Reactor")

    def __init__(self):
        self._timers = PriorityQueue()
        self._map = {}

    def start(self):
        self._is_live = True
        self._thread = threading.Thread(target=self._loop, name="hazelcast-reactor")
        self._thread.daemon = True
        self._thread.start()

    def _loop(self):
        self.logger.debug("Starting Reactor Thread")
        Future._threading_locals.is_reactor_thread = True
        while self._is_live:
            try:
                asyncore.loop(count=10000, timeout=0.1, map=self._map)
                self._check_timers()
            except select.error as err:
                # TODO: parse error type to catch only error "9"
                pass
            except:
                self.logger.exception("Error in Reactor Thread")
                # TODO: shutdown client
                return
        self.logger.debug("Reactor Thread exited.")

    def _check_timers(self):
        now = time.time()
        while not self._timers.empty():
            try:
                _, timer = self._timers.queue[0]
            except IndexError:
                return

            if timer.check_timer(now):
                self._timers.get_nowait()
            else:
                return

    def add_timer_absolute(self, timeout, callback):
        timer = Timer(timeout, callback, self._cleanup_timer)
        self._timers.put_nowait((timer.end, timer))
        return timer

    def add_timer(self, delay, callback):
        return self.add_timer_absolute(delay + time.time(), callback)

    def shutdown(self):
        for connection in self._map.values():
            try:
                connection.close(HazelcastError("Client is shutting down"))
            except OSError as err:
                # errno.EBADF: socket already closed (requires `import errno`)
                if err.args[0] == errno.EBADF:
                    pass
                else:
                    raise
        self._map.clear()
        self._is_live = False
        self._thread.join()
Example #34
class IBusHandler(object):
    def __init__(self):

        self.serial_port = serial.Serial()
        self.serial_port.baudrate = 9600
        self.serial_port.parity = serial.PARITY_EVEN
        self.serial_port.stopbits = serial.STOPBITS_ONE
        self.serial_port.timeout = 0.001
        self.rts_state = False

        self.read_buffer = []
        self.read_lock = Lock()
        self.read_error_counter = 0
        self.read_error_container = []
        self.cancel_read_thread = False
        self.read_thread = Thread(target=self._reading)
        self.read_thread.daemon = True
        self.packet_buffer = Queue()

        self.cts_counter = 0.0
        self.cts_thread = Thread(target=self._cts_watcher)
        self.cts_thread.daemon = True

        self.write_buffer = PriorityQueue()
        self.write_counter = 0

        self.cancel_write_thread = False
        self.write_thread = Thread(target=self._writing)
        self.write_thread.daemon = True

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, *_args):
        self.disconnect()

    @property
    def ntsc(self):
        return self.rts_state

    @ntsc.setter
    def ntsc(self, value):
        if self.rts_state == value:
            return
        self.serial_port.setRTS(value)
        self.rts_state = value

    @staticmethod
    def _calculate_checksum(packet):
        result = 0
        for value in packet:
            result ^= value
        return result

    def _cut_read_buffer(self, offset=1):
        with self.read_lock:
            self.read_buffer = self.read_buffer[offset:]

    def _wait_free_bus(self, waiting=17, timeout=1000):
        if waiting >= timeout:
            log('Error: Waiting Time (%sms) must be less than Timeout Time (%sms)'
                % (waiting, timeout))
            return False

        for _ in xrange(timeout):
            if self.cts_counter >= waiting:
                return True
            time.sleep(0.001)

        return False

    def _reading(self):
        while not self.cancel_read_thread:
            data = self.serial_port.read(50)
            if data:
                with self.read_lock:
                    self.read_buffer.extend(ord(x) for x in data)
            self._read_bus_packet()

        log('Read/Write Thread finished')

    def _writing(self):
        while not self.cancel_read_thread:
            try:
                prio, write_counter, data = self.write_buffer.get(timeout=1)
                try:
                    while self.cts_counter < 7.0:
                        time.sleep(0.001)

                    self.serial_port.write(data)
                    self.cts_counter = 0.0
                    self.serial_port.flush()
                    self.cts_counter = 0.0
                    log('\033[1;33;40mWRITE:\033[0m %s' %
                        ' '.join('%02X' % i for i in data))
                except serial.SerialException:
                    self.write_buffer.put((prio, write_counter, data))
            except Empty:
                pass

    def _cts_watcher(self):
        while not self.cancel_read_thread:
            if self.serial_port.getCTS():
                self.cts_counter += 0.1
                time.sleep(0.0001)
            else:
                self.cts_counter = 0.0
                #
                # TODO Maybe sleeping a little or move the `sleep()`
                #   in the ``if`` branch after the ``if``/``else``?
                #

        log('CTS Thread finished')

    def set_port(self, device_path):
        self.serial_port.port = device_path

    def connect(self):
        self.serial_port.open()
        self.serial_port.setRTS(0)
        self.cts_thread.start()

        if not self._wait_free_bus(120, 3000):
            log('Error: Can not locate free Bus')
            raise RuntimeError('can not locate free bus')

        self.serial_port.flushInput()

        self.read_thread.start()
        self.write_thread.start()

    def disconnect(self):
        self.cancel_write_thread = True
        self.cancel_read_thread = True
        self.ntsc = False
        time.sleep(0.6)
        self.serial_port.close()
        log('disconnected')

    def read_bus_packet(self):
        try:
            return self.packet_buffer.get(timeout=1)
        except Empty:
            return None

    def _read_bus_packet(self):
        if self.cancel_read_thread:
            return None

        try:
            data_length = self.read_buffer[1]
        except IndexError:
            return None

        if not 3 <= data_length <= 37:
            self.read_error_container.append(self.read_buffer[0])
            self._cut_read_buffer()
            return None

        buffer_len = len(self.read_buffer)
        if buffer_len < 5 or buffer_len < data_length + 2:
            return None

        message = self.read_buffer[:data_length + 2]

        if self._calculate_checksum(message) == 0:
            if self.read_error_container:
                error_hex_string = ' '.join('%02X' % i
                                            for i in self.read_error_container)
                log('READ-ERR: %s' % error_hex_string)
                self.read_error_counter += len(self.read_error_container)
                self.read_error_container = []

            self._cut_read_buffer(data_length + 2)

            self.packet_buffer.put_nowait({
                'src': message[0],
                'len': data_length,
                'dst': message[2],
                'data': message[3:data_length + 1],
                'xor': message[-1]
            })
        else:
            self.read_error_container.append(self.read_buffer[0])
            self._cut_read_buffer()
            return None

    def write_bus_packet(self,
                         src,
                         dst,
                         data,
                         highprio=False,
                         veryhighprio=False,
                         repeat=1):
        # src/dst/data may arrive as ints or as hex strings; normalize to
        # ints so _calculate_checksum() and bytearray() below always see
        # integers.
        if isinstance(src, str):
            packet = [int(src, 16), len(data) + 2, int(dst, 16)]
            packet.extend(int(s, 16) for s in data)
        else:
            packet = [src, len(data) + 2, dst]
            packet.extend(data)

        packet.append(self._calculate_checksum(packet))
        for _ in xrange(repeat):
            self.write_buffer.put_nowait(
                (0 if highprio or veryhighprio else 1,
                 0 if veryhighprio else self.write_counter, bytearray(packet)))
            self.write_counter += 1

    def write_hex_message(self, hexstring):
        hexstring_tmp = hexstring.upper().split(' ')
        src = int(hexstring_tmp[0], 16)
        dst = int(hexstring_tmp[2], 16)
        data = [int(s, 16) for s in hexstring_tmp[3:-1]]
        self.write_bus_packet(src, dst, data)
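The checksum used throughout is a plain XOR over all packet bytes, so a complete frame including its own checksum XORs to zero. A small self-check (the frame bytes are made up):

packet = [0x68, 0x04, 0x18, 0x0A, 0x00]
packet.append(IBusHandler._calculate_checksum(packet))
print IBusHandler._calculate_checksum(packet) == 0   # True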
Example #35
class scanmatcher_localizer(object):
    def __init__(self):
        self._static_map_req = GetMapRequest()
        self._scan_msg = LaserScan()
        self._map_msg = OccupancyGrid()
        self._initialpose_msg = PoseWithCovarianceStamped()
        # parameters
        self._use_static_map = rospy.get_param('~use_static_map', default=True)
        self._init_x = rospy.get_param('~init_x', default=0.0)
        self._init_y = rospy.get_param('~init_y', default=0.0)
        self._init_phi = rospy.get_param('~init_phi', default=0.0)
        # service clients
        try:
            rospy.wait_for_service('/static_map', timeout=3)
        except ROSException as e:
            print(e.message + '. Make sure that service node is running!')
        self._static_map_sc = rospy.ServiceProxy('/static_map', GetMap)
        # tf broadcasters
        self._map_odom_tfb = tf.TransformBroadcaster()
        # tf listeners
        self._odom_base_link_tfl = tf.TransformListener()
        self._base_link_base_laser_tfl = tf.TransformListener()
        # user init
        # processing queue
        self._scan_queue = PriorityQueue(maxsize=2)
        # get static map
        if self._use_static_map:
            static_map_res = self._static_map_sc(self._static_map_req)
            static_map_res.map.data = list(static_map_res.map.data)
            # initialize map with received one
            self._map = pymrpt.maps.COccupancyGridMap2D()
            self._map.from_ROS_OccupancyGrid_msg(static_map_res.map)
            # print output
            print 'received initial STATIC map!'
            print static_map_res.map.info
        else:
            # initialize empty map and wait for published ones
            self._map = pymrpt.maps.COccupancyGridMap2D()
        # setup transformations
        self._tf = tf.TransformerROS()
        self._map_to_odom = pymrpt.poses.CPose2D()
        self._map_to_base = pymrpt.poses.CPose2D()
        self._odom_to_base = pymrpt.poses.CPose2D()
        # setup initial map to base tf ("/odom -> /base_link" also should be (0,0,0))
        self._map_to_base.x = self._init_x
        self._map_to_base.y = self._init_y
        self._map_to_base.phi = self._init_phi
        # setup scanmatcher lock
        self._scanmatcher_lock = Lock()

        # end of user init
        # subscribers
        rospy.Subscriber('/scan',
                         LaserScan,
                         self._scan_callback,
                         queue_size=10)
        rospy.Subscriber('/map',
                         OccupancyGrid,
                         self._map_callback,
                         queue_size=10)
        rospy.Subscriber('/initialpose',
                         PoseWithCovarianceStamped,
                         self._initialpose_callback,
                         queue_size=10)

    def get_odom_base_link_tf(self, stamp=None):
        pose = PoseStamped()
        if stamp is None:
            stamp = rospy.Time(0)
        try:
            trans, rot = self._odom_base_link_tfl.lookupTransform(
                '/odom', '/base_link', stamp)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            # TODO: implement TransformListener.lookupTransform() exception handling!
            rospy.logwarn('Error while listening to /odom -> /base_link tf')
            return pose, False
        pose.pose.position.x = trans[0]
        pose.pose.position.y = trans[1]
        pose.pose.position.z = trans[2]
        pose.pose.orientation.x = rot[0]
        pose.pose.orientation.y = rot[1]
        pose.pose.orientation.z = rot[2]
        pose.pose.orientation.w = rot[3]
        return pose, True

    def get_base_link_base_laser_tf(self, stamp=None):
        pose = PoseStamped()
        if stamp is None:
            stamp = rospy.Time(0)
        try:
            trans, rot = self._base_link_base_laser_tfl.lookupTransform(
                '/base_link', '/base_laser', stamp)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            # TODO: implement TransformListener.lookupTransform() exception handling!
            rospy.logwarn(
                'Error while listening to /base_link -> /base_laser tf')
            return pose, False
        pose.pose.position.x = trans[0]
        pose.pose.position.y = trans[1]
        pose.pose.position.z = trans[2]
        pose.pose.orientation.x = rot[0]
        pose.pose.orientation.y = rot[1]
        pose.pose.orientation.z = rot[2]
        pose.pose.orientation.w = rot[3]
        return pose, True

    def send_map_odom_tf(self, pose):
        translation = pose.pose.position
        rotation = pose.pose.orientation
        self._map_odom_tfb.sendTransform(
            (translation.x, translation.y, translation.z),
            (rotation.x, rotation.y, rotation.z, rotation.w),
            pose.header.stamp, '/odom', '/map')

    def _scan_callback(self, _scan_msg):
        # put scans into queue
        try:
            self._scan_queue.put_nowait(
                [_scan_msg.header.stamp.to_nsec(), _scan_msg])
        except Full:
            rospy.logwarn('Dropped scan. Processing queue is full!')
            pass

    def run(self):
        # create worker thread (no .join(): the thread exits on ROS shutdown)
        scanmatcher_thread = Thread(target=self.scanmatcher_worker)
        scanmatcher_thread.start()
        # keep it running
        rospy.spin()

    def scanmatcher_worker(self):
        # vars
        has_initial_scan = False
        # initialize ICP
        icp_options = pymrpt.slam.CICP.TConfigParams()
        icp = pymrpt.slam.CICP(icp_options)
        # loop
        while not rospy.is_shutdown():
            # get scan from queue
            try:
                item = self._scan_queue.get(timeout=5)
            except Empty:
                rospy.logwarn(
                    'No scan received from the queue for 5 seconds; waiting for the next one.'
                )
                continue
            self._scan_msg = LaserScan()
            self._scan_msg = item[1]
            self._scan_msg.ranges = list(self._scan_msg.ranges)
            rospy.loginfo('received scan...')
            # get laser scanner pose
            laser_pose, ok = self.get_base_link_base_laser_tf()
            self._sensor_pose = pymrpt.poses.CPose3D()
            self._sensor_pose.from_ROS_Pose_msg(laser_pose.pose)
            # self get odom pose
            odom_pose, ok = self.get_odom_base_link_tf()
            self._odom_to_base = pymrpt.poses.CPose2D()
            self._odom_to_base.from_ROS_Pose_msg(odom_pose.pose)
            # acquire lock
            self._scanmatcher_lock.acquire()
            # update current stamp for publishers
            self._current_stamp = self._scan_msg.header.stamp
            # convert data
            observation = pymrpt.obs.CObservation2DRangeScan()
            observation.from_ROS_LaserScan_msg(self._scan_msg,
                                               self._sensor_pose)
            # set current map from scan
            current_map = pymrpt.maps.CSimplePointsMap()
            current_map.loadFromRangeScan(observation)
            # match maps
            # take the currently maintained map-to-base pose as the initial guess (absolute pose in map)
            initial_guess = pymrpt.poses.CPosePDFGaussian(self._map_to_base)
            # run ICP algorithm
            aligned_pose, running_time, info = icp.AlignPDF(
                self._map, current_map, initial_guess)
            rospy.loginfo('init.  guess: {}'.format(initial_guess.mean))
            rospy.loginfo('icp goodness: {}'.format(info.goodness))
            rospy.loginfo('icp run_time: {}'.format(running_time))
            rospy.loginfo('aligned pose: {}'.format(aligned_pose.mean))
            # check goodness
            if info.goodness > .8:
                rospy.loginfo('...updating pose in map...')
                # update MRPT pose diff (this is actually the maintained tf)
                self._map_to_base = aligned_pose.mean
                # update last update time
                last_update_time = self._current_stamp
            else:
                # warn if pose lost
                rospy.logwarn('...lost pose in map...')
            # update pose diff
            map_to_base = pymrpt.poses.CPose2D(self._map_to_base)
            map_to_base.inverse()
            #    map_to_base_pose = PoseStamped()
            #    map_to_base_pose.header.frame_id = 'base_link'
            #    map_to_base_pose.header.stamp = self._scan_msg.header.stamp
            #    map_to_base_pose.pose = map_to_base.to_ROS_Pose_msg()
            #    latest_tf_pose = PoseStamped()
            #    try:
            #        latest_tf_pose = self._tf.transformPose('odom', map_to_base_pose)
            #    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            #        rospy.logwarn('error while pose transformation')
            odom_to_base = pymrpt.poses.CPose2D(self._odom_to_base)
            odom_to_base.inverse()

            self._map_to_odom.x = self._map_to_base.x - self._odom_to_base.x
            self._map_to_odom.y = self._map_to_base.y - self._odom_to_base.y
            self._map_to_odom.phi = self._map_to_base.phi - self._odom_to_base.phi
            self._map_to_odom.normalizePhi()

            print "  /map  -> /base", self._map_to_base, map_to_base
            print "- /odom -> /base", self._odom_to_base, odom_to_base
            print "= /map  -> /odom", self._map_to_odom

            map_to_odom_pose = PoseStamped()
            map_to_odom_pose.pose = self._map_to_odom.to_ROS_Pose_msg()
            # send "/map -> /odom" tf at scan time
            map_to_odom_time = self._scan_msg.header.stamp + rospy.Duration(
                2.0)
            map_to_odom_pose.header.stamp = self._scan_msg.header.stamp
            self.send_map_odom_tf(map_to_odom_pose)
            # release lock
            self._scanmatcher_lock.release()
            # signalize work done
            rospy.loginfo('...task done!')
            self._scan_queue.task_done()

    def _map_callback(self, _map_msg):
        self._map_msg = _map_msg
        if not self._use_static_map:
            # lock scanmatcher
            self._scanmatcher_lock.acquire()
            # update map
            self._map.from_ROS_OccupancyGrid_msg(self._map_msg)
            # unlock scanmatcher
            self._scanmatcher_lock.release()
            # print output
            rospy.loginfo('received new map!')

    def _initialpose_callback(self, _initialpose_msg):
        # lock scanmatcher
        self._scanmatcher_lock.acquire()
        # update map odom pose diff
        self._map_to_base.from_ROS_Pose_msg(_initialpose_msg.pose.pose)
        # unlock scanmatcher
        self._scanmatcher_lock.release()
        # print output
        rospy.loginfo('received initial pose: {}'.format(self._map_to_base))
Example #36
def searchForEyesSVM(gray, svm, scaler, eye_shape, locs):
    """ 
    Explore image on the cell level, reducing HOG calculations.
    Inputs are as follows (besides the obvious)
    * svm -- sklearn svm model; may be provided if it exists
    * scaler -- sklearn preprocessing scaler
    * locs -- list of approximate centers of eyes
    * eye_shape -- size of eye template in pixels (rows, columns)
    """

    tracker = MatchTracker()
    pq = PriorityQueue()

    eye_cells = (eye_shape[0] // 8, eye_shape[1] // 8)
    hog_computed = np.zeros((gray.shape[0] // 8, gray.shape[1] // 8),
                            dtype=np.bool)

    # distribution parameters
    blind_skip = 3

    # adjust locs
    locs[0] = (int(locs[0][0]), int(locs[0][1]))
    locs[1] = (int(locs[1][0]), int(locs[1][1]))

    print locs

    # only compute HOG on subset of image at first
    min_x = min(bf.center2tl(locs[0], eye_shape)[1], 
                bf.center2tl(locs[1], eye_shape)[1])
    max_x = max(bf.center2tl(locs[0], eye_shape)[1], 
                bf.center2tl(locs[1], eye_shape)[1])
    min_y = min(bf.center2tl(locs[0], eye_shape)[0], 
                bf.center2tl(locs[1], eye_shape)[0])
    max_y = max(bf.center2tl(locs[0], eye_shape)[0], 
                bf.center2tl(locs[1], eye_shape)[0])
    
    tl = (min_y - 4*eye_shape[0], min_x - 4*eye_shape[1])
    br = (max_y + 4*eye_shape[0], max_x + 4*eye_shape[1])

    tl_cell = bf.px2cell(tl)
    br_cell = bf.px2cell(br)

    tl = bf.cell2px(tl_cell)
    br = bf.cell2px(br_cell)

    indices = np.index_exp[tl_cell[0]:br_cell[0], tl_cell[1]:br_cell[1], :]
    indices_computed = np.index_exp[tl_cell[0]:br_cell[0], tl_cell[1]:br_cell[1]]

    hog = np.empty((gray.shape[0] // 8, gray.shape[1] // 8, 9), 
                   dtype=np.float)
    hog[indices] = bf.getHog(gray[tl[0]:br[0], tl[1]:br[1]], 
                             normalize=False, flatten=False)
    hog_computed[indices_computed] = True

    # create visited array
    visited = np.zeros((hog.shape[0]-eye_cells[0]+1,
                        hog.shape[1]-eye_cells[1]+1), dtype=np.bool)
 
    # insert provided locations and begin exploration around each one
    for loc in locs:
        tl = bf.center2tl(loc, eye_shape)
        tl = bf.px2cell(tl)

        # only proceed if valid
        if not isValid(hog, tl, eye_cells):
            continue

        # handle this point
        visited[tl[0], tl[1]] = True
        score = testWindow(hog, svm, scaler, eye_cells, tl)[0]
        pq.put_nowait((score, tl))

        if score <= 0:
            tracker.insert(score, tl)

    # search
    greedySearch(hog, hog_computed, svm, scaler, 
                 eye_cells, visited, tracker, pq)
    if tracker.isDone():
        tracker.printClusterScores()
        clusters, scores = tracker.getBigClusters()
        centers = cellTLs2ctrs(clusters, eye_shape)
        return centers, scores

    # # if needed, repeat above search technique, but with broader scope
    # print "Searching blindly."

    # hog = bf.getHog(gray, normalize=False, flatten=False)
    # hog_computed[:, :] = True

    # for i in range(20, visited.shape[0]-20, blind_skip):
    #     for j in range(20, visited.shape[1]-20, blind_skip):
    #         test = (i, j)

    #         # only proceed if valid and not visited
    #         if (not isValid(hog, test, eye_cells)) or visited[i, j]:
    #             continue

    #         # handle this point
    #         visited[i, j] = True
    #         score = testWindow(hog, svm, scaler, eye_cells, test)[0]
    #         pq.put_nowait((score, test))

    #         if score <= 0:
    #             tracker.insert(score, test)

    # greedySearch(hog, hog_computed, svm, scaler, 
    #              eye_cells, visited, tracker, pq) 
    # if tracker.isDone():
    #     tracker.printClusterScores()
    #     clusters, scores = tracker.getBigClusters()
    #     centers = cellTLs2ctrs(clusters, eye_shape)
    #     return centers, scores

    print "Did not find two good matches."
    clusters, scores = tracker.getTwoBestClusters()
    if len(clusters) == 2:
        centers = cellTLs2ctrs(clusters, eye_shape)
        return centers, scores
    else:
        return locs, [-0.1, -0.1]
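
A hedged usage sketch for searchForEyesSVM; the image path is hypothetical, and svm/scaler are assumed to be a trained sklearn model and its matching scaler from the surrounding module:

import cv2

gray = cv2.cvtColor(cv2.imread('face.png'), cv2.COLOR_BGR2GRAY)
eye_shape = (24, 48)                            # template size in pixels (rows, cols)
rough_locs = [(120.0, 200.0), (120.0, 260.0)]   # approximate eye centers (row, col)
centers, scores = searchForEyesSVM(gray, svm, scaler, eye_shape, rough_locs)
print "eye centers:", centers, "scores:", scores
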
Example #37
File: learn.py Project: y-cccc/dtextract
def learnDT(learnDTNode, func, dist, initCons, params):
    # Step 1: Initialize decision tree, score, and worklist

    # The decision tree is represented by dt, which is a map of type
    #
    #  type params:
    #    I : internal node
    #    L : leaf node
    #
    #  types:
    #    dt : {int : (I * _DT_INTERNAL) | (L * _DT_LEAF) }
    dt = {}
    worklist = PriorityQueue()
    index = 0
    depth = 1
    (dtInternalData, dtInternalScore, dtLeafData,
     dtLeafScore) = learnDTNode(func, dist, initCons)
    gain = dtInternalScore - dtLeafScore
    worklist.put_nowait((-gain, dtInternalData, dtLeafData, index, depth))
    score = dtLeafScore
    size = 1

    # Step 2: Iterate through the worklist and construct internal nodes
    while True:
        # Step 2a: Get the next element (break if worklist is empty)
        if worklist.empty():
            break
        (minusGain, dtInternalData, dtLeafData, index,
         depth) = worklist.get_nowait()
        gain = -minusGain
        log('Internal node index: ' + str(index), INFO)

        # Step 2b: Get the internal data, and add to decision tree
        if dtInternalData is None:
            log('No internal data!', INFO)
            worklist.put_nowait(
                (2.0, dtInternalData, dtLeafData, index, depth))
            if gain < -1.5:
                log('No internal nodes remaining, ending!', INFO)
                break
            else:
                continue

        (dtInternalNode, lcons, rcons) = dtInternalData
        dt[index] = (dtInternalNode, _DT_INTERNAL)

        # Step 2c: Learn the left and right children
        (dtInternalDataLeft, dtInternalScoreLeft, dtLeafDataLeft,
         dtLeafScoreLeft) = learnDTNode(func, dist, lcons)
        (dtInternalDataRight, dtInternalScoreRight, dtLeafDataRight,
         dtLeafScoreRight) = learnDTNode(func, dist, rcons)
        gainLeft = dtInternalScoreLeft - dtLeafScoreLeft
        gainRight = dtInternalScoreRight - dtLeafScoreRight

        # Step 2d: Add children to worklist
        worklist.put_nowait((-gainLeft, dtInternalDataLeft, dtLeafDataLeft,
                             2 * index + 1, depth + 1))
        worklist.put_nowait((-gainRight, dtInternalDataRight, dtLeafDataRight,
                             2 * index + 2, depth + 1))

        # Step 2e: Compute score
        score += gain
        size += 2
        log('Current gain: ' + str(gain), INFO)
        log('Current score: ' + str(score), INFO)
        log('Current size: ' + str(size), INFO)

        # Step 2f: Check stopping conditions
        if params.minGain is not None and gain < params.minGain:
            log('Gain too small, ending!', INFO)
            break
        if params.tgtScore is not None and score >= params.tgtScore:
            log('Achieved target score, ending!', INFO)
            break
        if params.maxSize is not None and size >= params.maxSize:
            log('Reached maximum size, ending!', INFO)
            break
        if gain < -1.5:
            log('No internal nodes remaining, ending!', INFO)
            break

    # Step 3: Iterate through remaining nodes and construct leaf nodes
    while not worklist.empty():
        (minusGain, dtInternalData, dtLeafData, index,
         depth) = worklist.get_nowait()
        gain = -minusGain
        if dtInternalData is None and gain > 0.0:
            raise Exception('None node with non-zero gain: ' + str(gain))
        log('Leaf node index: ' + str(index), INFO)
        dt[index] = (dtLeafData, _DT_LEAF)

    # Step 4: Construct the decision tree
    return DT(_learnDTHelper(dt, 0))
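
The worklist above emulates a max-heap on gain by negating it before put_nowait; a self-contained sketch of that pattern:

from Queue import PriorityQueue

# best-first expansion: always split the candidate with the largest gain
worklist = PriorityQueue()
for gain, node in [(0.3, 'a'), (0.9, 'b'), (0.5, 'c')]:
    worklist.put_nowait((-gain, node))  # negate: PriorityQueue pops the minimum
while not worklist.empty():
    minus_gain, node = worklist.get_nowait()
    print node, -minus_gain             # prints b 0.9, then c 0.5, then a 0.3
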
Example #38
def start_pool(config_file):
    manager = SyncManager()
    manager.start(sync_manager_init)
    status_dict = manager.dict()
    pr_queue = PriorityQueue()
    with open(config_file, 'r') as f:
        options = yaml.safe_load(f)

    dev_macs = options.get('devices', None)
    if not dev_macs:
        print("No device found in the config file")
        return

    for dev_mac in dev_macs:
        if not is_mac_valid(dev_mac):
            raise ValueError("{} is not a valid MAC address!".format(dev_mac))
        pr_queue.put_nowait((0, dev_mac))
        status_dict[dev_mac] = 0

    battery_warn = options.get('battery_warn', 0) or 20
    log_dir = options.get('log_dir', 0) or './logs/'
    data_dir = options.get('data_dir', 0) or './data/'
    max_process = options.get('max_process', 0) or 3
    raw = options.get('raw', False)
    fname = os.path.join(data_dir, options.get('data_prefix', 'WED_data'))
    min_logs = options.get('min_logs', 1000)

    common_kwargs = {'command': Commands.DOWNLOAD,
                     'backend_lock': backend_lock,
                     'fname': fname,
                     'battery_warn': battery_warn,
                     'raw': raw,
                     'status_dict': status_dict,
                     'min_logs': min_logs,
                     }
    process_list = []
    max_process = min(max_process, len(dev_macs))
    retries = {d: 0 for d in dev_macs}

    def get_next_process():
        mac_address = pr_queue.get_nowait()[1]
        stop_event = Event()
        wake_up = Event()
        log_file = os.path.join(log_dir, "log_%s.log" % mac_address.replace(':', ''))
        kwargs = {'mac_address': mac_address,
                  'log_file': log_file,
                  'wake_up': wake_up,
                  'stop_event': stop_event,
                  }
        kwargs.update(common_kwargs)
        p = Process(target=start_command, kwargs=kwargs)
        return p, mac_address, status_dict[mac_address], wake_up, stop_event

    for i in range(max_process):
        process_list.append(get_next_process())

    for p in process_list:
        p[0].start()
    try:
        while len(process_list) > 0:
            p = process_list.pop(0)
            if p[0].is_alive():
                process_list.append(p)
            else:
                last_checked = status_dict[p[1]]
                if last_checked == p[2]:
                    retries[p[1]] += 1
                    if retries[p[1]] > 3:
                        last_checked = (datetime.now() - datetime(1970, 1, 1)).total_seconds()
                        retries[p[1]] = 0
                else:
                    retries[p[1]] = 0
                pr_queue.put_nowait((last_checked, p[1]))
                new_process = get_next_process()
                new_process[0].start()
                process_list.append(new_process)
            time.sleep(2)

    except (KeyboardInterrupt, SystemExit):
        delay = 4 * len([p for p in process_list if p[0].is_alive()])
        print("\nCancelling downloads..............\nWaiting %d seconds for all devices to clean up....\n" % delay)
        time.sleep(delay)
        for p in process_list:
            p[0].terminate()
        sys.exit(0)
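
The priority queue here implements least-recently-checked scheduling: devices are keyed by the timestamp of their last successful check, so a fresh timestamp sends a device to the back of the line. A self-contained sketch:

from Queue import PriorityQueue

pr_queue = PriorityQueue()
pr_queue.put_nowait((0, 'AA:BB:CC:DD:EE:01'))           # never checked
pr_queue.put_nowait((1500000000, 'AA:BB:CC:DD:EE:02'))  # checked recently
print pr_queue.get_nowait()[1]  # AA:BB:CC:DD:EE:01 is served first
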
Example #39
File: frontier.py Project: mt3/Spyder
class AbstractBaseFrontier(object, LoggingMixin):
    """
    A base class for implementing frontiers.

    Basically this class provides the different general methods and
    configuration parameters used for frontiers.
    """

    def __init__(self, settings, log_handler, front_end_queues, prioritizer,
        unique_hash='sha1'):
        """
        Initialize the frontier and instantiate the
        :class:`SQLiteSingleHostUriQueue`.

        By default the frontier will use the `sha1` hash function for the
        unique uri filter. For very large crawls you might want to use a
        larger hash function (`sha512`, e.g.).
        """
        LoggingMixin.__init__(self, log_handler, settings.LOG_LEVEL_MASTER)
        # front end queue
        self._prioritizer = prioritizer
        self._front_end_queues = front_end_queues
        # checkpointing
        self._checkpoint_interval = settings.FRONTIER_CHECKPOINTING
        self._uris_added = 0

        # the heap
        self._heap = PriorityQueue(maxsize=settings.FRONTIER_HEAP_SIZE)
        self._heap_min_size = settings.FRONTIER_HEAP_MIN

        # a list of uris currently being crawled.
        self._current_uris = dict()
        # dns cache
        self._dns_cache = DnsCache(settings)
        # unique uri filter
        self._unique_uri = UniqueUriFilter(unique_hash)
        for url in self._front_end_queues.all_uris():
            assert not self._unique_uri.is_known(url, add_if_unknown=True)

        # the sinks
        self._sinks = []

        # timezone
        self._timezone = settings.LOCAL_TIMEZONE
        self._logger.info("frontier::initialized")

    def add_sink(self, sink):
        """
        Add a sink to the frontier. A sink will be responsible for the long
        term storage of the crawled contents.
        """
        self._sinks.append(sink)

    def add_uri(self, curi):
        """
        Add the specified :class:`CrawlUri` to the frontier.

        `next_date` is a datetime object for the next time the uri should be
        crawled.

        Note: time based crawling is never strict; it is generally used as
        some kind of prioritization.
        """
        if self._unique_uri.is_known(curi.url, add_if_unknown=True):
            # we already know this uri
            self._logger.debug("frontier::Trying to update a known uri... " + \
                    "(%s)" % (curi.url,))
            return

        self._logger.info("frontier::Adding '%s' to the frontier" % curi.url)
        self._front_end_queues.add_uri(self._uri_from_curi(curi))
        self._maybe_checkpoint()

    def update_uri(self, curi):
        """
        Update a given uri.
        """
        self._front_end_queues.update_uri(self._uri_from_curi(curi))
        self._maybe_checkpoint()

    def get_next(self):
        """
        Return the next uri scheduled for crawling.
        """
        if self._heap.qsize() < self._heap_min_size:
            self._update_heap()

        try:
            (_next_date, next_uri) = self._heap.get_nowait()
        except Empty:
            # heap is empty, there is nothing to crawl right now!
            # maybe log this in the future
            raise

        return self._crawluri_from_uri(next_uri)

    def close(self):
        """
        Close the underlying frontend queues.
        """
        self._front_end_queues.checkpoint()
        self._front_end_queues.close()

    def _add_to_heap(self, uri, next_date):
        """
        Add an URI to the heap that is ready to be crawled.
        """
        self._heap.put_nowait((next_date, uri))
        (url, _etag, _mod_date, _next_date, _prio) = uri
        self._current_uris[url] = uri
        self._logger.debug("frontier::Adding '%s' to the heap" % url)

    def _reschedule_uri(self, curi):
        """
        Return the `next_crawl_date` for :class:`CrawlUri`s.
        """
        (prio, delta) = self._prioritizer.calculate_priority(curi)
        now = datetime.now(self._timezone)
        return (prio, time.mktime((now + delta).timetuple()))

    def _ignore_uri(self, curi):
        """
        Ignore a :class:`CrawlUri` from now on.
        """
        self._front_end_queues.ignore_uri(curi.url, curi.status_code)

    def _uri_from_curi(self, curi):
        """
        Create the uri tuple from the :class:`CrawlUri` and calculate the
        priority.

        Override this method in more specific frontiers.
        """
        etag = mod_date = None
        if curi.rep_header:
            if "Etag" in curi.rep_header:
                etag = curi.rep_header["Etag"]
            if "Last-Modified" in curi.rep_header:
                mod_date = time.mktime(deserialize_date_time(
                    curi.rep_header["Last-Modified"]).timetuple())
            if not mod_date and 'Date' in curi.rep_header:
                mod_date = time.mktime(deserialize_date_time(
                    curi.rep_header["Date"]).timetuple())

        if mod_date:
            # only reschedule if it has been crawled before
            (prio, next_crawl_date) = self._reschedule_uri(curi)
        else:
            (prio, next_crawl_date) = (1,
                    time.mktime(datetime.now(self._timezone).timetuple()))

        return (curi.url, etag, mod_date, next_crawl_date, prio)

    def _crawluri_from_uri(self, uri):
        """
        Convert an URI tuple to a :class:`CrawlUri`.

        Replace the hostname with the real IP in order to cache DNS queries.
        """
        (url, etag, mod_date, _next_date, prio) = uri

        parsed_url = urlparse(url)

        # dns resolution and caching
        port = parsed_url.port
        if not port:
            port = PROTOCOLS_DEFAULT_PORT[parsed_url.scheme]

        effective_netloc = self._dns_cache["%s:%s" % (parsed_url.hostname,
            port)]

        curi = CrawlUri(url)
        curi.effective_url = url.replace(parsed_url.netloc, "%s:%s" %
                effective_netloc)
        curi.current_priority = prio
        curi.req_header = dict()
        if etag:
            curi.req_header["Etag"] = etag
        if mod_date:
            mod_date_time = datetime.fromtimestamp(mod_date)
            curi.req_header["Last-Modified"] = serialize_date_time(
                    mod_date_time)

        curi.optional_vars = dict()
        if parsed_url.username and parsed_url.password:
            curi.optional_vars[CURI_SITE_USERNAME] = \
                parsed_url.username.encode()
            curi.optional_vars[CURI_SITE_PASSWORD] = \
                parsed_url.password.encode()

        return curi

    def _update_heap(self):
        """
        Abstract method. Implement this in the actual Frontier.

        The implementation should really only add uris to the heap if they can
        be downloaded right away.
        """
        pass

    def _maybe_checkpoint(self, force_checkpoint=False):
        """
        Periodically checkpoint the state db.
        """
        self._uris_added += 1
        if self._uris_added > self._checkpoint_interval or force_checkpoint:
            self._front_end_queues.checkpoint()
            self._uris_added = 0

    def process_successful_crawl(self, curi):
        """
        Called when an URI has been crawled successfully.

        `curi` is a :class:`CrawlUri`
        """
        self.update_uri(curi)

        if curi.optional_vars and CURI_EXTRACTED_URLS in curi.optional_vars:
            for url in curi.optional_vars[CURI_EXTRACTED_URLS].split("\n"):
                if len(url) > 5 and not self._unique_uri.is_known(url):
                    self.add_uri(CrawlUri(url))

        del self._current_uris[curi.url]

        for sink in self._sinks:
            sink.process_successful_crawl(curi)

    def process_not_found(self, curi):
        """
        Called when an URL was not found.

        This could mean that the URL has been removed from the server. If so,
        do something about it!

        Override this method in the actual frontier implementation.
        """
        del self._current_uris[curi.url]
        self._ignore_uri(curi)

        for sink in self._sinks:
            sink.process_not_found(curi)

    def process_redirect(self, curi):
        """
        Called when there were too many redirects for an URL, or the site has
        not been updated since the last visit.

        In the latter case, update the internal uri and increase the priority
        level.
        """
        del self._current_uris[curi.url]

        if curi.status_code in [301, 302]:
            # simply ignore the URL. The URL that is being redirected to is
            # extracted and added in the processing
            self._ignore_uri(curi)

        if curi.status_code == 304:
            # the page has not been modified since the last visit! Update it
            # NOTE: prio increasing happens in the prioritizer
            self.update_uri(curi)

        for sink in self._sinks:
            sink.process_redirect(curi)

    def process_server_error(self, curi):
        """
        Called when there was some kind of server error.

        Override this method in the actual frontier implementation.
        """
        del self._current_uris[curi.url]
        self._ignore_uri(curi)

        for sink in self._sinks:
            sink.process_server_error(curi)
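
Concrete frontiers only need to implement _update_heap; a hypothetical sketch (the queue_head() iterator on the front end queues is an assumption, not part of the API shown above):

class ExampleFrontier(AbstractBaseFrontier):

    def _update_heap(self):
        # hypothetical: assumes the front end queues can yield uri tuples
        # ordered by next_crawl_date via a queue_head() method
        now = time.mktime(datetime.now(self._timezone).timetuple())
        for uri in self._front_end_queues.queue_head():
            (_url, _etag, _mod_date, next_date, _prio) = uri
            if next_date > now or self._heap.full():
                break  # nothing else is due, or no room on the heap
            self._add_to_heap(uri, next_date)
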
Example #40
class scanmatcher_odom(object):

    def __init__(self):
        self._scan_msg = LaserScan()
        self._odom_msg = Odometry()
        # parameters
        self._send_odom_base_tf = rospy.get_param('~send_odom_base_tf', default=True)
        # publishers
        self._odom_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
        # tf broadcasters
        self._odom_base_link_tfb = tf.TransformBroadcaster()
        # tf listeners
        self._base_link_base_laser_tfl = tf.TransformListener()
        # user init
        # processing queue
        self._scan_queue = PriorityQueue(maxsize=2)
        # absolute odometry pose and current velocities
        self._current_pose = pymrpt.poses.CPose2D()
        self._v = .0
        self._w = .0
        # initialize odometry
        self._odom_msg.header.frame_id = 'odom'
        self._odom_msg.child_frame_id = 'base_link'
        # end of user init
        # subscribers
        rospy.Subscriber('/scan', LaserScan, self._scan_callback, queue_size=10)

    def get_base_link_base_laser_tf(self, stamp=None):
        pose = PoseStamped()
        if stamp is None:
            stamp = rospy.Time(0)
        try:
            trans, rot = self._base_link_base_laser_tfl.lookupTransform('/base_link', '/base_laser', stamp)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # TODO: implement TransformListener.lookupTransform() exception handling!
            rospy.logwarn('Error while listening to /base_link -> /base_laser tf')
            return pose, False
        pose.pose.position.x = trans[0]
        pose.pose.position.y = trans[1]
        pose.pose.position.z = trans[2]
        pose.pose.orientation.x = rot[0]
        pose.pose.orientation.y = rot[1]
        pose.pose.orientation.z = rot[2]
        pose.pose.orientation.w = rot[3]
        return pose, True

    def send_odom_base_link_tf(self, pose):
        translation = pose.pose.position
        rotation = pose.pose.orientation
        self._odom_base_link_tfb.sendTransform(
            (translation.x, translation.y, translation.z),
            (rotation.x, rotation.y, rotation.z, rotation.w),
            pose.header.stamp,
            '/base_link',
            '/odom')

    def run(self):
        # create worker thread (we do not .join() since it closes itself by ros shutdown)
        scanmatcher_thread = Thread(target=self.scanmatcher_worker)
        scanmatcher_thread.start()
        # keep it running
        rospy.spin()

    def _scan_callback(self, _scan_msg):
        # put scans into queue
        try:
            self._scan_queue.put_nowait([_scan_msg.header.stamp.to_nsec(), _scan_msg])
        except Full:
            rospy.logwarn('Dropped scan. Processing queue is full!')

    def scanmatcher_worker(self):
        # vars
        has_initial_scan = False
        # initialize ICP
        icp_options = pymrpt.slam.CICP.TConfigParams()
        icp = pymrpt.slam.CICP(icp_options)
        # create last and current laser maps
        last_map = pymrpt.maps.CSimplePointsMap()
        # setup last update time
        last_update_time = rospy.Time.now()
        # loop
        while not rospy.is_shutdown():
            self._scan_msg = LaserScan()
            # get scan from queue
            try:
                item = self._scan_queue.get(timeout=5)
            except Empty:
                rospy.logwarn('Got no scan from queue for 5 seconds. Still waiting...')
                continue
            self._scan_msg = item[1]
            self._scan_msg.ranges = list(self._scan_msg.ranges)
            rospy.loginfo('received scan...')
            # update current stamp for publishers
            self._current_stamp = self._scan_msg.header.stamp
            # get laser scanner pose
            laser_pose, ok = self.get_base_link_base_laser_tf()
            self._sensor_pose = pymrpt.poses.CPose3D()
            self._sensor_pose.from_ROS_Pose_msg(laser_pose.pose)
            # convert data
            observation = pymrpt.obs.CObservation2DRangeScan()
            observation.from_ROS_LaserScan_msg(self._scan_msg, self._sensor_pose)
            # set current map from scan
            current_map = pymrpt.maps.CSimplePointsMap()
            current_map.loadFromRangeScan(observation)
            # match maps
            if has_initial_scan:
                # no initial guess (pure incremental)
                initial_guess = pymrpt.poses.CPosePDFGaussian()
                # run ICP algorithm
                pose_change, running_time, info = icp.AlignPDF(last_map, current_map, initial_guess)
                rospy.loginfo('icp goodness: {}'.format(info.goodness))
                rospy.loginfo('icp run_time: {}'.format(running_time))
                rospy.loginfo('pose  change: {}'.format(pose_change.mean))
                # check goodness
        #        if info.goodness > .8:
                rospy.loginfo('...updating odometry.')
                # get time delta
                d_t = (self._current_stamp - last_update_time).to_sec()
                # update current pose and velocities
                self._current_pose += pose_change.mean
                dist = pose_change.mean.norm()
                self._v = dist / d_t
                self._w = pose_change.mean.phi / d_t
                self.publish_odom()
                # update last update time
                last_update_time = self._current_stamp
                # update last map
                last_map = current_map
        #        else:
        #            rospy.logwarn('...lost odometry...')
            else:
                rospy.loginfo('...is the initial one!')
                # load initial scan to last map
                last_map = current_map
                # mark initial as received
                has_initial_scan = True
        #    rospy.loginfo('...task done!')
            # signalize work done
            self._scan_queue.task_done()

    def publish_odom(self):
        """
        Publish odometry and optionally "odom -> base_link" tf.
        """
        # setup odometry message
        self._odom_msg.header.stamp = self._current_stamp
        self._odom_msg.pose.pose = self._current_pose.to_ROS_Pose_msg()
        self._odom_msg.twist.twist.linear.x = self._v
        self._odom_msg.twist.twist.angular.z = self._w
        # publish odom
        self._odom_pub.publish(self._odom_msg)
        # publish "odom -> base_link" tf on demand
        if self._send_odom_base_tf:
           pose_msg = PoseStamped()
           pose_msg.header.stamp = self._current_stamp
           pose_msg.pose = self._odom_msg.pose.pose
           self.send_odom_base_link_tf(pose_msg)
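
A minimal sketch of bringing up the node defined above (the node name is chosen here for illustration):

if __name__ == '__main__':
    rospy.init_node('scanmatcher_odom')
    node = scanmatcher_odom()
    node.run()  # start the matcher thread, then block in rospy.spin()
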
Example #41
class scanmatcher_odom(object):
    def __init__(self):
        self._scan_msg = LaserScan()
        self._odom_msg = Odometry()
        # parameters
        self._send_odom_base_tf = rospy.get_param('~send_odom_base_tf',
                                                  default=True)
        # publishers
        self._odom_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
        # tf broadcasters
        self._odom_base_link_tfb = tf.TransformBroadcaster()
        # tf listeners
        self._base_link_base_laser_tfl = tf.TransformListener()
        # user init
        # processing queue
        self._scan_queue = PriorityQueue(maxsize=2)
        # absolute odometry pose and current velocities
        self._current_pose = pymrpt.poses.CPose2D()
        self._v = .0
        self._w = .0
        # initialize odometry
        self._odom_msg.header.frame_id = 'odom'
        self._odom_msg.child_frame_id = 'base_link'
        # end of user init
        # subscribers
        rospy.Subscriber('/scan',
                         LaserScan,
                         self._scan_callback,
                         queue_size=10)

    def get_base_link_base_laser_tf(self, stamp=None):
        pose = PoseStamped()
        if stamp is None:
            stamp = rospy.Time(0)
        try:
            trans, rot = self._base_link_base_laser_tfl.lookupTransform(
                '/base_link', '/base_laser', stamp)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            # TODO: implement TransformListener.lookupTransform() exception handling!
            rospy.logwarn(
                'Error while listening to /base_link -> /base_laser tf')
            return pose, False
        pose.pose.position.x = trans[0]
        pose.pose.position.y = trans[1]
        pose.pose.position.z = trans[2]
        pose.pose.orientation.x = rot[0]
        pose.pose.orientation.y = rot[1]
        pose.pose.orientation.z = rot[2]
        pose.pose.orientation.w = rot[3]
        return pose, True

    def send_odom_base_link_tf(self, pose):
        translation = pose.pose.position
        rotation = pose.pose.orientation
        self._odom_base_link_tfb.sendTransform(
            (translation.x, translation.y, translation.z),
            (rotation.x, rotation.y, rotation.z, rotation.w),
            pose.header.stamp, '/base_link', '/odom')

    def run(self):
        # create worker thread (we do not .join() since it closes itself by ros shutdown)
        scanmatcher_thread = Thread(target=self.scanmatcher_worker)
        scanmatcher_thread.start()
        # keep it running
        rospy.spin()

    def _scan_callback(self, _scan_msg):
        # put scans into queue
        try:
            self._scan_queue.put_nowait(
                [_scan_msg.header.stamp.to_nsec(), _scan_msg])
        except Full:
            rospy.logwarn('Dropped scan. Processing queue is full!')

    def scanmatcher_worker(self):
        # vars
        has_initial_scan = False
        # initialize ICP
        icp_options = pymrpt.slam.CICP.TConfigParams()
        icp = pymrpt.slam.CICP(icp_options)
        # create last and current laser maps
        last_map = pymrpt.maps.CSimplePointsMap()
        # setup last update time
        last_update_time = rospy.Time.now()
        # loop
        while not rospy.is_shutdown():
            self._scan_msg = LaserScan()
            # get scan from queue
            try:
                item = self._scan_queue.get(timeout=5)
            except Empty:
                rospy.logwarn(
                    'Got no scan from queue for 5 seconds. Still waiting...')
                continue
            self._scan_msg = item[1]
            self._scan_msg.ranges = list(self._scan_msg.ranges)
            rospy.loginfo('received scan...')
            # update current stamp for publishers
            self._current_stamp = self._scan_msg.header.stamp
            # get laser scanner pose
            laser_pose, ok = self.get_base_link_base_laser_tf()
            self._sensor_pose = pymrpt.poses.CPose3D()
            self._sensor_pose.from_ROS_Pose_msg(laser_pose.pose)
            # convert data
            observation = pymrpt.obs.CObservation2DRangeScan()
            observation.from_ROS_LaserScan_msg(self._scan_msg,
                                               self._sensor_pose)
            # set current map from scan
            current_map = pymrpt.maps.CSimplePointsMap()
            current_map.loadFromRangeScan(observation)
            # match maps
            if has_initial_scan:
                # no initial guess (pure incremental)
                initial_guess = pymrpt.poses.CPosePDFGaussian()
                # run ICP algorithm
                pose_change, running_time, info = icp.AlignPDF(
                    last_map, current_map, initial_guess)
                rospy.loginfo('icp goodness: {}'.format(info.goodness))
                rospy.loginfo('icp run_time: {}'.format(running_time))
                rospy.loginfo('pose  change: {}'.format(pose_change.mean))
                # check goodness
                #        if info.goodness > .8:
                rospy.loginfo('...updating odometry.')
                # get time delta
                d_t = (self._current_stamp - last_update_time).to_sec()
                # update current pose and velocities
                self._current_pose += pose_change.mean
                dist = pose_change.mean.norm()
                self._v = dist / d_t
                self._w = pose_change.mean.phi / d_t
                self.publish_odom()
                # update last update time
                last_update_time = self._current_stamp
                # update last map
                last_map = current_map
        #        else:
        #            rospy.logwarn('...lost odometry...')
            else:
                rospy.loginfo('...is the initial one!')
                # load initial scan to last map
                last_map = current_map
                # mark initial as received
                has_initial_scan = True
        #    rospy.loginfo('...task done!')
            # signalize work done
            self._scan_queue.task_done()

    def publish_odom(self):
        """
        Publish odometry and optionally "odom -> base_link" tf.
        """
        # setup odometry message
        self._odom_msg.header.stamp = self._current_stamp
        self._odom_msg.pose.pose = self._current_pose.to_ROS_Pose_msg()
        self._odom_msg.twist.twist.linear.x = self._v
        self._odom_msg.twist.twist.angular.z = self._w
        # publish odom
        self._odom_pub.publish(self._odom_msg)
        # publish "odom -> base_link" tf on demand
        if self._send_odom_base_tf:
            pose_msg = PoseStamped()
            pose_msg.header.stamp = self._current_stamp
            pose_msg.pose = self._odom_msg.pose.pose
            self.send_odom_base_link_tf(pose_msg)
Example #42
    def find_feasible_schedule(para, rtn):
        """find a feasible schedule"""
        # todo: does not work for G6
        # todo: or, expand the time horizon and assign a bigM price for t > 96
        # todo: the following is simply a greedy algorithm
        # todo: offset to the cheapest hours
        # todo: needs testing for 3EAF,2AOD,4LF,1CC

        rtn_casters = rtn.steel_rtn.casters

        task_start = [-100] + [-1] * para.num_tasks
        heat_ready_q2 = PriorityQueue()

        # get caster start asap by arranging heat priority
        # group_time_q = PriorityQueue()
        # for group_ in range(object.num_groups):
        # total_slot = 0
        # for heat in object.optmath.steel_rtn.group2heats[group_+1]:
        #         for s in object.process_sequence:
        #             total_slot += object.optmath.steel_rtn.task_length[object.optmath.steel_rtn.tasks[s][heat-1]]
        #     total_slot += max([object.optmath.steel_rtn.task_cleanup_length[object.optmath.steel_rtn.tasks[unit][group_]] for unit in casters])
        #     group_time_q.put_nowait((total_slot,group_+1))
        # equip_count = 0
        # num_eaf = object.optmath.steel_rtn.stage2units['1']['EAF']
        # while not group_time_q.empty():
        #     (total_slot, group) = group_time_q.get_nowait()
        #     for heat in object.optmath.steel_rtn.group2heats[group]:
        #         heat_ready_q2.put_nowait((math.floor(equip_count/num_eaf),heat-1))
        #         equip_count += 1

        for heat_ in range(para.num_heats):
            heat_ready_q2.put_nowait((0, heat_))
        for seq in [0, 2, 4]:
            heat_ready_q = heat_ready_q2
            heat_ready_q2 = PriorityQueue()
            task_type = para.heat_sequence[seq]
            equip_time = [0] * para.unit2num[task_type]
            equip_id = 0
            while not heat_ready_q.empty():
                (ready_t, heat_) = heat_ready_q.get_nowait()
                task = rtn.steel_rtn.tasks[task_type][heat_]
                equip_time[equip_id] = max(equip_time[equip_id], ready_t)
                task_start[task] = equip_time[equip_id]
                equip_time[equip_id] += rtn.steel_rtn.task_length[task]
                trans_time = rtn.steel_rtn.task_length[task + para.num_heats]
                heat_ready_q2.put_nowait((equip_time[equip_id] + trans_time, heat_))
                equip_id = (equip_id + 1) % len(equip_time)
        # casting
        heat_ready_list = [-1] * para.num_heats
        while not heat_ready_q2.empty():
            (read_t, heat_) = heat_ready_q2.get_nowait()
            heat_ready_list[heat_] = read_t
        group_ready_t = dict()
        caster_time = [0] * len(rtn_casters)
        for caster in rtn_casters:
            group_ready_t[caster] = PriorityQueue()
        # group ready time
        for group_ in range(para.num_groups):
            heats = rtn.steel_rtn.group2heats[group_ + 1]
            for caster in rtn_casters:
                read_t = [heat_ready_list[heat - 1] - rtn.steel_rtn.cast_heat_rel_slot[caster][heat] for heat
                          in heats]
                group_ready_t[caster].put_nowait((max(read_t), group_))
        scheduled_groups = []
        while len(scheduled_groups) < para.num_groups:
            caster_id = np.argmin(caster_time)
            (read_t, group_) = group_ready_t[rtn_casters[caster_id]].get_nowait()
            while group_ in scheduled_groups:
                (read_t, group_) = group_ready_t[rtn_casters[caster_id]].get_nowait()
            scheduled_groups.append(group_)
            schedule_time = max(read_t, caster_time[caster_id])
            schedule_task = rtn.steel_rtn.tasks[rtn_casters[caster_id]][group_]
            caster_time[caster_id] = schedule_time + rtn.steel_rtn.task_cleanup_length[schedule_task]
            task_start[schedule_task] = schedule_time

        task_time = [(-100, -100)] + [(-1, -1)] * rtn.steel_rtn.num_tasks  # task counts from 1
        for task in range(1, para.num_tasks + 1):
            if task_start[task] < 0:
                continue
            task_time[task] = (task_start[task], task_start[task] + 1)
        return task_time
Example #43
    def find_feasible_schedule(para, rtn):
        """find a feasible schedule"""
        # todo: does not work for G6
        # todo: or, expand the time horizon and assign a bigM price for t > 96
        # todo: the following is simply a greedy algorithm
        # todo: offset to the cheapest hours
        # todo: needs testing for 3EAF,2AOD,4LF,1CC

        rtn_casters = rtn.steel_rtn.casters

        task_start = [-100] + [-1] * para.num_tasks
        heat_ready_q2 = PriorityQueue()

        # get caster start asap by arranging heat priority
        # group_time_q = PriorityQueue()
        # for group_ in range(object.num_groups):
        # total_slot = 0
        # for heat in object.optmath.steel_rtn.group2heats[group_+1]:
        #         for s in object.process_sequence:
        #             total_slot += object.optmath.steel_rtn.task_length[object.optmath.steel_rtn.tasks[s][heat-1]]
        #     total_slot += max([object.optmath.steel_rtn.task_cleanup_length[object.optmath.steel_rtn.tasks[unit][group_]] for unit in casters])
        #     group_time_q.put_nowait((total_slot,group_+1))
        # equip_count = 0
        # num_eaf = object.optmath.steel_rtn.stage2units['1']['EAF']
        # while not group_time_q.empty():
        #     (total_slot, group) = group_time_q.get_nowait()
        #     for heat in object.optmath.steel_rtn.group2heats[group]:
        #         heat_ready_q2.put_nowait((math.floor(equip_count/num_eaf),heat-1))
        #         equip_count += 1

        for heat_ in range(para.num_heats):
            heat_ready_q2.put_nowait((0, heat_))
        for seq in [0, 2, 4]:
            heat_ready_q = heat_ready_q2
            heat_ready_q2 = PriorityQueue()
            task_type = para.heat_sequence[seq]
            equip_time = [0] * para.unit2num[task_type]
            equip_id = 0
            while not heat_ready_q.empty():
                (ready_t, heat_) = heat_ready_q.get_nowait()
                task = rtn.steel_rtn.tasks[task_type][heat_]
                equip_time[equip_id] = max(equip_time[equip_id], ready_t)
                task_start[task] = equip_time[equip_id]
                equip_time[equip_id] += rtn.steel_rtn.task_length[task]
                trans_time = rtn.steel_rtn.task_length[task + para.num_heats]
                heat_ready_q2.put_nowait(
                    (equip_time[equip_id] + trans_time, heat_))
                equip_id = (equip_id + 1) % len(equip_time)
        # casting
        heat_ready_list = [-1] * para.num_heats
        while not heat_ready_q2.empty():
            (read_t, heat_) = heat_ready_q2.get_nowait()
            heat_ready_list[heat_] = read_t
        group_ready_t = dict()
        caster_time = [0] * len(rtn_casters)
        for caster in rtn_casters:
            group_ready_t[caster] = PriorityQueue()
        # group ready time
        for group_ in range(para.num_groups):
            heats = rtn.steel_rtn.group2heats[group_ + 1]
            for caster in rtn_casters:
                read_t = [
                    heat_ready_list[heat - 1] -
                    rtn.steel_rtn.cast_heat_rel_slot[caster][heat]
                    for heat in heats
                ]
                group_ready_t[caster].put_nowait((max(read_t), group_))
        scheduled_groups = []
        while len(scheduled_groups) < para.num_groups:
            caster_id = np.argmin(caster_time)
            (read_t,
             group_) = group_ready_t[rtn_casters[caster_id]].get_nowait()
            while group_ in scheduled_groups:
                (read_t,
                 group_) = group_ready_t[rtn_casters[caster_id]].get_nowait()
            scheduled_groups.append(group_)
            schedule_time = max(read_t, caster_time[caster_id])
            schedule_task = rtn.steel_rtn.tasks[rtn_casters[caster_id]][group_]
            caster_time[
                caster_id] = schedule_time + rtn.steel_rtn.task_cleanup_length[
                    schedule_task]
            task_start[schedule_task] = schedule_time

        task_time = [
            (-100, -100)
        ] + [(-1, -1)] * rtn.steel_rtn.num_tasks  # task counts from 1
        for task in range(1, para.num_tasks + 1):
            if task_start[task] < 0:
                continue
            task_time[task] = (task_start[task], task_start[task] + 1)
        return task_time
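
The stage loop above in miniature: jobs flow through one stage with two machines, the machines are round-robined, and each job re-enters the ready queue after its processing time. A self-contained sketch of the pattern:

from Queue import PriorityQueue

ready = PriorityQueue()
for job in range(4):
    ready.put_nowait((0, job))       # every job ready at t=0
machine_free = [0, 0]
m = 0
while not ready.empty():
    ready_t, job = ready.get_nowait()
    start = max(machine_free[m], ready_t)
    print 'job', job, 'starts at', start, 'on machine', m
    machine_free[m] = start + 2      # fixed 2-slot processing time
    m = (m + 1) % len(machine_free)
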
Example #44
class AsyncoreReactor(object):
    _thread = None
    _is_live = False
    logger = logging.getLogger("Reactor")

    def __init__(self):
        self._timers = PriorityQueue()
        self._map = {}

    def start(self):
        self._is_live = True
        self._thread = threading.Thread(target=self._loop, name="hazelcast-reactor")
        self._thread.daemon = True
        self._thread.start()

    def _loop(self):
        self.logger.debug("Starting Reactor Thread")
        Future._threading_locals.is_reactor_thread = True
        while self._is_live:
            try:
                asyncore.loop(count=1000, timeout=0.01, map=self._map)
                self._check_timers()
            except select.error as err:
                # TODO: parse error type to catch only error "9"
                pass
            except:
                self.logger.exception("Error in Reactor Thread")
                # TODO: shutdown client
                return
        self.logger.debug("Reactor Thread exited.")

    def _check_timers(self):
        now = time.time()
        while not self._timers.empty():
            try:
                _, timer = self._timers.queue[0]
            except IndexError:
                return

            if timer.check_timer(now):
                self._timers.get_nowait()
            else:
                return

    def add_timer_absolute(self, timeout, callback):
        timer = Timer(timeout, callback, self._cleanup_timer)
        self._timers.put_nowait((timer.end, timer))
        return timer

    def add_timer(self, delay, callback):
        return self.add_timer_absolute(delay + time.time(), callback)

    def shutdown(self):
        for connection in self._map.values():
            try:
                connection.close(HazelcastError("Client is shutting down"))
            except OSError as err:
                if err.args[0] == socket.EBADF:
                    pass
                else:
                    raise
        self._map.clear()
        self._is_live = False
        self._thread.join()
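
A minimal usage sketch for the reactor's timer queue; the callback is illustrative only:

reactor = AsyncoreReactor()
reactor.start()
# fires roughly five seconds from now, checked from the reactor thread
timer = reactor.add_timer(5, lambda: logging.info('timer fired'))
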
Example #45
File: frontier.py Project: truemped/Spyder
class AbstractBaseFrontier(object, LoggingMixin):
    """
    A base class for implementing frontiers.

    Basically this class provides the different general methods and
    configuration parameters used for frontiers.
    """
    def __init__(self,
                 settings,
                 log_handler,
                 front_end_queues,
                 prioritizer,
                 unique_hash='sha1'):
        """
        Initialize the frontier and instantiate the
        :class:`SQLiteSingleHostUriQueue`.

        By default the frontier will use the `sha1` hash function for the
        unique uri filter. For very large crawls you might want to use a
        larger hash function (`sha512`, e.g.).
        """
        LoggingMixin.__init__(self, log_handler, settings.LOG_LEVEL_MASTER)
        # front end queue
        self._prioritizer = prioritizer
        self._front_end_queues = front_end_queues
        # checkpointing
        self._checkpoint_interval = settings.FRONTIER_CHECKPOINTING
        self._uris_added = 0

        # the heap
        self._heap = PriorityQueue(maxsize=settings.FRONTIER_HEAP_SIZE)
        self._heap_min_size = settings.FRONTIER_HEAP_MIN

        # a list of uris currently being crawled.
        self._current_uris = dict()
        # dns cache
        self._dns_cache = DnsCache(settings)
        # unique uri filter
        self._unique_uri = UniqueUriFilter(unique_hash)
        for url in self._front_end_queues.all_uris():
            assert not self._unique_uri.is_known(url, add_if_unknown=True)

        # the sinks
        self._sinks = []

        # timezone
        self._timezone = settings.LOCAL_TIMEZONE
        self._logger.info("frontier::initialized")

    def add_sink(self, sink):
        """
        Add a sink to the frontier. A sink will be responsible for the long
        term storage of the crawled contents.
        """
        self._sinks.append(sink)

    def add_uri(self, curi):
        """
        Add the specified :class:`CrawlUri` to the frontier.

        `next_date` is a datetime object for the next time the uri should be
        crawled.

        Note: time based crawling is never strict; it is generally used as
        some kind of prioritization.
        """
        if self._unique_uri.is_known(curi.url, add_if_unknown=True):
            # we already know this uri
            self._logger.debug("frontier::Trying to update a known uri... " + \
                    "(%s)" % (curi.url,))
            return

        self._logger.info("frontier::Adding '%s' to the frontier" % curi.url)
        self._front_end_queues.add_uri(self._uri_from_curi(curi))
        self._maybe_checkpoint()

    def update_uri(self, curi):
        """
        Update a given uri.
        """
        self._front_end_queues.update_uri(self._uri_from_curi(curi))
        self._maybe_checkpoint()

    def get_next(self):
        """
        Return the next uri scheduled for crawling.
        """
        if self._heap.qsize() < self._heap_min_size:
            self._update_heap()

        try:
            (_next_date, next_uri) = self._heap.get_nowait()
        except Empty:
            # heap is empty, there is nothing to crawl right now!
            # maybe log this in the future
            raise

        return self._crawluri_from_uri(next_uri)

    def close(self):
        """
        Close the underlying frontend queues.
        """
        self._front_end_queues.checkpoint()
        self._front_end_queues.close()

    def _crawl_now(self, uri):
        """
        Convenience method for crawling a uri right away.
        """
        self._add_to_heap(uri, 3000)

    def _add_to_heap(self, uri, next_date):
        """
        Add an URI to the heap that is ready to be crawled.
        """
        self._heap.put_nowait((next_date, uri))
        (url, _etag, _mod_date, _next_date, _prio) = uri
        self._current_uris[url] = uri
        self._logger.debug("frontier::Adding '%s' to the heap" % url)

    def _reschedule_uri(self, curi):
        """
        Return the `next_crawl_date` for :class:`CrawlUri`s.
        """
        (prio, delta) = self._prioritizer.calculate_priority(curi)
        now = datetime.now(self._timezone)
        return (prio, time.mktime((now + delta).timetuple()))

    def _ignore_uri(self, curi):
        """
        Ignore a :class:`CrawlUri` from now on.
        """
        self._front_end_queues.ignore_uri(curi.url, curi.status_code)

    def _uri_from_curi(self, curi):
        """
        Create the uri tuple from the :class:`CrawlUri` and calculate the
        priority.

        Override this method in more specific frontiers.
        """
        etag = mod_date = None
        if curi.rep_header:
            if "Etag" in curi.rep_header:
                etag = curi.rep_header["Etag"]
            if "Last-Modified" in curi.rep_header:
                mod_date = time.mktime(
                    deserialize_date_time(
                        curi.rep_header["Last-Modified"]).timetuple())
            if not mod_date and 'Date' in curi.rep_header:
                mod_date = time.mktime(
                    deserialize_date_time(curi.rep_header["Date"]).timetuple())

        if mod_date:
            # only reschedule if it has been crawled before
            (prio, next_crawl_date) = self._reschedule_uri(curi)
        else:
            (prio,
             next_crawl_date) = (1,
                                 time.mktime(
                                     datetime.now(self._timezone).timetuple()))

        return (curi.url, etag, mod_date, next_crawl_date, prio)

    def _crawluri_from_uri(self, uri):
        """
        Convert an URI tuple to a :class:`CrawlUri`.

        Replace the hostname with the real IP in order to cache DNS queries.
        """
        (url, etag, mod_date, _next_date, prio) = uri

        parsed_url = urlparse(url)

        # dns resolution and caching
        port = parsed_url.port
        if not port:
            port = PROTOCOLS_DEFAULT_PORT[parsed_url.scheme]

        effective_netloc = self._dns_cache["%s:%s" %
                                           (parsed_url.hostname, port)]

        curi = CrawlUri(url)
        curi.effective_url = url.replace(parsed_url.netloc,
                                         "%s:%s" % effective_netloc)
        curi.current_priority = prio
        curi.req_header = dict()
        if etag:
            curi.req_header["Etag"] = etag
        if mod_date:
            mod_date_time = datetime.fromtimestamp(mod_date)
            curi.req_header["Last-Modified"] = serialize_date_time(
                mod_date_time)

        curi.optional_vars = dict()
        if parsed_url.username and parsed_url.password:
            curi.optional_vars[CURI_SITE_USERNAME] = \
                parsed_url.username.encode()
            curi.optional_vars[CURI_SITE_PASSWORD] = \
                parsed_url.password.encode()

        return curi

    def _update_heap(self):
        """
        Abstract method. Implement this in the actual Frontier.

        The implementation should really only add uris to the heap if they can
        be downloaded right away.
        """
        pass

    def _maybe_checkpoint(self, force_checkpoint=False):
        """
        Periodically checkpoint the state db.
        """
        self._uris_added += 1
        if self._uris_added > self._checkpoint_interval or force_checkpoint:
            self._front_end_queues.checkpoint()
            self._uris_added = 0

    def process_successful_crawl(self, curi):
        """
        Called when an URI has been crawled successfully.

        `curi` is a :class:`CrawlUri`
        """
        self.update_uri(curi)

        if curi.optional_vars and CURI_EXTRACTED_URLS in curi.optional_vars:
            for url in curi.optional_vars[CURI_EXTRACTED_URLS].split("\n"):
                if len(url) > 5 and not self._unique_uri.is_known(url):
                    self.add_uri(CrawlUri(url))

        del self._current_uris[curi.url]

        for sink in self._sinks:
            sink.process_successful_crawl(curi)

    def process_not_found(self, curi):
        """
        Called when an URL was not found.

        This could mean that the URL has been removed from the server. If so,
        do something about it!

        Override this method in the actual frontier implementation.
        """
        del self._current_uris[curi.url]
        self._ignore_uri(curi)

        for sink in self._sinks:
            sink.process_not_found(curi)

    def process_redirect(self, curi):
        """
        Called when there were too many redirects for an URL, or the site has
        not been updated since the last visit.

        In the latter case, update the internal uri and increase the priority
        level.
        """
        del self._current_uris[curi.url]

        if curi.status_code in [301, 302]:
            # simply ignore the URL. The URL that is being redirected to is
            # extracted and added in the processing
            self._ignore_uri(curi)

        if curi.status_code == 304:
            # the page has not been modified since the last visit! Update it
            # NOTE: prio increasing happens in the prioritizer
            self.update_uri(curi)

        for sink in self._sinks:
            sink.process_redirect(curi)

    def process_server_error(self, curi):
        """
        Called when there was some kind of server error.

        Override this method in the actual frontier implementation.
        """
        del self._current_uris[curi.url]
        self._ignore_uri(curi)

        for sink in self._sinks:
            sink.process_server_error(curi)
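
Note on _crawl_now above: it pushes with the constant 3000, which sorts ahead of any real time.mktime timestamp (on the order of 10**9), so such uris come off the heap first. A quick check:

from Queue import PriorityQueue
import time

heap = PriorityQueue()
heap.put_nowait((time.time(), 'http://example.com/later'))
heap.put_nowait((3000, 'http://example.com/now'))  # the _crawl_now sentinel
print heap.get_nowait()[1]  # .../now wins: 3000 < any current timestamp
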
Example #46
from Queue import PriorityQueue

A = []

next_a = 3
next_a_s = next_a * next_a
pq = PriorityQueue()
pq.put_nowait((2, (2, 1)))
while True:  # PriorityQueue.not_empty is a Condition object, always truthy
    ans, (a, n) = pq.get_nowait()
    # print 'checking: {}^{} = {}'.format(a, n, ans)
    if ans > next_a:
        pq.put_nowait((ans, (a, n)))
        pq.put_nowait((next_a_s, (next_a, 2)))
        next_a += 1
        next_a_s = next_a * next_a
    else:
        if len(str(ans)) > 1 and a == sum(map(int, str(ans))):
            A.append(ans)
            print '{}^{} = {}'.format(a, n, ans)
        pq.put_nowait((ans * a, (a, n + 1)))
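
The script above enumerates perfect powers a**n in ascending order of value, lazily introducing each new base once the current minimum overtakes it, and prints the powers whose digit sum equals the base (e.g. 512 = (5+1+2)**3). A bounded variant in the same Queue-based style, stopping after a fixed number of hits (a sketch, using the tighter introduce-at-square condition):

from Queue import PriorityQueue

def digit_power_sums(count):
    # return the first `count` powers a**n (ascending) whose digit sum is a
    found = []
    next_a = 3
    pq = PriorityQueue()
    pq.put_nowait((4, (2, 2)))
    while len(found) < count:
        ans, (a, n) = pq.get_nowait()
        if ans > next_a * next_a:
            # a new base's square would come first: push back, introduce it
            pq.put_nowait((ans, (a, n)))
            pq.put_nowait((next_a * next_a, (next_a, 2)))
            next_a += 1
            continue
        if len(str(ans)) > 1 and a == sum(map(int, str(ans))):
            found.append(ans)
        pq.put_nowait((ans * a, (a, n + 1)))
    return found

print digit_power_sums(5)  # starts with 81 = (8+1)**2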