Example #1
File: LAB4CO.py  Project: SckeeDoo/LAB4CO
class AStarAlgorithm(object):
    def __init__(self, start, goal):
        self.path = []
        self.visitedQueue = []
        self.PriorityQueue = PriorityQueue()
        self.start = start
        self.goal = goal

    def Solve(self):
        startState = StateString(self.start, 0, self.start, self.goal)
        count = 0
        self.PriorityQueue.put((0, count, startState))
        while not self.path and self.PriorityQueue.qsize():
            closestChild = self.PriorityQueue.get()[2]
            closestChild.GenerateChildren()
            self.visitedQueue.append(closestChild.value)
            for child in closestChild.children:
                if child.value not in self.visitedQueue:
                    count += 1
                    if not child.distance:
                        self.path = child.path
                        break
                    self.PriorityQueue.put((child.distance, count, child))
        if not self.path:
            print ("Goal of " + self.goal + "is not possible")
        return self.path
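The (priority, count, state) tuples above are the usual way to break ties when the queued payloads are not comparable themselves. A minimal standalone sketch of that pattern (the Task class here is hypothetical, not part of the example):

from itertools import count
from queue import PriorityQueue

class Task:  # hypothetical payload with no ordering defined
    def __init__(self, name):
        self.name = name

pq = PriorityQueue()
counter = count()  # monotonically increasing tie-breaker
for priority, name in [(2, "b"), (1, "a"), (1, "c")]:
    pq.put((priority, next(counter), Task(name)))

while not pq.empty():
    priority, _, task = pq.get()  # Task objects are never compared directly
    print(priority, task.name)  # -> 1 a, 1 c, 2 b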
Example #2
File: classifier.py  Project: PET-UnB/pet
    def classify(self):
        classified = dict()
        ordered_packages = PriorityQueue()
        for p in self.packages:
            if p.ready_for_upload:
                cls = 'ready_for_upload'
            elif p.has_rc_bugs:
                cls = 'rc_bugs'
            elif p.missing_tag:
                cls = 'missing_tag'
            elif not p.tags:
                cls = 'new'
            elif p.newer_upstream:
                cls = 'new_upstream'
            elif p.watch_problem:
                cls = 'watch_problem'
            elif p.bugs:
                cls = 'bugs'
            elif not p.is_tagged:
                cls = 'wip'
            else:
                cls = 'other'

            package_popcon = popcon.package(p.name)
            if package_popcon:
                ordered_packages.put((-package_popcon[p.name], cls, p))
        while not ordered_packages.empty():
            _, cls, package = ordered_packages.get()
            classified.setdefault(cls, []).append(package)

        return classified
class EventQueue(object):
    def __init__(self):
        """Event queue for executing events at 
        specific timepoints.

	In current form it is NOT thread safe."""
        self.q = PriorityQueue()

    def schedule(self, f, ts):
        """Schedule f to be execute at time ts"""
        self.q.put(EqItem(ts, f))

    def schedule_recurring(self, f, interval):
        """Schedule f to be run every interval seconds.

        It will be run for the first time interval seconds
        from now."""

        def recurring_f():
            f()
            self.schedule(recurring_f, time.time() + interval)

        self.schedule(recurring_f, time.time() + interval)

    def run(self):
        """Execute events in the queue as timely as possible."""
        while True:
            event = self.q.get()
            event.f()
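EqItem is not defined in this snippet; for the PriorityQueue ordering to work it only needs to compare by timestamp. A minimal sketch of such an item, assuming that is all EqItem does:

from dataclasses import dataclass, field
from typing import Any, Callable

@dataclass(order=True)
class EqItem:
    ts: float  # items are ordered by timestamp only
    f: Callable[[], Any] = field(compare=False)  # the callback is never compared

With a definition like this, run() above always pops the earliest-scheduled callback first.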
Example #4
 def parse(self, input, fail, pmatch, **kwargs):
   inputs = input.fork(len(self.children))
   queue = PriorityQueue(len(self.children))
   for i, (input,child) in enumerate(zip(inputs,self.children)):
     queue.put((0,i,partial(child.parse,input=input,pmatch=copy(pmatch),**kwargs)))
   current = (-1, -1, badcall)
   
   @assertCont
   def ccont(fail, **kwargs):
     nonlocal current
     return partial(current[2],fail=cfail)
   
   @assertFail
   def cfail(value, cont, **kwargs):
     global DIE
     nonlocal fail, queue, current, ccont
     last = current[0]
     if value is not None:
       queue.put((last+value,current[1],cont))
     if not queue.qsize():
       return partial(fail,value=None,cont=DIE)
     current = queue.get()
     if current[0] == last:
       return partial(current[2],fail=cfail)
     return partial(fail,value=last-current[0],cont=ccont)
   return partial(DIE,fail=cfail)
class JobQueue(object):
    def __init__(self):
        self.queue = PriorityQueue()
        self.last_enqueued = None
        self.logger = logging.getLogger(self.__class__.__name__)

    def put(self, job, next_t=0):
        self.logger.debug("Putting a {} with t={}".format(job.__class__.__name__, next_t))
        re_enqueued_last = self.last_enqueued == job
        self.queue.put((next_t, job))
        self.last_enqueued = job
        return re_enqueued_last

    def tick(self):
        now = time.time()

        self.logger.debug("Ticking jobs with t={}".format(now))
        while not self.queue.empty():
            t, j = self.queue.queue[0]
            self.logger.debug("Peeked a {} with t={}".format(j.__class__.__name__, t))

            if t < now:
                self.queue.get()
                self.logger.debug("About time! running")
                j.run()
                self.put(j, now + j.INTERVAL)
                continue

            self.logger.debug("Next task isn't due yet. Finished!")
            break
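tick() peeks at the underlying heap through queue.queue[0], which bypasses the queue's internal lock; that is harmless if only one thread ticks, but a locked peek can be sketched like this (not part of the example):

from queue import PriorityQueue

def peek(pq: PriorityQueue):
    """Return the smallest item without removing it, or None if the queue is empty."""
    with pq.mutex:  # queue.Queue exposes its internal lock as .mutex
        return pq.queue[0] if pq.queue else None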
Example #6
class MultiThreadedWeatherDatabase(Thread):
    def __init__(self, file):
        super(MultiThreadedWeatherDatabase, self).__init__()
        self.file = file
        self.queue = PriorityQueue()
        self.event = Event()
        self.create_tables = False
        if not os.path.isfile(file):
            self.create_tables = True
        self.start()  # Threading module start

    def run(self):
        super(MultiThreadedWeatherDatabase, self).run()
        db = sqlite3.connect(self.file)
        cursor = db.cursor()
        if self.create_tables:
            self.create_all_tables()
        while True:
            if self.queue.empty():
                sleep(0.1)  # So the thread doesn't use all of the processor
                continue
            job, sql, arg, result = self.queue.get_nowait()
            if sql == '__close__':
                break
            if arg is None:
                arg = ''
            cursor.execute(sql, arg)
            db.commit()
            if result:
                for rec in cursor:
                    result.put(rec)
                result.put('__last__')
        db.close()
        self.event.set()

    def execute(self, sql, args=None, res=None, priority=2):
        self.queue.put_nowait((priority, sql, args, res))

    def select(self, sql, args=None, priority=2):
        res = Queue()
        self.execute(sql, args, res, priority)
        while True:
            rec = res.get()
            if rec == '__last__':
                break
            yield rec

    def close(self):
        self.execute('__close__')

    def create_all_tables(self):
        command1 = '''CREATE TABLE location (location_id INTEGER PRIMARY KEY , town TEXT, country TEXT, lat REAL, lon REAL, dateadded INTEGER, timezone INTEGER)'''
        self.execute(command1)
        command2 = '''CREATE TABLE "forecast" (forecast_id INTEGER PRIMARY KEY, location_id INTEGER, time INTEGER, temp REAL, pressure INTEGER, humidity INTEGER, clouds INTEGER, windspeed REAL, winddirection INTEGER, symbol INTEGER, FOREIGN KEY (location_id) REFERENCES location (location_id) DEFERRABLE INITIALLY DEFERRED)'''
        self.execute(command2)


    def remove_old_forecasts(self):
        command = '''DELETE FROM forecast WHERE forecast.time < STRFTIME('%s', 'now')'''
        self.execute(command)
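A hypothetical usage sketch for the class above, writing to and reading from the location table it creates (the file name and the inserted values are made up):

db = MultiThreadedWeatherDatabase('weather.db')
db.execute(
    'INSERT INTO location (town, country, lat, lon, dateadded, timezone) '
    'VALUES (?, ?, ?, ?, ?, ?)',
    ('Oslo', 'Norway', 59.91, 10.75, 1700000000, 1))
for row in db.select('SELECT town, country FROM location WHERE country = ?', ('Norway',)):
    print(row)
db.close()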
Example #7
    def next_enrichment(self, cookie: Cookie) -> Optional[Enrichment]:
        """
        Loads the next set of data not present in the known data given (the "enrichment").

        Returns ``None`` if all enrichments have already been applied to the cookie.
        :param cookie: the data already known
        :return: the loaded enrichment
        """
        enrichment_loaders_priority_queue = PriorityQueue()
        for enrichment_loader in self.enrichment_loaders:
            enrichment_loaders_priority_queue.put(enrichment_loader)

        while not enrichment_loaders_priority_queue.empty():
            enrichment_loader = enrichment_loaders_priority_queue.get()

            enrich = False
            try:
                enrich = enrichment_loader.can_enrich(cookie)
            except Exception as e:
                logging.error("Error checking if enrichment can be applied to cookie; Enrichment loader: %s;"
                              "Target Cookie: %s; Error: %s" % (enrichment_loader, cookie.identifier, e))

            if enrich:
                try:
                    return enrichment_loader.load_enrichment(cookie)
                except Exception:
                    logging.error("Error loading enrichment; Enrichment loader: %s; Target Cookie: %s; Error: %s"
                                  % (enrichment_loader, cookie.identifier, traceback.format_exc()))

        return None
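Putting EnrichmentLoader objects straight into a PriorityQueue only works if the loaders are orderable. A minimal sketch of a loader-like class that would satisfy that requirement (the priority attribute and __lt__ are illustrative assumptions, not the library's actual interface):

class SketchEnrichmentLoader:
    """Illustrative loader: a lower priority value is tried first."""

    def __init__(self, priority):
        self.priority = priority

    def __lt__(self, other):
        return self.priority < other.priority

    def can_enrich(self, cookie):
        return False  # placeholder

    def load_enrichment(self, cookie):
        return None  # placeholder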
Example #8
    def _process_nonrealtime_stop(self, state):
        import supriya.patterns

        if not state["has_stopped"]:
            state["has_stopped"] = True
        self._debug("UNWINDING")
        assert state["event_queue"].qsize() == 1

        event_tuple = state["event_queue"].get()
        if event_tuple.iterator_index not in state["visited_iterators"]:
            self._debug("    DISCARDING, UNVISITED", event_tuple)
        elif not isinstance(event_tuple.event, supriya.patterns.CompositeEvent):
            self._debug("    DISCARDING, NON-COMPOSITE", event_tuple)
        elif not event_tuple.event.get("is_stop"):
            self._debug("    DISCARDING, NON-STOP", event_tuple)
        else:
            self._debug("    PRESERVING", event_tuple)
            state["event_queue"].put(event_tuple._replace(offset=0.0))

        iterator_queue = PriorityQueue()
        while not state["iterator_queue"].empty():
            iterator_tuple = state["iterator_queue"].get()
            iterator_tuple = iterator_tuple._replace(offset=0.0)
            iterator_queue.put(iterator_tuple)
        state["iterator_queue"] = iterator_queue
Example #9
    def test_starts_actions_and_adds_back_to_queue(self):
        # given
        start_time = 0
        deadline = 10
        action_to_start = Action(start_time, deadline+1)
        action_to_start.agent = Mock(name="agent")
        action_to_start.is_applicable = Mock(return_value=True)
        action_to_start.apply = Mock(name="apply")
        model = Mock(name="model")
        execution_queue = PriorityQueue()
        execution_queue.put(ActionState(action_to_start, start_time, ExecutionState.pre_start))

        # when
        actual, _stalled = simulator.execute_action_queue(model, execution_queue,
            break_on_new_knowledge=False, deadline=deadline)

        # then
        assert_that(execution_queue.queue, has_length(1))
        time, state, action = execution_queue.queue[0]
        assert_that(time, equal_to(action_to_start.end_time))
        assert_that(state, equal_to(ExecutionState.executing))
        assert_that(action, equal_to(action_to_start))
        assert_that(actual.executed, is_(empty()))
        assert_that(is_not(action_to_start.apply.called))
        assert_that(actual.simulation_time, equal_to(start_time))
Example #10
def findWayByAStar(start, isFinish, getDistance, getIncidenceList, getHeuristicCostEstimate):
    """
    start - start vertex, isFinish - function which returns True only on finish
    getIncidenceList(vertex) - returns incidence list of vertex,
    getDistance(first_vertex, second_vertex) - returns distance from first_vertex to second_vertex,
    getHeuristicCostEstimate(vertex).
    findWayByAStar returns path(list) from start to finish
    """
    processed_vertices = set()
    waiting_vertices = {start, }

    node_queue = PriorityQueue()
    node_storage = dict()

    node_storage[start] = _AStarNode(start)
    node_storage[start].hce = getHeuristicCostEstimate(start)
    node_storage[start].updateSum()
    node_queue.put_nowait(tuple(((node_storage[start].sum, 0), node_storage[start].vertex)))

    while len(waiting_vertices) != 0:
        processing_vertex = node_queue.get()[1]  # item = ((priority number, priority_index), data).
        while processing_vertex in processed_vertices:
            processing_vertex = node_queue.get_nowait()[1]

        if isFinish(processing_vertex):
            return _createPath(processing_vertex, node_storage)

        _processVertex(processing_vertex, getDistance, getIncidenceList, getHeuristicCostEstimate,
                       processed_vertices, waiting_vertices, node_storage, node_queue)

    raise Exception("Path doesn't exist")
Example #11
    def max_spanning_tree(self):
        """
        Kruskal's algorithm adapted for a maximum spanning tree, to get the
        most important connections in the graph.
        -> Graph
        """
        # TODO: Test
        # TODO: Add unit tests
        # TODO: Fix bug, use a disjoint set when testing connectivity
        pq = PriorityQueue()

        for conn in self.get_connections():
            # Hack: negate the weight so the min-oriented priority queue
            # returns the heaviest connections first
            pq.put((-self.get_connection_weight(conn), self.connection_key(conn)))

        min_tree = Graph()
        while not pq.empty():
            curr_weight, curr_connection = pq.get()
            curr_weight = -curr_weight  # undo the negative-weight hack
            if min_tree.is_node_in_graph(curr_connection[0]) and \
                min_tree.is_node_in_graph(curr_connection[1]):
                continue

            min_tree.add_connection(*curr_connection)
            min_tree.set_connection_weight(curr_connection, curr_weight)

        for node in self.get_nodes():
            min_tree.set_node_weight(node, self.get_node_weight(node))

        return min_tree
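The negated weights above are the standard trick for turning Python's min-oriented PriorityQueue into a max-first queue. A standalone sketch of just that trick:

from queue import PriorityQueue

weights = {("a", "b"): 3, ("b", "c"): 5, ("a", "c"): 1}
pq = PriorityQueue()
for conn, w in weights.items():
    pq.put((-w, conn))  # store the negated weight so the heaviest edge pops first

while not pq.empty():
    neg_w, conn = pq.get()
    print(-neg_w, conn)  # -> 5 ('b', 'c'), 3 ('a', 'b'), 1 ('a', 'c')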
Example #12
def giveConclusion():       
    foodmap = mapFood()
    print("Foodmap =", foodmap)
    heatmap = mapHeat()
    foodheat = {}
    foods = PriorityQueue()
    for (food, distance) in foodmap:
        foodheat[food] = heatmap[food[1]][food[0]]
        if foodheat[food] < calculateLimit(heatmap):
            foods.put((distance, food))
    if not foods.empty():
        good_food = foods.get()[1]
        head = snakes[speler_nummer].head
        path = givePath(head, good_food)
        direction = giveDirection(path[0], path[1])
    else:
        minimum = wall_value
        direction = -1
        head = snakes[speler_nummer].head
        backuplist = []
        for coordinate in neighbours(head):
            if heatmap[coordinate[1]][coordinate[0]] < minimum:
                direction = giveDirection(head, coordinate)
                minimum = heatmap[coordinate[1]][coordinate[0]]
            elif level[coordinate[1]][coordinate[0]] in ['.','x']:
                backuplist.append(coordinate)
        if direction == -1:
            if len(backuplist) != 0:
                direction = giveDirection(head, backuplist[0])
            else:
                print("Goodbye, cruel world!")
                direction = 'r'
    return direction
Example #13
def Astar(start, goal, cost_map, heuristic_map, vehicle, cursor, motion_primitives):
    """
    Open Aera Motion Planning, Static Environment
    """
    pq = PriorityQueue()
    pq.put(start)
    node_dict = {start.index:start, goal.index:goal}
    edge_dict = {}
    # graph = {} # {(state1, state2):trajectory}
    times = 0
    while times<200 and not goal.reach and not pq.empty():
        times += 1
        current = pq.get()
        current.extend = True
        State.RushTowardGoal(current=current, goal=goal, cursor=cursor, edge_dict=edge_dict, pq=pq, cost_map=cost_map, vehicle=vehicle)
        # if traj_g is not None:
        #     edge_dict[(current, goal)] = traj_g
        #     pq.put(goal)
        State.ControlSet(current=current,motion_primitives=motion_primitives, pq=pq, node_dict=node_dict, edge_dict=edge_dict, cost_map=cost_map, heuristic_map=heuristic_map,vehicle=vehicle, goal=goal)
        # control_set = State.ControlSet(current=current,motion_primitives=motion_primitives, cost_map=cost_map, heuristic_map=heuristic_map,vehicle=vehicle)
        # for (successor, traj) in control_set:
        #     edge_dict[(current, successor)] = traj
        #     pq.put(successor)
    if goal.reach:
        return True, node_dict, edge_dict
    else:
        return False, node_dict, edge_dict
Example #14
    def _test_loaded_in_correct_order(
        self, enrichment_manager: EnrichmentManager, enrichment_loaders: Iterable[EnrichmentLoader]
    ):
        """
        Tests that the given enrichment manager applies enrichments defined by the given loaders in the correct order.
        :param enrichment_manager: enrichment manager
        :param enrichment_loaders: enrichment loaders
        """
        logging.root.setLevel(logging.CRITICAL)
        cookie = Cookie("the_identifier")

        enrichment_loaders_priority_queue = PriorityQueue()
        for enrichment_loader in enrichment_loaders:
            if enrichment_loader.can_enrich(cookie):
                enrichment_loaders_priority_queue.put(enrichment_loader)

        enrichment = enrichment_manager.next_enrichment(cookie)
        while enrichment is not None:
            expected_enrichment_loader = enrichment_loaders_priority_queue.get()  # type: EnrichmentLoader
            expected_enrichment = expected_enrichment_loader.load_enrichment(cookie)
            self.assertEqual(enrichment, expected_enrichment)
            cookie.enrich(enrichment)
            expected_enrichment_loader.can_enrich = MagicMock(return_value=False)
            enrichment = enrichment_manager.next_enrichment(cookie)
        self.assertTrue(enrichment_loaders_priority_queue.empty())
Example #15
 def find_path(self, start, end):
     start = start  # .upper()
     # end = list(set([x.upper() for x in end]))  # list comprehension, convert tuple to list. Booyeah.
     end = end  # [x.upper() for x in end]
     while (start in end):
         end.remove(start)
     if (not start in self.graph.keys()) or (not set(end) <= set(self.graph.keys())) or (start == end):
         return []
     self.__reset_visited__()
     from queue import PriorityQueue
     pq = PriorityQueue()
     self.__assign__(pq, [[start, 0]], self.__get_adjacent__(start))
     while not pq.empty():
         shortest = pq.get()
         if (shortest.head() in end):
             end.remove(shortest.head())
             self.__reset_visited__()
             pq = PriorityQueue()
             '''print(shortest.head())
             print(shortest.path())
             print(end)'''
             self.__assign__(pq, shortest.path(), [])
             if not end:
                 return shortest.path()
         self.__assign__(pq, shortest.path(), self.__get_adjacent__(shortest.head()))
     '''print("second")'''
     return []
Example #16
class Phase(object):
    def __init__(self):
        self._priority = 0
        self._actions = PriorityQueue()
        self._log = []
        self._ended = False
        self._checking = False

    def add_action(self, action):
        self._actions.put(action)
        self.check_queue()

    def check_queue(self):
        if not self._checking:
            self._checking = True
            while(not self._actions.empty()
                  and self._actions.queue[0].get_priority() <= self._priority):
                action = self._actions.get()
                action.resolve()
                self._log.append(action)
            self._checking = False

    def increase_priority(self, priority=100):
        self._priority += priority
        self.check_queue()

    def is_ended(self):
        return self._ended

    def is_checking(self):
        return self._checking
Example #17
 def substitute_once(self, string, indices):
     assert isinstance(string, str)
     indices = list(indices)
     assert len(string) == len(indices)
     n = len(string)
     i = 0
     result_string = []
     result_indices = []
     match = re.search(self.pattern, string)
     if match:
         q = PriorityQueue()
         for key, value in self.replacement.items():
             span = match.span(key)
             assert span
             q.put((span, value))
         while not q.empty():
             (start, end), value = q.get()
             assert start >= i
             assert start < len(string)
             # TODO: Allow align to be set in value
             align = self.align
             result_string.append(string[i:start])
             result_indices.append(indices[i:start])
             i = start
             if align == self.Align.left:
                 pretend = indices[start]
             else:
                 pretend = indices[end - 1]
             result_string.append(value)
             result_indices.append([pretend] * len(value))
             i = end
     result_string.append(string[i:n])
     result_indices.append(indices[i:n])
     return ''.join(result_string), itertools.chain.from_iterable(result_indices)
def solve(state):
    solved = False;
    queue = PriorityQueue();
    visited = [];

    visited.append(state);

    # Main loop
    while solved == False:
        empty = state.getEmpty();
        gn = state.getDepth() + 1;

        # Move left
        if 1 <= empty and empty % 3 != 0:
            left = state.move('l', gn);
            left.setParent(state);
            if not any(i == left for i in visited):
                queue.put((left.getValue(), time.time(), left));
                if(left.solved() == True):
                    solved = True;
                    state = left;

        # Move right
        if empty < 8 and empty % 3 != 2 and solved == False:
            right = state.move('r', gn );
            right.setParent(state);
            if not any(i == right for i in visited):
                queue.put((right.getValue(), time.time(), right));
                if(right.solved() == True):
                    solved = True;
                    state = right;

        # Move up
        if 3 <= empty and solved == False:
            up = state.move('u', gn);
            up.setParent(state);
            if not any(i == up for i in visited):
                queue.put((up.getValue(), time.time(), up));
                if(up.solved() == True):
                    solved = True;
                    state = up;

        # Move down
        if empty <= 5 and solved == False:
            down = state.move('d', gn);
            down.setParent(state);
            if not any(i == down for i in visited):
                queue.put((down.getValue(), time.time(), down));
                if(down.solved() == True):
                    solved = True;
                    state = down;

        # Move to next state
        if solved == False:
            state = queue.get()[2];
            visited.append(state);

    # Print steps to solution
    printPath(state);
Example #19
 def _setup_state(self):
     queue = PriorityQueue()
     for index, pattern in enumerate(self._patterns, 1):
         iterator = iter(pattern)
         payload = ((0.0, index), iterator)
         queue.put(payload)
     state = (queue,)
     return state
def update_market(market):
    new_market = PriorityQueue()
    for i in range(economy[market].qsize()):
        listing = economy[market].get()
        seller = listing[2]
        if seller.alive:
            new_market.put(listing)
    economy[market] = new_market
def main():
    # Figure out the URL to use
    source_arg = sys.argv[1]
    if source_arg.startswith('http://'):
        url = source_arg
    elif source_arg.startswith('www.'):
        url = 'http://'+source_arg
    elif source_arg.startswith('darklyrics.com'):
        url = 'http://www.'+source_arg
    else:
        source_arg = source_arg.lower().replace(' ', '')
        url = 'http://www.darklyrics.com/{}/{}.html'.format(source_arg[0], source_arg)

    # Read artist page
    print('Accessing {}'.format(url), file=sys.stderr)

    with urlopen(url) as ufd:
        artist_html = ufd.read().decode('utf-8')

    artist_re = re.compile(r'".*#1"')
    artist_mo = artist_re.findall(artist_html)

    album_urls = [s.replace('..', 'http://www.darklyrics.com')[1:-3] for s in artist_mo]

    # Create threads to download and scrape each page
    q = PriorityQueue()
    threads = []
    for i, url in enumerate(album_urls):
        thread = threading.Thread(target=get_url, args=(q, url, i))
        thread.daemon = True
        thread.start()
        threads.append(thread)

    # Wait for all urls to download
    for thread in threads:
        thread.join()

    # File to store output in
    file_name = None
    if len(sys.argv) == 2:
        artist_soup = BeautifulSoup(artist_html, "html.parser")
        file_name = artist_soup.find("title").get_text() + ".txt"
    else:
        file_name = sys.argv[2]
    fd = open(file_name, "w")

    # Go through the queue
    while not q.empty():
        try:
            index, album_html = q.get()
            text = scrape_from_html(album_html)
            print(text, file=fd)
        except Exception as e:
            print("Error scraping from html: {}".format(album_html), file=sys.stderr)
            print(e, file=sys.stderr)


    fd.close()
Example #22
File: todotest.py  Project: cceleri/todo
 def test_priority(self):
     from queue import PriorityQueue
     q = PriorityQueue()
     tiP1 = tt.TodoItem('vacuum bedroom', priority=1)
     tiP2 = tt.TodoItem('play video games', priority=2)
     q.put(tiP1)
     q.put(tiP2)
     self.assertEqual(q.get(), tiP1)
     self.assertEqual(q.get(), tiP2)
Example #23
class MemStorage(object):
    def __init__(self):
        self.urls = PriorityQueue()

    def read_next(self):
        return self.urls.get_nowait()

    def write(self, timestamp, url):
        self.urls.put((timestamp, url))
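A small usage sketch for MemStorage above; read_next returns the earliest-timestamped entry and raises queue.Empty once the store is drained (the timestamps and URLs are made up):

from queue import Empty

storage = MemStorage()
storage.write(20, "http://example.com/second")
storage.write(10, "http://example.com/first")

try:
    while True:
        print(storage.read_next())  # (10, '.../first') then (20, '.../second')
except Empty:
    pass  # store drained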
Example #24
def movesInCol(board, c):
    mQueue = PriorityQueue()
    for r in range(len(board)):
        if isnan(board[r][c]):
            continue
        v = board[r][c]
        move = Move(r,c,v)
        mQueue.put(move)
    return q2list(mQueue)
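q2list is not shown in this example; a minimal sketch of what it presumably does, namely drain the PriorityQueue into an ordered list (this assumes Move objects are comparable, e.g. by value):

def q2list(pq):
    """Drain a PriorityQueue into a list, smallest item first."""
    items = []
    while not pq.empty():
        items.append(pq.get())
    return items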
Example #25
def queue_hamming_distances(hex_string):
    queue = PriorityQueue()
    for keysize in range(2, 40):
        byteset1 = hex_string[0: 4*keysize]
        byteset2 = hex_string[4*keysize: 8*keysize]
        hamming_distance = ChallengeUtils.hamming_distance_hex(byteset1,
                                                               byteset2)
        hamming_distance_normalised = hamming_distance / keysize
        queue.put((hamming_distance_normalised, keysize))
    return queue
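Because the function returns the queue itself, a caller can pop the most promising key sizes in order; a usage sketch, assuming data holds the hex-encoded ciphertext being analysed:

queue = queue_hamming_distances(data)  # 'data': hex-encoded ciphertext (assumed)
best_keysizes = [queue.get()[1] for _ in range(3)]  # three lowest normalised distances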
Example #26
File: workQueue.py  Project: Braedon/up
    def __init__(self, maxsize=0, prefsize=5, maxthreads=20, minthreads=1):
        PriorityQueue.__init__(self, maxsize)
        self.threadLock = RLock()
        self.threads = []
        self.prefsize = prefsize
        self.maxthreads = maxthreads
        self.minthreads = minthreads

        for _ in range(minthreads):
            self.__createThread()
Example #27
def movesInRow(board, r):
    mQueue = PriorityQueue()
    row = board[r]
    for c in range(len(row)):
        if isnan(row[c]):
            continue
        v = row[c]
        move = Move(r,c,v)
        mQueue.put(move)
    return q2list(mQueue)
Example #28
    def find_closest(location_list, location, dist):
        q = PriorityQueue()

        for i,j in enumerate(location_list):
            # prevent dictionary comparison by inserting the index
            q.put((dist(j, location) + weight(j), i, j))

        prio, i, result = q.get()

        return result
Example #29
def breed_it(ca):
    c_temp = PriorityQueue()
    result_out('[.]\tBreeding Next Generation...')
    while len(ca) > 0:
        if len(ca) == 1:
            cq = ca.pop(0)
            c_temp.put((cq.score, cq))
            return c_temp
        a = ca.pop(0)
        a1 = a.genome[0:len(a.genome) // 2]
        a2 = a.genome[len(a.genome) // 2:]
        # pull a random partner to mate with
        # it's a free society, after all
        b = ca.pop(random.randint(0, len(ca) - 1))
        b1 = b.genome[0:len(b.genome) // 2]
        b2 = b.genome[len(b.genome) // 2:]
        c = Creature(0, a1 + b2)
        d = Creature(0, b1 + a2)
        e = Creature(0, mutate(a1 + b2))
        f = Creature(0, mutate(b1 + a2))
        a.modified = 0
        b.modified = 0
        c.modified = 1
        d.modified = 1
        e.modified = 1
        f.modified = 1
        c_temp.put((0, a))
        c_temp.put((0, b))
        c_temp.put((0, c))
        c_temp.put((0, d))
        c_temp.put((0, e))
        c_temp.put((0, f))
    result_out('[.]\tSuccess')
    return c_temp
Example #30
File: solve.py  Project: Zach41/LeetCode
    def kthSmallest(self, matrix, k):
        # BFS
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int
        """
        q = PriorityQueue()
        n = len(matrix)
        visited = {}
        q.put((matrix[0][0], (0, 0)))
        visited[0] = True

        while not q.empty():
            k -= 1
            top = q.get()
            if k == 0:
                return top[0]
            x, y = top[1][0], top[1][1]
            if x < n - 1 and not ((x + 1) * n + y) in visited:
                q.put((matrix[x + 1][y], (x + 1, y)))
                visited[(x + 1) * n + y] = True
            if y < n - 1 and not (x * n + y + 1) in visited:
                q.put((matrix[x][y + 1], (x, y + 1)))
                visited[x * n + y + 1] = True
def verify_shortest_paths(config, possible_values_for_p1):
    # create the adjacency lists from the set of edges
    nodes = set()
    neighbours = {}  # nodes adjacent to a given node
    graph = {}  # weighed edges starting from a given node
    for a, b in config:
        nodes.add(a)
        nodes.add(b)
        neighbours[a] = set()
        neighbours[b] = set()
        graph[a] = []
        graph[b] = []

    for a, b in config:
        neighbours[a].add(b)
        neighbours[b].add(a)

    # initialize the weights in the adjacency lists
    for a in nodes:
        for b in neighbours[a]:
            if manhattan(a, b) == 1:
                graph[a].append((b, SquareRootNumber(1, 0)))  # length 1
            elif manhattan(a, b) == 2:
                graph[a].append((b, SquareRootNumber(0, 1)))  # length √2
            else:
                print(a, b)
                raise ValueError('Some edges are longer than sqrt(2)')

    # Dijkstra's algorithm
    for p1 in possible_values_for_p1:
        distances = {}
        pq = PriorityQueue()
        pq.put((SquareRootNumber(0, 0), p1))

        number_of_close_points = 0  # number of points with Euclidean distance at most √5 from p1

        # there are 21 possible values for p2
        while not pq.empty() and number_of_close_points < 21:
            cur_dist, cur_point = pq.get()

            if not cur_point in distances:
                if dist_squared(p1, cur_point) <= 5:
                    number_of_close_points += 1

                distances[cur_point] = cur_dist
                for neigh, edge_length in graph[cur_point]:
                    if not neigh in distances:
                        pq.put((cur_dist + edge_length, neigh))

        x1, y1 = p1
        for dx in [-2, -1, 0, 1, 2]:
            for dy in [-2, -1, 0, 1, 2]:
                p2 = (x1 + dx, y1 + dy)
                if dist_squared(p1, p2) <= 5:
                    square_dist_p1_p2 = SquareRootNumber(
                        dist_squared(p1, p2), 0
                    )  # we have to convert this integer explicitly to a SquareRootNumber
                    if not (distances[p2]**2 <=
                            (SquareRootNumber(1, 1)**2) * square_dist_p1_p2):
                        raise ValueError(
                            'Some dilations are greater than sqrt(2): ' +
                            str(p1) + ', ' + str(p2))
class UniformCost:
    def __init__(self, initial_state, rows, cols, id):
        self.open_list = PriorityQueue()
        self.close_list = {}
        self.goal_node = ()
        self.initial_state = initial_state
        self.rows = rows
        self.cols = cols
        self.id = str(id)

    def run(self, log_result_to_csv = False):
        print('start algo for: ' + self.initial_state)
        if log_result_to_csv:
            self.initialize_txt_files()
        search_count = 0
        start = time.time()
        found_goal = False
        goal_state_1 = self.calculate_goal_state_1()
        goal_state_2 = self.calculate_goal_state_2()
        self.set_state_open_list(self.initial_state, 0) # nodes that need to be visited (priority queue or dictionary)
        self.close_list = {}
        while not self.open_list.empty():
            # finds the smallest cost path and visits it
            current_node = self.open_list.get()
            current_node_cost = current_node[0]
            current_node_key = current_node[1]
            current_node_prev_key = current_node[2]
            cost_of_current_move = current_node[3]
            tile_that_was_moved = current_node[4]
            search_count += 1

            if current_node_key not in self.close_list or self.close_list[current_node_key][0] > current_node_cost:
                self.close_list[current_node_key] = (current_node_cost, current_node_prev_key, cost_of_current_move, tile_that_was_moved)

                # writes search path to txt file
                if log_result_to_csv:
                    with open(self.id + "_ucs_search.txt", "a") as ucs_search:
                        ucs_search.write(self.id + " " + str(current_node_cost) +" 0 " + current_node_key+"\n")
                
                # checks if goal state was reached
                if(current_node_key == goal_state_1 or current_node_key == goal_state_2):
                    self.goal_node = current_node
                    found_goal = True
                    break

                # now we need to add the new nodes to the open list
                self.set_state_open_list(current_node_key, current_node_cost)

        end = time.time()
        print('I finished running in: ' + str(end - start) + " seconds")
        solution_array = self.get_algorithm_stats(str(end - start))

        if log_result_to_csv:
            if solution_array == -1:
                with open(self.id+"_ucs_search.txt", "a") as ucs_search:
                        ucs_search.write("No solution")
            else:
                with open(self.id+"_ucs_solution.txt", "a") as ucs_solution:
                        for line in solution_array:
                            ucs_solution.write(line+"\n")

        total_cost = self.goal_node[0] if found_goal else None
        solution_path_len = len(list(solution_array)) - 1
        return {'total cost': total_cost, 'found_a_solution': found_goal, 'solution_path_length': solution_path_len, 'search_path_length': search_count, 'execution_time': (end - start)}


    def normal_move_action_string(self, state, action):
        puzzle = XPuzzle(self.rows, self.cols, state)
        tile_moved = puzzle.regular_move(action)
        if( tile_moved != -1):
            return {'key': puzzle.current_state_to_string(), 'tile_moved': tile_moved}
        else:
            return -1

    def wrapping_move_action_string(self, state, wrap_col = False):
        puzzle = XPuzzle(self.rows, self.cols, state)
        tile_moved = puzzle.wrapping_move(wrap_col)
        if( tile_moved != -1):
            return {'key': puzzle.current_state_to_string(), 'tile_moved': tile_moved}
        else:
            return -1
    
    def diagonal_move_action_string(self, state, is_wrapping):
        puzzle = XPuzzle(self.rows, self.cols, state)
        tile_moved = puzzle.diagonal_move(is_wrapping)
        if( tile_moved != -1):
            return {'key': puzzle.current_state_to_string(), 'tile_moved': tile_moved}
        else:
            return -1
    
    def set_state_open_list(self, current_state, base_amount):
        # 1 cost actions: up, down, left, right
        # 2 cost actions: wrapping_move
        # 3 cost actions: diagonal_move, diagonal_move(wrap)
        # total of 7 actions

        # open list = (total_cost, current_key, previous_key, cost, tile_moved)
        up_action = self.normal_move_action_string(current_state, "up")
        if(up_action != -1): self.open_list.put(((1 + base_amount), up_action['key'], current_state, 1, up_action['tile_moved']))

        down_action = self.normal_move_action_string(current_state, "down")
        if(down_action != -1): self.open_list.put(((1 + base_amount), down_action['key'], current_state, 1, down_action['tile_moved']))

        left_action = self.normal_move_action_string(current_state, "left")
        if(left_action != -1): self.open_list.put(((1 + base_amount), left_action['key'], current_state, 1, left_action['tile_moved']))

        right_action = self.normal_move_action_string(current_state, "right")
        if(right_action != -1): self.open_list.put(((1 + base_amount), right_action['key'], current_state, 1, right_action['tile_moved']))

        wrapping_action = self.wrapping_move_action_string(current_state, False)
        if(wrapping_action != -1): self.open_list.put(((2 + base_amount), wrapping_action['key'], current_state, 2, wrapping_action['tile_moved']))

        diagonal_action = self.diagonal_move_action_string(current_state, False)
        if(diagonal_action != -1): self.open_list.put(((3 + base_amount), diagonal_action['key'], current_state, 3, diagonal_action['tile_moved'])) 

        diagonal_action_wrap = self.diagonal_move_action_string(current_state, True)
        if(diagonal_action_wrap != -1): self.open_list.put(((3 + base_amount), diagonal_action_wrap['key'], current_state, 3, diagonal_action_wrap['tile_moved']))

        if self.rows > 2:
            wrapping_action_col = self.wrapping_move_action_string(current_state, True)
            if(wrapping_action_col != -1): self.open_list.put(((2 + base_amount), wrapping_action_col['key'], current_state, 2, wrapping_action_col['tile_moved']))

    def get_algorithm_stats(self, time):
        if(not self.goal_node):
            print('no goal state found')
            return -1
        else:
            summ = "TIME: " + time + "s, COST: " + str(self.goal_node[0])
            order = [summ]
            previous_node = self.goal_node[1]
            print("Found Goal Node")
            print(summ)
            while(previous_node != self.initial_state):
                # 3 = tile moved, 2 = cost of move, 1 = previous move
                value = str(self.close_list[previous_node][3]) + " " + str(self.close_list[previous_node][2]) +" "+ str(previous_node)
                order.append(value)
                previous_node = self.close_list[previous_node][1]
            return reversed(order)

    def initialize_txt_files(self):
        uniform_cost_search_file = open(self.id+"_ucs_search.txt", "w")
        uniform_cost_search_file.write("0 0 0 " + self.initial_state + "\n")
        uniform_cost_search_file.close()
        uniform_cost_solution_file = open(self.id+"_ucs_solution.txt", "w")
        uniform_cost_solution_file.write("0 0 " + self.initial_state + "\n")
        uniform_cost_solution_file.close()

    def calculate_goal_state_1(self):
        goal_string = ''
        for i in range((self.cols * self.rows) - 1):
            goal_string += str(i + 1) + ' '
        goal_string += '0'
        return goal_string
    
    def calculate_goal_state_2(self):
        goal_string = ''
        even = ''
        odd = ''
        for i in range((self.cols * self.rows) - 1):
            if((i + 1) % 2 == 0):
               even += str(i + 1) + " "
            else:
                odd += str(i + 1) + " "
        goal_string = odd + even + "0"
        return goal_string
                

#1 0 3 6 5 2 7 4
#6 3 4 7 1 2 5 0
#3 0 1 4 2 6 5 7
# algo1 = UniformCost("1 0 3 6 5 2 7 4", 2, 4, 0)
# algo2 = UniformCost("6 3 4 7 1 2 5 0", 2, 4, 1)
# algo3 = UniformCost("3 0 1 4 2 6 5 7", 2, 4, 2)
# algo4 = UniformCost("1 2 3 4 0 5 6 7 8 9 10 11", 3, 4, 3)

# algo1.run()
# algo2.run()
# algo3.run()
# algo4.run()
Example #33
def runAlgorithm(window, grid, start, end, showVis, scoreDisplay):
    startTime = timer()
    gScores = dict()

    for row in grid:
        for square in row:
            square.updateNeighbours(grid)
            gScores[square] = float('inf')

    gScores[start] = 0
    openSet = PriorityQueue()
    path = dict()
    counter = 0
    openSet.put((h_score(start, end), counter, start))
    openSetTracker = {start}

    while len(openSetTracker):
        currentNode = openSet.get()[2]
        if showVis: currentNode.color = COLORS['green']
        openSetTracker.remove(currentNode)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return False, -1

        if currentNode == end:
            current = end
            while current != start:
                current.color = COLORS['purple']
                current = path[current]

            start.color = COLORS['orange']
            end.color = COLORS['pink']
            draw_screen(window, grid)
            endTime = timer()
            return True, endTime - startTime

        for ng in currentNode.neighbours:
            if not ng.isChecked:
                h = h_score(ng, end)
                g = gScores[currentNode] + 1
                f = h + g
                ng.isChecked = True

                if g < gScores[ng]:
                    gScores[ng] = g
                    counter += 1
                    path[ng] = currentNode

                    if not ng in openSetTracker:
                        openSet.put((f, counter, ng))
                        openSetTracker.add(ng)
                        if showVis: ng.color = COLORS['red']
                        if scoreDisplay:
                            if scoreDisplay == 1: ng.scoreInfo = f
                            elif scoreDisplay == 2: ng.scoreInfo = g
                            elif scoreDisplay == 3: ng.scoreInfo = h
                start.color = COLORS['orange']
            if showVis: draw_screen(window, grid)

    endTime = timer()
    return False, endTime - startTime
Example #34
class _TaskScheduler(object):
    def __init__(self, timeout):
        self._task_queue = PriorityQueue()
        self._task_done = {}
        self._start_time = time.time()
        self._event_queue = []
        self._task_killed = {}
        self._keepalive = []
        self._timeout = timeout

    def submit(self, task):
        self._keepalive.append(task.name)
        self._task_queue.put(task)

    def recv(self, ev):
        self._event_queue.append(ev)

    def _handle_events(self, name, task):
        for e in self._event_queue:
            if e.name == name:
                e.handle(task)

    def bye(self):
        while not self._task_queue.empty():
            task = self._task_queue.get()
            if task.is_alive():
                task.kill(True)

    def run(self):
        while not self._task_queue.empty():
            task = []
            done = {}
            while not self._task_queue.empty():
                next_task = self._task_queue.get()
                logging.info("handle queue: %s", next_task.name)
                if not next_task.is_alive():
                    done[next_task.name] = next_task
                    continue
                self._handle_events(next_task.name, next_task)
                if next_task.is_alive():
                    task.append(next_task)
                else:
                    done[next_task.name] = next_task
            for t in task:
                self._task_queue.put(t)

            for k, v in done.items():
                if k in self._keepalive:
                    if v._task.exitcode != 0:
                        v.start()
                        self._task_queue.put(v)
                        continue
                self._task_done[k] = v
            time.sleep(1)
            if self._timeout + self._start_time < time.time():
                logging.info("stop!!!!!")
                return
class GameArtificialIntelligence(object):
    def __init__(self, heuristic_fn):
        self.heuristic = heuristic_fn

    @lru_cache(maxsize=2**10)
    def move_search(self, starting_node, depth, current_player, other_player):
        self.player = current_player
        self.other_player = other_player
        possible_moves = starting_node.get_valid_moves(current_player)
        if len(possible_moves) == 1:
            return list(possible_moves)[0]

        score = -sys.maxsize
        move = None
        self.queue = PriorityQueue(len(possible_moves))

        (new_move,
         new_score) = self.alpha_beta_wrapper(starting_node, depth,
                                              current_player, other_player)
        if new_move is not None:
            move = new_move
            score = new_score
            # print "Got to Depth:", depth
        return move

    def alpha_beta_wrapper(self, node, depth, current_player, other_player):
        alpha = -sys.maxsize - 1
        beta = sys.maxsize
        if self.queue.queue:
            children = self.queue.queue
            self.queue = PriorityQueue(self.queue.maxsize)
            for (x, child, move) in children:
                new_alpha = self.alpha_beta_search(child, depth - 1,
                                                   other_player,
                                                   current_player, alpha, beta,
                                                   False)
                if new_alpha is None:
                    return (None, None)
                else:
                    self.queue.put((-new_alpha, child, move))
                if new_alpha > alpha:
                    alpha = new_alpha
                    best_move = move
                #print "Possible move:", move, "Score:", new_alpha
        else:
            children = node.get_afterstates(current_player)
            # Shuffle order of moves evaluated to prevent playing the same game every time
            random.shuffle(children)
            for (child, move) in children:
                new_alpha = self.alpha_beta_search(child, depth - 1,
                                                   other_player,
                                                   current_player, alpha, beta,
                                                   False)
                if new_alpha is None:
                    return (None, None)
                else:
                    self.queue.put((-new_alpha, child, move))
                if new_alpha > alpha:
                    alpha = new_alpha
                    best_move = move
                #print "Possible move:", move, "Score:", new_alpha
        return (best_move, alpha)

    def keyify(self, node, player):
        from hashlib import sha1
        return sha1(node.board.data).hexdigest()

    def alpha_beta_search(self,
                          node,
                          depth,
                          current_player,
                          other_player,
                          alpha=-sys.maxsize - 1,
                          beta=sys.maxsize,
                          maximizing=True):
        if depth == 0 or node.game_won() is not None:
            return self.heuristic(node, self.player, self.other_player)

        children = node.get_afterstates(current_player)
        if maximizing:
            if len(children) == 0:
                new_alpha = self.alpha_beta_search(node, depth - 1,
                                                   other_player,
                                                   current_player, alpha, beta,
                                                   False)
                if new_alpha is None:
                    return None
                alpha = max(alpha, new_alpha)
            else:
                for (child, move) in children:
                    new_alpha = self.alpha_beta_search(child, depth - 1,
                                                       other_player,
                                                       current_player, alpha,
                                                       beta, False)
                    if new_alpha is None:
                        return None
                    alpha = max(alpha, new_alpha)
                    if alpha >= beta:
                        break
            return alpha
        else:
            if len(children) == 0:
                new_beta = self.alpha_beta_search(node, depth - 1,
                                                  other_player, current_player,
                                                  alpha, beta)
                if new_beta is None:
                    return None
                beta = min(beta, new_beta)
            else:
                for (child, move) in children:
                    new_beta = self.alpha_beta_search(child, depth - 1,
                                                      other_player,
                                                      current_player, alpha,
                                                      beta)
                    if new_beta is None:
                        return None
                    beta = min(beta, new_beta)
                    if beta <= alpha:
                        break
            return beta
Example #36
def search(state):

    print("searching for goal...")
    pq = PriorityQueue()
    searchX = spaceX
    searchY = spaceY
    start = []
    start = state
    pq.put((manhattanDistance(start), start))
    visited = []
    size = pq.qsize()
    count = 0
    path = {str(start): (str(start), 'h')}
    while size > 0:
        u = pq.get()[1]
        #print("checking state ", u)
        visited.append(u)
        if checkGoal(u) == 1:
            goalFound = 1
            print("goalFound")
            print("GOAL IS ", u)
            break
        for y in range(0, 3):
            for x in range(0, 3):
                if u[y][x] == -1:
                    searchX = x
                    searchY = y

    #up direction
        if searchY != 0:
            up = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
            for y in range(0, 3):
                for x in range(0, 3):
                    up[y][x] = u[y][x]

            up[searchY][searchX] = up[searchY - 1][searchX]
            up[searchY - 1][searchX] = -1
            if up not in visited:
                pq.put((manhattanDistance(up), up))

                path[str(up)] = (str(u), 'w')

        #left direction
        if searchX != 0:
            left = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
            for y in range(0, 3):
                for x in range(0, 3):
                    left[y][x] = u[y][x]
            left[searchY][searchX] = left[searchY][searchX - 1]
            left[searchY][searchX - 1] = -1
            if left not in visited:
                pq.put((manhattanDistance(left), left))
                path[str(left)] = (str(u), 'a')
        #right direction
        if searchX != tileMax:
            right = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
            for y in range(0, 3):
                for x in range(0, 3):
                    right[y][x] = u[y][x]
            right[searchY][searchX] = right[searchY][searchX + 1]
            right[searchY][searchX + 1] = -1
            if right not in visited:
                pq.put((manhattanDistance(right), right))
                id = count + 3
                path[str(right)] = (str(u), 'd')

        #down direction
        if searchY != tileMax:
            down = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
            for y in range(0, 3):
                for x in range(0, 3):
                    down[y][x] = u[y][x]
            down[searchY][searchX] = down[searchY + 1][searchX]
            down[searchY + 1][searchX] = -1
            if down not in visited:
                pq.put((manhattanDistance(down), down))
                path[str(down)] = (str(u), 's')

        size = pq.qsize()

    #trace back path

    curr = str(goalArray)
    movePath = []
    while path[curr][1] != 'h':

        movePath.append(path[curr][1])
        curr = path[curr][0]

    return movePath
Example #37
    def leastInterval(self, tasks: List[str], n: int) -> int:

        tasks_map = {}
        tasks_cooldown = {}
        tasks_queue = PriorityQueue()

        for task in tasks:
            if task not in tasks_map.keys():
                tasks_map[task] = 1
                tasks_cooldown[task] = 0
            else:
                tasks_map[task] += 1

        for task, value in tasks_map.items():
            tasks_queue.put((-value, task))

        result = 0
        while tasks_queue.qsize() > 0:
            pop_list = []
            now_most = tasks_queue.get()
            pop_list.append(now_most[1])
            while tasks_queue.qsize() > 0 and tasks_cooldown[now_most[1]] > 0:
                now_most = tasks_queue.get()
                pop_list.append(now_most[1])

            if tasks_cooldown[now_most[1]] > 0:

                for task_cooldown, time in tasks_cooldown.items():
                    if time > 0:
                        tasks_cooldown[task_cooldown] = time - 1

                for one_task in pop_list:
                    if tasks_map[one_task] > 0:
                        tasks_queue.put((-tasks_map[one_task], one_task))

                result += 1
                continue
            else:

                tasks_map[now_most[1]] -= 1

                for task_cooldown, time in tasks_cooldown.items():
                    if task_cooldown == now_most[1]:
                        tasks_cooldown[now_most[1]] += n
                    elif time > 0:
                        tasks_cooldown[task_cooldown] = time - 1

                for one_task in pop_list:
                    if tasks_map[one_task] > 0:
                        tasks_queue.put((-tasks_map[one_task], one_task))

                result += 1

        return result
Example #38
#!/usr/bin/env python3
from queue import PriorityQueue

__version__ = "0.0.1"

import sys

if sys.version_info[0] < 3:
    raise Exception("I need Python 3 for living")

import optparse
import threading
from restserver import RestServer
from worker import workerThread

runQueue = PriorityQueue()


class FillerApplication():
    """docstring for ClassName"""
    def __init__(self):
        self._parse_args()

    def _parse_args(self):
        usage = "usage: %prog [options]"
        version = "Disk Filler version: %s" % __version__

        parser = optparse.OptionParser(usage=usage, version=version)
        parser.add_option("-v",
                          "--verbose",
                          action="store_true",
Example #39
from queue import PriorityQueue

n, m = map(int, input().split())

link = [[] for _ in range(n + 1)]
indegree = [0 for _ in range(n + 1)]
pqueue = PriorityQueue()
answer = []

for i in range(m):
    a, b = map(int, input().split())
    link[a].append(b)
    indegree[b] += 1

for i in range(1, n + 1):
    if indegree[i] == 0:
        pqueue.put((i, i))

while pqueue.qsize() != 0:

    value, problemNo = pqueue.get()

    answer += [problemNo]

    for i in link[problemNo]:
        indegree[i] -= 1

        if indegree[i] == 0:
            pqueue.put((i, i))

for i in answer:
Example #40
def replan_robot(true_state,
                 robot,
                 time,
                 global_plan=None,
                 G=None,
                 look_ahead=4):

    #In case of AI planning robot is allowed to step where humans are initially located
    if G != None:
        preds = []
        for pred in true_state.predicates:
            if (type(pred) == AgentAt) and (type(pred.agent) == human):
                preds.append(Not(pred))
        nal, nol = true_state.update_locations(preds)
        new_predicates = true_state.update_predicates(preds)
        state = State(true_state.map, true_state.g, true_state.t,
                      new_predicates, nal, nol)
    else:
        state = true_state

    frontier = PriorityQueue()
    e_state = explored_state(state.predicates, [robot], state.action, state.t)
    #explored = {e_state}
    visited = {e_state: state.g}

    #if global_plan!=None:
    #    look_ahead = 1 #corresponding to 0 timesteps
    full_path = robot.plan_to_path(global_plan)
    goal_index = [full_path[1:].index(coors) + 1 for coors in robot.goal]
    print(goal_index)
    for g in goal_index:
        if ((g - time) < 6) and ((g - time) > 0):
            look_ahead = g - time
    path = full_path[time + look_ahead:]
    heuristic = manhattan_to_path_robot(robot, path[:25])
    children = get_children(state, robot, G, time)

    for child in children:
        child_explored = explored_state(child.predicates, [robot],
                                        child.action, child.t)
        visited[child_explored] = child.g
        node = (heuristic(child) + child.g, child)
        frontier.put(node)

    while frontier.qsize() > 0:
        _, leaf = frontier.get()
        #print('time: ' + str(leaf.g))
        #print('agnet locations: ' + str([(agent,leaf.agent_locations[agent]) for agent in agents_in_conflict]))
        #print('paths ' + str([(agent, paths[i]) for i, agent in enumerate(agents_in_conflict)]))
        #leaf_explored = explored_state(leaf.predicates, [robot], leaf.action, leaf.t)
        #if leaf_explored in explored:
        #    continue
        #else: explored.add(leaf_explored)
        actions = backtrack(leaf, time)
        if set(leaf.agent_locations[robot]) in path:
            #counting the number of steps in the path that can be discarded
            back_at_path = path.index(set(
                leaf.agent_locations[robot])) + look_ahead
            old_path = full_path[:time + 1] + full_path[time + back_at_path:]
            actions = backtrack(leaf, time)
            destinations = [action[0].destination for action in actions]
            #print(leaf.agent_locations[robot])
            #print(leaf.t)
            #print(actions)
            #print(destinations)
            #print(old_path)
            if (len(actions) > 2) and all(
                [goal in old_path + destinations for goal in robot.goal]):
                #print('agents: ' + str(agents_in_conflict))
                ##print('back at path: ' + str(back_at_path))
                #print('cost of final plan ' + str(leaf.g))
                return robot, actions, back_at_path

        children = get_children(leaf, robot, G, time)
        #if all([coor in leaf.agent_locations[robot] for coor in [(2, 1), (2, 2), (1, 1), (1, 2)]]):
        #    print('robot on goal')
        #    print([(s.action, s.g) for s in children])
        for child in children:
            child_explored = explored_state(child.predicates, [robot],
                                            child.action, child.t)
            if (child.t <
                (time + 25)) and ((child_explored not in visited) or
                                  (child.g < visited[child_explored])):
                visited[child_explored] = child.g
                node = (heuristic(child) + child.g, child)
                frontier.put(node)

    return robot, None, 0
示例#41
0
    while not fringe.empty():
        
        successor_fcn(curr, fringe, visited)
        currkey = fringe.get()[1]
        curr = nodeFringe[currkey]
        if(goal_test(curr.data, goal)):
            return curr
        
        
    return curr
        
        

graph = [['S', 'a', 'b'],['c','d','e'],['f','h','G']]
dict_cost = {'S':0,'a':1,'b':1,'c':2,'d':2,'e':3,'f':3,'h':3,'G':1}
start = 'S'
goal = 'G'
fringe = PriorityQueue()
nodeFringe = {}
visited = []
result = []
root = Node(start, dict_cost.get(start))



goalNode = UCS(root)
extract_plan(root, goalNode)
print(result)
print("visited: ", visited)

import cv2
import numpy as np
import time
import math
import random 
from sklearn.neighbors import KDTree
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
from scipy import spatial
from queue import PriorityQueue 
q = PriorityQueue() 
plt.ion()

length = 10
breadth = 10
height = 10
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlim3d(0, 10)
ax.set_ylim3d(0,10)
ax.set_zlim3d(0,10)

def boundary_check(i, j, k):
    if (i < 0) or (j < 0) or (k < 0) or (i >= length) or (j >= breadth) or (k >= height):
        return True
    else:
        return False

def generate_seed():
    x = round(random.uniform(0 , length)*2)/2
    y = round(random.uniform(0 , breadth)*2)/2
示例#43
0
def addedges(graph: [[int]], v: int, visited: [bool], pq: PriorityQueue) -> None:
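    # Mark v as visited and push every edge (weight, (v, u)) that leads to a
    # not-yet-visited neighbour onto the priority queue -- the edge-expansion
    # step of a Prim's-style minimum-spanning-tree search (inferred from context).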
    visited[v] = True
    for t in graph[v]:
        u, w = t[0], t[1]
        if not visited[u]:
            pq.put((w, (v, u)))
示例#44
0
class Memmory:
    """
    1. append(exprience):
    2. get_sample
    """
    def __init__(self, index, n_action, max_seq_len):
        self.index = index
        self.n_action = n_action

        self.log = logging.getLogger('StarCraftII')

        # self.trajectories = deque()
        self.trajectories = PriorityQueue()
        self.max_n_trajextories = 100
        self.max_seq_len = max_seq_len
        self.max_score = 0
        self.episode_index = 0

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }

        self.preprocess = {
            'joint_action': {
                'action': self.get_action,
                'action_onehot': self.action_onehot
            }
        }

    def action_onehot(self, joint_action):
        onehot = np.zeros(self.n_action)
        onehot[joint_action[self.index]] = 1
        return onehot

    def get_action(self, joint_action):
        return joint_action[self.index]

    def append(self, exprience):
        for key in exprience:
            if key in self.current_trajectory:
                self.current_trajectory[key].append(exprience[key])
            if key in self.preprocess:
                for new_key in self.preprocess[key]:
                    preprocesser = self.preprocess[key][new_key]
                    self.current_trajectory[new_key].append(
                        preprocesser(exprience[key]))

    def end_trajectory(self, exprience):
        self.append(exprience)
        self.current_trajectory['score'] = exprience['eps_reward']
        # print(self.index)
        # for key in self.current_trajectory:
        #     print(key,": len:", len(self.current_trajectory[key]))

        self.trajectories.put((exprience['eps_reward'], self.episode_index,
                               copy.deepcopy(self.current_trajectory)))
        self.episode_index += 1
        self.current_trajectory.clear()

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }
        self.max_score = max(self.max_score, exprience['eps_reward'])

        if self.trajectories.queue.__len__() > self.max_n_trajextories:
            self.trajectories.get()

    def get_sample(self, batch_size=16):
        """
        For now every seq_len is the max, but they may differ later; shorter trajectories would need padding.
        :param batch_size:
        :return:
        """
        obs_batch = []
        avail_batch = []
        act_batch = []
        rew_batch = []
        action_onehot_batch = []
        mask_batch = []
        max_trajectory_len = 0

        samlpe_new_memory = batch_size // 2
        new_memory = self.trajectories.queue.__len__() // 4

        for i in range(samlpe_new_memory):
            e = -rd.randint(1, new_memory)
            _, _, trajectory = self.trajectories.queue[e]
            obs_batch.append(trajectory['observation'])
            act_batch.append(trajectory['action'])
            rew_batch.append(trajectory['reward'])
            avail_batch.append(trajectory['available_action'])
            action_onehot_batch.append(trajectory['action_onehot'])
            trajectory_len = len(trajectory['observation'])
            done_mask = [0] * (trajectory_len - 1)
            done_mask[-1] = 1
            mask_batch.append(done_mask)
            max_trajectory_len = max(max_trajectory_len, trajectory_len)

        for i in range(batch_size - samlpe_new_memory):
            e = rd.randint(0, self.trajectories.queue.__len__() - 1)
            _, _, trajectory = self.trajectories.queue[e]
            obs_batch.append(trajectory['observation'])
            act_batch.append(trajectory['action'])
            rew_batch.append(trajectory['reward'])
            avail_batch.append(trajectory['available_action'])
            trajectory_len = len(trajectory['observation'])
            action_onehot_batch.append(trajectory['action_onehot'])
            done_mask = [0] * (trajectory_len - 1)
            done_mask[-1] = 1
            mask_batch.append(done_mask)
            max_trajectory_len = max(max_trajectory_len, trajectory_len)

        batch = {
            'observation': th.FloatTensor(obs_batch),
            'available_action': th.FloatTensor(avail_batch),
            'action': th.LongTensor(act_batch),
            'reward': th.FloatTensor(rew_batch),
            'done': th.FloatTensor(mask_batch),
            'len': max_trajectory_len,
            'action_onehot': th.FloatTensor(action_onehot_batch),
            'batch_size': batch_size
        }

        return batch

    def show_memory(self):
        message = "agent {}".format(self.index)
        self.log.info(message)
        Signal.get_signal().emit_signal_str(message)
        for _, _, t in self.trajectories.queue:
            message = "len: {}; score: ".format(len(t['observation']),
                                                t['score'])
            self.log.info(message)
            Signal.get_signal().emit_signal_str(message)

    def get_current_trajectory(self):
        batch = {
            'observation':
            th.FloatTensor([self.current_trajectory['observation']]),
            'action_onehot':
            th.FloatTensor([self.current_trajectory['action_onehot']]),
            'len':
            len(self.current_trajectory['observation']),
            'batch_size':
            1
        }
        return batch
示例#45
0
class Leader_Memmory:
    """
    Not fully implemented.
    1. append(exprience): add one step to the current trajectory.
    2. get_sample(batch_size): sample a batch of stored trajectories.
    """
    def __init__(self, env_info):
        self.n_action = env_info["n_actions"]
        self.n_agent = env_info["n_agents"]
        self.obs_shape = env_info['obs_shape']
        self.max_seq_len = env_info['episode_limit']
        self.state_shape = env_info['state_shape']

        self.PQ = False
        if self.PQ:
            self.trajectories = PriorityQueue()
        else:
            self.trajectories = deque()
        self.max_n_trajextories = 500
        self.max_score = 0
        self.episode_index = 0

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }

        self.preprocess = {
            'joint_action': {
                'action_onehot': self.action_onehot
            }
        }

    def action_onehot(self, joint_action):
        one_hot = th.zeros((self.n_agent, self.n_action))
        # print(joint_action.unsqueeze(1))
        # scatter() accepts a tensor `src` or a scalar `value`; `source` is not a
        # valid keyword, so use value=1 to mark each agent's chosen action.
        one_hot = one_hot.scatter(dim=1,
                                  index=joint_action.unsqueeze(1),
                                  value=1)
        # print(one_hot)
        return one_hot

    def append(self, exprience):
        for key in exprience:
            if key in self.current_trajectory:
                self.current_trajectory[key].append(exprience[key])
            if key in self.preprocess:
                for new_key in self.preprocess[key]:
                    preprocesser = self.preprocess[key][new_key]
                    self.current_trajectory[new_key].append(
                        preprocesser(exprience[key]))

    def end_trajectory(self, exprience):
        self.append(exprience)
        self.current_trajectory['score'] = exprience['eps_reward']

        if self.PQ:
            self.trajectories.put((exprience['eps_reward'], self.episode_index,
                                   copy.deepcopy(self.current_trajectory)))
        else:
            self.trajectories.append(copy.deepcopy(self.current_trajectory))
        self.episode_index += 1
        self.current_trajectory.clear()

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }
        self.max_score = max(self.max_score, exprience['eps_reward'])
        if self.PQ:
            if self.trajectories.queue.__len__() > self.max_n_trajextories:
                self.trajectories.get()
        else:
            if self.trajectories.__len__() > self.max_n_trajextories:
                self.trajectories.popleft()

    def get_item(self, e):
        if self.PQ:
            _, _, trajectory = self.trajectories.queue[e]
        else:
            trajectory = self.trajectories[e]
        trajectory_len = len(trajectory['observation'])
        fill_len = self.max_seq_len + 1 - trajectory_len
        mask = th.zeros(self.max_seq_len)
        mask[:trajectory_len - 1] = 1
        mask = mask.expand(self.n_agent, -1)
        done = th.zeros(self.max_seq_len)
        done[trajectory_len - 2:] = 1
        done = done.expand(self.n_agent, -1)
        observation = th.FloatTensor(trajectory['observation'])
        observation = th.cat(
            (observation, th.zeros((fill_len, self.n_agent, self.obs_shape))))
        reward = th.FloatTensor(trajectory['reward'])
        reward = th.cat((reward, th.zeros(fill_len))).expand(self.n_agent, -1)
        action = th.stack(trajectory['joint_action'])
        action = th.cat(
            (action, th.zeros((fill_len, self.n_agent), dtype=th.long)))
        action_onehot = th.stack(trajectory['action_onehot'])
        action_onehot = th.cat(
            (action_onehot, th.zeros((fill_len, self.n_agent, self.n_action))))
        action_avail = th.FloatTensor(trajectory['available_action'])
        action_avail = th.cat(
            (action_avail, th.zeros((fill_len, self.n_agent, self.n_action))))
        return mask, done, observation, reward, action, action_onehot, action_avail

    def get_sample(self, batch_size=32):
        """
        For now every seq_len is the max, but they may differ later; shorter trajectories would need padding.
        :param batch_size:
        :return:
        """
        obs_batch = []
        avail_batch = []
        act_batch = []
        rew_batch = []
        action_onehot_batch = []
        mask_batch = []
        done_batch = []

        if self.PQ:
            trajectory_len = self.trajectories.queue.__len__()
        else:
            trajectory_len = self.trajectories.__len__()

        samlpe_new_memory = batch_size // 4
        new_memory = trajectory_len // 4

        for i in range(batch_size - samlpe_new_memory):
            e = -rd.randint(1, new_memory)
            mask, done, observation, reward, action, action_onehot, action_avail = self.get_item(
                e)
            mask_batch.append(mask)
            done_batch.append(done)
            obs_batch.append(observation)
            rew_batch.append(reward)
            act_batch.append(action)
            action_onehot_batch.append(action_onehot)
            avail_batch.append(action_avail)

        for i in range(samlpe_new_memory):
            e = rd.randint(0, trajectory_len - 1)
            mask, done, observation, reward, action, action_onehot, action_avail = self.get_item(
                e)
            mask_batch.append(mask)
            done_batch.append(done)
            obs_batch.append(observation)
            rew_batch.append(reward)
            act_batch.append(action)
            action_onehot_batch.append(action_onehot)
            avail_batch.append(action_avail)

        batch = {
            'observation': th.stack(obs_batch),
            'available_action': th.stack(avail_batch),
            'action': th.stack(act_batch),
            'action_onehot': th.stack(action_onehot_batch),
            'reward': th.stack(rew_batch),
            'done': th.stack(done_batch),
            'mask': th.stack(mask_batch),
            'len': self.max_seq_len + 1,
            'batch_size': batch_size
        }

        return batch

    def show_memory(self):
        message = "agent ".format(self.index)
        self.log.info(message)
        Signal.get_signal().emit_signal_str(message)

        if self.PQ:
            for _, _, t in self.trajectories.queue:
                message = "len: {}; score: {}".format(len(t['observation']),
                                                      t['score'])
                self.log.info(message)
                Signal.get_signal().emit_signal_str(message)

    def get_current_trajectory(self):
        if self.current_trajectory['action_onehot'] == []:
            current_action_onehot = []
        else:
            current_action_onehot = th.stack(
                self.current_trajectory['action_onehot']).unsqueeze(0)
        batch = {
            'observation':
            th.FloatTensor([self.current_trajectory['observation']]),
            'action_onehot': current_action_onehot,
            'len': len(self.current_trajectory['observation']),
            'batch_size': 1
        }
        return batch
示例#46
0
def get_six_similar(arr, x, k, n):
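    # Keep the k patches whose average_grayscale_value is closest to x among the
    # first n entries of arr, using a max-heap of negated differences (the same
    # pattern as _printKclosest later in this collection).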
    print("start")
    six_similar_patches = []

    pq = PriorityQueue()
    for i in range(k):
        pq.put((-abs(arr[i].average_grayscale_value - x), i))
    for i in range(k, n):
        diff = abs(arr[i].average_grayscale_value - x)
        p, pi = pq.get()
        curr = -p
        if diff > curr:
            pq.put((-curr, pi))
            continue
        else:
            pq.put((-diff, i))
    while (not pq.empty()):
        p, q = pq.get()
        six_similar_patches.append(arr[q])

    print("end")
    return six_similar_patches
示例#47
0
    def __init__(self, procs_map) -> None:
        self.events = PriorityQueue()
        self.procs = dict(procs_map)
示例#48
0
def _printKclosest(arr,n,x,k): 
    """
    Print K closest values to a specified value. 

    Parameters
    ----------
    arr : list
        The distribution of values.
    n : int
        Search through the first n values of arr for k closest values.
    x : float
        The reference value for which the closest values are sought.
    k : int
        Number of closest values desired.

    Returns
    -------
    a : list
        The closest k values to x.

    """
    a=[]
    # Make a max heap of difference with  
    # first k elements.  
    pq = PriorityQueue() 
    for neighb in range(k): 
        pq.put((-abs(arr[neighb]-x),neighb)) 
    # Now process remaining elements 
    for neighb in range(k,n): 
        diff = abs(arr[neighb]-x) 
        p,pi = pq.get() 
        curr = -p 
        # If difference with current  
        # element is more than root,  
        # then put it back.  
        if diff>curr: 
            pq.put((-curr,pi)) 
            continue
        else: 
            # Else remove root and insert 
            pq.put((-diff,neighb))           
    # Print contents of heap. 
    while(not pq.empty()): 
        p,q = pq.get() 
        a.append(str("{} ".format(arr[q])))
    return a
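
A minimal usage sketch for the helper above (hypothetical values; the returned strings keep the trailing space the function appends):

# Find the 3 values closest to 4.2 among the first 8 entries of a sample list.
sample = [1.0, 3.9, 4.5, 7.2, 4.1, 0.3, 5.0, 4.3, 9.9, 2.2]
print(_printKclosest(sample, 8, 4.2, 3))
# -> ['4.5 ', ...]: the three values closest to 4.2 (4.5, 4.1, 4.3), popped
#    largest-difference first.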
示例#49
0
import sys
from math import inf
from queue import PriorityQueue

n = int(sys.stdin.readline())
m = int(sys.stdin.readline())
connect = {node: [] for node in range(1, n + 1)}

for _ in range(m):
    src_node, tgt_node, weight = map(int, sys.stdin.readline().split())
    connect[src_node].append((tgt_node, weight))

src, dst = map(int, sys.stdin.readline().split())

dist = {node: inf for node in range(1, n + 1)}
dist[src] = 0

pq = PriorityQueue()
pq.put([dist[src], src])

while not pq.empty():
    cur_dist, cur_node = pq.get()
    if cur_dist <= dist[cur_node]:
        for node, weight in connect[cur_node]:
            if cur_dist + weight < dist[node]:
                dist[node] = cur_dist + weight
                pq.put([dist[node], node])

sys.stdout.write(str(dist[dst]) + "\n")
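
A small worked example for the script above (hypothetical input; the answer is easy to check by hand):

# stdin:
#   3          <- n: number of nodes
#   3          <- m: number of edges
#   1 2 4
#   1 3 10
#   2 3 3
#   1 3        <- source and destination
# stdout:
#   7          (the cheapest route 1 -> 2 -> 3 costs 4 + 3)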
示例#50
0
def search_with_Astar(start_row, start_col, goal_row, goal_col):
    global maze, m, n

    time = 0
    count = 0
    q = PriorityQueue()

    root = State(start_row, start_col, None)
    q.put((0, count, root))
    count += 1

    while True:
        t = q.get()
        s = t[2]
        row, col = s.get_position()
        time += 1

        # is goal?
        if row == goal_row and col == goal_col:
            break

        # left
        if col > 0:
            if maze[row][col - 1] != "1":
                if not s.is_parent(row, col - 1):
                    s.insert_left(row, col - 1)
                    h = abs(goal_row - row) + abs(goal_col - (col - 1))
                    g = s.get_cost() + 1
                    q.put((h + g, count, s.get_left()))
                    count += 1

        # down
        if row < m - 1:
            if maze[row + 1][col] != "1":
                if not s.is_parent(row + 1, col):
                    s.insert_down(row + 1, col)
                    h = abs(goal_row - (row + 1)) + abs(goal_col - col)
                    g = s.get_cost() + 1
                    q.put((h + g, count, s.get_down()))
                    count += 1

        # right
        if col < n - 1:
            if maze[row][col + 1] != "1":
                if not s.is_parent(row, col + 1):
                    s.insert_right(row, col + 1)
                    h = abs(goal_row - row) + abs(goal_col - (col + 1))
                    g = s.get_cost() + 1
                    q.put((h + g, count, s.get_right()))
                    count += 1

        # up
        if row > 0:
            if maze[row - 1][col] != "1":
                if not s.is_parent(row - 1, col):
                    s.insert_up(row - 1, col)
                    h = abs(goal_row - (row - 1)) + abs(goal_col - col)
                    g = s.get_cost() + 1
                    q.put((h + g, count, s.get_up()))
                    count += 1

    length = 0
    while s.get_parent() is not None:
        row, col = s.get_position()
        if maze[row][col] != '4':
            maze[row][col] = '5'
        length += 1
        s = s.get_parent()

    return time, length
示例#51
0
import sys
from queue import PriorityQueue
result = PriorityQueue()
N, M = map(int, input().split())
find = set()
for i in range(N):
    find.add(sys.stdin.readline().rstrip())
cnt = 0
for i in range(M):
    temp = sys.stdin.readline().rstrip()
    if (temp in find):
        result.put(temp)
        find.remove(temp)
        cnt += 1
print(cnt)
for i in range(cnt):
    print(result.get())
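
A worked input/output pair for the snippet above (hypothetical names):

# stdin:
#   3 4
#   ohhenrie
#   charlie
#   baesang
#   obama
#   baesang
#   ohhenrie
#   clinton
# stdout:
#   2
#   baesang
#   ohhenrie
# (the count first, then the matching names in lexicographic order thanks to the PriorityQueue)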
示例#52
0
import tarfile
import threading
import time
import traceback
from queue import Queue, PriorityQueue
import yadisk
from vk_parsing import list_parser as parser
from vk_parsing.tokens import Session, groups

y = yadisk.YaDisk(token="PUT YOUR TOKEN HERE")

s_tkns_n = 5  # Number of service tokens in each group
search_time = 60  # Time spent on one user (the default used to be 20)

row_q = Queue()
users_and_logs_q = PriorityQueue()

last_num, df = parser.read_sample()

for i, row in df.iterrows():  # Put the users ids to the queue
    row_q.put(row)

del df  # Clear the memory

service_groups, users_groups = groups(s_tkns_n)

thread_list = []
for idx in range(len(service_groups)):
    thread = threading.Thread(target=parser.collect_users_data,
                              args=(Session(service_groups[idx],
                                            users_groups[idx]), row_q,
示例#53
0
文件: bayesian.py 项目: zwcdp/nni
    def generate(self, descriptors):
        """Generate new architecture.
        Args:
            descriptors: All the searched neural architectures.
        Returns:
            graph: An instance of Graph. A morphed neural network with weights.
            father_id: The father node ID in the search tree.
        """
        model_ids = self.search_tree.adj_list.keys()

        target_graph = None
        father_id = None
        descriptors = deepcopy(descriptors)
        elem_class = Elem
        if self.optimizemode is OptimizeMode.Maximize:
            elem_class = ReverseElem

        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))

        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        while not pq.empty() and t > t_min:
            elem = pq.get()
            if self.optimizemode is OptimizeMode.Maximize:
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue

                    temp_acq_value = self.acq(temp_graph)
                    pq.put(
                        elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha

        # Did not find a non-duplicated architecture
        if father_id is None:
            return None, None
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
示例#54
0
def bfsHash(start, zeroPos, des, step, change_position,cost_swap):
    # A hand-rolled hash table was used at first, but it resolved collisions with an O(n)
    # shift, which slowed the algorithm down badly once many operations were needed, so we
    # went back to Python's built-in dict.
    que = PriorityQueue()
    que2 = PriorityQueue()
    first = node(start, 0, zeroPos, des, [], [], 0)
    que.put(first)
    mymap = {}
    s = ""
    for i in start:
        s += str(i)
    mymap[s] = 1
    m = -1

    # Start the search
    while not que.empty():
        tempN = que.get()
        # print(list_to_string(tempN.operation))
        temp = tempN.num.copy()
        pos = tempN.zeroPos
        if check_list(des, temp):  # stop if this is already the goal configuration
            return tempN
        if len(tempN.operation) == step and tempN.flag == 0:  # forced-swap condition met, perform the swap
            temp = tempN.num.copy()
            if change_position[0] - 1 == pos:
                pos = change_position[1] - 1
            elif change_position[1] - 1 == pos:
                pos = change_position[0] - 1
            temp[change_position[0] - 1], temp[change_position[1] - 1] = temp[change_position[1] - 1], temp[
                change_position[0] - 1]
            if not check(temp, des):
                swap_listl,swap_listr = getRightChange(temp, des, tempN.step,cost_swap)
                #print(swap_listl)
                #print(swap_listr)
                len1 = len(swap_listl)
                #print(len1)
                for w in range(0,len1):
                    print(w)
                    pos1 = swap_listl[w] - 1
                    pos2 = swap_listr[w] - 1
                    if pos1 == pos:
                        pos = pos2
                    elif pos2 == pos:
                        pos = pos1
                    temp[pos1], temp[pos2] = temp[pos2], temp[pos1]
                    swap = []
                    swap.append(pos1 + 1)
                    swap.append(pos2 + 1)
                    s = ""
                    for j in temp:
                        s += str(j)
                    mymap[s] = 1
                    operation = tempN.operation.copy()
                    temp_step = tempN.step
                    tempN = node(temp, temp_step, pos, des, operation, swap, 1)
                    if check_list(des, temp):  # if the swap happens to produce the goal, return right away
                        operation.append(' ')  # append a placeholder character, as the test team requested, so the judge does not miss the swap step
                        tempN = node(temp, temp_step, pos, des, operation, swap, 1)
                        return tempN
                    else:
                        que2.put(tempN)  # collect every post-swap node in the que2 queue
                        continue
                print('\n')
            else:
                swap = []
                s = ""
                for i in temp:
                    s += str(i)
                mymap[s] = 1
                operation = tempN.operation.copy()
                temp_step = tempN.step
                tempN = node(temp, temp_step, pos, des, operation, swap, 1)
                if check_list(des, temp):  # if the swap happens to produce the goal, return right away
                    operation.append(' ')  # append a placeholder character, as the test team requested, so the judge does not miss the swap step
                    tempN = node(temp, temp_step, pos, des, operation, swap, 1)
                    return tempN
                else:
                    que2.put(tempN)  # collect every post-swap node in the que2 queue
                    continue

        # cnt handles the unsolvable case: if all four directions are blocked (cnt == 4), the position has no solution.
        # If that happens before the forced-swap step count, we pad with back-and-forth moves so the search still reaches that step count.
        cnt = 0
        for i in range(4):
            if changeId[pos][i] != -1:
                pos = tempN.zeroPos
                temp = tempN.num.copy()
                temp[pos], temp[changeId[pos][i]] = temp[changeId[pos][i]], temp[pos]
                s = ""
                for j in temp:
                    s += str(j)
                if s not in mymap:
                    mymap[s] = 1
                    operation = tempN.operation.copy()
                    operation.append(dir[i])
                    temp_step = tempN.step + 1
                    temp_num = temp
                    tempM = node(temp_num, temp_step, changeId[pos][i], des, operation, tempN.swap, tempN.flag)
                    que.put(tempM)
                else:
                    cnt += 1
            else:
                cnt += 1

        if cnt == 4 and tempN.step < step:  # pad with back-and-forth moves
            # When the puzzle turns out unsolvable before the forced swap, simply let the blank tile swing back and forth until it reaches the required step count.
            temp = tempN.num.copy()
            operation = tempN.operation.copy()
            m = operation[len(operation) - 1]
            delta = step - len(operation)
            pos = tempN.zeroPos
            temp, operation, pos = getOrder(temp, operation, delta, m, pos)  # append the back-and-forth move sequence
            tempM = node(temp, step, pos, des, operation, tempN.swap, tempN.flag)
            que.put(tempM)
    if not que2.empty():
        #print(1)
        return bfsAfterSwap(que2,des,mymap,cost_swap)
示例#55
0
class VanifiedResult:
    node_results: List[WordNode] = attr.ib(factory=list)
    words_queue: PriorityQueue = attr.ib(init=False)
    words_tree: Optional[pygtrie.Trie] = attr.ib(repr=None, default=None)
    _words: List[str] = attr.ib(init=False, factory=list, repr=False)

    max_results: int = 5

    def __attrs_post_init__(self):
        self.words_queue = PriorityQueue(maxsize=0)
        if not self.words_tree:
            root = Path(__file__).parent
            word_list = (root / "words.txt").read_text().splitlines()
            self._words = [w.rstrip().upper() for w in word_list if 9 >= len(w.strip()) > 2]
            self.words_tree = pygtrie.Trie()
            for w in self._words:
                self.words_tree[w] = True

    @property
    def word_results(self) -> List[str]:
        return ["".join(n.as_phonenumber) for n in self.node_results]

    def ensure_put(self, value: WordNode):
        logger.debug("pushing into pqueue: %s", value)
        if self.words_queue.full():
            r = self.words_queue.get_nowait()
            logger.debug("popped item: %s", r)
        self.words_queue.put_nowait(value)

    @staticmethod
    def find_char_prefix(word, index) -> str:
        char_prefix = ""
        while index >= 0 and word[index].isalpha():
            char_prefix = word[index] + char_prefix
            index -= 1
        return char_prefix

    @staticmethod
    def find_word_substrings_with_chars(value: str):
        """Find all word substrings and chars from string.

        Args:
            value: string to extract from.

        Examples:
            >>> VanifiedResult.find_word_substrings_with_chars('1800123APPLE')
            ['A', 'AP', 'APP', 'APPL', 'APPLE']

        Returns:
            List of word substrings and chars.

        """

        all_substrings = []
        substring = ""
        len_word = len(value)
        for index, char in enumerate(value):
            if char.isalpha():
                substring += char
                if index == len_word - 1 or not value[index + 1].isdigit():
                    all_substrings.append(substring)
            else:
                substring = ""
        return all_substrings

    def validate(self, value: str) -> ValidationState:
        """Validate a word node wordified number."""
        substrings = self.find_word_substrings_with_chars(value)

        is_valid = len(substrings) > 0
        max_substring_length = 0
        max_cont_chars = 0

        if is_valid:
            for substring in substrings:
                max_cont_chars = max(len(substring), max_cont_chars)
                sub_substrings = self.find_word_substrings(substring)
                for sub_substring in sub_substrings:
                    max_substring_length = max(len(sub_substring), max_substring_length)
        return ValidationState(
            valid=is_valid, max_cont=max_cont_chars, max_substring_length=max_substring_length
        )

    def is_valid_word_or_prefix(self, value: str) -> bool:
        """Validate if `value` is a valid word or prefix.

        Args:
            value: input value.

        Examples:
            >>> results = VanifiedResult()
            >>> results.is_valid_word_or_prefix('CALLNOW')
            True  # ("CALL" + "NOW" prefix)
            >>> results.is_valid_word_or_prefix('COZL')
            False  # (Not a prefix of anything)
            >>> results.is_valid_word_or_prefix('SUNDAY')
            True  # ("SUNDAY" is a valid word)

        Returns:
            True if valid, False otherwise

        """
        if self.words_tree.has_key(value) or self.words_tree.has_subtrie(value):
            return True
        for idx, _ in enumerate(value):
            if self.words_tree.has_key(value[: idx + 1]) and self.words_tree.has_subtrie(
                value[idx + 1 :]
            ):
                return True
        return False

    def find_word_substrings(self, value: str) -> List[str]:
        """Finds valid sub-words preset in `value`.

        Examples:
            >>> VanifiedResult.find_word_substrings('CALLNOW')
            ['CALL', 'NOW']


        """
        if self.words_tree.has_key(value):
            return [value]
        for idx, _ in enumerate(value):
            right = value[: idx + 1]
            left = value[idx + 1 :]
            if self.words_tree.has_key(left) and self.words_tree.has_key(right):
                return [right, left]
        return []

    def is_valid_word(self, value: str) -> bool:
        return any(self.find_word_substrings(value))

    @classmethod
    def from_phone_number(cls, number: str, *args):
        """Create vanified result from phone number."""
        number_obj = phonenumbers.parse(number, "US")
        parsed_number = phonenumbers.format_number(number_obj, phonenumbers.PhoneNumberFormat.E164)
        return cls.from_numbers(parsed_number.lstrip("+"), *args)

    @classmethod
    def from_numbers(cls, number: str, max_results: int = 5):
        """Convert input numbers to tele-words.

        Args:
            number: input numbers.
            max_results: max results to return.

        Returns:
            VanifiedResult item.

        """
        results = cls(max_results=max_results)

        num_digits = len(number)
        queue: Deque[WordNode] = deque([])

        queue.append(WordNode(number))

        while queue:
            cur_node = queue.popleft()
            cur_wordified = cur_node.current_wordified
            cur_idx = cur_node.current_index

            if cur_idx == num_digits:
                valid_state = results.validate(cur_wordified)

                if not valid_state.valid:
                    continue

                cur_node.update_from_state(valid_state)
                results.ensure_put(cur_node)
                continue

            cur_digit = number[cur_idx]
            cur_n_chars_in_word = cur_node.n_chars

            char_prefix = results.find_char_prefix(cur_wordified, cur_idx - 1)
            len_char_prefix = len(char_prefix)

            for char in PHONE_ALPHA_MAP[cur_digit] + [cur_digit]:
                is_dig_and_prefix_invalid = char.isdigit() and (
                    not len_char_prefix or results.is_valid_word(char_prefix)
                )
                is_alpha_and_valid_word_or_prefix = char.isalpha() and (
                    cur_idx != num_digits - 1
                    and results.is_valid_word_or_prefix(char_prefix + char)
                )
                is_alpha_and_valid_word = char.isalpha() and (
                    cur_idx == num_digits - 1 and results.is_valid_word(char_prefix + char)
                )
                if (
                    is_dig_and_prefix_invalid
                    or is_alpha_and_valid_word_or_prefix
                    or is_alpha_and_valid_word
                ):
                    next_word_num = cur_wordified[:cur_idx] + char + cur_wordified[cur_idx + 1 :]
                    logger.debug("Next word: %s", next_word_num)
                    next_nchars = cur_n_chars_in_word + (1 if char.isalpha() else 0)
                    v_state = results.validate(next_word_num)
                    queue.append(
                        WordNode(
                            next_word_num,
                            current_index=cur_idx + 1,
                            n_chars=next_nchars,
                            max_cont_chars=v_state.max_cont,
                            max_substring_length=v_state.max_substring_length,
                        )
                    )

        # return the word nodes that have the most contiguous letters, best score first
        if results.words_queue.qsize() > 0:
            node_results = reversed(sorted(results.words_queue.queue, key=lambda n: n.score))
            results.node_results = list(node_results)[: results.max_results]
            return results

        return results
示例#56
0
    def find_shortest_path_06(self, grid, start_node, end_node):
        """
        A* search, object wrap
        """
        if not grid:
            return []

        class SearchNode(Node):
            """Wraps the problem's node objects with extra info needed for the search."""

            def __init__(self, node):
                super().__init__(node.position, node.passable)
                self.node = node
                self.x, self.y = self.position
                self.f_score = self.g_score = 99999
                self.parent = None

            def distance_to(self, other):
                return abs(other.x - self.x) + abs(other.y - self.y)

            def get_path(self):
                path = [self.node]
                current = self
                while current.parent:
                    path.append(current.parent.node)
                    current = current.parent
                path.reverse()
                return path

            def neighbours(self):
                for x_offset, y_offset in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                    pos = (self.x + x_offset, self.y + y_offset)
                    if pos in pos_lookup:
                        search_node = pos_lookup[pos]
                        if search_node.passable and search_node not in closed_set:
                            yield search_node

        search_nodes = {node: SearchNode(node) for row in grid for node in row}

        start_node = search_nodes[start_node]
        end_node = search_nodes[end_node]
        start_node.g_score = 0
        start_node.f_score = start_node.distance_to(end_node)

        pos_lookup = {(search_node.x, search_node.y): search_node
                      for search_node in search_nodes.values()}

        closed_set = set()
        open_set = {start_node, }
        que = PriorityQueue()
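        # Entries are (f_score, node) tuples, so when two nodes share an f_score the
        # queue falls back to comparing the SearchNode objects themselves; this
        # assumes they are comparable (a (f_score, counter, node) triple would
        # sidestep that comparison).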
        que.put((start_node.f_score, start_node))

        # PriorityQueue has no __bool__, so "while que:" never becomes False;
        # test emptiness explicitly instead.
        while not que.empty():
            _, current = que.get()
            if current is end_node:
                return current.get_path()
            open_set.remove(current)
            closed_set.add(current)

            for neighbour in current.neighbours():
                g_score = current.g_score + 1  # All neighbours are distance 1 from current
                f_score = g_score + neighbour.distance_to(end_node)

                if neighbour not in open_set:
                    open_set.add(neighbour)
                    que.put((f_score, neighbour))
                elif g_score > neighbour.g_score:
                    continue

                neighbour.parent = current
                neighbour.g_score = g_score
                neighbour.f_score = f_score

        return []
示例#57
0
    def find_shortest_path_05(self, grid, start_node, end_node):
        """
        A* search, hashtab
        """
        if not grid:
            return []

        n_rows, n_cols = len(grid), len(grid[0])
        x_max, y_max = n_rows - 1, n_cols - 1
        closed_set = set()
        open_set = {start_node, }
        parents = dict()
        g_scores = dict()
        f_scores = dict()

        def distance(node1, node2):
            return abs(node1.position.x - node2.position.x) + abs(node1.position.y - node2.position.y)
            # return sqrt((node1.position.x - node2.position.x) ** 2 + (node1.position.y - node2.position.y) ** 2)

        def get_path(current):
            path = [current, ]
            while current in parents:
                current = parents[current]
                path.append(current)
            path.reverse()  # We want start -> end path
            return path

        def neighbors(node):
            x, y = node.position.x, node.position.y
            if y > 0:
                neighbor = grid[x][y - 1]
                if neighbor.passable and neighbor not in closed_set:
                    yield neighbor
            if y < y_max:
                neighbor = grid[x][y + 1]
                if neighbor.passable and neighbor not in closed_set:
                    yield neighbor
            if x > 0:
                neighbor = grid[x - 1][y]
                if neighbor.passable and neighbor not in closed_set:
                    yield neighbor
            if x < x_max:
                neighbor = grid[x + 1][y]
                if neighbor.passable and neighbor not in closed_set:
                    yield neighbor

        # pre-set h_score (distance to end node) and g_score (distance to start node) for all nodes
        for row in grid:
            for node in row:
                g_scores[node] = f_scores[node] = 99999

        g_scores[start_node] = 0
        f_scores[start_node] = distance(start_node, end_node)

        que = PriorityQueue()
        que.put((f_scores[start_node], start_node))

        while open_set:
            _, current = que.get()
            if current is end_node:
                return get_path(end_node)
            open_set.remove(current)
            closed_set.add(current)

            for neighbour in neighbors(current):
                g_score = g_scores[current] + 1
                f_score = g_score + distance(neighbour, end_node)

                if neighbour not in open_set:
                    open_set.add(neighbour)
                    que.put((f_score, neighbour))
                elif g_score > g_scores[neighbour]:
                    continue

                parents[neighbour] = current
                g_scores[neighbour] = g_score
                f_scores[neighbour] = f_score

        return []
示例#58
0
import marble_solitire_env as environment
from collections import defaultdict
from queue import PriorityQueue
import heapq

inital_state = environment.initial_state
explored = defaultdict(tuple)
frontier = PriorityQueue()

root_node = environment.Node(state=inital_state)
frontier.put((root_node.cost,root_node))

current_node = environment.Node(state=inital_state)


def check_frontier(state, inital_state):        # used for updating a node with given state
    f = 0
    for i in frontier.queue:
        # print(i)
        if(i[1].state == state):
            if(i[1].cost > current_node.cost +1):
                i[1].cost = current_node.cost+1
                i[1].parent = current_node     
            
            return True

    return False
示例#59
0
    def plan(self, start:np.array, goal:np.array):

        # Step 0: Initialize the parameters
        start = tuple(start.tolist())
        goal = tuple(goal.tolist())
        closed = set()

        # Check if close:
        if self.euclidean_heuristic(start, goal) < 1:
            self.decretize(0.1)

        open_pq = PriorityQueue()
        open_pq.put((self.get_h(start), start))
        min_g_value = defaultdict(lambda: float('inf'))
        min_g_value[start] = 0
        f_values = dict()
        f_values[start] = self.get_h(start)

        # Step 1: Expand N (nodes) ahead
        # Check if the open_pq is empty
        lookahead = self.lookahead

        while not open_pq.empty() and lookahead:

            # Step 1: Take out the node with smallest f value
            _, current = open_pq.get()
            closed.add(current)

            # Step 2: Check if goal arrived
            if self.is_arrived(current, goal):
                break

            # Step 3: Expand the neighbours
            for i, dr_t in enumerate(self.dR_tuple):
                next_node = tuple(map(lambda x, y: round(x + y, 2), current, dr_t))

                # Check if this direction is valid
                if next_node in closed or self.is_out_of_boundary(next_node) or not self.is_segment_collision_free(current, next_node):
                    continue

                # for next_node in graph.neighbors(current):
                new_g_value = min_g_value[current] + self.delta_g[dr_t]#graph.cost(current, next_node)

                # Update: new node OR smaller cost
                if new_g_value < min_g_value[next_node]:
                    min_g_value[next_node] = new_g_value
                    f_value = new_g_value + self.get_h(next_node)
                    f_values[next_node] = f_value
                    open_pq.put((f_value, next_node))
                    self.parents[next_node] = current

            lookahead -= 1

        if open_pq.empty():
            print("Fail 1: Goal not found!")
            exit(1)

        _, next_to_expand = open_pq.get()

        # Step 2.2: Update heuristic in closed
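        # (This looks like the RTAA*-style rule h(s) = f(next_to_expand) - g(s) applied to
        #  every expanded state, which propagates cost information back into the heuristic;
        #  inferred from the code, not stated by the author.)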
        f_next_node = min_g_value[next_to_expand] + self.get_h(next_to_expand)
        for node in closed:
            self.h[node] = f_next_node - min_g_value[node]

        # Step 3: Move the agent by 1 step
        target = next_to_expand
        path = [target]
        while self.parents[target] != start:
            target = self.parents[target]
            path.append(target)
        return np.array(path[-1])
示例#60
0
    def topKgpa(self, list, k):
        n = len(list)
        if k >= n:
            return list

        from queue import PriorityQueue
        pq = PriorityQueue()
        for i in range(n):
            pq.put((-float(list[i][1]), (i, list[i])))

        pq2 = PriorityQueue()
        for _ in range(k):
            pq2.put(pq.get()[1])

        results = []
        while not pq2.empty():
            results.append(pq2.get()[1])

        return results
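
A minimal usage sketch (hypothetical records; assumes the enclosing class, here called Solution, needs no constructor arguments and that each entry is a (name, gpa-string) pair):

#   Solution().topKgpa([("amy", "3.9"), ("bob", "3.5"), ("cat", "3.7")], 2)
#   -> [("amy", "3.9"), ("cat", "3.7")]   (the two highest GPAs, kept in their original order)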