def bfs(start_tile, end_tile):
    """
    Breadth-first search from start_tile toward end_tile.

    :param start_tile: Tile object, start tile of board
    :param end_tile: Tile object, end tile of board
    :return: tuple ``(came_from, has_been_next_tile)`` where ``came_from``
        maps every reached tile to the tile it was discovered from
        (``start_tile`` maps to ``None``) and ``has_been_next_tile`` lists
        every tile ever seen as a neighbour, in discovery order.
    """
    queue = Queue()
    queue.put(start_tile)
    came_from = {start_tile: None}
    has_been_next_tile = []
    while not queue.empty():
        current_tile = queue.get()
        # Mark the tile visited exactly once, when it is dequeued.  The
        # original also called visit() inside the neighbour loop, which
        # re-visited the current tile once per unseen neighbour — a bug.
        current_tile.visit()
        if current_tile == end_tile:
            break
        for next_tile in current_tile.neighbours:
            if next_tile not in has_been_next_tile:
                has_been_next_tile.append(next_tile)
            if next_tile not in came_from:
                queue.put(next_tile)
                came_from[next_tile] = current_tile
    return came_from, has_been_next_tile
def broadTravel(t): q=Queue() q.put(t) while q.qsize()>0: tmp=q.get() print tmp.value if tmp.left is not None: q.put(tmp.left) if tmp.right is not None: q.put(tmp.right)
from queues import Queue
import unittest

# Shared fixtures: queue1 stays empty, queue2 is filled to capacity
# (5 items, values 0-4), queue3/queue4 each hold a single value.
queue1 = Queue()

queue2 = Queue()
for value in range(5):
    queue2.put(value)

queue3 = Queue()
queue3.put(3)

queue4 = Queue()
queue4.put(2)


class TestQueue(unittest.TestCase):
    """Unit tests for the Queue class."""

    def test_is_empty(self):
        """A freshly created queue reports empty."""
        self.assertTrue(queue1.is_empty())

    def test_is_full(self):
        """A queue holding 5 items reports full."""
        self.assertTrue(queue2.is_full())

    def test_put(self):
        """Putting into a full queue raises ValueError."""
        with self.assertRaises(ValueError):
            queue2.put(5)


# Entry point so the tests actually run when the file is executed directly;
# the original file defined the tests but never invoked them.
if __name__ == "__main__":
    unittest.main()
# Spin up two workers that prepare raw records and forward them to the DB queue.
db_prep_handler = DBPrepHandler()
for i in range(1, 3):
    prep_worker = Worker(f'Prep Worker{i}', prep_queue)
    prep_worker.register_handler(default_prep_handler)
    prep_worker.register_handler(db_prep_handler)
    prep_worker.register_result_queue(db_queue)
    pool.apply_async(prep_worker.process)

# Spin up four workers that map prepared items to ORM objects and persist them.
orm_handler = ORMHandler()
db_handler = DBHandler()
for i in range(1, 5):
    db_worker = Worker(f'DB Worker{i}', db_queue)
    db_worker.register_handler(orm_handler)
    db_worker.register_handler(db_handler)
    pool.apply_async(db_worker.process)

pool.close()

# Read file names from the data directory and enqueue one item per known file.
for path in get_files_list(Path('data')):
    file_info = file_model_map.get(path.name)
    if file_info is None:
        continue  # guard clause: skip files with no registered model mapping
    # 'item_type' instead of the original 'type', which shadowed the builtin.
    model, item_type, batch_size = file_info
    item = QueueItem(item_type, {'path': path}, {
        'model': model,
        'batch_size': batch_size
    })
    file_queue.put(item)

pool.join()