def hot_potato(namelist, num):
    """Play hot potato: pass the potato `num` times per round, then
    eliminate the current holder, until a single name remains.

    Returns the last remaining name (the winner).
    """
    ring = MyQueue()
    for player in namelist:
        ring.add(player)
    # Rotate and eliminate until exactly one player is left.
    while len(ring) != 1:
        for _ in range(num):
            ring.add(ring.pop())
        ring.pop()  # whoever holds the potato now is out
    return ring.pop()
def setUp(self):
    """Build two fixtures: an empty queue and a queue pre-filled with 1..5."""
    # Provide an empty queue.
    self.queue = MyQueue()
    # Provide a filled queue holding 1, 2, ..., len_test_data.
    self.len_test_data = 5
    self.test_data = list(range(1, self.len_test_data + 1))
    self.filled_queue = MyQueue()
    for value in self.test_data:
        self.filled_queue.push(value)
class TestMethods(unittest.TestCase):
    """Unit tests for the node-based Stack (LIFO) and MyQueue (FIFO)."""

    def setUp(self) -> None:
        # Fresh fixtures for every test.
        self.test_stack = Stack()
        self.test_queue = MyQueue()
        self.test_array = []

    def test_stack(self):
        # Check that a new stack is created empty.
        self.assertTrue(self.test_stack.empty())
        # Push 100 elements; after each push check that empty() returns
        # False, since elements are being added.
        for x in range(100):
            self.test_stack.push(x)
            self.assertFalse(self.test_stack.empty())
        # __eq__ is overridden in Node to compare values only.
        # Pop 99 values, creating a Node with the expected value for each
        # and comparing them (LIFO order: 99 down to 1).
        for x in range(99, 0, -1):
            equals_node = Node(x)
            self.assertEqual(self.test_stack.pop(), equals_node)
        # Check the stack is not empty yet, then pop the last Node and
        # check again.
        self.assertFalse(self.test_stack.empty())
        self.test_stack.pop()
        self.assertTrue(self.test_stack.empty())
        # Check that popping from an empty stack returns None.
        self.assertIsNone(self.test_stack.pop())

    def test_my_queue(self):
        # Run the same checks as for the stack, but in FIFO order.
        # Check that a new queue is created empty.
        self.assertTrue(self.test_queue.empty())
        # Enqueue 100 elements; after each one check that empty() returns
        # False, since elements are being added.
        for x in range(100):
            self.test_queue.enqueue(x)
            self.assertFalse(self.test_queue.empty())
        # Dequeue 99 values (FIFO order: 0 up to 98), creating a Node with
        # the expected value for each and comparing them.
        for x in range(99):
            equals_node = Node(x)
            self.assertEqual(self.test_queue.dequeue(), equals_node)
        # Check the queue is not empty yet, then dequeue the last Node and
        # check again.
        self.assertFalse(self.test_queue.empty())
        self.test_queue.dequeue()
        self.assertTrue(self.test_queue.empty())
        # Check that dequeuing from an empty queue returns None.
        self.assertIsNone(self.test_queue.dequeue())
class TestMyQueue(unittest.TestCase):
    """Tests for a two-stack MyQueue (backing stacks `latest` and `oldest`)."""

    def setUp(self):
        # Provide empty queue
        self.queue = MyQueue()
        # Provide filled queue
        self.len_test_data = 5
        self.test_data = [i + 1 for i in range(self.len_test_data)]
        self.filled_queue = MyQueue()
        for i in self.test_data:
            self.filled_queue.push(i)

    def test_instantiation(self):
        # A fresh queue exists and both backing stacks start empty
        # (their `top` pointers are None).
        queue = MyQueue()
        self.assertIsNotNone(queue)
        self.assertIsNone(queue.latest.top)
        self.assertIsNone(queue.oldest.top)

    def test_len(self):
        # Push and pop repeatedly, still one stack should always be empty
        self.assertEqual(0, len(self.queue))
        self.assertEqual(0, len(self.queue.latest))
        self.assertEqual(0, len(self.queue.oldest))
        self.queue.push(1)
        self.queue.push(1)
        # Pushes accumulate on `latest` only.
        self.assertEqual(2, len(self.queue))
        self.assertEqual(2, len(self.queue.latest))
        self.assertEqual(0, len(self.queue.oldest))
        self.queue.pop()
        # After a pop, the pending items have been moved onto `oldest`.
        self.assertEqual(1, len(self.queue))
        self.assertEqual(0, len(self.queue.latest))
        self.assertEqual(1, len(self.queue.oldest))

    def test_push(self):
        # Each push grows `latest`; `oldest` stays empty until a pop,
        # and the newest key sits on top of `latest`.
        for i in self.test_data:
            self.queue.push(i)
            self.assertEqual(i, len(self.queue))
            self.assertEqual(i, len(self.queue.latest))
            self.assertEqual(0, len(self.queue.oldest))
            self.assertEqual(i, self.queue.latest.top.key)

    def test_pop(self):
        # Pops come out in FIFO order; after the first pop everything
        # has been transferred from `latest` onto `oldest`.
        for i in self.test_data:
            data = self.filled_queue.pop()
            self.assertEqual(data, i)
            self.assertEqual(self.len_test_data - i, len(self.filled_queue))
            self.assertEqual(0, len(self.filled_queue.latest))
            self.assertEqual(self.len_test_data - i,
                             len(self.filled_queue.oldest))
            # While items remain, the next key to pop is on top of `oldest`.
            if (i < self.len_test_data):
                self.assertEqual(i + 1, self.filled_queue.oldest.top.key)
def list_of_depths(self):
    """
    Create a linked list of all the nodes at each depth.
    For example, a tree with depth D has D linked lists.

    Performs a BFS from this node, grouping node keys by depth.

    Returns:
        defaultdict(list): maps depth (0 for this node) to the list of
        node keys found at that depth, in left-to-right BFS order.
    """
    queue = MyQueue()
    the_dict = defaultdict(list)
    queue.push((self, 0))
    # Standard BFS: each queue entry carries a node and its depth.
    # Idiom fixes: `not is_empty()` instead of `== False`, and
    # `is not None` instead of `!= None`.
    while not queue.is_empty():
        node, level = queue.pop()
        the_dict[level].append(node.key)
        if node.left is not None:
            queue.push((node.left, level + 1))
        if node.right is not None:
            queue.push((node.right, level + 1))
    return the_dict
def simulation(total_seconds, pages_per_pages):
    """Simulate a shared printer serving a random stream of tasks.

    Args:
        total_seconds: number of one-second ticks to simulate.
        pages_per_pages: printer speed, forwarded to Printer.

    Side effects:
        Prints the average wait time of started tasks, the number of
        started tasks, and the number of tasks still queued.
    """
    printer = Printer(pages_per_pages)
    printer_queue = MyQueue()
    time_per_task = []
    for current_second in range(total_seconds):
        # new_task() randomly decides whether a task arrives this second.
        if new_task():
            printer_queue.add(Task(current_second))
        # Start the next queued task only when the printer is idle.
        if (len(printer_queue) != 0) and (not printer.isBusy()):
            current_task = printer_queue.pop()
            printer.startTask(current_task)
            time_per_task.append(current_task.waitTime(current_second))
        printer.tick()
    # Bug fix: guard against ZeroDivisionError when no task was ever
    # started (short simulations / unlucky arrivals).
    avg_time_per_task = (
        sum(time_per_task) / len(time_per_task) if time_per_task else 0.0
    )
    print(
        'Average time {:6.2f} secs, total {} tasks, {} tasks remaining'.format(
            avg_time_per_task,
            len(time_per_task),
            len(printer_queue),
        ))
def bfs(start):
    """Breadth-first search from `start`, labelling reachable vertices.

    Sets on every reachable vertex: `distance` (hops from start),
    `previous` (BFS-tree parent), and `state` ('processing' while
    queued, 'processed' once its neighbours have been expanded). Only
    vertices whose state is 'unprocessed' are enqueued.
    """
    # Bug fix: these two initializations were commented out, but
    # `current.distance + 1` below requires start.distance to exist —
    # without them the call fails unless the caller pre-initializes.
    start.distance = 0
    start.previous = None
    vertex_queue = MyQueue()
    vertex_queue.enqueue(start)
    while vertex_queue.size() > 0:
        current = vertex_queue.dequeue()
        for nbr in current.connections:
            if nbr.state == 'unprocessed':
                nbr.state = 'processing'
                nbr.distance = current.distance + 1
                nbr.previous = current
                vertex_queue.enqueue(nbr)
        current.state = 'processed'
def __init__(self,
             sizes=None,
             learning_rate=1.0,
             mini_batch_size=16,
             number_of_batches=16,
             epochs=10,
             matmul=np.matmul):
    """Forward hyperparameters to the base network and create the
    shared job/result queues for the worker processes.

    Args:
        sizes: layer sizes forwarded to the base class. Bug fix: the
            original default `sizes=list()` was a single mutable list
            shared across all calls; `None` now stands in for a fresh
            empty list.
        learning_rate, mini_batch_size, number_of_batches, epochs:
            forwarded unchanged to the base class.
        matmul: matrix-multiply callable used by the base class.
    """
    if sizes is None:
        sizes = []
    super().__init__(sizes, learning_rate, mini_batch_size,
                     number_of_batches, epochs, matmul)
    self.workers = []             # worker processes, managed by fit()
    self.jobs = mp.Queue()        # Queue of tuples (image, label)
    self.results = ResultQueue()  # Queue of tuples (image, label)
def fit(self, training_data, validation_data=None):
    """Create one worker process per CPU, run the parent's fit, and
    guarantee every worker is terminated afterwards.

    Replaces a bare `except:` plus duplicated terminate loops with a
    single try/finally: workers are cleaned up on both the success and
    the error path, and any exception propagates unchanged.
    """
    self._jobs_queue = mp.Queue()
    self._res_queue = MyQueue()
    n_cpus = mp.cpu_count()
    self._processes = [
        Worker(self._jobs_queue, self._res_queue) for _ in range(n_cpus)
    ]
    for p in self._processes:
        p.start()
    try:
        super().fit(training_data, validation_data)
    finally:
        for p in self._processes:
            p.terminate()
def breadth_first_search(graph, start_node, destination_node):
    """
    BFS. Make use of a queue, and make sure to mark nodes as visited!
    Returns True when destination_node is reachable from start_node,
    False otherwise.
    TODO: Return the path and its cost.
    """
    graph.reset_visited()
    q = MyQueue()
    start_node.visited = True
    q.push(start_node)
    while not q.is_empty():
        node = q.pop()
        if node == destination_node:
            return True
        for adjacent in node.adjacents:
            # Bug fix: the original never consulted `visited`, so every
            # neighbour was re-enqueued — an infinite loop on any graph
            # with a cycle, despite the docstring's own warning.
            if not adjacent.visited:
                adjacent.visited = True
                q.push(adjacent)
    return False
class IPNeuralNetwork(NeuralNetwork):
    """Neural network that parallelizes data augmentation across worker
    processes connected by shared job/result queues."""

    def fit(self, training_data, validation_data=None):
        '''
        Override this function to create and destroy workers
        '''
        self._jobs_queue = mp.Queue()
        self._res_queue = MyQueue()
        n_cpus = mp.cpu_count()
        self._processes = [
            Worker(self._jobs_queue, self._res_queue) for _ in range(n_cpus)
        ]
        for p in self._processes:
            p.start()
        try:
            super().fit(training_data, validation_data)
        finally:
            # Bug fix: replaces a bare `except:` plus duplicated
            # terminate loops — workers are now cleaned up on success
            # and error alike, and exceptions propagate unchanged.
            for p in self._processes:
                p.terminate()

    def create_batches(self, data, labels, batch_size):
        '''
        Override this function to return batches created by workers
        '''
        n_samples = len(data)
        # Fan every (sample, label) pair out to the worker processes...
        for d, l in zip(data, labels):
            self._jobs_queue.put((d, l))
        # ...then collect exactly one result per submitted job.
        result = []
        while len(result) != n_samples:
            result.append(self._res_queue.get())
        data = np.asarray([r[0] for r in result])
        labels = np.asarray([r[1] for r in result])
        return super().create_batches(data, labels, batch_size)
def bfs(graph, start):
    """Breadth-first traversal from `start`, printing each vertex id.

    Args:
        graph: graph object providing get_vertex(key).
        start: key of the start vertex.

    Raises:
        ValueError: if `start` is not a vertex of `graph`.
    """
    q = MyQueue()
    visited_id = set()  # avoid visiting a vertex twice
    v = graph.get_vertex(start)
    if v:
        visited_id.add(v.id)  # add start vertex
        # Enqueue every neighbour of the start vertex.
        for i in v.get_connections():
            q.add(i)
            visited_id.add(i.id)
    else:
        # Bug fix: the message was a plain string, so "{start}" was
        # emitted literally — it needs the f-string prefix.
        raise ValueError(f'Error! start vertex {start} is not in Graph.')
    while q:  # bfs traverse
        current_vert = q.pop()
        print(current_vert.id)  # print vertex id in BFS order
        for i in current_vert.get_connections():
            if i.id not in visited_id:
                q.add(i)
                visited_id.add(i.id)
def simulation(seconds, print_speed):
    """Simulate a printer queue for `seconds` one-second ticks.

    Args:
        seconds: number of ticks to run.
        print_speed: printer speed, forwarded to Printer.

    Side effects:
        Prints the average wait time of started tasks and the number of
        tasks still waiting in the queue.
    """
    printer = Printer(print_speed)
    print_queue = MyQueue()
    wait_time_list = []
    for current_second in range(seconds):
        # Task.new_task() randomly decides whether a job arrives now.
        if Task.new_task():
            task = Task(current_second)
            print_queue.enqueue(task)
        # Start the next queued task only when the printer is idle.
        if (not printer.busy()) and (not print_queue.is_empty()):
            next_task = print_queue.dequeue()
            wait_time_list.append(next_task.wait_time(current_second))
            printer.start_next(next_task)
        printer.tick()
    # Bug fix: guard against ZeroDivisionError when no task was ever
    # started (short runs / unlucky arrivals).
    average_wait_time = (
        sum(wait_time_list) / len(wait_time_list) if wait_time_list else 0.0
    )
    print(
        f'Average wait {average_wait_time:6.2f}, {print_queue.size():3d} tasks remaining'
    )
class IPNeuralNetwork(NeuralNetwork):
    """Neural network whose data augmentation runs in parallel worker
    processes fed by a shared jobs queue and drained via a results queue."""

    def __init__(self,
                 sizes=None,
                 learning_rate=1.0,
                 mini_batch_size=16,
                 number_of_batches=16,
                 epochs=10,
                 matmul=np.matmul):
        """Forward hyperparameters to the base class and create the
        shared job/result queues.

        Bug fix: the original default `sizes=list()` was a single
        mutable list shared across all calls; `None` now stands in for
        a fresh empty list.
        """
        if sizes is None:
            sizes = []
        super().__init__(sizes, learning_rate, mini_batch_size,
                         number_of_batches, epochs, matmul)
        self.workers = []
        self.jobs = mp.Queue()        # Queue of tuples (image, label)
        self.results = ResultQueue()  # Queue of tuples (image, label)

    def _n_cpus(self):
        """Return the number of CPUs this process may actually use."""
        if platform.system() == 'Windows':
            return mp.cpu_count(
            )  # Good for tests, but gets wrong number on CDP servers
        # On Linux, count the set bits of the Cpus_allowed mask, which
        # honours affinity/cgroup limits unlike mp.cpu_count().
        # Bug fix: close the status file (was leaked via bare open().read()).
        with open('/proc/self/status') as status_file:
            status = status_file.read()
        m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status)
        num_cpu = bin(int(m.group(1).replace(',', ''), 16)).count('1')
        return num_cpu

    def fit(self, training_data, validation_data=None):
        '''
        Override this function to create and destroy workers
        '''
        # Create Workers and set jobs
        n_workers = self._n_cpus()
        data = training_data[0]
        labels = training_data[1]
        jobs_worker = SetJobsWorker(
            n_workers,
            data,
            labels,
            self.jobs,
            n_jobs=(self.number_of_batches * self.mini_batch_size *
                    self.epochs))
        jobs_worker.start()
        for _ in range(n_workers):
            worker = Worker(self.jobs, self.results)
            worker.start()
            self.workers.append(worker)
        # Call the parent's fit
        super().fit(training_data, validation_data)
        # Stop Workers
        for worker in self.workers:
            worker.join()
        self.workers = []
        jobs_worker.join()

    def create_batches(self, data, labels, batch_size):
        """
        Parameters
        ----------
        data : np.array of input data
        labels : np.array of input labels
        batch_size : int size of batch

        Returns
        -------
        list
            list of tuples of (data batch of batch_size, labels batch
            of batch_size)
        """
        batches_flat = [
        ]  # all augmented data in one list, without splitting into batches
        for k in range(self.number_of_batches * self.mini_batch_size):
            # Stop condition for results queue:
            # we know that number of results is the same as number of jobs
            # so here we don't use None-terminated queue like with jobs
            batches_flat.append(self.results.get())
        batches = []
        sz = self.mini_batch_size
        for i in range(self.number_of_batches):
            batch = batches_flat[i * sz:(i + 1) *
                                 sz]  # list of tuples (image, label)
            batches.append(
                (np.array([tup[0] for tup in batch]),
                 np.array([tup[1] for tup in batch]))
            )  # tuple of (data batch of batch_size, labels batch of batch_size)
        return batches
def setUp(self) -> None:
    """Fresh fixtures for every test: empty stack, queue, and list."""
    self.test_array = []
    self.test_queue = MyQueue()
    self.test_stack = Stack()
def test_instantiation(self):
    """A brand-new queue exists and both backing stacks start empty."""
    fresh = MyQueue()
    self.assertIsNotNone(fresh)
    # Each internal stack's `top` pointer must begin as None.
    self.assertIsNone(fresh.latest.top)
    self.assertIsNone(fresh.oldest.top)
from led import MyPiLed from my_queue import MyQueue import os try: yellow = MyPiLed(4) red = MyPiLed(17) q = MyQueue() count = 0 led_choice = 0 os.system("clear") print("Which LED do you want to flash?") print("1: Yellow?") print("2: Red?") led_choice = input("Choose your option: ") os.system("clear") if led_choice == '1': print("You picked the Yellow LED") count = input("How many times do you want to blink?: ") yellow.blink(int(count), 0.5) if led_choice == '2': print("You picked the Red LED") count = input("How many times do you want to blink?: ") red.blink(int(count), 0.5) except:
from my_queue import MyQueue
from led import MyPiLed

# Demo: queue two LED blink jobs and run them to completion.
try:
    yellow_led = MyPiLed(MyPiLed.YELLOW)
    red_led = MyPiLed(MyPiLed.RED)
    job_queue = MyQueue()
    job_queue.put([yellow_led, 50, 0.1])
    job_queue.put([red_led, 10, 0.5])
    job_queue.join()
    job_queue.clear()
except KeyboardInterrupt:
    # Ctrl-C: drain pending jobs, switch both LEDs off, release the pins.
    job_queue.clear()
    yellow_led.off()
    red_led.off()
    MyPiLed.reset()
from my_queue import MyQueue
from user_interface import *

# One ticket queue per service desk.
queues = {"a": MyQueue(), "b": MyQueue(), "c": MyQueue()}
VERY_PRIVATE_PASS = "******"

if __name__ == "__main__":
    # Kiosk loop: customers join a queue; staff enter the password to
    # serve the next customer from a chosen queue.
    while True:
        welcome()
        choice = choose()
        if choice in queues:
            customer_queue = queues[choice]
            place = customer_queue.add_element()
            number_info(choice, place)
        elif choice == VERY_PRIVATE_PASS:
            official_welcome()
            staff_choice = choose()
            staff_queue = queues[staff_choice]
            place = staff_queue.remove_element()
            you_are_handling(staff_choice, place)
        else:
            queue_not_found()
from my_queue import MyQueue

# Exercise a queue created with capacity 4: add eight values, report the
# size, then remove six entries.
k1 = MyQueue(4)
for value in (3, 3123, 3, 123, 4, 0, 2, 1):
    k1.add(value)
print(k1.size())
for _ in range(6):
    k1.remove()
def hot_potato(name_list, times):
    """Return the winner of hot potato: the potato is passed `times`
    times per round, the holder is eliminated, and rounds repeat until
    a single player remains."""
    players = MyQueue()
    for name in name_list:
        players.enqueue(name)
    while players.size() > 1:
        # One round: rotate the circle `times` passes...
        for _ in range(times):
            players.enqueue(players.dequeue())
        # ...then the player holding the potato leaves.
        players.dequeue()
    return players.dequeue()