def test_WorkThread():
    in_q, out_q = Q(1), Q(1)

    def get_none():
        assert wt.get() is None

    def get_one():
        assert wt.get() == 1
        wt.task_done()
        # Returns True on success
        assert wt.put(17)

    def out_full():
        # Returns False when full
        assert not wt.put(42)

    # Empty
    wt = WorkThread(in_q, out_q, get_none)
    wt.run()

    # One element
    in_q.put(1)
    wt = WorkThread(in_q, out_q, get_one)
    wt.run()

    # Stopped
    wt = WorkThread(in_q, out_q, get_none)
    wt.stop()
    wt.run()

    # Full
    wt = WorkThread(in_q, out_q, out_full)
    wt.run()
def a_star(start_node, h):
    # fringe_nodes holds (f, node) pairs; for best-first expansion Q should be
    # a priority queue ordered by f = depth + heuristic.
    fringe_nodes = Q()
    nodes_expanded = 0
    fringe_nodes.put((start_node.get_man_heuristic(), start_node))
    if fringe_nodes.qsize() == 0:
        print("Solution not found")
        return None
    while fringe_nodes.qsize() > 0:
        node = fringe_nodes.get()[1]
        if node.check_goal(node.board):
            node.print_board(node.board)
            return ["A*", node.depth, nodes_expanded]
        node.print_board(node.board)
        children = node.get_children_nodes()
        nodes_expanded += 1
        for child in children:
            if h == "m":
                fringe_nodes.put((child.depth + child.get_man_heuristic(), child))
            if h == "c":
                fringe_nodes.put((child.depth + child.get_cheb_heuristic(), child))
def bfs(initial_state):
    frontier = Q(maxsize=MAX)
    f_list = set()
    f_list.add(tuple(initial_state.config))
    explored = set()
    frontier.put(initial_state)
    while not frontier.empty():
        state = frontier.get()
        f_list.remove(tuple(state.config))
        if test_goal(state):
            global goal_state
            goal_state = state
            break
        state.expand()
        global nodes_expanded
        nodes_expanded += 1
        explored.add(tuple(state.config))
        for child in state.children:
            if (tuple(child.config) not in explored) and (tuple(child.config) not in f_list):
                global max_depth
                if child.cost >= max_depth:
                    max_depth = child.cost
                frontier.put(child)
                f_list.add(tuple(child.config))
def a_star_graph(start_node, h):
    # Graph-search variant: visited_nodes prevents re-expanding boards.
    # fringe_nodes holds (f, node) pairs; for best-first expansion Q should be
    # a priority queue ordered by f = depth + heuristic.
    visited_nodes = {}
    fringe_nodes = Q()
    nodes_expanded = 0
    fringe_nodes.put((start_node.get_man_heuristic(), start_node))
    if fringe_nodes.qsize() == 0:
        print("Solution not found")
        return None
    while fringe_nodes.qsize() > 0:
        node = fringe_nodes.get()[1]
        visited_nodes[str(node.board)] = 1
        node.print_board(node.board)
        if node.check_goal(node.board):
            # Search name, depth, nodes expanded
            node.print_board(node.board)
            return ["A* Graph", node.depth, nodes_expanded]
        node.print_board(node.board)
        children = node.get_children_nodes()
        nodes_expanded += 1
        for child in children:
            if visited_nodes.get(str(child.board)) is not None:
                continue
            if h == "m":
                fringe_nodes.put((child.depth + child.get_man_heuristic(), child))
            if h == "c":
                fringe_nodes.put((child.depth + child.get_cheb_heuristic(), child))
def __init__(self, map, end=0, command=None):
    self.map = None
    self.start = 0
    self.end = end
    self.ve = 1
    self.cmd = command
    self.auto_pickup = False
    # self.ve = int(verbosity)
    # self.direction = direction
    self.q = Q()
    self.load_map(map)
def Udp(*args, **kwargs):
    '''udp'''
    event = kwargs.get('event', None)
    if not event:
        return
    address = (setting.UDP_IP, setting.UDP_PORT)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(address)
    print("start udp: %s:%s" % address)
    while True:
        data, addr = s.recvfrom(setting.UDP_MAX_BITS_RECV_ONE)
        if not data:
            print("client has exited")
            break
        Q.append(data)
        if not event.is_set():
            event.set()
        print("received data len: %s, from: %s" % (len(data), addr))
    s.close()
def __init__(self):
    super(Gui, self).__init__()
    self.setupUi(self)
    self.table_set()
    self.bt1.clicked.connect(self.slot)
    self.bt2.clicked.connect(self.slot_2)
    self.bt_recognize.clicked.connect(self.recognize)
    # fourcc = cv2.CV_FOURCC(*'MPJG')
    self.writer = cv2.VideoWriter(
        'output.avi', cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'), 20,
        (800, 600))
    self.R_video = Q(maxsize=20 * 60)
    self.model = Net()
    assert self.model.load('car.h5'), 'failed to load model'
def Save(*args, **kwargs):
    event = kwargs.get('event', None)
    if not event:
        return
    InitAdapter(**CONFIG.__dict__)
    while True:
        if Q.count <= 0:
            event.clear()
            event.wait(2)
        datas, count = Q.get()
        if count <= 0:
            continue
        for data in datas:
            r = __assembleRecordLog__(data)
            if r:
                for s in SAVERS:
                    s.save(r)
def ok(val):
    vis = set()
    bfs = Q()
    bfs.put((0, 0))
    vis.add((0, 0))
    while bfs.qsize():
        now = bfs.get()
        if now[0] == H - 1 and now[1] == W - 1:
            return True
        for d in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            next = (now[0] + d[0], now[1] + d[1])
            if H > next[0] >= 0 and W > next[1] >= 0 and \
                    abs(heights[now[0]][now[1]] - heights[next[0]][next[1]]) <= val and \
                    next not in vis:
                bfs.put(next)
                vis.add(next)
    return False
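# A minimal usage sketch (an assumption, not from the original source): ok(val)
# is a monotone feasibility check, so the smallest allowed height difference
# can be found by binary searching over val. H, W and heights are assumed to
# be defined as in the snippet above.
lo, hi = 0, max(max(row) for row in heights) - min(min(row) for row in heights)
while lo < hi:
    mid = (lo + hi) // 2
    if ok(mid):
        hi = mid
    else:
        lo = mid + 1
print(lo)  # smallest val for which (H-1, W-1) is reachable from (0, 0)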
def find_room(map, start, end):
    q = Q()
    q.put([start])
    visited = set()
    while q.qsize() > 0:
        path = q.get()
        last_room = path[-1]
        if last_room == end:
            return path
        if last_room not in visited:
            visited.add(last_room)
            for next in map[last_room]:
                if next == 'info':
                    break
                if map[last_room][next] != '?':
                    new_path = path.copy()
                    new_path.append(str(map[last_room][next]))
                    q.put(new_path)
def __init__(self, queue_name, url=None, req_lock=None, tls_config=None):
    super(MsgQueueService, self).__init__()
    if url is None:
        self.host = '127.0.0.1'
        self.username = '******'
        self.password = '******'
    else:
        self.server_url = urlparse(url)
        self.host = self.server_url.hostname
        self.username = self.server_url.username
        self.password = self.server_url.password
    self.queue_name = queue_name
    self.req_lock = req_lock
    self.tls_config = tls_config
    self.publish_q = Q()
    self.notif_callback = None
    self.request_callback = None
    self._running = threading.Event()
def q4bfs(gd, kk, tt):
    tr, tc = tt
    tnrl = range(tr - 1, tr + 2)
    tncl = range(tc - 1, tc + 2)
    from queue import Queue as Q
    q = Q()
    q.put(kk)
    gd[kk[0] * n + kk[1]] = -1
    while not q.empty():
        rr, cc = q.get()
        if (rr in tnrl) and (cc in tncl):
            return True
        cl = [(r, c)
              for r in range(rr - 1, rr + 2)
              for c in range(cc - 1, cc + 2)
              if (r >= 0 and c >= 0 and r < n and c < n and gd[r * n + c] > 0)]
        for r, c in cl:
            q.put([r, c])
            gd[r * n + c] = False  # mark as visited to avoid revisiting
    return False
def Solve(temp):
    at = temp - 1
    prev = [None for _ in range(n)]
    q = Q()
    q.Enqueue(g[at])
    visited[at] = True
    while not q.IsEmpty():
        node = q.Dequeue()
        values.append(node.iD)
        neighbors = node.neighbors
        if neighbors is None:
            continue
        for x in neighbors:
            if not visited[x.iD - 1]:
                q.Enqueue(x)
                visited[x.iD - 1] = True
                prev[x.iD - 1] = node.iD
    return prev
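# A minimal usage sketch (an assumption, not from the original source): walk
# the prev array returned by Solve to recover the BFS path from the source
# vertex to a target vertex (both 1-based ids), or None if it was not reached.
def path_to(prev, source, target):
    path = [target]
    while path[-1] != source:
        parent = prev[path[-1] - 1]
        if parent is None:
            return None  # target was never reached by the BFS
        path.append(parent)
    return list(reversed(path))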
def region_grow(img, seeds, thresh):
    new_img = np.zeros_like(img)
    new_img = new_img[..., 0]
    visited = set()
    for seed in seeds:
        queue = Q()
        queue.put(seed)
        x0, y0 = seed
        # RGB value of the seed pixel in the original img
        img_seed_val = img[y0, x0]
        new_img[y0][x0] = 1
        # Visit each neighbour of the seed
        while not queue.empty():
            curr_pixel = queue.get()
            # Visit each neighbour of the current pixel
            for neighbour in get_neighbours(curr_pixel, img):
                i, j = neighbour
                # If we haven't checked this neighbour before
                if neighbour not in visited:
                    visited.add(neighbour)
                    # Check each colour channel against the seed value
                    is_in_thresh = True
                    for c in range(3):
                        seed_val = img_seed_val[c]
                        neighbour_val = img[j][i][c]
                        if seed_val >= neighbour_val:
                            diff = seed_val - neighbour_val
                        else:
                            diff = neighbour_val - seed_val
                        if diff > thresh:
                            is_in_thresh = False
                    if is_in_thresh:
                        # Visit this neighbour later
                        queue.put(neighbour)
                        new_img[j][i] = 1
    return new_img
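# A minimal usage sketch (an assumption, not from the original source): grow a
# region from a single seed on a synthetic RGB image. It assumes np is the
# numpy module used above and that get_neighbours yields the (x, y) neighbours
# of a pixel, as the function above expects.
demo = np.zeros((8, 8, 3), dtype=np.uint8)
demo[2:6, 2:6] = (200, 200, 200)                    # bright square on a dark background
mask = region_grow(demo, seeds=[(3, 3)], thresh=10)
print(mask.sum())                                   # number of pixels joined to the seed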
def solve(puzzle):
    if not puzzle.valid():
        print("original puzzle isn't valid")
        return
    puzzle.update_count()
    print(puzzle.count)
    solution_found = False
    seen = set()
    q = Q()
    q.put(puzzle)
    c = 0
    while not q.empty():
        curr = q.get(False)
        c += 1
        if c % 1000 == 0:
            print(c, curr.count)
        for x in range(9):
            for y in range(9):
                if curr[x][y] == 0:
                    for i in range(1, 10):
                        n = Puzzle(curr, curr.count + 1)
                        n[x][y] = i
                        if str(n) in seen:
                            continue
                        seen.add(str(n))
                        if not n.valid():
                            continue
                        if n.complete():
                            print(n)
                            print()
                            solution_found = True
                        else:
                            q.put(n)
    print(c, "tries")
    if not solution_found:
        print("no solution found")
def bfs_search(initial_state: PuzzleState):
    """BFS search"""
    global nodes_expanded, max_search_depth, max_ram_usage
    queue = Q()
    explored_and_frontier = {initial_state}
    queue.put(initial_state)
    while not queue.empty():
        # update_ram_usage()
        state: PuzzleState = queue.get()
        if hash(state) == GOAL_STATE_HASH:
            update_ram_usage()
            return state
        nodes_expanded += 1
        for child in state.expand():
            if child not in explored_and_frontier:
                explored_and_frontier.add(child)
                queue.put(child)
                if child.cost > max_search_depth:
                    max_search_depth = child.cost
def main():
    """MAIN"""
    cv2.namedWindow("Test")           # Create a named window
    cv2.moveWindow("Test", 900, 600)  # Move it to (900, 600)
    screenWidth, screenHeight = pyautogui.size()
    st = 'Last command'
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    _, sample_frame = cap.read()

    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()

    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)
    box_process = Process(target=get_face,
                          args=(mark_detector, img_queue, box_queue,))
    box_process.start()

    # Setting up a thread for listening to audio commands.
    voice_command_queue = Q()
    stt_process = Thread(target=get_voice_command,
                         args=(voice_command_queue,))
    stt_process.setDaemon(True)
    stt_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]
    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [
        Stabilizer(state_num=2, measure_num=1, cov_process=0.1, cov_measure=0.1)
        for _ in range(6)
    ]

    tm = cv2.TickMeter()

    while True:
        # Read frame, crop it, flip it, suits your needs.
        frame_got, frame = cap.read()
        if frame_got is False:
            break

        # Crop it if frame is larger than expected.
        # frame = frame[0:480, 300:940]

        # If frame comes from webcam, flip it so it looks like a mirror.
        frame = cv2.flip(frame, 2)

        # Pose estimation by 3 steps:
        # 1. detect face;
        # 2. detect landmarks;
        # 3. estimate pose

        # Feed frame to image queue.
        img_queue.put(frame)

        # Get face from box queue.
        facebox = box_queue.get()

        if facebox is not None:
            # Detect landmarks from image of 128x128.
            face_img = frame[facebox[1]:facebox[3], facebox[0]:facebox[2]]
            face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
            face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)

            tm.start()
            marks = mark_detector.detect_marks([face_img])
            tm.stop()

            # Convert the marks locations from local CNN to global image.
            marks *= (facebox[2] - facebox[0])
            marks[:, 0] += facebox[0]
            marks[:, 1] += facebox[1]

            # Uncomment following line to show raw marks.
            # mark_detector.draw_marks(frame, marks, color=(0, 255, 0))

            # Uncomment following line to show facebox.
            # mark_detector.draw_box(frame, [facebox])

            # Try pose estimation with 68 points.
            pose = pose_estimator.solve_pose_by_68_points(marks)

            # Stabilize the pose.
            steady_pose = []
            pose_np = np.array(pose).flatten()
            for value, ps_stb in zip(pose_np, pose_stabilizers):
                ps_stb.update([value])
                steady_pose.append(ps_stb.state[0])
            steady_pose = np.reshape(steady_pose, (-1, 3))

            # Uncomment following line to draw pose annotation on frame.
            # pose_estimator.draw_annotation_box(
            #     frame, pose[0], pose[1], color=(255, 128, 128))

            # Draw the stabilized pose annotation on the frame.
            pose_estimator.draw_annotation_box(frame, steady_pose[0],
                                               steady_pose[1],
                                               color=(255, 128, 128))

            # Uncomment following line to draw head axes on frame.
            endpoints = pose_estimator.getEndPoints(frame, steady_pose[0],
                                                    steady_pose[1])
            deltax = endpoints[1][0] - endpoints[0][0]
            deltay = endpoints[1][1] - endpoints[0][1]
            xpos = math.floor((deltax + 44) * screenWidth / 88)
            ypos = math.floor((deltay + 14) * screenHeight / 58)
            # print(xpos, ypos)
            pyautogui.moveTo(xpos, ypos)

        if not voice_command_queue.empty():
            command = voice_command_queue.get_nowait()
            if 'click' in command or 'select' in command:
                pyautogui.click()
                st = 'Click'
            elif 'double' in command or 'in' in command:
                pyautogui.doubleClick()
                st = 'Double Click'
            elif 'right' in command or 'menu' in command or 'light' in command:
                pyautogui.rightClick()
                st = 'Right Click'
            print(command)

        cv2.putText(frame, st, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 20, 255)

        # Scale the preview to 30 percent of the original dimensions.
        scale_percent = 30
        width = int(frame.shape[1] * scale_percent / 100)
        height = int(frame.shape[0] * scale_percent / 100)
        dsize = (width, height)

        # Resize image.
        output = cv2.resize(frame, dsize)
        cv2.moveWindow("Test", screenWidth - width, screenHeight - height)

        # Show preview.
        cv2.imshow("Test", output)
        if cv2.waitKey(10) == 27:
            break

    # Clean up the multiprocessing process.
    box_process.terminate()
    box_process.join()
from queue import Queue as Q

n = int(input())  # board size, up to 1000
qq = [int(s) - 1 for s in input().split()]  # queen position
kk = [int(s) - 1 for s in input().split()]  # king start position
tt = [int(s) - 1 for s in input().split()]  # target position

gd = [True] * (n * n)  # squares not attacked by the queen
rc = lambda r, c: r * n + c
ir = lambda f, t: max(abs(f[0] - t[0]), abs(f[1] - t[1])) <= 1  # within one king move

for i in range(n * n):
    r, c = i // n, i % n
    if r == qq[0] or c == qq[1] or r + c == qq[0] + qq[1]:
        gd[i] = False

# Queues for BFS (row and column tracked separately)
rq, cq = Q(), Q()
rq.put(kk[0])
cq.put(kk[1])
ok = False
while not (rq.empty() or cq.empty()):
    r, c = rq.get(), cq.get()
    if ir((r, c), tt):
        ok = True
        break
    uu, dd = max(0, r - 1), min(n - 1, r + 1)
    ll, rr = max(0, c - 1), min(n - 1, c + 1)
    for nr in range(uu, dd + 1):
        for nc in range(ll, rr + 1):
            if gd[nr * n + nc]:
                gd[nr * n + nc] = False  # mark visited so the BFS terminates
                rq.put(nr)
                cq.put(nc)
print('YES' if ok else 'NO')
def MakeQueueFromTokenz(tokenizedInput):
    # Build a bounded FIFO queue holding every token, in order.
    tokenQueue = Q(len(tokenizedInput))
    for token in tokenizedInput:
        tokenQueue.put(token)
    return tokenQueue
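# A minimal usage sketch (an assumption, not from the original source): build a
# queue from a pre-tokenized expression and drain it in FIFO order.
tokens = MakeQueueFromTokenz(['(', '1', '+', '2', ')'])
while not tokens.empty():
    print(tokens.get())  # prints the tokens in their original order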
# King: moves within its 3x3 neighbourhood (minus its own square);
# Queen: attacks along diagonals, rows and columns.
# Solve it with BFS.
from queue import Queue as Q

n = int(input())  # board size, up to 1000
qq = [int(s) - 1 for s in input().split()]  # queen position
kk = [int(s) - 1 for s in input().split()]  # king start position
tt = [int(s) - 1 for s in input().split()]  # target position

gd = [True] * (n * n)  # squares not attacked by the queen
ir = lambda f, t: max(abs(f[0] - t[0]), abs(f[1] - t[1])) <= 1  # within one king move

for i in range(n * n):
    r, c = i // n, i % n
    if r == qq[0] or c == qq[1] or r + c == qq[0] + qq[1]:
        gd[i] = False

# Queue for BFS
q = Q()
q.put(kk)
ok = False
while not q.empty():
    r, c = q.get()
    gd[r * n + c] = False  # avoid revisiting
    if ir((r, c), tt):
        ok = True
        break
    uu, dd = max(0, r - 1), min(n - 1, r + 1)
    ll, rr = max(0, c - 1), min(n - 1, c + 1)
    for nr in range(uu, dd + 1):
        for nc in range(ll, rr + 1):
            if gd[nr * n + nc]:
                q.put([nr, nc])
print('YES' if ok else 'NO')
        ConsumerGrayScaleThread.producerQ = producerQ
        self.start()

    def run(self):
        global finished
        global finished2
        # Convert frames while the producer is still running.
        while not finished:
            if not ConsumerGrayScaleThread.consumerQ.empty():
                toGrayscale(ConsumerGrayScaleThread.consumerQ.get(),
                            ConsumerGrayScaleThread.producerQ)
            else:
                time.sleep(0.0001)
        # Drain whatever is left once the producer has finished.
        while not ConsumerGrayScaleThread.consumerQ.empty():
            toGrayscale(ConsumerGrayScaleThread.consumerQ.get(),
                        ConsumerGrayScaleThread.producerQ)
        finished2 = True
        return


global finished
global finished2
lock = Lock()
lock2 = Lock()
sharedQueue1 = Q(10, lock)
sharedQueue2 = Q(10, lock2)
finished = False
finished2 = False
extractor = ProducerExtractorThread(sharedQueue1)
displayer = ConsumerDisplayThread(sharedQueue2)
grayscale = ConsumerGrayScaleThread(sharedQueue1, sharedQueue2)
def checkMazes(g1, g2):
    """
    This function takes in two 'MetaBush' graphs and checks whether it is
    possible for the players to escape the maze. A cloud of game states is
    created and then traversed. A game-state graph, or cloud, consists of
    nodes which represent every possible game state.
    Node example: (x1, y1, x2, y2, [state variables])
    """
    color_code = {
        "RED": 0,
        "ORANGE": 1,
        'PINK': 2,
        'WHITE': 3,
        'YELLOW': 4,
        'GREEN': 5,
        'TEAL': 6,
        'BLUE': 7
    }
    q = Q()
    empty_q = Q()
    nodes = set()
    # add the start node to the queue along with the starting conditions
    q.append((g1.start, g2.start, 0))
    while q != empty_q:
        # pop the queue
        curr = q.popleft()
        A, B, State = curr[0], curr[1], curr[2]
        OG_State = State
        # stop if you have found the end
        if A == g1.end and B == g2.end:
            return True
        # skip if the node has already been seen
        if curr in nodes:
            continue
        nodes.add(curr)
        # find all the valid nodes
        # find the class representations of the current nodes
        Anode, Bnode = g1.translate[A], g2.translate[B]
        # get the connections list from each node
        connect1, connect2 = Anode.connect, Bnode.connect
        # for each connection tuple in the connection list
        for des in connect1:
            State = OG_State
            # get the node of the destination
            des_node = g1.translate[des]
            # if the destination door is closed, skip the destination
            if isinstance(des_node, Door):
                if (not bit.testBit(State, color_code[des_node.color]) and not des_node.inverse) \
                        or (bit.testBit(State, color_code[des_node.color]) and des_node.inverse):
                    # regular door is closed (0), or inverse door and 1
                    continue
            if isinstance(Anode, Button):
                # if the start is a button
                if isinstance(des_node, Door):
                    # and the destination is a door
                    if Anode.color == des_node.color:
                        continue
                if isinstance(Bnode, Door) and Anode.color == Bnode.color:
                    # player B is on a door and it will be closed by player A moving
                    # print("Killed someone getting off of buttons")
                    continue
                State = bit.toggleBit(State, color_code[Anode.color])  # toggle getting off the button
            # need to calculate State as a result of getting onto something
            if isinstance(des_node, Button) or isinstance(des_node, Switch):
                State = bit.toggleBit(State, color_code[des_node.color])
                if isinstance(Bnode, Door) and (des_node.color == Bnode.color):
                    # player B was in an open door of the colour that was just toggled
                    # print("Killed someone getting onto a component")
                    continue
            q.append((des, B, State))
        # moving off of B
        for des in connect2:
            State = OG_State
            des_node = g2.translate[des]
            if isinstance(Bnode, Button):
                if isinstance(des_node, Door):
                    if Bnode.color == des_node.color:
                        continue
                if isinstance(Anode, Door) and Bnode.color == Anode.color:
                    # player A is on a door and it will be closed by player B moving
                    # print("Killed someone getting off of buttons")
                    continue
                State = bit.toggleBit(State, color_code[Bnode.color])
            if isinstance(des_node, Door):
                if (not bit.testBit(State, color_code[des_node.color]) and not des_node.inverse) \
                        or (bit.testBit(State, color_code[des_node.color]) and des_node.inverse):
                    # regular door is closed, or inverse door and 1
                    continue
            # need to calculate State beforehand
            if isinstance(des_node, Button) or isinstance(des_node, Switch):
                State = bit.toggleBit(State, color_code[des_node.color])
                if isinstance(Anode, Door) and (des_node.color == Anode.color):
                    # player A was in an open door of the colour that was just toggled
                    # print("Killed someone getting onto a component")
                    continue
            q.append((A, des, State))
    # the endpoint hasn't been found and all possibilities have been processed
    return False
def queue2():
    q = Q()
    q.put(0)
    q.put(1)
    q.put(2)
    q.get()
def __init__(self):
    self.qr = Q()
    self.qc = Q()
    Queue)
from .fileinfo import (generate_page_tags, delete_fileinfo_files,
                       build_pages_fileinfos, build_archives_fileinfos,
                       build_indexes_fileinfos, eval_paths,
                       build_archives_fileinfos_by_mappings)
from . import generate_page_text
from settings import MAX_BATCH_OPS, LOOP_TIMEOUT
import time
from threading import Thread

t = Thread()
t.daemon = True

from queue import Queue as Q
write_queue = Q()

job_type = Struct()
job_type.page = 'Page'
job_type.index = 'Index'
job_type.archive = 'Archive'
job_type.include = 'Include'
job_type.insert = 'Insert'
job_type.control = 'Control'

job_type.description = {
    job_type.page: 'Page entry',
    job_type.index: 'Index entry',
    job_type.archive: 'Archive entry',
    job_type.include: 'Include file',
    job_type.insert: 'Queue insert job',
def __init__(self):
    super(GUI, self).__init__()
    self.setupUi(self)
    self.bt1.clicked.connect(self.main)
    self.frame_fifo = Q()
            portals[i] = [None, None]
        assert portals[i][0 if inner else 1] is None
        portals[i][0 if inner else 1] = p
        if (new_pos.x >= 0 and new_pos.y >= 0 and new_pos.x < xsize
                and new_pos.y < ysize
                and lines[new_pos.y + 2][new_pos.x + 2] == '.'):
            nearby.append(new_pos)
    g[p] = nearby

# Connect the two ends of every portal in the walkable graph.
for _, pos in portals.items():
    diff[(pos[0], pos[1])] = 1
    diff[(pos[1], pos[0])] = -1
    g[pos[0]].append(pos[1])
    g[pos[1]].append(pos[0])

# Plain BFS over the graph to find the shortest walk from start to goal.
dist = {}
q = Q()
q.put(start)
dist[start] = 0
while not q.empty():
    cur = q.get()
    steps = dist[cur]
    for nearby in g.get(cur, []):
        if nearby not in dist:
            dist[nearby] = steps + 1
            q.put(nearby)
print(dist[goal])

dist = {}
q = Q()
q.put((start, 0))
from queue import Queue as Q

# get() on an empty Queue blocks forever waiting for an item.
Q().get()
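# A minimal contrast (an assumption, not from the original source): unlike the
# blocking get() above, get_nowait() raises queue.Empty instead of hanging.
import queue
try:
    Q().get_nowait()
except queue.Empty:
    print('queue was empty')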
from queue import Queue as Q
# from queue import PriorityQueue as Q
import copy

q1 = Q()


def printQueue(q):
    # Drain the queue and print each item. Note that copy.copy() only
    # shallow-copies the Queue object, so the copy shares the underlying
    # deque and draining it empties the original as well.
    while not q.empty():
        print(q.get())
    print('')


q1.put((5, 's'))
q1.put((2, 'e'))
q1.put((0, 'a'))
q1.put((0, 'z'))
# printQueue(copy.copy(q1))
# print('second')

q2 = Q()
# Deep-copy the backing deque before q1 is drained so q2 keeps its own items.
q2.queue = copy.deepcopy(q1.queue)

printQueue(copy.copy(q1))
print('second')
printQueue(copy.copy(q2))

# %%
import json

qa = []