def grab(cam, queue, width, height, fps):
    """Capture frames from camera `cam` into `queue`, prompting for grower/sample
    metadata on first run and recording frames via `out` while `recording` is set.

    NOTE(review): structure reconstructed from collapsed source — confirm nesting.
    """
    global grower, running, recording, path, filename, exposure, isfile, samplenum, vidnum, out, fourcc, setout
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    if grower == "None":
        grower, ok = QtWidgets.QInputDialog.getText(w, 'Enter grower', 'Who is growing? ')
    if not isfile:
        # NOTE(review): bare attribute access — no call. Missing "()"? Confirm intent.
        FRHEED.changeSample
        samplenum, ok = QtWidgets.QInputDialog.getText(w, 'Enter sample name', 'Enter sample name: ')
        if ok:
            isfile = True
    # NOTE(review): hard-coded, user-specific save root.
    path = str('D:/ElliotYoung/Desktop/FRHEED/'+grower+'/'+samplenum+'/')
    #### Create folder to save images in if it doesn't already exist ####
    if not os.path.exists(path):
        os.makedirs(path)
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        # Re-apply exposure every frame so UI slider changes take effect.
        capture.set(cv2.CAP_PROP_EXPOSURE, exposure)
        if queue.qsize() < 30:
            queue.put(frame)
            if recording:
                if not setout:
                    # Brief delay before the first write so the writer is ready.
                    time.sleep(0.1)
                    setout = True
                out.write(img)
        else:
            # Queue full: drop the frame and report the backlog.
            print(queue.qsize())
def grab(cam, queue, width, height, fps):
    """Capture frames from `cam`, run them through Apply_Models, record to an
    AVI file, and push each frame dict onto `queue` (dropped when backlog >= 10).
    """
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    # Query the sizes the driver actually accepted; they may differ from the request.
    s_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    s_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    codec = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter('./outputs/demoF.avi', codec, 20, (s_width, s_height))
    apply = Apply_Models()
    # NOTE: loops forever; `running` was deliberately commented out upstream.
    # while running:
    while True:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        img = apply.main(img)
        out.write(img)
        frame["img"] = img
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Fixed: Python 2 print statement -> Python 3 function call.
            print(queue.qsize())
def grab(self, cam, queue, width, height, fps):
    """Capture frames, overlay classifier labels when enabled, and enqueue them.

    The camera index is chosen from the radio buttons, overriding `cam`.
    """
    if self.radioButton.isChecked():
        cam = 0
    elif self.radioButton_2.isChecked():
        cam = 1
    else:
        cam = 0
    self.capture = cv2.VideoCapture(cam)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    self.capture.set(cv2.CAP_PROP_FPS, fps)
    while self.running:
        frame = {}
        # Get the original frame from video capture
        retval, original_frame = self.capture.read()
        # Resize the frame to fit the imageNet default input size
        if self.CV_realtimeFlag is not None:
            self.CV_realtime.frame_to_predict = cv2.resize(original_frame, (224, 224))
        if self.checkBox.isChecked():
            # Add text label and network score to the video capture
            cv2.putText(original_frame, "Label: %s | Score: %.2f" % (self.CV_realtime.label, self.CV_realtime.score), (15, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
            cv2.putText(original_frame, "Name: %s| Class: %d " % (self.CV_realtime.grasp_name, self.CV_realtime.grasp_number), (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        self.capture.grab()
        frame["img"] = original_frame
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Fixed: Python 2 print statement -> Python 3 function call.
            print(queue.qsize())
def BFS(queue=None, end=None, condition=1):
    """Recursive breadth-first walk over the module-level `matrix`.

    condition == 1: search for a cell satisfying `stopCondition` and return its
    (x, y). condition == 2: collect downhill, nonzero, unclustered neighbours
    into `end` and return `end` once the frontier stops growing.

    NOTE(review): relies on module globals `matrix`, `paddedMatrix`,
    `clustersMatrix`, `stopCondition` — confirm they are defined before calling.
    Recursion depth grows with the number of visited cells.
    """
    current_index = queue.get()
    if condition == 2:
        # Seed cell is also recorded in the result queue.
        end.put(current_index)
    current_x, current_y = current_index[0], current_index[1]
    # Snapshot size to detect later whether this step added anything new.
    currentSize = queue.qsize()
    if condition == 1:
        # paddedMatrix has a 1-cell border, hence the +1 offsets.
        if stopCondition(paddedMatrix, current_x + 1, current_y + 1) == 1:
            return current_x, current_y
    # Visit the 8-neighbourhood, skipping self, out-of-bounds, and queued cells.
    for n in range(current_x - 1, current_x + 2):
        for m in range(current_y - 1, current_y + 2):
            if not (n == current_x and m == current_y) and n > -1 and m > -1 and n < matrix.shape[0] and m < \
                    matrix.shape[1] and (n, m) not in queue.queue:
                if condition == 1:
                    queue.put((n, m))
                else:
                    # Only descend: strictly smaller, nonzero, not yet clustered.
                    if matrix[current_x, current_y] > matrix[n, m] and matrix[
                            n, m] != 0 and clustersMatrix[n, m] == 0:
                        end.put((n, m))
                        queue.put((n, m))
    if condition == 2:
        # Frontier did not grow: traversal is complete.
        if currentSize == queue.qsize():
            return end
    return BFS(queue, end, condition)
def grab(cam, queue, sendqueue, width, height, fps):
    """Capture frames, convert them to BGR565, and fan each frame out to
    `queue` and `sendqueue` (each capped at a backlog of 10)."""
    global running
    capture = cv2.VideoCapture(cam)
    for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, width),
                        (cv2.CAP_PROP_FRAME_HEIGHT, height),
                        (cv2.CAP_PROP_FPS, fps)):
        capture.set(prop, value)
    print("camera Ready")
    while running:
        capture.grab()
        ok, image = capture.retrieve(0)
        # 3-byte BGR -> 2-byte BGR565 to shrink the per-frame payload.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2BGR565)
        payload = {"img": image}
        print(image.shape)
        if queue.qsize() < 10:
            queue.put(payload)
        else:
            print(queue.qsize())
        if sendqueue.qsize() < 10:
            sendqueue.put(payload)
        else:
            print(sendqueue.qsize())
def grab(cam, queue, width, height, fps):
    """Capture frames and run the model pipeline while `state` == 1, resetting
    the tracker on each transition into that state; enqueue every frame.
    """
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    apply = Apply_Models()
    reset = 1
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        # `state` is read from module scope — presumably toggled by the UI; confirm.
        if state == 1:
            if reset == 1:
                # Re-arm the tracker once per entry into state 1.
                apply.set_tracker()
                reset = 0
            img = apply.main(img)
        else:
            reset = 1
        frame["img"] = img
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Fixed: Python 2 print statement -> Python 3 function call.
            print(queue.qsize())
def BFS(queue=None, condition=1):
    """Recursive breadth-first walk over the module-level `matrix`.

    condition == 1: return the (x, y) of the first cell satisfying
    `stopCondition`. condition == 2: grow the frontier while cells stay >= 4
    and unclustered, returning `queue` once it stops growing.

    NOTE(review): relies on module globals `matrix`, `paddedMatrix`,
    `clustersMatrix`, `stopCondition`. Recursion depth grows with cells visited.
    """
    current_index = queue.get()
    current_x, current_y = current_index[0], current_index[1]
    # Snapshot size so we can detect whether this step added new cells.
    currentSize = queue.qsize()
    if condition == 1:
        # paddedMatrix has a 1-cell border, hence the +1 offsets.
        if stopCondition(paddedMatrix, current_x + 1, current_y + 1) == 1:
            return current_x, current_y
    elif condition == 2:
        # Abort expansion from weak (< 4) or already-clustered cells.
        if matrix[current_x, current_y] < 4 or clustersMatrix[current_x, current_y] != 0:
            return queue
    # Enqueue the 8-neighbourhood, skipping self, out-of-bounds, and duplicates.
    for n in range(current_x - 1, current_x + 2):
        for m in range(current_y - 1, current_y + 2):
            if not (n == current_x and m == current_y) and n > -1 and m > -1 and n < matrix.shape[
                    0] and m < matrix.shape[1] and (n, m) not in queue.queue:
                queue.put((n, m))
    if condition == 2:
        # Frontier did not grow: traversal is complete.
        if currentSize == queue.qsize():
            return queue
    return BFS(queue, condition)
def test_multi_seq_mutations(self):
    """Run concurrent sequential-mutation workers over the buckets and assert
    that no worker reported a failure into the shared queue.
    """
    self.verify_result = self.input.param("verify_result", False)
    # Shadows the `queue` module with an instance from here on.
    # NOTE(review): this is a threading `queue.Queue` handed to
    # `multiprocessing.Process` workers — failures put() in a child process
    # would NOT appear here unless `Process` is actually a thread alias; confirm.
    queue = queue.Queue()
    number_of_times = (self.number_of_documents // self.concurrent_threads)
    process_list = []
    number_of_buckets = len(self.buckets)
    for x in range(self.concurrent_threads):
        # Build a deeply nested JSON document for each worker.
        base_json = self.generate_json_for_nesting()
        data_set = self.generate_nested(base_json, base_json, 2)
        json_document = self.generate_nested(base_json, data_set, 10)
        # Round-robin workers across buckets; prefix keeps key spaces disjoint.
        bucket_number = x % number_of_buckets
        prefix = self.buckets[bucket_number].name + "_" + str(x) + "_"
        p = Process(target=self.test_seq_mutations,
                    args=(queue, number_of_times, prefix, json_document,
                          self.buckets[bucket_number]))
        p.start()
        process_list.append(p)
    for p in process_list:
        p.join()
    if self.verify_result:
        filename = "/tmp/" + self.randomDataGenerator.random_uuid(
        ) + "_dump_failure.txt"
        queue_size = queue.qsize()
        if not queue.empty():
            # Persist failures for post-mortem before asserting.
            self._dump_data(filename, queue)
        self.assertTrue(
            queue_size == 0,
            "number of failures {0}, check file {1}".format(
                queue.qsize(), filename))
def grab(cam, queue, width, height, fps):
    """Capture frames, black out the top 87 rows, run the model pipeline,
    report per-frame FPS, and enqueue each frame (dropped when backlog >= 10).
    """
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    apply = Apply_Models()
    while running:
        start_time = time.time()
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        # Mask the top band (e.g. an overlay/timestamp region) before inference.
        img[:87, :] = 0
        img = apply.main(img)
        frame["img"] = img
        # Note: rebinds the `fps` parameter to the measured rate.
        fps = 1.0 / (time.time() - start_time)
        print("FPS : %.3f" % (fps))
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Fixed: Python 2 print statement -> Python 3 function call.
            print(queue.qsize())
def remove_client(self, clientID):
    """Stop the client's TX thread, drain and discard its RX/TX queues, and
    log the removal."""
    tx_thread = self._thread.pop(clientID)
    tx_thread.stop()
    # Drain both per-client queues so no stale messages linger.
    for pending in (self._rxqueue.pop(clientID), self._txqueue.pop(clientID)):
        while pending.qsize() > 0:
            pending.get_nowait()
    self._logger.info("Client-ID:{0}; removed; number of clients:{1}".format(clientID, self._clientcounter))
def run(self):
    """Consumer: once the shared queue has a backlog of more than five items,
    drain five of them, then report the size and pause."""
    global queue
    while True:
        if queue.qsize() > 5:
            for _ in range(5):
                item = queue.get()
                msg = self.name + " consumer " + item + " at " + time.ctime()
                print(msg)
        print(queue.qsize())
        time.sleep(3)
def print_progress_bar(queue):
    """Print Progress bar.

    Polls twice a second until `queue` drains (other workers are expected to be
    consuming it), rendering percentage progress against the initial size.
    """
    import sys
    # Fixed: `time` was used below without being imported here, while `sys`
    # was — a NameError if the enclosing module does not import time.
    import time
    num_total_task = queue.qsize()
    while not queue.empty():
        progress = (num_total_task - queue.qsize()) / num_total_task * 100
        sys.stdout.write('\r')
        # '>' repeated once per whole percentage point.
        sys.stdout.write("%.2f%% |[%s]" % (progress, int(progress) * '>'))
        sys.stdout.flush()
        time.sleep(0.5)
    sys.stdout.write('\n')
def grab(cam, queue, q_erosion, q_dilation, q_edge, width, height, fps):
    """Capture frames and publish the raw image plus eroded, dilated, and
    Canny-edge variants onto their respective queues (each capped at 10).

    NOTE(review): device index 0 is hard-coded; the `cam` parameter is unused —
    confirm whether that is intentional.
    """
    global running
    kernel = np.ones((10, 10), np.uint8)
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        success = False
        img = None
        try:
            success, img = capture.read()
        except Exception as e:
            print("Grab error")
            print(type(e))
            print(e.args)
            print(e)
        # Fixed: the filters previously ran unconditionally, crashing on an
        # undefined/None image whenever the read failed or raised.
        if success:
            img_erosion = cv2.erode(img, kernel, iterations=1)
            img_dilation = cv2.dilate(img, kernel, iterations=1)
            smoothimg = cv2.GaussianBlur(img, (5, 5), 0)
            edged = cv2.Canny(smoothimg, 70, 150)
            # Publish each variant; drop silently when a queue is backed up
            # (the original's `else: q.qsize()` branches were no-ops).
            for q, payload in ((queue, {"img": img}),
                               (q_erosion, {"img": img_erosion}),
                               (q_dilation, {"img": img_dilation}),
                               (q_edge, {"img": edged})):
                if q.qsize() < 10:
                    q.put(payload)
def grab(cam, queue, width, height, fps):
    """Relay frames from the pre-opened `videoObject` into `queue`, dropping
    frames while the backlog is 10 or more."""
    global running  # loop flag, toggled elsewhere
    while running:
        image = videoObject.read_next_video_frame()
        payload = {"img": image}
        if queue.qsize() < 10:
            queue.put(payload)
        else:
            print(queue.qsize())
def thread_stats():
    """Output stats regarding threads.

    Every ten seconds, logs the shared queue length at DEBUG level and emits a
    WARNING whenever it exceeds the threshold.
    """
    global queue
    queue_warning = 20
    logger('INFO', 'Statistics Thread started')
    while True:
        time.sleep(10)
        if queue.qsize() > queue_warning:
            logger('WARNING', ' Queue length=' + str(queue.qsize()) + " Warning size=" + str(queue_warning))
        logger('DEBUG', 'Queue length=' + str(queue.qsize()))
def run(self):
    """Producer: whenever the shared queue drops below 60 items, push a batch
    of 100 numbered products onto it."""
    global queue
    count = 0
    while True:
        # qsize() reports the current backlog.
        if queue.qsize() < 60:
            print(queue.qsize(), '123123')
            for _ in range(100):
                count = count + 1
                # put() appends one value to the shared queue.
                queue.put('生成产品' + str(count))
        time.sleep(0.5)
def grab(cam, queue, width, height, fps):
    """Capture frames, black out everything outside the configured ROI, run the
    tracker pipeline while `state` == 1, record to AVI, and enqueue frames.
    """
    global running, start_X, width_X, start_Y, height_Y, cap_X, cap_Y
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    # Publish the accepted capture dimensions to module scope.
    width_X = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height_Y = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap_X = width_X
    cap_Y = height_Y
    frame_size = (width_X, height_Y)
    codec = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter('./outputs/top_view_cut.avi', codec, fps, frame_size)
    apply = Apply_Models()
    reset = 1
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        # Zero the borders outside the ROI [start_Y:height_Y, start_X:width_X].
        img[:int(start_Y) + 1, :] = 0
        img[:, :int(start_X) + 1] = 0
        img[int(height_Y):, :] = 0
        img[:, int(width_X):] = 0
        # `state` is read from module scope — presumably toggled by the UI; confirm.
        if state == 1:
            if reset == 1:
                # Re-arm the tracker once per entry into state 1.
                apply.set_tracker()
                reset = 0
            img = apply.main(img)
        else:
            reset = 1
        out.write(img)
        frame["img"] = img
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Fixed: Python 2 print statement -> Python 3 function call.
            print(queue.qsize())
def asignarNevera(self, stack: LifoQueue, queue: queue.Queue):
    """Distribute coolers ("neveras") from `stack` across the warehouses
    ("almacenes") waiting in `queue`.

    Each warehouse takes up to `getCant()` coolers from the stack. Returns the
    list of warehouses, or None (after printing a notice) when there are fewer
    coolers than warehouses.

    Annotations fixed: previously `LifoQueue()` / `queue.Queue()` instances
    were created at definition time instead of naming the classes.
    """
    if (stack.qsize() < queue.qsize()):
        print(
            "No hay suficientes neveras para asignar a todos los almacenes"
        )
        return None
    lista_de_Almacenes = list()
    for i in range(queue.qsize()):
        almacen = queue.get()
        for j in range(almacen.getCant()):
            # Stop early if the coolers run out mid-assignment.
            if (stack.empty()):
                break
            almacen.getList().append(stack.get())
        lista_de_Almacenes.append(almacen)
    return lista_de_Almacenes
def run(self):
    """Producer: push batches of 100 numbered products while the queue is
    below 1000, stopping entirely once it exceeds 700."""
    global queue
    count = 0
    while True:
        # qsize() reports the current backlog.
        if queue.qsize() < 1000:
            for _ in range(100):
                count = count + 1
                msg = "生成产品" + str(count) + "##"
                # put() appends one value to the shared queue.
                queue.put(msg)
                print(msg)
        time.sleep(1)
        if queue.qsize() > 700:
            break
def run(self):
    """Consumer: repeatedly pull one element, acknowledge it, report the
    remaining size, and pause for a random sub-second interval."""
    while True:
        item = queue.get()
        queue.task_done()
        print("Consumer a elem {}. Now size is {}".format(
            item, queue.qsize()))
        time.sleep(random.random())
def parse_potential_recent_deaths(self, refresh):
    """Refresh death lists for characters seen online recently.

    Queues every non-'N'-vocation character who is online or was online within
    the last 20 minutes, then fans the lookups out over 10 worker threads.
    `refresh` is called after each character is updated (e.g. to redraw a UI).
    """
    from queue import Queue, Empty
    queue = Queue()
    for name, info in self.chars.items():
        # 1200 s = 20-minute "recently online" window.
        if (info.is_online() or time.time() - info.last_online() < 1200) and info.vocation != 'N':
            queue.put(name)
    task_count = queue.qsize()

    def get_info():
        # Worker: drain names until the queue is empty, updating deaths.
        while True:
            try:
                name = queue.get(block=False)
                tasks_left = queue.qsize()
            except Empty:
                return
            info = tibiacom.char_info(name)
            self.chars[name].deaths = info["deaths"]
            refresh()
            queue.task_done()
            print("pzlock update: %d/%d" % ((task_count - tasks_left), task_count))

    threads = []
    for i in range(10):
        thrd = threading.Thread(target=get_info)
        thrd.start()
        threads.append(thrd)
    # Wait for all tasks to be acknowledged, then for workers to exit.
    queue.join()
    for t in threads:
        t.join()
def run(self):
    """Producer: push a random integer below 100 onto the shared queue,
    report the new size, and pause for a random sub-second interval."""
    while True:
        value = random.randrange(100)
        queue.put(value)
        print("Producer a elem {}, Now size is {}".format(
            value, queue.qsize()))
        time.sleep(random.random())
def shortest_path(graph, start, goal):
    """Return path shortest from start to goal.

    Uniform-cost search: `graph` maps a point to its visible neighbours and
    `distance` (module-level) gives edge weights. Returns (total_distance,
    path); raises when no path exists; returns the string "Start = goal" when
    start equals goal.
    """
    # Fixed: `explored` was a list, making each membership test O(n); a set
    # gives O(1) lookups with identical behavior (points are hashable keys).
    explored = set()
    queue = PriorityQueue()
    root = (0, [start])
    queue.put(root)
    if (start == goal):
        return "Start = goal"
    while True:
        if queue.empty():
            raise Exception("No way Exception")
        current_distance, path = queue.get()
        current_point = path[-1]
        if current_point == goal:
            print('total {} path'.format(queue.qsize()))
            return current_distance, path
        if current_point not in explored:
            # Dead-end points missing from the graph are simply skipped.
            if current_point not in graph:
                continue
            list_point = graph[current_point]
            # Go through all points we can see, construct a new path and
            # push it into the queue.
            for point in list_point:
                d = distance(current_point, point)
                new_path = list(path)
                new_path.append(point)
                queue.put((current_distance + d, new_path))
            # Mark point as explored.
            explored.add(current_point)
    # Unreachable: the loop only exits via return or the exception above.
    return None, "There's no path between the 2 points"
def flush(self):
    """Forces a flush from the internal queue to the server."""
    pending = self.queue
    approx_count = pending.qsize()
    pending.join()
    # Note that this message may not be precise, because of threading.
    self.log.debug('successfully flushed about %s items.', approx_count)
def producer(queue):
    """Once a second, report the queue size and push a random widget without
    blocking (raises queue.Full if the queue is bounded and full)."""
    widgets = ('drum', 'stick', 'trombone', 'swordfish')
    while True:
        time.sleep(1)
        print("Queue size: %d" % queue.qsize())
        print("Put Widget")
        queue.put(random.choice(widgets), block=False)
def once_per_second():
    """Once a second, reset the shared counter (incremented elsewhere) and
    report the rate plus the shared queue's depth."""
    global count
    while True:
        count = 0
        time.sleep(1)
        # `count` was incremented by other threads during the sleep.
        print(str(count) + " hz")
        print("Queue length: " + str(queue.qsize()))
def func(c, queue, start, goal):
    """Breadth-first search over grid `c` ('#' = wall) from `start`.

    Returns the number of steps to reach `goal` (gy, gx), or -1 when the goal
    is unreachable. `queue` is the (empty) FIFO used as the BFS frontier.
    """
    goal_y, goal_x = goal
    queue.put(start)
    visited = [[-1] * len(c[0]) for _ in range(len(c))]
    depth = 0

    def try_enqueue(y, x, queue, visited):
        # Enqueue open, unseen cells; mark them so each is queued only once.
        if c[y][x] != "#" and visited[y][x] == -1:
            queue.put([y, x])
            visited[y][x] = 1
        return

    while True:
        level_size = queue.qsize()
        if level_size == 0:
            return -1
        # Process one BFS level per pass so `depth` equals the step count.
        for _ in range(level_size):
            [y, x] = queue.get()
            visited[y][x] = depth
            if y == goal_y and x == goal_x:
                return depth
            try_enqueue(y + 1, x, queue, visited)
            try_enqueue(y - 1, x, queue, visited)
            try_enqueue(y, x + 1, queue, visited)
            try_enqueue(y, x - 1, queue, visited)
        depth += 1
def search_grid(travelgrid, queue):
    """Flood-fill distances outward from the seed cells already in `queue`.

    Returns a grid_size x grid_size array where each reached cell holds its
    step distance from the nearest seed and 999 means unreached. `travelgrid`
    entries marked True are treated as already visited/blocked and are mutated
    in place as the fill progresses.

    NOTE(review): `grid_size` and `dir` (the neighbour offsets — shadows the
    builtin `dir`) are module globals; no bounds check is done, so seeds near
    the edge rely on the grid being padded — confirm.
    """
    distgrid = np.full([grid_size, grid_size], 999)
    traveldist = 0
    while queue.qsize() > 0:
        traveldist += 1
        # One BFS level per pass: every cell at the current distance.
        for _ in range(queue.qsize()):
            checkx, checky = queue.get()
            for dirx, diry in dir:
                # Skip visited cells and cells already waiting in the frontier.
                if travelgrid[checkx + dirx, checky + diry] == True or [
                        checkx + dirx, checky + diry
                ] in queue.queue:
                    continue
                travelgrid[checkx + dirx, checky + diry] = True
                distgrid[checkx + dirx, checky + diry] = traveldist
                queue.put([checkx + dirx, checky + diry])
    return distgrid
def get_html(queue, mysql, crawl_kw, crawl_num):
    """Crawl URLs from `queue`, storing keyword-matching pages until
    `crawl_num` pages have been collected."""
    # Collect every URL link found on the pages pulled from the queue.
    while not queue.empty():
        global RES_URL_LIST, CURRENT_URL_COUNT
        # Stop early once enough pages have been collected.
        if len(RES_URL_LIST) >= crawl_num:
            print(len(RES_URL_LIST))
            print("总数达到要求,提前结束")
            return
        url = queue.get()  # take the next URL off the queue
        CURRENT_URL_COUNT += 1
        print("队列中还有【%s】个URL,当前爬取的是第【%s】个页面,URL:【%s】" %
              (queue.qsize(), CURRENT_URL_COUNT, url))
        page_msg = get_page_msg(url)
        page_url_list = page_msg[0]
        page_title = page_msg[1]
        page_text = page_msg[2]
        # Only proceed when the title or body contains the keyword.
        if page_title.find(crawl_kw) != -1 or page_text.find(
                crawl_kw) != -1:
            if len(page_title) > 20 and len(page_text) > 300:
                # Skip URLs already stored, to avoid duplicate rows.
                if url not in RES_URL_LIST:
                    # NOTE(review): page content is interpolated straight into
                    # the SQL string — injection/breakage risk if it contains
                    # quotes; use the mysql API's parameterized form if available.
                    sql = 'INSERT INTO webpage(url,title,text) VALUES("%s","%s","%s")' % (
                        url, page_title, page_text)
                    mysql.insert(sql)
                    # Record the URL globally so it is never stored twice.
                    RES_URL_LIST.append(url)
                    print("关键字【%s】,目前已爬取到数据【%s】条,距离目标还差【%s】条,当前落库的URL为【%s】" %
                          (crawl_kw, len(RES_URL_LIST),
                           crawl_num - len(RES_URL_LIST), url))
        # Feed every link found on the page back into the queue.
        while page_url_list:
            url = page_url_list.pop()
            if url not in RES_URL_LIST:
                queue.put(url.strip())
    print("队列没有东西,退出了")
def binaryTreeToLists2(self, root):
    """Convert each level of a binary tree into a singly linked list.

    Returns a list whose i-th element is the head ListNode of level i,
    built via level-order traversal.
    """
    if root is None:
        return []
    import queue
    frontier = queue.Queue()
    frontier.put(root)
    collected = []
    dummy = ListNode(0)
    while not frontier.empty():
        tail = dummy
        # Snapshot the level size; children enqueued below belong to the next level.
        for _ in range(frontier.qsize()):
            node = frontier.get()
            tail.next = ListNode(node.val)
            tail = tail.next
            if node.left is not None:
                frontier.put(node.left)
            if node.right is not None:
                frontier.put(node.right)
        collected.append(dummy.next)
    return collected
def func(R, C, c, queue):
    """Check via BFS whether (R, C) is reachable from (1, 1) on the bordered
    grid `c` ('#' = wall, 1-indexed interior).

    Returns (number of open interior cells) - (steps taken) - 1 on success,
    or -1 when the goal cannot be reached.
    """
    start_y, start_x = 1, 1
    goal_y, goal_x = R, C
    # Score: full interior area minus one per wall cell.
    score = R * C
    for row in range(1, R + 1):
        for col in range(1, C + 1):
            if c[row][col] == "#":
                score -= 1
    queue.put([start_y, start_x])
    visited = [[-1] * len(c[0]) for _ in range(len(c))]
    depth = 0

    def try_enqueue(y, x, queue, visited):
        # Enqueue open, unseen cells; mark them so each is queued only once.
        if c[y][x] != "#" and visited[y][x] == -1:
            queue.put([y, x])
            visited[y][x] = 1
        return

    while True:
        level_size = queue.qsize()
        if level_size == 0:
            return -1
        # Process one BFS level per pass so `depth` equals the step count.
        for _ in range(level_size):
            [y, x] = queue.get()
            visited[y][x] = depth
            if y == goal_y and x == goal_x:
                return score - depth - 1
            try_enqueue(y + 1, x, queue, visited)
            try_enqueue(y - 1, x, queue, visited)
            try_enqueue(y, x + 1, queue, visited)
            try_enqueue(y, x - 1, queue, visited)
        depth += 1
def submit_results(queue, submit_url):
    """Forever drain (crash_id, result) pairs from `queue`, POSTing each result
    as JSON to `submit_url % crash_id` over a single shared session."""
    session = requests.Session()
    while True:
        crash_id, result = queue.get()
        logger.debug('%d results waiting', queue.qsize())
        logger.debug('submitting %d', crash_id)
        logger.info(result)
        response = session.post(submit_url % crash_id,
                                data=json.dumps(result),
                                headers={'content-type': 'application/json'})
        _ = response.content
def run(self):
    """Consumer: once the shared queue backs up past 100 items, consume three
    of them, then pause one second."""
    global queue
    while True:
        if queue.qsize() > 100:
            for _ in range(3):
                # get() removes and returns one value from the queue.
                msg = self.name + '消费了 ' + queue.get()
                print(msg)
        time.sleep(1)
def queuer_feed_from_rss(url):
    """Parse the RSS feed at `url` and enqueue every entry as a Feed.

    Busy-waits while the shared queue is at `buffer_size`, then publishes the
    entry under `myLock` (with a small random delay) and logs it.

    Fixed: the "queue has room" and "queue full" branches duplicated the
    entire produce block; collapsed into a single wait-then-put path.
    """
    fp = feedparser.parse(url)
    for post in fp.entries:
        # Spin until a slot frees up (consumers drain the queue elsewhere).
        while queue.qsize() >= buffer_size:
            pass
        myLock.acquire()
        time.sleep(randint(0, 3))
        queue.put(Feed(post.title, post.published, post.summary_detail.value, post.link, url))
        myLock.release()
        print('-> Produced: ', url.split("rss/")[1], '--', str(post.title), ' -- size = ', queue.qsize())
def run(self):
    """Consumer: drain up to three items per pass once the queue tops 100.

    NOTE(review): structure reconstructed from collapsed source. As written,
    the trailing `return` after the first sleep makes the while loop run only
    once — likely a mis-indentation in the original; confirm intent.
    """
    global queue
    while True:
        for i in range(3):
            if queue.qsize() < 100:
                # Not enough backlog yet: skip consuming this slot.
                pass
            else:
                msg = self.name + 'consumer ' + queue.get()
                print(msg)
        time.sleep(1)
        return
def three_more_feeds(request):
    """Return the next three feeds from the shared queue as a JSON response.

    Blocks until three feeds are available; each feed's __dict__ is serialized.
    """
    # Sending 3 more feeds
    data = []
    while len(data) < 3:
        try:
            feed = queue.get()
            data.append(feed.__dict__)
            print('<- Consumed', ' -- ', feed.title, ' -- size = ', queue.qsize())
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            print("there aren't 3 elements yet")
    return HttpResponse(json.dumps(data), content_type="application/json; charset=utf-8;")
def get_info():
    """Worker: drain character names from the shared queue, updating deaths.

    NOTE(review): references `queue`, `task_count`, `self`, `refresh`,
    `tibiacom`, and `Empty` from an enclosing scope — this looks like a closure
    extracted from parse_potential_recent_deaths and will not run standalone.
    """
    while True:
        try:
            # Non-blocking get: an empty queue means all work is claimed.
            name = queue.get(block=False)
            tasks_left = queue.qsize()
        except Empty:
            return
        info = tibiacom.char_info(name)
        self.chars[name].deaths = info["deaths"]
        refresh()
        queue.task_done()
        print("pzlock update: %d/%d" % ((task_count - tasks_left), task_count))
def processIncoming(self):
    """
    Handle all the messages currently in the queue (if any).
    """
    # Fixed: `queue` here is a Queue *instance* (it exposes qsize()), so the
    # original `except queue.Empty:` raised AttributeError instead of catching
    # the empty-queue exception; import the exception from the queue module.
    from queue import Empty
    while queue.qsize():
        try:
            msg = queue.get(0)  # non-blocking get
            # Check contents of message and do what it says
            # As a test, we simply print it
            self.editor.insertPlainText(str(msg))
        except Empty:
            pass
def feed_queue(dest_dir, queue, queue_url, campaign_id):
    """Poll the analysis endpoint forever, downloading each crash artifact into
    `dest_dir` and enqueueing (crash_id, local_path) pairs for workers."""
    session = requests.Session()
    while True:
        logger.debug('fetching %s', queue_url)
        for crash in session.get(queue_url).json()['crashes']:
            crash_name = str(crash['crash_id'])
            logger.info('downloading %s', crash_name)
            local_filename = os.path.join(dest_dir, crash_name)
            urllib_request.urlretrieve(crash['download'], filename=local_filename)
            logger.debug('%d crashes waiting', queue.qsize())
            queue.put((crash['crash_id'], local_filename))
def run(self):
    """Producer: refill the shared queue with batches of 100 numbered products
    whenever it is below 1000 items."""
    global queue
    produced = 0
    while True:
        # qsize() reports the current backlog.
        if queue.qsize() < 1000:
            for _ in range(100):
                produced = produced + 1
                msg = '生成产品' + str(produced)
                # put() appends one value to the shared queue.
                queue.put(msg)
                print(msg)
        time.sleep(0.5)
def grab(cam, queue, width, height, fps):
    """Capture frames, attach the shared config flags and a blur metric to
    each, and enqueue them (dropped while the backlog is 10 or more)."""
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        capture.grab()
        ok, image = capture.retrieve(0)
        payload = {
            "img": image,
            "1": config["1"],
            "2": config["2"],
            "blur": get_blur(image, 0.05),
        }
        if queue.qsize() < 10:
            queue.put(payload)
        else:
            print(queue.qsize())
def run(self):
    """Producer: push up to 100 numbered products per pass while the queue is
    at or below 1000 items.

    NOTE(review): structure reconstructed from collapsed source. As written,
    the trailing `return` after the first sleep makes the while loop run only
    once — likely a mis-indentation in the original; confirm intent.
    """
    global queue
    count = 0
    while True:
        for i in range(100):
            if queue.qsize() > 1000:
                # Queue full: skip producing this slot.
                pass
            else:
                count = count + 1
                msg = 'Production of the product ' + str(count)
                print(msg)
                queue.put(msg)
        time.sleep(1)
        return
def checkqueue():
    """Drain the shared progress queue and update the write-progress widgets.

    Percentage is derived from the module-level `cells_written` / `total_cells`
    counters maintained by the writer thread.
    """
    # Fixed: `queue.empty` is the Queue instance's *method*, so the original
    # `except queue.empty:` raised "catching classes that do not inherit from
    # BaseException" (TypeError) whenever an exception occurred; catch the
    # real exception class from the queue module instead.
    from queue import Empty
    while queue.qsize():
        try:
            msg = queue.get(0)  # non-blocking get
            # Snapshot the percentage once so all three outputs agree even if
            # the writer thread advances cells_written mid-update.
            pct = int(((cells_written - 1) / total_cells) * 100)
            print("%complete.." + str(pct))
            progressbar_write['value'] = pct
            progress_txt.set(str(pct) + "% complete")
        except Empty:
            print('Queue is empty..')
def run(self):
    """Worker: pull file names from the shared queue and import each file's
    contents, logging progress under the shared stdout lock."""
    # Fixed: `queue` here is a Queue *instance* (it exposes .empty()/.qsize()),
    # so the original `except queue.Empty:` raised AttributeError; import the
    # exception class from the queue module instead.
    from queue import Empty
    while not queue.empty():
        try:
            name = queue.get(False)  # non-blocking get
        except Empty:
            break
        with stdout:
            global done
            print(self.getName(), name, done, queue.qsize())
            done += 1
        with open(name) as f:
            import_from_file(f, dijkstra_lock, mysql_lock)
def receivemessage():
    """Drain the shared message queue, collect the messages sent by the posting
    user, and render them (or a "no messages" notice).

    Fixed: missing ':' on the render loop's `for`, missing ',' before the
    render_template keyword argument, and `htmlmessage` was read before it was
    ever assigned. The unused counter `n` was dropped.
    """
    html = '<p>no messages!</p>'
    username = request.form["username"]
    # NOTE(review): read but never checked — was authentication intended?
    password = request.form["password"]
    for i in range(queue.qsize()):
        message = queue.get()
        # message[2] is assumed to be the sender's username — confirm layout.
        if message[2] == username:
            sentby.append(username)
            text.append(message)
        queue.task_done()
    htmlmessage = ''
    for x in range(0, len(sentby)):
        htmlmessage += '<div>' + sentby[x] + '</div><div>' + text[x] + '</div>'
    if not htmlmessage:
        # Nothing matched: fall back to the notice.
        htmlmessage = html
    return render_template("message.html", html=htmlmessage)
def doJob(*args):
    """Worker loop: args = (queue, thread_name, waiting_slot_index).

    Pulls jobs from `queue`; when every worker is idle and no refresh is in
    flight, runs the database-update cycle and re-queues work.
    """
    global queUpdaterWorking
    global waiting
    queue = args[0]
    thdName = args[1]
    waitingID = args[2]
    while True:
        if queue.qsize() == 0:
            # Mark this worker idle; the last idle worker triggers the refresh.
            waiting[waitingID] = True
            if (False not in waiting) and (not queUpdaterWorking):
                queUpdaterWorking = True
                global circleStartTime
                global queStartTime
                print('[Queueing] Done que! Spending time = {0}!'.format(datetime.datetime.now() - queStartTime))
                dbUpdater()
                print('[ALL] Done all! Spending time = {0}!'.format(datetime.datetime.now() - circleStartTime))
                queUpdater()
                queUpdaterWorking = False
            else:
                time.sleep(5)
        if queue.qsize() > 0:
            # Fixed: `que.get()` referenced an undefined name; the parameter
            # bound above is `queue`.
            job = queue.get()
            job.do(thdName)
            waiting[waitingID] = False
def flush(self):
    """Forces a flush from the internal queue to the server."""
    pending = self.queue
    approx_count = pending.qsize()
    pending.join()
    # The count is approximate: other threads may enqueue while we wait.
    self.log.debug('successfully flushed {0} items.'.format(approx_count))