def worker(req, proxy_port, verbose):
    global progress
    global progressbar
    while True:
        item = q.get()
        if item is None:
            break
        progressbar.update(queue_max_size - q.qsize())
        # function to be filled
        do_work(req, item)
        # send the request
        if proxy_port:
            response = req.send_request(proxy_port)
        else:
            response = req.send_request(0)
        # function to be filled
        process_response(response)
        if verbose:
            req.display_state()
            print("Response:", response.read())
        q.task_done()
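# A minimal sketch of how this worker might be driven, assuming the
# module-level names it relies on (q, queue_max_size, progressbar, do_work,
# process_response) exist as the function implies; the thread count and the
# None sentinel used for shutdown are illustrative assumptions, not the
# original setup.
import queue
import threading

queue_max_size = 100
q = queue.Queue(maxsize=queue_max_size)

def start_workers(req, proxy_port, verbose, num_threads=4):
    threads = [threading.Thread(target=worker, args=(req, proxy_port, verbose))
               for _ in range(num_threads)]
    for t in threads:
        t.start()
    return threads

def stop_workers(threads):
    # One None per worker: each thread's q.get() returns the sentinel,
    # breaks out of its loop, and the thread can be joined.
    for _ in threads:
        q.put(None)
    for t in threads:
        t.join()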
def background_thread_check():
    global checker_thread, dic_min_error_rev, thread_lock
    try:
        dic_min_error_rev = main.do_auto_check(dic_min_error_rev)
        with open("./dic_min_error_rev", "w") as f:
            f.write(json.dumps(dic_min_error_rev))
    except Exception:
        # Get the current system exception
        ex_type, ex_value, ex_traceback = sys.exc_info()
        # Extract the unformatted stack trace as tuples
        trace_back = traceback.extract_tb(ex_traceback)
        # Format the stack trace
        stack_trace = []
        for trace in trace_back:
            stack_trace.append("File : %s , Line : %d, Func.Name : %s, Message : %s"
                               % (trace[0], trace[1], trace[2], trace[3]))
        printer.errprint("Exception type : %s " % ex_type.__name__)
        printer.errprint("Exception message : %s" % ex_value)
        printer.errprint("Stack trace : %s" % stack_trace)
        printer.errprint("Check ended unexpectedly")
        progressbar.update(0, 0)
    with thread_lock:
        print("Thread finished")
        checker_thread = None
        socketio.emit('checker_state', 0)
def convert_excel_to_dict(wb, sheet_name, progressbar=None):
    """
    :param wb: class:`openpyxl.workbook.Workbook`
    :param sheet_name: name of the worksheet to read
    :return: list of dict, one per data row
    """
    header = []
    kpi_sum = []
    try:
        sheet_ranges = wb[sheet_name]
        for index, row in enumerate(sheet_ranges.iter_rows(row_offset=0)):
            if progressbar is not None:
                progressbar.update(index)
            sum_element = {'INDEX': index}
            for i in range(len(row)):
                if index == 0:
                    # header
                    if row[i].value is not None:
                        header.append(row[i].value.strip().upper())
                else:
                    # add expected data to kpi sum dictionary
                    if i < len(header):
                        sum_element[header[i]] = row[i].value
            if index > 0:
                kpi_sum.append(sum_element)
    except KeyError as keyerror:
        print(str(keyerror))
    return kpi_sum
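# A minimal usage sketch for the converter above, assuming an .xlsx file
# whose first row is the header; the file name, sheet name, and column
# names shown are placeholders.
from openpyxl import load_workbook

wb = load_workbook("kpi_report.xlsx", read_only=True)
rows = convert_excel_to_dict(wb, "Sheet1")
# Each entry maps the upper-cased header names to that row's cell values,
# e.g. {'INDEX': 1, 'KPI_NAME': 'latency', 'VALUE': 42}.
print(rows[0])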
def on_stop_check():
    global checker_thread, thread_lock
    with thread_lock:
        if checker_thread is not None:
            printer.errprint('The client stopped the self-check\n')
            stop_thread(checker_thread)
            checker_thread = None
            socketio.emit('checker_state', 0)
            progressbar.update(0, 0)
        else:
            printer.errprint('No self-check is running in the background\n')
def downloadVideos(self, downloadUrl, pathToStoreTheFile):
    """Download all the videos and save them to the path provided."""
    requiredATag = []
    self.logger.info("In downloadVideos() function of class NPTEL Video Downloader.........")
    soup = self.getSoup(downloadUrl)
    self.logger.info("=======>Getting soup for the given downloadUrl link was successful")
    try:
        rows = soup.find_all("a")
        for requiredRow in rows:
            if "mp4" in str(requiredRow):
                requiredATag.append(requiredRow)
        self.logger.info("=======>Number of Files to Download: " + str(len(requiredATag)))
        print("Number of Files to Download: " + str(len(requiredATag)))
        pbar = tqdm(total=len(requiredATag))
    except Exception:
        self.logger.error("=======>Error in retrieving download urls of video files")
        return None
    self.logger.info("=======>Started Downloading Videos...")
    for i in range(1, len(requiredATag) + 1):
        fileName = None
        try:
            requiredDownloadUrl = requiredATag[i - 1].get("href")
            fileName = requiredDownloadUrl.split("=")[-1]
            requiredDownloadUrl = "https://nptel.ac.in" + requiredDownloadUrl
            self.logger.info("=======>Downloading video number " + str(i) + " [" + fileName + "]")
            r = req.get(requiredDownloadUrl, stream=True)
            if pathToStoreTheFile[-1] != '/':
                pathToStoreTheFile += '/'
            fileName = str(i) + " " + fileName
            with open(pathToStoreTheFile + fileName + ".mp4", 'wb') as f:
                # Stream the body in 1 MiB chunks so large videos are never
                # held in memory at once.
                for chunk in r.iter_content(chunk_size=1024 * 1024):
                    if chunk:
                        f.write(chunk)
            pbar.update(1)
            self.logger.info("=======>Successfully downloaded video number " + str(i) + " [" + fileName + "]")
        except Exception:
            self.logger.error("=======>Error in Downloading video number " + str(i) + " [" + fileName + "]")
            return None
    self.logger.info("All videos downloaded successfully to the path: " + pathToStoreTheFile)
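# A hypothetical invocation of the downloader above; the class name
# NPTELVideoDownloader, the course URL, and the output path are assumptions
# for illustration and do not come from the original source.
downloader = NPTELVideoDownloader()
downloader.downloadVideos("https://nptel.ac.in/courses/<course-id>", "./videos")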
def main():
    args = handle_args()
    input_path = args.i[0]
    output_path = args.o[0]
    rotation = args.r[0]
    output_width = int(args.c[0].split('x')[0])
    output_height = int(args.c[0].split('x')[1])
    frame_rate = args.f[0]
    frame_skip_rate = args.k[0]
    recognize_scale = 0.2
    predictor = dlib.shape_predictor('../shapes/shape_predictor_68_face_landmarks.dat')
    create_output_dir(output_path)
    capture, video, face_cascade = setupOpenCV(input_path, output_path, frame_rate,
                                               output_width, output_height)
    initGL(output_width, output_height)
    total_frames = toolbox.getTotalFrames(input_path)
    frame_count = 0
    progressbar.init()
    last_params = None
    while True:
        success, frame = capture.read()
        if success and frame is not None:
            frame, points, last_params = processFrame(frame, rotation, frame_skip_rate,
                                                      face_cascade, predictor, recognize_scale,
                                                      output_width, output_height, last_params)
            if frame is not None:
                if points is not None:
                    frame = getGLFrame(frame, points, output_width, output_height)
                video.write(frame)
                frame_count += 1
                progress = frame_count / total_frames
                progressbar.update(progress)
                if progress >= 0.5:
                    break
            else:
                break
        else:
            break
    quit(capture, video)
def on_connect():
    global cur_progress, total_progress, dic_min_error_rev, checker_thread, thread_lock
    print('Client connected\n')
    for log in message_logs:
        socketio.emit('server_log', log)
    checker_state = 0
    with thread_lock:
        if checker_thread is not None:
            checker_state = 1
    checker_list = main.get_checker_name_list()
    progressbar.update(cur_progress, total_progress)
    data = {
        'checker_list': checker_list,
        'checker_state': checker_state,
        'dic_err_revs': dic_min_error_rev
    }
    emit('ack_init_data', data)
def loop(capture, video):
    total_frames = toolbox.getTotalFrames(sys.argv[1])
    frame_count = 0
    last_eye_left = [0, 0, 0, 0]
    last_eye_right = [0, 0, 0, 0]
    while True:
        _, frame = capture.read()
        if frame is None:
            return
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        for (x, y, w, h) in faces:
            half = int(w / 2)
            paddingTop = int(h * 0.2)
            paddingBottom = int(h * 0.45)
            if debug:
                color = (0, 255, 0)
                top = y + paddingTop
                bottom = y + h - paddingBottom
                cv2.rectangle(frame, (x, top), (x + half, bottom), color, 2)
                cv2.rectangle(frame, (x + half, top), (x + w, bottom), color, 2)

            def processEye(eye, ex, ey, ew, eh):
                if debug:
                    cv2.rectangle(frame, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)
                eye_copy = cv2.resize(eye, (ew, eh))
                # Slice rows by height and columns by width so the ROI
                # matches the resized eye image.
                roi_eye = frame[ey:ey + eh, ex:ex + ew]
                overlayImage(roi_eye, eye_copy)

            def processListOfEyes(eyes, offset, last_eye):
                for (ex, ey, ew, eh) in eyes:
                    ex, ey = toolbox.getBetterPosition((ex, ey, ew, eh), last_eye)
                    ew, eh = int(w * 0.25), int(w * 0.25)
                    processEye(eye, x + ex + offset, y + ey + paddingTop, ew, eh)
                    last_eye[:] = []
                    last_eye.extend((ex, ey, ew, eh))

            # Define 2 areas for the left and right part of the face
            face_gray_left = gray[y + paddingTop:y + h - paddingBottom, x:x + half]
            face_gray_right = gray[y + paddingTop:y + h - paddingBottom, x + half:x + w]
            # Get 2 lists of detected eyes
            eyes_left = eye_cascade.detectMultiScale(face_gray_left, 1.02, 5,
                                                     # minSize=(int(w / 8), int(h / 8)),
                                                     # maxSize=(int(w / 3), int(h / 3))
                                                     )
            eyes_right = eye_cascade.detectMultiScale(face_gray_right, 1.02, 5,
                                                      # minSize=(int(w / 8), int(h / 8)),
                                                      # maxSize=(int(w / 3), int(h / 3))
                                                      )
            # Draw eyes from list of detected eyes
            processListOfEyes(eyes_left, 0, last_eye_left)
            processListOfEyes(eyes_right, half, last_eye_right)
            # If no eye was detected, draw the eye from the previous frame
            if last_eye_left is not None and len(eyes_left) == 0:
                ex, ey, ew, eh = last_eye_left
                processEye(eye, x + ex, y + ey + paddingTop, ew, eh)
            if last_eye_right is not None and len(eyes_right) == 0:
                ex, ey, ew, eh = last_eye_right
                processEye(eye, x + ex + half, y + ey + paddingTop, ew, eh)
        # Write the frame to the output video file
        video.write(frame)
        # Update the progress bar
        frame_count += 1
        progress = frame_count / total_frames
        progressbar.update(progress)
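# overlayImage is referenced above but not defined in this snippet. A
# plausible minimal sketch, assuming the global eye image is loaded as BGRA
# (e.g. cv2.imread(path, cv2.IMREAD_UNCHANGED)) so its alpha channel can
# drive the blend; this is an assumption, not necessarily the original helper.
import numpy as np

def overlayImage(background, overlay):
    # Normalise the overlay's alpha channel to the 0..1 range.
    alpha = overlay[:, :, 3:4].astype(float) / 255.0
    # Alpha-blend the overlay's BGR channels onto the background in place;
    # background is a view into the frame, so the frame is updated too.
    background[:] = (alpha * overlay[:, :, :3] +
                     (1.0 - alpha) * background).astype(np.uint8)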
def serialize(self, serializer):
    pass


if __name__ == "__main__":
    import progressbar
    import time

    window = 8
    batch_size = 10000
    sequence_path = "/home/ubuntu/data/word2vec/small/jawiki-wakati-index-sequence.txt"
    total_size_path = "/home/ubuntu/data/word2vec/small/total_size.pkl"
    witerator = WindowIterator(window, batch_size,
                               index_sequence_file_path=sequence_path,
                               total_size_path=total_size_path,
                               repeat=True)
    print("witerator.total_size:{}".format(witerator.total_size))
    upper_count = 1
    count_scale = 100
    # progressbar2's ProgressBar takes max_value for the upper bound.
    progressbar = progressbar.ProgressBar(max_value=int(count_scale * upper_count))
    indices = [index for index in range(-window, window + 1) if index != 0]
    for center, context in witerator:
        progressbar.update(int(count_scale * witerator.epoch_detail))
        if upper_count == witerator.epoch_detail:
            break
widgets = [
    pbar.Percentage(), ' ',
    pbar.Counter(), '/', str(n), ' ',
    Display(), ' ',
    pbar.Bar(marker='-'), ' ',
    pbar.AdaptiveETA(), ' ',
]
pbar = pbar.ProgressBar(widgets=widgets, maxval=n).start()

# go johnny go, go!
for i, it in enumerate(sets):
    pbar.update(i)
    size, storage, complexity, codec, level = it
    number, repeat = number_repeats[size]
    codec = codecs[codec]
    codec.configure(complexity_types[complexity](dataset_sizes[size]),
                    storage_types[storage], level)
    results['compress'][i] = reduce(vtimeit(codec.compress,
                                            setup=codec.compress,
                                            before=codec.clean,
                                            after=sync,
                                            number=number,
                                            repeat=repeat))
    results['ratio'][i] = codec.ratio()
    codec.deconfigure()
    results['decompress'][i] = reduce(vtimeit(codec.decompress,
                                              setup=codec.decompress,
                                              number=number,
                                              repeat=repeat))
    else:
        raise RuntimeError("No such size: '%s'" % size)
    codec = codecs[codec]
    codec.configure(entropy_types[entropy](dataset_sizes[size]),
                    storage_types[storage], level)
    results['compress'][i] = reduce(vtimeit(codec.compress,
                                            setup=codec.compress,
                                            before=codec.clean,
                                            after=sync,
                                            number=number,
                                            repeat=repeat))
    results['ratio'][i] = codec.ratio()
    codec.deconfigure()
    results['decompress'][i] = reduce(vtimeit(codec.decompress,
                                              setup=codec.decompress,
                                              number=number,
                                              repeat=repeat))
    results['dc_no_cache'][i] = reduce(vtimeit(codec.decompress,
                                               before=drop_caches,
                                               number=number,
                                               repeat=repeat))
    codec.clean()
    pbar.update(i)
pbar.finish()
success = True
result_csv = result_file_name + '.csv'
results.to_csv(result_csv)
print('results saved to: ' + result_csv)