async def _get(url, JSON, original=False, once=False):
    start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    print("start " + start_time)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            req = await response.text()
            get_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
            print("req " + get_time)
            eel.info(
                "Request received",
                "Sent at: " + start_time + "\nReceived at: " + get_time,
                "console",
            )
    if JSON:
        try:
            req = json.loads(req)
        except ValueError:
            print("error! req is not JSON\n")
    if not once:
        if original is not False:
            data.append({"req": req, "org": original})
        else:
            data.append({"req": req})
    else:  # once: return the result instead of appending to the shared list
        if original is not False:
            return {"req": req, "org": original}
        return req
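# --- Hypothetical driver for _get() (not part of the original excerpt) --------
# _get() is a coroutine, so something has to run it on an event loop. This is
# only a minimal sketch, assuming the module-level `data` list that _get()
# appends to already exists; the URLs below are placeholders.
import asyncio

async def _fetch_all(urls):
    # Fire the requests concurrently; each call appends its result to `data`.
    await asyncio.gather(*(_get(url, JSON=True) for url in urls))

def fetch_all(urls):
    asyncio.run(_fetch_all(urls))  # creates and tears down an event loop

# fetch_all(["https://example.com/a.json", "https://example.com/b.json"])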
def handleinput(name):
    # name = input("Enter the classifier name\n")
    path = "images/" + name
    if not os.path.isdir('images'):
        os.mkdir('images')
    try:
        os.mkdir(path)
        print("Folder created for:", name, "\n")
    except FileExistsError:
        pass

    # Record from the default webcam until the user presses Q.
    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
    eel.info("Press Q to stop taking pictures")
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = cv2.flip(frame, 1)
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    # Split the recorded video into individual JPEG frames for training.
    vidcap = cv2.VideoCapture('output.avi')
    count = 0
    success = True
    while success:
        success, image = vidcap.read()
        if success:
            cv2.imwrite('images/' + name + '/frame%d.jpg' % count, image)  # save frame as JPEG file
            print('Read a new frame:', success)
            count += 1
    vidcap.release()
    eel.info("Successfully saved for " + name)
def detect_faces():
    print('detection started')
    eel.info("Press ESC to stop")
    size = 4
    webcam = cv2.VideoCapture(0)
    while True:
        rval, im = webcam.read()
        im = cv2.flip(im, 1, 0)
        # Downscaled copy (unused below, kept from the original)
        mini = cv2.resize(im, (int(im.shape[1] / size), int(im.shape[0] / size)))
        # Classify the current frame via the retrained graph
        cv2.imwrite('testimage.jpg', im)
        testimage = "testimage.jpg"
        text = label_image.main(testimage)
        text = text.title()
        font = cv2.FONT_HERSHEY_TRIPLEX
        cv2.putText(im, text, (100, 100), font, 2, (0, 255, 0), 2)
        cv2.imshow('Capture', im)
        key = cv2.waitKey(10)
        if key == 27:  # ESC
            break
    webcam.release()
    cv2.destroyAllWindows()
def train_images():
    eel.info("Sit back and wait.....")
    eel.mSpinner()
    # os.system() does not raise on failure, so check its return code instead
    # of relying on an exception to detect a failed retrain.
    ret = os.system(
        'python3 retrain.py --output_graph=retrained_graph.pb '
        '--output_labels=retrained_labels.txt '
        '--architecture=MobileNet_1.0_224 --image_dir=images'
    )
    if ret == 0:
        eel.info("Training is completed")
        eel.mSpinner()
        eel.mAddTick()
    else:
        eel.info("Connect to the internet..")
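# --- Hypothetical Eel wiring (not part of the original excerpt) ---------------
# handleinput(), detect_faces() and train_images() all report progress through
# eel.info()/eel.mSpinner(), which suggests they are driven from a browser
# front end via Eel. This is only a sketch of that wiring using Eel's public
# API (eel.init / eel.expose / eel.start); the 'web' folder and 'index.html'
# entry page are assumed names, not taken from this project.
import eel

eel.init('web')              # folder holding the HTML/JS front end (assumed name)
eel.expose(handleinput)      # callable from JS as eel.handleinput(name)
eel.expose(detect_faces)
eel.expose(train_images)

if __name__ == '__main__':
    eel.start('index.html')  # assumed entry page; blocks until the window closes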
for name, file in mods.items():
    globals()[name] = file
loaded_time = time.process_time()
# ================================================
_app_is_done = True
main.start()
info("all done")
load_time = str((done_time - start_time) * 1000)
all_done_time = str((loaded_time - start_time) * 1000)
eel.sleep(3)
eel.info(
    "Debug info",
    "Startup time: " + str(load_time) + "ms\nTotal load time: " +
    str(all_done_time) + "ms\nPort in use: " + str(ge["port"]),
    "console")

while True:  # run the maintenance pass on a fixed interval
    eel.sleep(15)
    # Rebuild the user-config index when the folder contents change
    user_path = path + "data/user_config/"
    config_users_list = os.listdir(user_path)
    app_users_list = list(main.app_setting("user_config_index").values())
    if config_users_list != app_users_list:
        print("set user config index")
        index_file = {}
        for account in config_users_list:
def login(steamid, lock=False, _app=False, force=False):
    eel.info("Using simulated login",
             "steamid: " + steamid + "\nForce mode: " + ("yes" if force else "no"),
             "console")
    acc = call_info(steamid)
    if force == False and (steamid in get_client_users()):
        eel.info("Mode switch", "Cached credentials detected\nSwitching to cached login", "console")
        auto_login(steamid, acc["name"], lock)
        return
    if acc == False:
        eel.info("No account", "", "error")
        if lock != False:
            lock.release()
        return "no_account"

    # Read the Steam executable path from the registry
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"SOFTWARE\Valve\Steam", 0,
                         winreg.KEY_QUERY_VALUE)
    exe, t = winreg.QueryValueEx(key, "SteamExe")
    winreg.CloseKey(key)
    si = subprocess.STARTUPINFO()
    si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if _app == False:  # no existing app handle was passed in
        eel.info("Closing Steam", "", "console")
        close_steam(exe, si)

    # Escape characters that are special to keyboard.send_keys
    password = str.replace(acc["password"], "{", "{{}")
    password = str.replace(password, " ", "{SPACE}")
    password = str.replace(password, "+", "{+}")
    password = str.replace(password, "^", "{^}")
    password = str.replace(password, "%", "{%}")

    # Set RememberPassword and clear AutoLoginUser
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"SOFTWARE\Valve\Steam", 0,
                         winreg.KEY_SET_VALUE)
    winreg.SetValueEx(key, "RememberPassword", 0, winreg.REG_DWORD, 0x00000001)
    winreg.SetValueEx(key, "AutoLoginUser", 0, winreg.REG_SZ, "")
    winreg.CloseKey(key)

    # Launch Steam (or reuse the handle we were given) and wait for the login window
    if _app == False:
        app = APP().start(exe)
    else:
        app = _app
    login_gui = app.window(title_re='Steam [^Guard].*',
                           class_name='vguiPopupWindow')
    try:
        login_gui.wait("ready", 30)
    except timings.TimeoutError:
        eel.info("Wait timed out", "", "console")
        del app
        if lock != False:
            lock.release()
        return "error"

    eel.info("Auto login", "Login page is ready\nStarting automated input", "console")
    login_gui.set_focus()
    sleep(.1)
    if _app == False:
        eel.info("Auto-typing username [no name pre-filled]", "", "console")
        keyboard.send_keys(acc["name"] + """{TAB}""")
    eel.info("Auto-typing password", "", "console")
    keyboard.send_keys(password)
    keyboard.send_keys("""{TAB}{ENTER}""")

    # Steam Guard
    if acc["se"] == False:
        eel.info("No Guard", "Skipping the Guard login page", "console")
    else:
        eel.info("Waiting for Guard", "Guard is attached\nAuto-entering the Guard code", "console")
        sa = guard.SteamAuthenticator(acc["se"])
        guard_gui = app.window(title_re='Steam Guard - .*',
                               class_name='vguiPopupWindow')
        guard_gui.wait("ready")
        guard_gui.set_focus()
        sleep(.1)
        code = sa.get_code()
        keyboard.send_keys(code + """{ENTER}""")

    eel.info("Waiting for the login window to close", "", "console")
    login_gui.wait_not("visible", 60000)
    eel.info("Login complete", "", "console")
    del app
    if lock != False:
        lock.release()
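# --- Hypothetical single-pass send_keys escaping (not the project's code) -----
# The chained str.replace calls in login() escape "{", "+", "^", "%" and
# spaces, but a literal "}" (or "~", "(", ")") in a password would still
# confuse pywinauto's keyboard.send_keys. A single-pass escape handles every
# special character and cannot re-escape its own output:
import re

def escape_for_send_keys(text):
    def _escape(match):
        ch = match.group()
        return "{SPACE}" if ch == " " else "{" + ch + "}"
    return re.sub(r"[+^%~(){} ]", _escape, text)

# escape_for_send_keys("p{a}ss wd+") == "p{{}a{}}ss{SPACE}wd{+}"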
def auto_login(steamid, name, lock):
    eel.info("Using cached login", "Username: " + name + "\nsteamid: " + steamid,
             "console")

    # Point Steam's auto-login at the cached account
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"SOFTWARE\Valve\Steam", 0,
                         winreg.KEY_ALL_ACCESS)
    winreg.SetValueEx(key, "AutoLoginUser", 0, winreg.REG_SZ, name)
    winreg.SetValueEx(key, "RememberPassword", 0, winreg.REG_DWORD, 0x00000001)
    exe, t = winreg.QueryValueEx(key, "SteamExe")
    winreg.CloseKey(key)

    si = subprocess.STARTUPINFO()
    si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    eel.info("Closing Steam", "", "console")
    close_steam(exe, si)
    eel.info("Starting Steam", "", "console")
    app = APP().start(exe)
    login_gui = app.window(title_re='Steam [^Guard].*',
                           class_name='vguiPopupWindow')

    eel.info("Waiting for the main page", "", "console")
    wait_time = int(app_setting("wait_steam_start"))
    eel.info("Allowed wait time: " + str(wait_time), "", "console")
    eel.sleep(wait_time)
    try:
        login_gui.wait_not("ready", wait_time)  # wait for the login window to go away
    except Exception:
        eel.info("Warning!", "Cached login is not possible\nFalling back to simulated login", "console")
        login(steamid, lock, app, True)
    else:
        del app
        lock.release()
        eel.info("Login succeeded", "", "console")
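# --- Hypothetical close_steam() helper (referenced above but not shown) -------
# A plausible minimal implementation, assuming Steam's documented "-shutdown"
# command-line switch; the real helper may instead poll the process list or
# kill steam.exe outright.
import subprocess
import time

def close_steam(exe, si):
    subprocess.call([exe, "-shutdown"], startupinfo=si)  # ask Steam to exit gracefully
    time.sleep(5)  # crude wait for the process to go away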
def startLabel(movie_lang, gpu_support, display_frame):
    global video_path
    try:
        if video_path != '':
            eel.mSpinner()
            eel.info("Movie statutory labeling started")
        else:
            eel.info("Select a video path")
            return  # nothing to do without an input video

        # Extract the audio track so it can be muxed back in at the end
        os.system('ffmpeg -i ' + video_path + ' -ab 160k -ac 2 -ar 44100 -vn Audio/' +
                  Path(video_path).stem + '-audio.wav')
        print(video_path, movie_lang, gpu_support, display_frame)

        # construct the argument parser and parse the arguments
        ap = argparse.ArgumentParser()
        ap.add_argument("-m", "--model", type=str,
                        default='./human-activity/resnet-34_kinetics.onnx',
                        help="path to trained human activity recognition model")
        ap.add_argument("-c", "--classes", type=str,
                        default='./human-activity/action_recognition_kinetics.txt',
                        help="path to class labels file")
        ap.add_argument("-vo", "--output", type=str, default="./output.avi",
                        help="Video output name")
        args = vars(ap.parse_args())

        # load the contents of the class labels file, then define the sample
        # duration (i.e., # of frames for classification) and sample size
        # (i.e., the spatial dimensions of the frame)
        CLASSES = open(args["classes"]).read().strip().split("\n")
        SAMPLE_DURATION = 32
        SAMPLE_SIZE = 112
        labels = [
            'tasting beer', 'smoking', 'drinking beer', 'driving car',
            'driving tractor', 'riding a bike', 'riding scooter',
            'smoking hookah', 'riding mountain bike', 'motorcycling'
        ]
        riding = [
            'motorcycling', 'riding a bike', 'riding scooter',
            'riding mountain bike'
        ]
        smoking = ['smoking', 'smoking hookah']
        alcohol = ['tasting beer', 'drinking beer']
        driving = ['driving car', 'driving tractor']

        # load the human activity recognition model
        print("[INFO] loading human activity recognition model...")
        neth = cv.dnn.readNet(args["model"])

        # Load the weights and configuration to form the pretrained YOLOv3
        # models for helmet, smoking and seatbelt detection
        nethelmet = cv.dnn.readNetFromDarknet('./yolov3-coco/yolov3-helmet.cfg',
                                              './yolov3-coco/helmet6000.weights')
        netsmoking = cv.dnn.readNetFromDarknet('./yolov3-coco/yolov3-smoking.cfg',
                                               './yolov3-coco/yolov3hs.weights')
        netseatbelt = cv.dnn.readNetFromDarknet('./yolov3-coco/yolov3-custom1.cfg',
                                                './yolov3-coco/yoloseatbelt.weights')

        if gpu_support:
            print("[INFO] setting preferable backend and target to CUDA...")
            neth.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
            neth.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
            nethelmet.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
            nethelmet.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

        def activity_detect(frames):
            # now that our frames array is filled we can construct our blob
            blob = cv.dnn.blobFromImages(frames, 1.0,
                                         (SAMPLE_SIZE, SAMPLE_SIZE),
                                         (114.7748, 107.7354, 99.4750),
                                         swapRB=True, crop=True)
            blob = np.transpose(blob, (1, 0, 2, 3))
            blob = np.expand_dims(blob, axis=0)
            # pass the blob through the network to obtain the human activity
            # recognition predictions
            neth.setInput(blob)
            outputs = neth.forward()
            z = outputs.argsort()[-5:][0][-5:]
            activityList = [CLASSES[x] for x in z]
            print(activityList)
            print(np.argmax(outputs))
            return CLASSES[np.argmax(outputs)], activityList

        def writeFrame(frame, fps):
            global writer
            if writer is None:
                # Initialize the video writer
                fourcc = cv.VideoWriter_fourcc(*"MJPG")
                writer = cv.VideoWriter(args["output"], fourcc, fps,
                                        (frame.shape[1], frame.shape[0]), True)
            writer.write(frame)

        def elseFrame(frames, display_frame):
            # Pass the batch through unchanged (no warning overlay)
            for frame in frames:
                if display_frame:
                    cv.imshow("Statutory Labeling", frame)
                    key = cv.waitKey(1) & 0xFF
                writeFrame(frame, fps)

        def checkActivity(list1, list2):
            check = any(item in list1 for item in list2)
            return check

        # grab a pointer to the input video stream
        print("[INFO] accessing video stream...")
        vid = cv.VideoCapture(video_path)
        fps = vid.get(cv.CAP_PROP_FPS)
        print("Fps is:", fps)
        firstLabel = ''
        secondLabel = ''
        thirdLabel = ''

        # loop until we explicitly break from it
        while True:
            # initialize the batch of frames that will be passed through the model
            frames = []
            # loop over the number of required sample frames
            for i in range(0, SAMPLE_DURATION):
                # read a frame from the video stream
                (grabbed, frame) = vid.read()
                # if the frame was not grabbed then we've reached the end of
                # the video stream
                if not grabbed:
                    break
                # otherwise, the frame was read, so add it to our frames list
                # frame = imutils.resize(frame, width=400)
                frames.append(frame)

            if len(frames) > 31:
                firstLabel, activityList1 = activity_detect(frames[:16])
                secondLabel, activityList2 = activity_detect(frames[16:])
                print(firstLabel)
                print(secondLabel)
            else:
                # Not enough frames for a full sample: flush what is left and stop
                for frame in frames:
                    writeFrame(frame, fps)
                break

            if ((checkActivity(labels, activityList1) and checkActivity(labels, activityList2))
                    or (firstLabel == secondLabel) or (firstLabel == thirdLabel)
                    or (firstLabel in alcohol) or (secondLabel in alcohol)
                    or (firstLabel in smoking) or (secondLabel in smoking)):
                thirdLabel = secondLabel
                print(thirdLabel)
                label = firstLabel
                if label in riding:
                    detect = yolo_detect(frames, label, nethelmet)
                    if detect == 1:
                        eel.info("Riding without helmet detected")
                        for i in range(0, 130):
                            (grabbed, frame) = vid.read()
                            if not grabbed:
                                break
                            frames.append(frame)
                        for frame in frames:
                            frame = add_warning(
                                frame,
                                'Images/statutory/' + movie_lang + '/helmet.png')
                            if display_frame:
                                cv.imshow("Statutory Labeling", frame)
                                key = cv.waitKey(1) & 0xFF
                            writeFrame(frame, fps)
                    else:
                        elseFrame(frames, display_frame)
                elif (firstLabel in smoking) or (secondLabel in smoking):
                    detect = yolo_detect(frames, label, netsmoking)
                    print("detect is:", detect)
                    if detect == 2:
                        eel.info("Smoking detected")
                        for i in range(0, 84):
                            (grabbed, frame) = vid.read()
                            if not grabbed:
                                break
                            frames.append(frame)
                        for frame in frames:
                            frame = add_warning(
                                frame,
                                'Images/statutory/' + movie_lang + '/smoke.png')
                            if display_frame:
                                cv.imshow("Statutory Labeling", frame)
                                key = cv.waitKey(1) & 0xFF
                            writeFrame(frame, fps)
                    else:
                        elseFrame(frames, display_frame)
                elif label in alcohol:
                    detect = yolo_detect(frames, label, netsmoking)
                    for i in range(0, 84):
                        (grabbed, frame) = vid.read()
                        if not grabbed:
                            break
                        frames.append(frame)
                    if detect == 2:
                        eel.info("Alcohol & smoking detected")
                        for frame in frames:
                            frame = add_warning(
                                frame,
                                'Images/statutory/' + movie_lang + '/smokealcohol.png')
                            if display_frame:
                                cv.imshow("Statutory Labeling", frame)
                                key = cv.waitKey(1) & 0xFF
                            writeFrame(frame, fps)
                    else:
                        for frame in frames:
                            frame = add_warning(
                                frame,
                                'Images/statutory/' + movie_lang + '/alcohol.png')
                            if display_frame:
                                cv.imshow("Statutory Labeling", frame)
                                key = cv.waitKey(1) & 0xFF
                            writeFrame(frame, fps)
                elif label in driving:
                    detect = yolo_detect(frames, label, netseatbelt)
                    print("detect is:", detect)
                    if detect == 3:
                        eel.info("Driving without seatbelt detected")
                        for i in range(0, 84):
                            (grabbed, frame) = vid.read()
                            if not grabbed:
                                break
                            frames.append(frame)
                        for frame in frames:
                            frame = add_warning(
                                frame,
                                'Images/statutory/' + movie_lang + '/seatbelt.png')
                            if display_frame:
                                cv.imshow("Statutory Labeling", frame)
                                key = cv.waitKey(1) & 0xFF
                            writeFrame(frame, fps)
                    else:
                        elseFrame(frames, display_frame)
                else:
                    elseFrame(frames, display_frame)
            elif (firstLabel not in smoking) and (secondLabel not in smoking):
                detect = yolo_detect(frames, label, netsmoking)
                print("detect is:", detect)
                if detect == 2:
                    eel.info("Smoking detected")
                    for i in range(0, 84):
                        (grabbed, frame) = vid.read()
                        if not grabbed:
                            break
                        frames.append(frame)
                    for frame in frames:
                        frame = add_warning(
                            frame,
                            'Images/statutory/' + movie_lang + '/smoke.png')
                        if display_frame:
                            cv.imshow("Statutory Labeling", frame)
                            key = cv.waitKey(1) & 0xFF
                        writeFrame(frame, fps)
                else:
                    elseFrame(frames, display_frame)
            else:
                elseFrame(frames, display_frame)

        eel.mSpinner()
        if video_path != '':
            eel.mAddTick()
        writer.release()
        vid.release()
        if display_frame:
            cv.destroyWindow("Statutory Labeling")

        # Mux the extracted audio back into the labeled video
        eel.info('Output file is saved to: Video/' + Path(video_path).stem + '-Output.mkv')
        os.system('ffmpeg -i output.avi -i Audio/' + Path(video_path).stem +
                  '-audio.wav -c copy Video/' + Path(video_path).stem + '-Output.mkv')
        print('Output file is saved to: Video/' + Path(video_path).stem + '-Output.mkv')
        print("Process finished")
        if os.path.isfile('Audio/' + Path(video_path).stem + '-audio.wav'):
            os.system('rm Audio/' + Path(video_path).stem + '-audio.wav')
    except Exception:
        print("An error occurred")
        eel.mSpinner()
        eel.mAddCross()
        eel.info("An error occurred")
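# --- Hypothetical add_warning() overlay helper (referenced above, not shown) --
# startLabel() calls add_warning(frame, png_path) to stamp a statutory warning
# image onto each frame. One plausible implementation pastes the PNG into the
# top-left corner and honours its alpha channel; the 25% height scale is an
# assumption, not taken from this project.
import cv2 as cv
import numpy as np

def add_warning(frame, logo_path, scale=0.25):
    logo = cv.imread(logo_path, cv.IMREAD_UNCHANGED)
    h = int(frame.shape[0] * scale)
    w = int(logo.shape[1] * h / logo.shape[0])
    logo = cv.resize(logo, (w, h))
    roi = frame[0:h, 0:w]
    if logo.ndim == 3 and logo.shape[2] == 4:  # PNG with an alpha channel
        alpha = logo[:, :, 3:].astype(float) / 255.0
        frame[0:h, 0:w] = (alpha * logo[:, :, :3] + (1 - alpha) * roi).astype(np.uint8)
    else:
        frame[0:h, 0:w] = logo[:, :, :3]
    return frame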