def main():
    # Initialize ACL
    acl_resource = AclResource()
    acl_resource.init()
    # Create a detection network instance; VGG-SSD is used here.
    # To switch detection networks, instantiate the new one in its place.
    detector = VggSsd(acl_resource, MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    video_decoders, postprocessor = create_threads(detector)
    if video_decoders is None:
        log_error("Please check the configuration in %s is valid" % (FACE_DETEC_CONF))
        return
    while True:
        all_process_fin = True
        for decoder in video_decoders:
            ret, data = decoder.get_data()
            if not ret:
                log_info("Read data ret %s" % str(ret))
                continue
            if data:
                detect_results = detector.execute(data)
                postprocessor.process(data, detect_results)
                all_process_fin = False
        if all_process_fin:
            log_info("all video decoders finished")
            break
    postprocessor.exit()
    log_info("sample execute end")
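# A minimal sketch of what the create_threads() helper above might look like.
# The real helper lives elsewhere in this sample; the config format and the
# VideoDecoder / Postprocessor class names here are assumptions for
# illustration only, not the sample's actual API.
def create_threads_sketch(detector):
    import configparser
    config = configparser.ConfigParser()
    config.read(FACE_DETEC_CONF)          # hypothetical: streams listed in the conf file
    if not config.has_section('videostream'):
        return None, None
    # one decoder thread per configured stream, one shared postprocessor
    video_decoders = [VideoDecoder(url, detector)   # hypothetical class
                      for _, url in config.items('videostream')]
    postprocessor = Postprocessor(detector)          # hypothetical class
    return video_decoders, postprocessor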
def execute(model_path, frames_input_src, output_dir):
    ## Initialization ##
    # initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368,   # model input width
        'height': 368,  # model input height
    }
    # prepare model instance: init (loading model from file to memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    ## Get Input ##
    # Read the image input using OpenCV
    img_original = cv2.imread(frames_input_src)

    ## Model Prediction ##
    # model_processor.predict: preprocessing + model inference + postprocessing
    # canvas: the picture overlaid with human body joints and limbs
    canvas = model_processor.predict(img_original)

    # Save the detected results
    cv2.imwrite(os.path.join(output_dir, 'Result_Pose.jpg'), canvas)
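# A hedged usage sketch for execute() above. The flag names and the default
# model path are assumptions for illustration, not the sample's actual CLI.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='../model/body_pose.om')        # hypothetical path
    parser.add_argument('--frames_input_src', default='../data/test.jpg')  # hypothetical path
    parser.add_argument('--output_dir', default='../outputs')
    args = parser.parse_args()
    execute(args.model, args.frames_input_src, args.output_dir)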
def execute(model_path, frames_input_src, output_dir):
    ## Initialization ##
    # initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 224,   # model input width
        'height': 224,  # model input height
    }
    # prepare model instance: init (loading model from file to memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    ## Get Input ##
    # Read the image input using OpenCV; OpenCV reads it as BGR
    img_original = cv2.imread(frames_input_src)

    ## Model Prediction ##
    # model_processor.predict: preprocessing + model inference + postprocessing
    # category: the category with the highest probability
    category = model_processor.predict(img_original)

    # Save the detected results
    cv2.putText(img_original, category, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (255, 0, 0), 2)
    cv2.imwrite(os.path.join(output_dir, 'Result.jpg'), img_original)
def main(): """ main """ if (len(sys.argv) != 2): print("The App arg is invalid") exit(1) acl_resource = AclResource() acl_resource.init() crowdcount = CrowdCount(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT) ret = crowdcount.init() if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")): os.mkdir(os.path.join(SRC_PATH, "../outputs")) image_dir = sys.argv[1] images_list = [os.path.join(image_dir, img) for img in os.listdir(image_dir) if os.path.splitext(img)[1] in constants.IMG_EXT] for image_file in images_list: image = AclImage(image_file) crop_and_paste_image = crowdcount.pre_process(image) print("pre process end") result = crowdcount.inference([crop_and_paste_image]) result_img_encode = crowdcount.post_process(result, image_file) return result_img_encode
def main(): """ acl resource initialization """ if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR) #ACL resource initialization acl_resource = AclResource() acl_resource.init() model = Model(model_path) images_list = [ os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR) if os.path.splitext(img)[1] in IMG_EXT ] for pic in images_list: orig_shape, orig_l, l_data = preprocess(pic) result_list = model.execute([ l_data, ]) postprocess(result_list, pic, orig_shape, orig_l) break print("Execute end")
def main(): """ Program execution with picture directory parameters """ if (len(sys.argv) != 2): print("The App arg is invalid") exit(1) acl_resource = AclResource() acl_resource.init() #Instance classification detection, pass into the OM model storage path, model input width and height parameters classify = Classify(acl_resource, MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT) #From the parameters of the picture storage directory, reasoning by a picture image_dir = sys.argv[1] images_list = [ os.path.join(image_dir, img) for img in os.listdir(image_dir) if os.path.splitext(img)[1] in IMG_EXT ] #Create a directory to store the inference results if not os.path.isdir('../outputs'): os.mkdir('../outputs') resized_image_list = [] batch_image_files = [] num = 0 batch_amount = len(images_list) // BATCH left = len(images_list) % BATCH for image_file in images_list: num += 1 #Read the pictures image = AclImage(image_file) image_dvpp = image.copy_to_dvpp() #preprocess image resized_image = classify.pre_process(image_dvpp) print("pre process end") batch_image_files.append(image_file) resized_image_list.append(resized_image) if batch_amount > 0: #Each set of BATCH pictures, reasoning and post-processing if num == BATCH: #Reasoning pictures result = classify.inference(resized_image_list, BATCH) #process inference results classify.post_process(result, batch_image_files, BATCH) batch_amount -= 1 num = 0 batch_image_files = [] resized_image_list = [] else: #remaining images are inferred and post-processed if num == left: #Reasoning pictures result = classify.inference(resized_image_list, BATCH) #The inference results are processed classify.post_process(result, batch_image_files, left)
def execute(model_path):
    ## Initialization ##
    # initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368,   # model input width
        'height': 368,  # model input height
    }
    # prepare model instance: init (loading model from file to memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    ## Get Input ##
    # Initialize Camera
    cap = Camera(id=0, fps=10)

    ## Set Output ##
    # open the presenter channel
    chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    while True:
        ## Read one frame from Camera ##
        img_original = cap.read()
        if not img_original:
            print('Error: Camera read failed')
            break
        # Camera input (YUV) to RGB image
        image_byte = img_original.tobytes()
        image_array = np.frombuffer(image_byte, dtype=np.uint8)
        img_original = YUVtoRGB(image_array)
        img_original = cv2.flip(img_original, 1)

        ## Model Prediction ##
        # model_processor.predict: preprocessing + model inference + postprocessing
        # canvas: the picture overlaid with human body joints and limbs
        canvas = model_processor.predict(img_original)

        ## Present Result ##
        # convert to jpeg image for presenter server display
        _, jpeg_image = cv2.imencode('.jpg', canvas)
        # construct AclImage object for presenter server
        jpeg_image = AclImage(jpeg_image, img_original.shape[0],
                              img_original.shape[1], jpeg_image.size)
        # send to presenter server
        chan.send_detection_data(img_original.shape[0], img_original.shape[1],
                                 jpeg_image, [])

    # release the resources
    cap.release()
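# A minimal sketch of the YUVtoRGB() helper used above, assuming the Atlas
# 200DK camera delivers 1280x720 YUV420SP frames. The frame size and the
# exact chroma ordering (NV21 vs NV12) are assumptions, not read from this
# sample.
def YUVtoRGB_sketch(byte_array, width=1280, height=720):
    import cv2
    import numpy as np
    # YUV420SP packs a full-resolution Y plane followed by interleaved
    # chroma, giving height * 3 / 2 rows of `width` bytes
    yuv = byte_array.reshape((height * 3 // 2, width)).astype(np.uint8)
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_NV21)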
def main():
    acl_resource = AclResource()
    acl_resource.init()
    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    model = Model(MODEL_PATH)
    chan = presenteragent.presenter_channel.open_channel(MASK_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return
    lenofUrl = len(sys.argv)
    if lenofUrl <= 1:
        print("[ERROR] Please input mp4/Rtsp URL")
        exit()
    elif lenofUrl >= 3:
        print("[ERROR] param input Error")
        exit()
    URL = sys.argv[1]
    URL1 = re.match('rtsp://', URL)
    URL2 = re.search(r'\.mp4', URL)
    if URL1 is None and URL2 is None:
        print("[ERROR] should input correct URL")
        exit()
    cap = video.AclVideo(URL)
    while True:
        # Read a frame
        ret, image = cap.read()
        if ret != 0:
            print("read None image, break")
            break
        # Preprocess
        model_input = detect.pre_process(image)
        if model_input is None:
            print("Pre process image failed")
            break
        # Inference
        result = model.execute(model_input)
        if result is None:
            print("execute model failed")
            break
        # Post-process
        jpeg_image, detection_list = detect.post_process(result, image)
        if jpeg_image is None:
            print("The jpeg image for present is None")
            break
        chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                 jpeg_image, detection_list)
def main(): """ acl resource initialization """ acl_resource = AclResource() acl_resource.init() #load model model = Model(model_path) chan = presenteragent.presenter_channel.open_channel(COLORIZATION_CONF) if chan is None: print("Open presenter channel failed") return lenofUrl = len(sys.argv) if lenofUrl <= 1: print("[ERROR] Please input mp4/Rtsp URL") exit() elif lenofUrl >= 3: print("[ERROR] param input Error") exit() URL = sys.argv[1] URL1 = re.match('rtsp://', URL) URL2 = re.search('.mp4', URL) if URL1 is None and URL2 is None: print("[ERROR] should input correct URL") exit() cap = cv.VideoCapture(URL) #Gets the total frames frames_num = cap.get(7) currentFrames = 0 while True: #read image ret, frame = cap.read() if ret is not True: print("read None image, break") break if currentFrames == frames_num - 1: currentFrames = 0 cap.set(1, 0) currentFrames += 1 #Gets the L channel value orig_shape, orig_l, l_data = preprocess(frame) result_list = model.execute([l_data,]) result_jpeg = postprocess(result_list, orig_shape, orig_l) chan.send_image(orig_shape[0], orig_shape[1], result_jpeg)
def main(): """ main """ if (len(sys.argv) != 2): print("Please input video path") exit(1) #acl init acl_resource = AclResource() acl_resource.init() #load model model = Model(MODEL_PATH) #open video video_path = sys.argv[1] print("open video ", video_path) cap = cv2.VideoCapture(video_path) fps = cap.get(cv2.CAP_PROP_FPS) Width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) Height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) lf.set_img_size((Width, Height)) #create output directory if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR) output_Video = os.path.basename(video_path) output_Video = os.path.join(OUTPUT_DIR, output_Video) fourcc = cv2.VideoWriter_fourcc( *'mp4v') # DIVX, XVID, MJPG, X264, WMV1, WMV2 outVideo = cv2.VideoWriter(output_Video, fourcc, fps, (Width, Height)) # Read until video is completed while (cap.isOpened()): ret, frame = cap.read() if ret == True: #preprocess orig_shape, rgb_img, framecv = preprocess(frame) #inference result_list = model.execute([ rgb_img, ]) #postprocess frame = postprocess(result_list, framecv, orig_shape) outVideo.write(frame) # Break the loop else: break cap.release() outVideo.release() print("Execute end")
def main(): """ main """ SRC_PATH = os.path.realpath(__file__).rsplit("/", 1)[0] MODEL_PATH = "../model/deploy_vel.om" MODEL_WIDTH = 512 MODEL_HEIGHT = 512 # With picture directory parameters during program execution if (len(sys.argv) != 2): print("The App arg is invalid") exit(1) acl_resource = AclResource() acl_resource.init() single_image_dehaze = SingleImageDehaze(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT) ret = single_image_dehaze.init() utils.check_ret("single_image_dehaze init ", ret) image_dir = sys.argv[1] images_list = [ os.path.join(image_dir, img) for img in os.listdir(image_dir) if os.path.splitext(img)[1] in constants.IMG_EXT ] # Create a directory to save inference results if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")): os.mkdir(os.path.join(SRC_PATH, "../outputs")) for image_file in images_list: image_name = image_file.split('/')[-1] # read image im = Image.open(image_file) # Preprocess the picture resized_image = single_image_dehaze.pre_process(im) # Inferencecd result = single_image_dehaze.inference([ resized_image, ]) # # Post-processing single_image_dehaze.post_process(result, image_name)
def main(): """ main """ image_dir = os.path.join(currentPath, "data") images_list = [ os.path.join(image_dir, img) for img in os.listdir(image_dir) if os.path.splitext(img)[1] in constants.IMG_EXT ] acl_resource = AclResource() acl_resource.init() hpa = Hpa(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT) ret = hpa.init() utils.check_ret("hpa init ", ret) # Create a directory to save inference results if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR) for image_file in images_list: image_name = os.path.join(image_dir, os.path.basename(image_file)) print('====' + image_name + '====') # read image im = Image.open(image_name) if len(im.split()) != 3: print('warning: "{}" is not a color image and will be ignored'. format(image_name)) continue # Preprocess the picture resized_image = hpa.pre_process(im) # Inferencecd result = hpa.inference([ resized_image, ]) # # Post-processing hpa.post_process(result, image_name)
def main(): """ acl resource initialization """ acl_resource = AclResource() acl_resource.init() model = Model(model_path) with codecs.open(dict_path, 'r', 'utf-8') as reader: for line in reader: token = line.strip() token_dict[token] = len(token_dict) with open(sample_path, "r") as f: text = f.read() with open(label_path, "r", encoding="utf-8") as f: label_dict = json.loads(f.read()) X1, X2 = preprocess(text) X1 = np.ascontiguousarray(X1, dtype='float32') X2 = np.ascontiguousarray(X2, dtype='float32') X1 = np.expand_dims(X1, 0) X2 = np.expand_dims(X2, 0) s_time = time.time() result_list = model.execute([X1, X2]) e_time = time.time() print(result_list) y = postprocess(result_list) if not os.path.exists(output_dir): os.mkdir(output_dir) save_to_file(output_dir + 'prediction_label.txt', label_dict[str(y)]) print("Original text: %s" % text) print("Prediction label: %s" % label_dict[str(y)]) print("Cost time:", e_time - s_time) print("Execute end")
def main(): """ main """ if (len(sys.argv) != 2): print("The App arg is invalid") exit(1) acl_resource = AclResource() acl_resource.init() gesture = Gesture(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT) ret = gesture.init() utils.check_ret("Gesture.init ", ret) image_dir = sys.argv[1] images_list = [ os.path.join(image_dir, img) for img in os.listdir(image_dir) if os.path.splitext(img)[1] in const.IMG_EXT ] if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")): os.mkdir(os.path.join(SRC_PATH, "../outputs")) for image_file in images_list: image = AclImage(image_file) resized_image = gesture.pre_process(image) print("pre process end") result = gesture.inference([ resized_image, ]) gesture.post_process(result, image_file)
def process(self): """ Analyze and encrypt web pages and their parts or encrypt files This is quite confusing, what we going to do? It is not needed to describe Algorithm compatibility check and Resource directory check. What we have to describe is encryption of some files. First step: - we have to encrypt specified elements of web pages and this new ciphertext save to json file with his own ID - we have to generate new encrypted element tag and replace this original content by this tag Second step: - we have to encrypt "secret" of each ACL resource (list item) which contains cryptokeys of encrypted web pages - after that we have to save ACL to file and store new generated cryptokeys in some variable Third step: - we have to encrypt "secret" of each role which contains cryptokeys of encrypted ACL reources - after that we have to save roles to file and store new generated cryptokeys in some variable Fourth step: - this is almost end of process - we have to encrypt user confident data, so it is encrypted by specified pass or pub key certificate Fifth step: - we have to create version file. Why? Because of cache. We caching some files as config, acl or roles, so we have to know when reload these files - version file contains olny time constant, which describes last modification of resources and config files """ # Algorithm compatibility check if not self.algorithm_compatibility_check(): print("Error: Some of specified algorithm is not supported.") self.print_algorithm_support() exit(100) # Roles dependancy check dcheck = self.roles.role_dependancy_check() if dcheck != None: print("Error: Inheritance miss match! Role not found: " + dcheck) exit(100) # Resources directory check if not os.path.isdir(self.config.uri_base + "/" + self.config.uri_resources_dir): os.mkdir(self.config.uri_base + "/" + self.config.uri_resources_dir) if self.verbose: print("[PROCESS] Directory '" + self.config.uri_resources_dir + "' has been created") # Init variables content = None perm = None oda = None # First step: Encrypt web pages or their parts and save crytpo keys and info in ACL for loc in self.web_pages: error = False # At first try open as BOM, as a second try standard UTF-8 web_page = self.fileworker.open_file(loc) if web_page == None: print("Warning: Can't process " + loc) error = True if error == False: self.process_web_page(loc, web_page.read()) # Second step: Generate ACL file acl_cryptokeys = [] for resource in self.acl.resources: # resource.secret contains cryptokey from web page promise = self.encrypt( json.dumps(resource.secret) ) # ACL resource secret encryption (json.dumps --> json in string form) self.acl.add_encrypted_resource( AclResource(resource.id, resource.uri, resource.access, resource.permission, promise["ciphertext"])) acl_cryptokeys.append({ "resource_id": resource.id, "permission": resource.permission, "secret": promise["secret"] }) self.acl.save() if self.verbose: print("[PROCESS] ACL has been generated") # Third step: Generate Roles file for ack in acl_cryptokeys: # ack["secret"] contains cryptokey from ACL resource self.roles.add_ack_to_role( ack["resource_id"], ack["permission"], ack["secret"]) # ack == ACL cryptokey from ACL resource self.roles.compute_heredity( ) # Some roles has heredity, so we have to copy some resources to ensure access role_cryptokeys = [] for role in self.roles.list: promise = self.encrypt(json.dumps( role.secret)) # RoleList Role secret encryption self.roles.add_encrypted_role( Role(role.name, role.inherits, promise["ciphertext"])) role_cryptokeys.append({ 
"role": role.name, "secret": promise["secret"] }) self.roles.save() if self.verbose: print("[PROCESS] Roles has been generated") # Fourth step: Generate user database files and encrypt them with specific keys for rck in role_cryptokeys: #rck["secret"] contains cryptokey from Role self.users.add_rck_to_user(rck["role"], rck["secret"]) for user in self.users.list: if user.key_type == "password": # PBKDF2 #promise_pwd = self.pbkdf2(user.password) #promise = self.encrypt(json.dumps(user.secret), promise_pwd["ciphertext"], user.key_type) # Users User secret encryption by password #self.users.add_encrypted_user(EncryptedUser(user.username, user.roles, promise["ciphertext"], promise["secret"]["algorithm"], promise_pwd["salt"])) # Simple SHA-256 promise_pwd = self.sha256(user.password) promise = self.encrypt( json.dumps(user.secret), promise_pwd, user.key_type) # Users User secret encryption by password self.users.add_encrypted_user( EncryptedUser(user.username, user.roles, promise["ciphertext"], promise["secret"]["algorithm"], "")) elif user.key_type == "certificate": promise = self.encrypt(json.dumps( user.secret)) # Users User secret encryption by password encryptedUser = EncryptedUser(user.username, user.roles, promise["ciphertext"], promise["secret"]["algorithm"], "") promisersa = self.encrypt(promise["secret"]["key"], user.certificate, "certificate") encryptedUser.key_secret = promisersa encryptedUser.key_algorithm = {"name": "RSA-OAEP"} self.users.add_encrypted_user(encryptedUser) else: print( "[PROCESS][WARNING] Something goes wrong with user encryption. Be prepared for everything." ) self.users.save() if self.verbose: print("[PROCESS] Users has been generated") # We have to generate version.json which specify when was everything generated # (why? because we want to sometimes) refresh all cached data! version_file = codecs.open( self.config.uri_base.replace("\\", "/") + "/" + self.config.uri_version, "w+") json.dump( { "page-version": datetime.datetime.timestamp(datetime.datetime.now()) }, version_file) version_file.close() if self.verbose: print("[PROCESS] Version file has been generated")
def process_web_page(self, loc, web_page):
    """
    Process a web page (extract the marked data and perform encryption)
    """
    if self.verbose:
        print("[PROCESS][PAGE] " + loc)
    # Get content for encryption
    contents = re.findall(r'<!--EE:BEGIN-->(.*?)<!--EE:END-->', web_page,
                          re.DOTALL)
    if self.verbose and len(contents) == 0:
        print("[PROCESS][PAGE][INFO] No encrypted element found")
    # Encrypted element id in the current document
    ee_id = 1
    page_divisions = []
    for ee in contents:
        if self.verbose:
            print("[PROCESS][PAGE][INFO] Encrypted element has been found! "
                  "ID: " + str(ee_id))
        # Get post-processing info
        perm_list = re.findall(r'<!--PERM:(.*?)-->', ee)
        oda_list = re.findall(r'<!--ODA:(.*?)-->', ee)
        # Remove the post-processing info
        ee = re.sub(r'<!--PERM:(.*?)-->', '', ee)
        ee = re.sub(r'<!--ODA:(.*?)-->', '', ee)
        resource_uri = re.sub(self.dir.replace("\\", "/") + '/', '', loc)
        resource_id = self.sha256(resource_uri + '.' + str(ee_id))
        # Get permissions
        permissions_chaos = perm_list[0].split(",")
        permissions = []
        for perm in permissions_chaos:
            permissions.append(perm.strip())
        # Get ODA (on-denied action)
        oda = oda_list[0].strip()
        promise = self.encrypt(ee)
        # Define access
        if len(permissions) == 0:
            access = "public"
        else:
            access = "private"
        # Add the new ACL resource
        self.acl.add_resource(
            AclResource(resource_id, resource_uri, access, permissions,
                        promise["secret"]))
        page_division = {
            "ee_id": ee_id,
            "resource_id": resource_id,
            "ciphertext": promise["ciphertext"],
            "oda": oda
        }
        page_divisions.append(page_division)
        if self.verbose:
            print("[PROCESS][PAGE][INFO] Resource has been saved! ID: " +
                  resource_id)
        ee_id += 1
    # Update the web page and save the ciphertext file
    self.save_wp(loc, web_page, page_divisions)
    if self.verbose:
        print("[PROCESS][PAGE][INFO] File has been updated")
    return
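# Example of the markup process_web_page() expects inside a page, grounded in
# the regular expressions above (the role names and ODA value are
# illustrative):
#
#   <!--EE:BEGIN-->
#   <!--PERM:admin, editor-->
#   <!--ODA:hide-->
#   <p>Members-only content to encrypt.</p>
#   <!--EE:END-->
#
# PERM lists the roles allowed to decrypt the element (an empty list makes
# the resource public); ODA is the on-denied action stored with the page
# division.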
def execute(model_path, frames_input_src, output_dir, is_presenter_server):
    ## Initialization ##
    # initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368,   # model input width
        'height': 368,  # model input height
    }
    # prepare model instance: init (loading model from file to memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    ## Get Input ##
    # Read the video input using OpenCV
    cap = cv2.VideoCapture(frames_input_src)

    ## Set Output ##
    if is_presenter_server:
        # if using presenter server, then open the presenter channel
        chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
        if chan is None:
            print("Open presenter channel failed")
            return
    else:
        # if saving the result as a video file (mp4), set up the output
        # video writer using OpenCV
        video_output_path = '{}/demo-{}-{}.mp4'.format(
            output_dir, os.path.basename(frames_input_src),
            str(random.randint(1, 100001)))
        # 0x7634706d is the 'mp4v' fourcc packed as a little-endian int
        video_writer = cv2.VideoWriter(video_output_path, 0x7634706d, 25,
                                       (1280, 720))
        if video_writer is None:
            print('Error: cannot get video writer from openCV')

    while cap.isOpened():
        ## Read one frame of the input video ##
        ret, img_original = cap.read()
        if not ret:
            print('Cannot read more, reached the end of the video')
            break

        ## Model Prediction ##
        # model_processor.predict: preprocessing + model inference + postprocessing
        # canvas: the picture overlaid with human body joints and limbs
        canvas = model_processor.predict(img_original)

        ## Present Result ##
        if is_presenter_server:
            # convert to jpeg image for presenter server display
            _, jpeg_image = cv2.imencode('.jpg', canvas)
            # construct AclImage object for presenter server
            jpeg_image = AclImage(jpeg_image, img_original.shape[0],
                                  img_original.shape[1], jpeg_image.size)
            # send to presenter server
            chan.send_detection_data(img_original.shape[0],
                                     img_original.shape[1], jpeg_image, [])
        else:
            # save to video
            video_writer.write(canvas)

    # release the resources
    cap.release()
    if not is_presenter_server:
        video_writer.release()
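# Quick check of the fourcc constant used above (illustrative): 'm', 'p',
# '4', 'v' are 0x6d, 0x70, 0x34, 0x76, packed least-significant byte first:
#   >>> import cv2
#   >>> hex(int(cv2.VideoWriter_fourcc(*'mp4v')))
#   '0x7634706d'
# so passing the literal 0x7634706d is equivalent to
# cv2.VideoWriter_fourcc(*'mp4v').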
def main(opt):
    # Step 1: initialize ACL and the ACL runtime
    acl_resource = AclResource()
    # one call to 'init' on the AclResource object initializes ACL and its runtime
    acl_resource.init()

    # Step 2: load the model
    mot_model = Model(acl_resource, 'model/dlav0.om')

    # Create the output dir if it does not exist; defaults to the current dir
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    video_name = os.path.basename(opt.input_video).replace(' ', '_').split('.')[0]

    # Set up the dataloader; use LoadVideo or LoadImages
    dataloader = LoadVideo(opt.input_video, (1088, 608))
    # result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    # Dir for output images; default: outputs/'VideoFileName'
    save_dir = os.path.join(result_root, video_name)
    if save_dir and os.path.exists(save_dir) and opt.rm_prev:
        shutil.rmtree(save_dir)
    mkdir_if_missing(save_dir)

    # Initialize the tracker
    tracker = JDETracker(opt, mot_model, frame_rate=frame_rate)
    timer = Timer()
    results = []

    # img:  c h w; 3 608 1088 (letterboxed, normalized model input)
    # img0: h w c; 608 1088 3 (original frame)
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            print('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # Run tracking, start the tracking timer
        timer.tic()
        # List of Tracklets; see multitracker.STrack
        online_targets = tracker.update(np.array([img]), img0)
        # Prepare for drawing: collect all bboxes and ids
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # filter out boxes wider than tall (w/h > 1.6) and tiny boxes
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # Draw bboxes and ids
        online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
def main():
    acl_resource = AclResource()
    acl_resource.init()
    # Create an object that performs detection preprocessing, postprocessing
    # and so on
    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    # Load the pre-trained model from its path
    model = Model(MODEL_PATH)
    # Connect to the Presenter Server according to the config file
    chan = presenteragent.presenter_channel.open_channel(MASK_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return
    # Start the Pi camera on the Atlas 200DK; the argument is the camera
    # port (0 or 1)
    cap = Camera(0)
    while True:
        # Read an image frame from the camera
        image = cap.read()
        if image is None:
            print("read None image, break")
            break
        # Image preprocessing
        model_input = detect.pre_process(image)
        if model_input is None:
            print("Pre process image failed")
            break
        # Send the data to the model for detection
        result = model.execute(model_input)
        if result is None:
            print("execute model failed")
            break
        # Post-process the detection result
        jpeg_image, detection_list = detect.post_process(result, image)
        for i in range(len(detection_list)):
            # Print the detection result: box coordinates, detected label,
            # confidence
            print(detection_list[i].box.lt.x, detection_list[i].box.lt.y,
                  detection_list[i].box.rb.x, detection_list[i].box.rb.y,
                  detection_list[i].result_text, detection_list[i].confidence)
            # ---- social distancing check over UDP ----
            d = math.sqrt(
                pow(detection_list[i].box.lt.x - detection_list[i].box.rb.x, 2) +
                pow(detection_list[i].box.lt.y - detection_list[i].box.rb.y, 2))
            if d > 80:
                msg = detection_list[i].result_text
                bytesToSend = str.encode(msg)
                serverAddressPort = ("192.168.1.22", 20001)
                bufferSize = 1024
                UDPClientSocket = socket.socket(socket.AF_INET,
                                                socket.SOCK_DGRAM)
                UDPClientSocket.sendto(bytesToSend, serverAddressPort)
            for j in range(i + 1, len(detection_list)):
                # Box centers of detections i and j
                x_cor_1 = detection_list[i].box.lt.x + abs(
                    (detection_list[i].box.lt.x - detection_list[i].box.rb.x)) / 2
                y_cor_1 = detection_list[i].box.lt.y + abs(
                    (detection_list[i].box.lt.y - detection_list[i].box.rb.y)) / 2
                x_cor_2 = detection_list[j].box.lt.x + abs(
                    (detection_list[j].box.lt.x - detection_list[j].box.rb.x)) / 2
                y_cor_2 = detection_list[j].box.lt.y + abs(
                    (detection_list[j].box.lt.y - detection_list[j].box.rb.y)) / 2
                # Box widths and the distance threshold
                width_1 = abs(detection_list[i].box.lt.x -
                              detection_list[i].box.rb.x)
                width_2 = abs(detection_list[j].box.lt.x -
                              detection_list[j].box.rb.x)
                width_thrs = abs(min(width_1, width_2) * 3)
                diss = math.sqrt(pow(x_cor_1 - x_cor_2, 2) +
                                 pow(y_cor_1 - y_cor_2, 2))
                # print("Too close!!!!!!", j, "distance:", diss,
                #       "threshold:", width_thrs)
                if width_1 > 50 and width_2 > 50:
                    if diss < width_thrs:
                        msg = "too_close"
                        bytesToSend = str.encode(msg)
                        serverAddressPort = ("192.168.1.22", 20001)
                        bufferSize = 1024
                        UDPClientSocket = socket.socket(socket.AF_INET,
                                                        socket.SOCK_DGRAM)
                        UDPClientSocket.sendto(bytesToSend, serverAddressPort)
        # ---- end social distancing check ----
        if jpeg_image is None:
            print("The jpeg image for present is None")
            break
        # Send the result to the Presenter Server to display in the browser
        chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                 jpeg_image, detection_list)
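# Worked example of the distance checks above (numbers illustrative): the
# first check computes d as a box's diagonal (distance between its left-top
# and right-bottom corners) and sends the detection label over UDP whenever
# the diagonal exceeds 80 px. The pairwise check then takes two detections
# with box widths 60 and 80 px, giving width_thrs = min(60, 80) * 3 = 180 px;
# if the distance between the two box centers is below 180 px (and both
# widths exceed 50 px), a "too_close" message is sent to the same UDP server.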
def execute(model_path):
    ## Initialization ##
    # initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()
    # load offline models for face detection and head pose estimation
    model_face = Model(acl_resource, MODEL_PATH_FACE)
    model_head_pose = Model(acl_resource, MODEL_PATH_HEAD_POSE)

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368,   # model input width
        'height': 368,  # model input height
    }
    # prepare model instance: init (loading model from file to memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    last_five_frame_result = []

    # Initialize Camera
    cap = Camera(id=0, fps=10)

    # Read reference images
    img_left1 = cv2.imread(LEFT1_PATH)
    img_left2 = cv2.imread(LEFT2_PATH)
    img_right1 = cv2.imread(RIGHT1_PATH)
    img_right2 = cv2.imread(RIGHT2_PATH)
    img_stop = cv2.imread(STOP_PATH)

    # Get reference output
    canvas_left1, joint_list_left1 = model_processor.predict(img_left1)
    canvas_left2, joint_list_left2 = model_processor.predict(img_left2)
    canvas_right1, joint_list_right1 = model_processor.predict(img_right1)
    canvas_right2, joint_list_right2 = model_processor.predict(img_right2)
    canvas_stop, joint_list_stop = model_processor.predict(img_stop)

    # Get angles from the reference images
    angle_left1 = getangle(joint_list_left1)
    angle_left2 = getangle(joint_list_left2)
    angle_right1 = getangle(joint_list_right1)
    angle_right2 = getangle(joint_list_right2)
    angle_stop = getangle(joint_list_stop)

    # Initialize counts
    countleft = 0
    countright = 0
    countstop = 0

    ## Presenter Server Output ##
    chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    predict = StateMachine()
    while True:
        ## Read one frame of the input video ##
        img_original = cap.read()
        if not img_original:
            print('Error: Camera read failed')
            break

        ## HEAD POSE BEGIN ##
        # Camera input (YUV) to RGB image
        image_byte = img_original.tobytes()
        image_array = np.frombuffer(image_byte, dtype=np.uint8)
        img_original = YUVtoRGB(image_array)
        img_original = cv2.flip(img_original, -1)
        # Make copies of the image for head model and body model processing
        img_bodypose = copy.deepcopy(img_original)
        img_headpose = copy.deepcopy(img_original)

        ## Model Prediction ##
        # model_processor.predict: preprocessing + model inference + postprocessing
        # canvas: the picture overlaid with human body joints and limbs
        # img_bodypose is modified with the skeleton
        canvas, joint_list_input = model_processor.predict(img_bodypose)
        angle_input = getangle(joint_list_input)

        dif5 = abs(angle_input - angle_left1)
        dif6 = abs(angle_input - angle_left2)
        dif7 = abs(angle_input - angle_right1)
        dif8 = abs(angle_input - angle_right2)
        dif9 = abs(angle_input - angle_stop)

        result = "invalid"
        # last_five_result = "invalid"
        if all(i < 25 for i in dif5):
            result = "left1"
        elif all(i < 25 for i in dif6):
            result = "left2"
        elif all(i < 25 for i in dif7):
            result = "right1"
        elif all(i < 25 for i in dif8):
            result = "right2"
        elif all(i < 25 for i in dif9):
            result = "stop"

        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10, 100)
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 2
        cv2.putText(img_bodypose, result, bottomLeftCornerOfText, font,
                    fontScale, fontColor, lineType)

        ## FACE DETECTION MODEL BEGIN ##
        input_image = PreProcessing_face(img_headpose)
        face_flag = False
        try:
            resultList_face = model_face.execute([input_image]).copy()
            # draw bounding box on img_bodypose
            xmin, ymin, xmax, ymax = PostProcessing_face(
                img_bodypose, resultList_face)
            bbox_list = [xmin, ymin, xmax, ymax]
            face_flag = True
        except Exception:
            print('No face detected')
        ## FACE DETECTION MODEL END ##

        ## HEADPOSE BEGIN ##
        head_status_string = "No output"
        if face_flag:
            input_image = PreProcessing_head(img_headpose, bbox_list)
            try:
                resultList_head = model_head_pose.execute([input_image]).copy()
                # draw head pose points on the image
                facepointList, head_status_string, canvas = PostProcessing_head(
                    resultList_head, bbox_list, img_bodypose)
                print('Headpose:', head_status_string)
            except Exception:
                print('No head pose estimation output')
        headpose_result = head_status_string
        ## HEADPOSE END ##

        predict.staterunner(result, headpose_result)

        ## Present Result ##
        # convert to jpeg image for presenter server display
        _, jpeg_image = cv2.imencode('.jpg', img_bodypose)
        # construct AclImage object for presenter server
        jpeg_image = AclImage(jpeg_image, img_original.shape[0],
                              img_original.shape[1], jpeg_image.size)
        # send to presenter server
        chan.send_detection_data(img_original.shape[0], img_original.shape[1],
                                 jpeg_image, [])

    cap.release()
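# Worked example of the pose matching above (numbers illustrative): getangle()
# is assumed to return an array of joint angles in degrees, so
# dif5 = abs(angle_input - angle_left1) is an elementwise array of angle
# differences. The live frame is labelled "left1" only when every element is
# under the 25-degree tolerance, i.e. all(i < 25 for i in dif5); the elif
# chain means the first matching reference pose wins.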
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    if not os.path.exists(OUTPUT_TXT_DIR):
        os.mkdir(OUTPUT_TXT_DIR)
    # Initialize ACL resources
    acl_resource = AclResource()
    acl_resource.init()
    # Load the model
    model = Model(acl_resource, MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)
    print("src_dir = ", src_dir)
    # Read images from the data directory one by one for inference
    for pic in src_dir:
        # Read an image
        pic_path = os.path.join(INPUT_DIR, pic)
        pic_name = pic.split('.')[0]
        print(pic_name)
        bgr_img = cv2.imread(pic_path)
        t1 = time.time()
        img, ratio = letterbox(bgr_img, new_shape=(320, 640))  # resize to (320, 640, 3)
        img = img[:, :, ::-1]         # BGR to RGB
        img = img.transpose(2, 0, 1)  # (3, 320, 640)
        img = np.ascontiguousarray(img)
        img = img.astype(np.float32)
        img = img / 255.0
        data = np.concatenate(
            (img[:, ::2, ::2], img[:, 1::2, ::2], img[:, ::2, 1::2],
             img[:, 1::2, 1::2]), axis=0)  # (12, 160, 320)
        t2 = time.time()
        result_list = model.execute([data, ])
        t3 = time.time()
        post = yolov5_post(result_list)  # (1, 25200, 12)
        result_return = non_max_suppression(post,
                                            conf_thres=conf_threshold,
                                            iou_thres=iou_threshold)
        if len(result_return['detection_classes']):
            det = np.array(result_return['detection_boxes'])[:, :4]
            bbox = scale_coords((320, 640), det, bgr_img.shape, ratio)
        t4 = time.time()
        print("result = ", result_return)
        print("preprocess cost:", t2 - t1)
        print("forward cost:", t3 - t2)
        print("postprocess cost:", t4 - t3)
        print("total cost:", t4 - t1)
        print("FPS:", 1 / (t4 - t1))
        for i in range(len(result_return['detection_classes'])):
            box = bbox[i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv2.rectangle(bgr_img, (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])), colors[i % 6])
            p3 = (max(int(box[0]), 15), max(int(box[1]), 15))
            out_label = class_name
            cv2.putText(bgr_img, out_label, p3, cv2.FONT_ITALIC, 0.6,
                        colors[i % 6], 1)
        output_file = os.path.join(OUTPUT_DIR, "out_" + pic)
        print("output:%s" % output_file)
        cv2.imwrite(output_file, bgr_img)
        predict_result_path = os.path.join(OUTPUT_TXT_DIR,
                                           str(pic_name) + '.txt')
        with open(predict_result_path, 'w') as f:
            for i in range(len(result_return['detection_classes'])):
                box = bbox[i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                box = list(map(int, box))
                box = list(map(str, box))
                confidence = '%.4f' % confidence
                bbox_mess = ' '.join([class_name, confidence, box[0], box[1],
                                      box[2], box[3]]) + '\n'
                f.write(bbox_mess)
    print("Execute end")
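# The four strided slices concatenated in the preprocessing above implement
# the YOLOv5 "Focus" (space-to-depth) stem on the host: each 2x2 pixel block
# of the (3, 320, 640) image is rearranged into channels, producing a
# (12, 160, 320) tensor with no information loss. An equivalent standalone
# sketch:
def focus_sketch(img):
    import numpy as np
    # img: (C, H, W) float array; returns (4C, H/2, W/2)
    return np.concatenate((img[:, ::2, ::2],     # top-left pixel of each 2x2 block
                           img[:, 1::2, ::2],    # bottom-left
                           img[:, ::2, 1::2],    # top-right
                           img[:, 1::2, 1::2]),  # bottom-right
                          axis=0)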
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    if not os.path.exists(OUTPUT_TXT_DIR):
        os.mkdir(OUTPUT_TXT_DIR)
    # Initialize ACL resources
    acl_resource = AclResource()
    acl_resource.init()
    # Load the model
    model = Model(acl_resource, MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)
    print("src_dir = ", src_dir)
    # Read images from the data directory one by one for inference
    t_pre = 0
    t_for = 0
    t_post = 0
    for pic in src_dir:
        # Read an image
        pic_path = os.path.join(INPUT_DIR, pic)
        bgr_img = cv.imread(pic_path)
        # Preprocess
        t1 = time.time()
        # data, w, h = preprocess(pic_path)
        data, w, h = preprocess_cv2(bgr_img)
        t2 = time.time()
        t_pre += (t2 - t1)
        # Run model inference
        result_list = model.execute([data, ])
        t3 = time.time()
        t_for += (t3 - t2)
        # Post-process the inference results
        result_return = post_process(result_list, w, h)
        t4 = time.time()
        t_post += (t4 - t3)
        print("result = ", result_return)
        print("preprocess cost:", t2 - t1)
        print("forward cost:", t3 - t2)
        print("postprocess cost:", t4 - t3)
        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv.rectangle(bgr_img, (int(box[1]), int(box[0])),
                         (int(box[3]), int(box[2])), colors[i % 6], 2)
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name
            cv.putText(bgr_img, out_label, p3, cv.FONT_ITALIC, 0.6,
                       colors[i % 6], 1)
        output_file = os.path.join(OUTPUT_DIR, "out_" + pic)
        print("output:%s" % output_file)
        cv.imwrite(output_file, bgr_img)
        pic_name = pic.split('.')[0]
        predict_result_path = os.path.join(OUTPUT_TXT_DIR,
                                           str(pic_name) + '.txt')
        with open(predict_result_path, 'w') as f:
            for i in range(len(result_return['detection_classes'])):
                box = result_return['detection_boxes'][i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                box = list(map(int, box))
                box = list(map(str, box))
                confidence = '%.4f' % confidence
                bbox_mess = ' '.join([class_name, confidence, box[1], box[0],
                                      box[3], box[2]]) + '\n'
                f.write(bbox_mess)
    num = len(src_dir)
    print("avg preprocess cost:", t_pre / num)
    print("avg forward cost:", t_for / num)
    print("avg postprocess cost:", t_post / num)
    total = t_pre / num + t_for / num + t_post / num
    print("avg total cost:", total)
    print("avg FPS:", 1 / total)
    print("Execute end")