def read_data(data_file, fields):
    """Load one data file into its table inside a single transaction.

    The table name is derived from the file type and each line is inserted
    with the file's drop date. On any failure the transaction is rolled
    back (or the commit error logged) and None is returned.
    """
    table_name = get_file_type(data_file)
    drop_date = get_drop_date(data_file)
    try:
        with open(os.path.join('data', data_file), 'r') as f:
            create_table(table_name, fields)
            for line in f:
                insert_line(table_name, drop_date, line.strip(), fields)
            mark_loaded(data_file)
    except IOError:
        # Missing input file is an expected, skippable condition.
        logger.error('No data file "{}" found, skipping...'.format(data_file))
        return None
    except Exception as e:
        # rollback transaction and skip
        conn.rollback()
        logger.error('Error for "{}": {}'.format(data_file, e))
        return None
    try:
        # commit single transaction of table creation and insertions
        conn.commit()
        logger.info('SUCCESS loading data for "{}"'.format(data_file))
    except Exception as e:
        # Narrowed from a bare except and now logs the actual failure
        # instead of silently discarding it.
        logger.error('Transaction commit failed for "{}": {}, skipping...'.format(
            data_file, e))
        return None
def _open_files_dialog_cb(file_select, response_id):
    """Dialog response callback: open the first selected file as the clip.

    Ignores cancelled dialogs, empty selections and non-video files; shuts
    down any running player/renderer before switching to the new clip.
    """
    filenames = file_select.get_filenames()
    file_select.destroy()
    if response_id != Gtk.ResponseType.OK:
        return
    if len(filenames) == 0:
        return
    # Only accept video files
    if utils.get_file_type(filenames[0]) != "video":
        return
    global _last_load_file
    _last_load_file = filenames[0]
    global _current_path, _render_data
    # if another clip has already been opened then we need to shutdown players.
    # and reset render data
    if _current_path != None:
        _render_data = None
        if _player != None:
            _player.shutdown()
        if _effect_renderer != None:
            _effect_renderer.shutdown()
    _current_path = filenames[0]
    # Finish clip open when dialog has been destroyed
    GLib.idle_add(_finish_clip_open)
def upload_resource():
    """Flask view: upload a file to Qiniu and register it as a Resource.

    Aborts with 400 on a missing/empty/disallowed file and 500 when the
    Qiniu upload reports failure; on success records the public URL in
    the database and returns a JSON ok message.
    """
    if 'file' not in request.files:
        abort(400, 'no file has been uploaded.')
    file = request.files['file']
    if file.filename == '':
        abort(400, 'no file has been uploaded')
    file_extension = get_file_extension(file.filename)
    if file and allowed_file(file_extension,
                             current_app.config['ALLOWED_EXTENSIONS']):
        # file.read() loads the whole upload into memory before sending.
        ret = upload_file_to_qiniu(current_app.config['QINIU_ACCESS_KEY'],
                                   current_app.config['QINIU_SECRET_KEY'],
                                   current_app.config['QINIU_BUCKET_NAME'],
                                   file.filename, file.read())
        if not ret['ret']:
            abort(500, ret['msg'])
    else:
        abort(400, 'not allowed file type.')
    # `ret` is always bound here: the disallowed branch aborts above.
    url = f"http://{current_app.config['QINIU_BUCKET_URL']}/{ret['key']}"
    file_type = get_file_type(file_extension)
    sessionFactory = getSessionFactory()
    session = sessionFactory.get_session()
    new_resource = Resources(0, file.filename, file_type, url)
    session.add(new_resource)
    session.commit()
    # NOTE(review): close() is not in a finally block — a commit failure
    # would leak the session; consider try/finally.
    session.close()
    return jsonify({'msg': 'ok'})
def get_media_type(file_path):
    """Return the media type constant for *file_path*.

    Existing files are classified by mime type; non-existing paths are
    probed with a heuristic for MLT image-sequence resource names.
    """
    if not os.path.exists(file_path):
        # IMAGE_SEQUENCE media objects have an MLT formatted resource path
        # (e.g. containing "%0...d." or ".all") that does not point to a
        # real file on disk — detect those heuristically.
        looks_like_sequence = (
            (file_path.find("%0") != -1 and file_path.find("d.") != -1)
            or file_path.find(".all") != -1
        )
        return IMAGE_SEQUENCE if looks_like_sequence else FILE_DOES_NOT_EXIST

    mime_type = utils.get_file_type(file_path)
    for prefix, media_type in (("video", VIDEO),
                               ("audio", AUDIO),
                               ("image", IMAGE)):
        if mime_type.startswith(prefix):
            return media_type
    return UNKNOWN
def smartPasteLeave(self, event):
    """Parse the smart-paste box contents into a playable file list.

    Keeps only existing files with a whitelisted video extension, pushes
    them to the list widget, then resets the paste box to the notice text.
    """
    if setting_fftool.has_query:
        return
    ss = self.smartPaste.get(1.0, tk.END)
    arr = utils.split_str(ss, False)
    if len(arr) and arr[0] == self.smartNotice:
        # box still shows the default notice text — nothing pasted
        # print("default")
        return
    # Keep only existing files with one of the accepted extensions.
    narr = []
    ft = (".mp4", ".mov", ".avi", ".mkv", ".mpg")
    for item in arr:
        typestr = utils.get_file_type(item).lower()
        if not ft.count(typestr) or not os.path.isfile(item):
            continue
        narr.append(item)
    if len(narr):
        narr = utils.pathlib_paths(narr)
        self.updateList(narr)
        self.start_btn.set_state(True)
        self.clearQuery()
    # Restore the notice text in the paste box either way.
    self.smartPaste.delete(1.0, tk.END)
    self.smartPaste.insert(tk.INSERT, self.smartNotice)
def get_media_type(file_path):
    """ Returns media type of file. """
    # Existing files are classified by their detected mime type below.
    if os.path.exists(file_path):
        mime_type = utils.get_file_type(file_path)
    else:
        # IMAGE_SEQUENCE media objects have a MLT formatted resource path that does not
        # point to an existing file in the file system.
        # We're doing a heuristic here to identify those.
        pros_index = file_path.find("%0")
        d_index = file_path.find("d.")
        if pros_index != -1 and d_index != -1:
            return IMAGE_SEQUENCE
        all_index = file_path.find(".all")
        if all_index != -1:
            return IMAGE_SEQUENCE
        return FILE_DOES_NOT_EXIST
    # Map mime prefix to the project's media-type constants.
    if mime_type.startswith("video"):
        return VIDEO
    if mime_type.startswith("audio"):
        return AUDIO
    if mime_type.startswith("image"):
        return IMAGE
    return UNKNOWN
def loading():
    """ Returns the `LOADING_IMG` as the content of the response. """
    img_path = app.config.get('LOADING_IMG')
    # Open in binary mode: the loading image is binary data, and a text-mode
    # read would attempt (and likely fail) to decode it on Python 3. The
    # `with` block also closes the handle, which the original leaked.
    with open(img_path, 'rb') as loading_img:
        response = make_response(loading_img.read())
    # Pass the *path*, not the open file object — get_file_type takes a
    # filename/path everywhere else in this project.
    response.headers['Content-Type'] = utils.get_file_type(img_path)
    return response
def __init__(self, post: Submission):
    """Wrap a Reddit submission, deriving its url, caption and media type."""
    self.post = post
    self.url = post.url
    self.caption = f"{post.title} r/{post.subreddit} {post.url}"
    # Gallery posts are tagged directly; everything else is sniffed from the URL.
    is_gallery = "/gallery/" in post.url
    self.type = "gallery" if is_gallery else get_file_type(self.url)
def process_item(self, item, orig_item = None):
    """Recursively normalize a page item (Python 2 code: uses `unicode`).

    A string that names an existing file becomes a media dict (type, mime,
    dimensions, url); dicts are dispatched by their "type"; lists are
    processed element-wise; anything else is returned unchanged.
    """
    if isinstance(item, dict) and "subpage" in item:
        item["subpage"] = self.process_item(item["subpage"])
    if type(item) in (str,unicode) and os.path.exists(item) and not os.path.isdir(item):
        if not orig_item:
            orig_item = {}
        newitem = {"type": get_file_type(item), "mime": get_mimetype(item)}
        if "width" in orig_item:
            newitem["width"] = orig_item["width"]
        if "height" in orig_item:
            newitem["height"] = orig_item["height"]
        # Fill in missing image dimensions from the file itself, scaling
        # the missing axis to preserve aspect ratio.
        if newitem["type"] == "image" and ("width" not in newitem or "height" not in newitem):
            content_type, width, height = getImageInfo(open(item).read())
            if content_type and width and height:
                try:
                    if "width" not in newitem and "height" not in newitem:
                        newitem["width"] = width
                        newitem["height"] = height
                    elif "width" not in newitem:
                        newitem["width"] = width * newitem["height"] / height
                    else:
                        newitem["height"] = height * newitem["width"] / width
                except TypeError:
                    pass
        newitem["rawpath"] = item
        # Choose the public URL: already under the web root, copied into
        # the target imgs/ dir, or relative to the target dir.
        if self.params["WEB_ROOT"] in item:
            newitem["url"] = item.replace(self.params["WEB_ROOT"], self.params["WEB_URL"])
        elif self.params["copy_images"]:
            new_name = item.replace(os.sep, "_")
            new_path = os.path.join(self.params["target_dir"], "imgs", new_name)
            # Copy only when missing or stale (skipped entirely in DEBUG).
            if not DEBUG and (not os.path.exists(new_path) or (os.path.getmtime(item) > os.path.getmtime(new_path))):
                shutil.copy(item, new_path)
            newitem["rawpath"] = new_path
            newitem["url"] = os.path.join("imgs", new_name)
        else:
            newitem["url"] = item.replace(self.params["target_dir"], "")
        return newitem
    elif isinstance(item, dict) and "type" in item:
        if item["type"] in ("image", "video"):
            processed = self.process_item(item["url"], item)
            if isinstance(processed, dict):
                item.update(processed)
            else:
                item["url"] = processed
            # Popup media get a thumbnail url plus the full-size url.
            if "popup" in item and "rawpath" in item:
                item["fullurl"] = item["url"]
                item["url"] = make_thumbnail(item)
                item["type"] = "imagegallery" if item["popup"] == "gallery" else "imagepopup"
            return item
        elif item["type"] == "stack":
            item["stack"] = self.process_items(item["stack"])
            return item
        elif item["type"] in item_processors:
            return item_processors[item["type"]](item, self.params)
        else:
            return item
    elif isinstance(item, list):
        return self.process_items(item)
    else:
        return item
def read_csv_spec(data_file, header=True):
    """Read the CSV spec rows for *data_file*.

    Returns the parsed rows (header row skipped when *header* is True),
    or None when the spec file is missing.
    """
    spec_file = '{}.csv'.format(get_file_type(data_file))
    spec_path = os.path.join('specs', spec_file)
    first_row = 1 if header else 0
    try:
        with open(spec_path, 'r') as spec_fh:
            rows = csv.reader(spec_fh, delimiter=',')
            # Materialize inside the `with` block — the reader is lazy.
            return list(islice(rows, first_row, None))
    except IOError:
        logger.error(
            'No csv spec file "{}" found for data file "{}", skipping...'.
            format(spec_file, data_file))
        return None
def extract(self, lease_id, document_path):
    """Extract paragraphs from a lease document.

    Non-PDF inputs are first converted to PDF in /tmp; the (possibly
    converted) file is uploaded to the bucket and then run through
    document detection.

    Raises:
        ExtractorImageConversionFailed: when PDF conversion fails.
    """
    if get_file_type(document_path) != "pdf":
        tmp_path = "/tmp/{}.pdf".format(uuid4())
        try:
            ImageConversionService.convert(document_path, tmp_path)
            document_path = tmp_path
        except Exception as exc:
            # Chain the original cause instead of discarding it
            # (`as _` previously threw the real error away).
            raise ExtractorImageConversionFailed() from exc
    self._upload_file_to_bucket(lease_id, document_path)
    paragraphs = self._detect_document(
        self.bucket_file_storage_path + str(lease_id),
        self.bucket_paragraph_output_path + str(lease_id),
    )
    return paragraphs
def paste_right_click(self, _):
    """Populate the file tree from clipboard paths.

    Ignores the default notice text; keeps only existing files whose
    extension is in the accepted tuple.
    """
    clip_text = utils.clipboard_get()
    candidates = utils.split_str(clip_text, True)
    if len(candidates) and candidates[0] == self.smart_notice:
        # clipboard still holds the default notice — nothing to do
        return
    # Keep existing files of an accepted type, preserving order.
    allowed = self.file_types_tup
    accepted = [
        path for path in candidates
        if allowed.count(utils.get_file_type(path).lower())
        and os.path.isfile(path)
    ]
    if len(accepted):
        self.tree.set_list(utils.pathlib_paths(accepted, True))
def upload_avatar():
    """Flask view: store the uploaded avatar and record it for the user.

    Returns JSON with code 200 on success, 900 for a disallowed image type.
    """
    img = request.files.get('avatar')
    return_json = {'data': {}}
    user_id = current_user().id
    if allowed_file(img.filename):
        ext = get_file_type(img.filename)
        filename = str(uuid1(user_id)) + '.' + ext
        file_path = IMAGEPATH + filename
        # BUG FIX: save the file *before* committing the DB record, so a
        # failed write cannot leave the user pointing at a missing file.
        img.save(file_path)
        current_user().avatar = filename
        db.session.commit()
        return_json['code'] = 200
        return_json['data']['msg'] = 'success'
        return jsonify(return_json)
    return_json['code'] = 900
    return_json['data']['msg'] = 'abnormal image type'
    return jsonify(return_json)
def todict(self):
    """Serialize this user to a dict, embedding the avatar as base64.

    avatar.code: 200 = stream attached, 400 = no avatar set,
    300 = avatar file could not be read.
    """
    dic = {}
    dic['id'] = self.id
    dic['username'] = self.username
    dic['email'] = self.email
    dic['is_admin'] = self.isAdmin
    # Send the avatar file to the frontend as a base64 image stream.
    dic['avatar'] = {}
    if self.avatar is None:  # fixed: `is None`, not `== None`
        dic['avatar']['code'] = 400
    else:
        avatarpath = IMAGEPATH + self.avatar
        print(avatarpath)
        try:
            with open(avatarpath, 'rb') as img_f:
                img_stream = base64.b64encode(img_f.read())
            dic['avatar'] = {}
            dic['avatar']['code'] = 200
            dic['avatar']['img_type'] = get_file_type(self.avatar)
            # NOTE(review): b64encode returns bytes on Python 3 — confirm
            # whether the frontend/JSON layer needs .decode('ascii') here.
            dic['avatar']['img_stream'] = img_stream
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any read failure falls back to 300.
            dic['avatar']['code'] = 300
    dic['motto'] = self.motto
    return dic
def processFunction(result):
    """Scan one GitHub search result file for usages of the queried API.

    Skips vendored/venv paths, deduplicates repo+path pairs via the global
    SEARCHED_REPO dict, parses the file with `ast`, and records matching
    invocations (name + keyword args) into the global queues/counters.
    Any unexpected error is printed and swallowed.
    """
    try:
        global total_api_instance_count
        global total_file_count
        global SEARCHED_REPO
        global DOWNLOAD_LIST
        global WRITE_QUEUE
        global CODE_QUEUE
        current_file = result
        current_repo = result.repository
        repository_name = current_repo.full_name
        file_type = utils.get_file_type(current_file.name)
        print("Searching for API usage in: ")
        print(" Repository name: " + repository_name)
        print(" File path: " + current_file.path)
        # Check if current file is contained within site-packages or venv
        list_not_processed = [".git", "venv", "site-packages", "sklearn"]
        contain_not_processed = False
        for term in list_not_processed:
            if term in current_file.path:
                contain_not_processed = True
                break
        if contain_not_processed:
            return None
        # Check if the repo is already looked into before
        # And check the minimum number of star on the repo
        # Consider deactivating the functionality for ipynb / jupyter notebook first because of their different
        # type of file (not a simple plain text)
        # Also consider a less rigid approach in which a repo does not need to have a requirements.txt but instead have
        # a mention of sklearn in the readme as there are some people who list their requirements in the readme
        already_exist = False
        # check if repository is checked before
        if repository_name in SEARCHED_REPO:
            REPO_PATH = SEARCHED_REPO[repository_name]
            # check if the same path is already checked
            # if not, add to the list of checked path
            if current_file.path in REPO_PATH:
                already_exist = True
            else:
                REPO_PATH.append(current_file.path)
        else:
            # Repository is not checked yet, add them into the dictionary
            SEARCHED_REPO[repository_name] = [current_file.path]
        if not already_exist and current_repo.stargazers_count >= MINIMUM_STAR:
            # Process the file content
            try:
                decoded_file_content = current_file.decoded_content.decode(
                    "utf-8")
            except:
                return None
            try:
                tree = ast.parse(decoded_file_content)
            except:
                return
            # SAVE THE FILE TO COUNT THE RECALL LATER:
            api_name = FORMATTED_QUERY_NAME.split(".")[-1]
            list_processed_api = process_api_format(tree, api_name)
            is_api_found = False
            list_api_location = []
            for entry in list_processed_api:
                current_name = entry["name"]
                current_keys = entry["key"]
                current_line = entry["line_no"]
                # '' acts as a sentinel for "no keyword arguments".
                current_keys.append('')
                if FORMATTED_QUERY_NAME in current_name:
                    # Then check the keys
                    key_is_correct = True
                    isNone = False
                    for key in FORMATTED_QUERY_KEYS:
                        if key == "None":
                            isNone = True
                        if key not in current_keys:
                            key_is_correct = False
                    # Special processing if query key is None
                    if isNone:
                        # len is supposed to be 1 if no keyword is declared (e.g. only have '' as keyword param)
                        if len(current_keys) > 1:
                            key_is_correct = False
                        else:
                            key_is_correct = True
                    if key_is_correct:
                        temp_string = "API Invocation in line: " + current_line.__str__(
                        ) + "\n"
                        temp_string += entry["code"]
                        CODE_QUEUE.put(entry["code"])
                        list_api_location.append(temp_string)
                        is_api_found = True
            if is_api_found:
                # Queue the repo for cloning (once) and record every hit.
                git_url = current_file.repository.git_url
                if git_url not in DOWNLOAD_LIST.queue:
                    DOWNLOAD_LIST.put("git clone " + git_url + "\n")
                total_file_count += 1
                listWrite = []
                listWrite.append("----------------\n")
                listWrite.append("Github Link: " + git_url.__str__() + "\n")
                listWrite.append("Repository: " + repository_name + "\n")
                listWrite.append("File path: " + current_file.path + "\n")
                for text in list_api_location:
                    total_api_instance_count += 1
                    listWrite.append(text + "\n")
                listWrite.append("\n")
                WRITE_QUEUE.put(listWrite)
    except Exception as e:
        print(e.__str__())
def check_dir(path, config, check_arch=False, show_opti=True):
    """Check which binaries under *path* cover every requested build option.

    Each executable's name is parsed (via RESTR) into
    package/compiler/arch/opti/binary; a binary "succeeds" when it exists
    for every compiler:arch:opti combination from *config*.

    Returns:
        (success_list, fail_list, arch_fail_list) — or None when the option
        lists are empty, the path is missing, or no executables are found.
    """
    opti_list = config["opti"]
    arch_list = config["arch"]
    compiler_list = config["compiler"]
    # - architecture check is done in the compile script
    # - show_opti removes opti levels to reduce the number of options to show
    if not opti_list or not arch_list or not compiler_list:
        # fixed typo in the message: "at list" -> "at least"
        logger.error("you should give at least one option.")
        return
    if not os.path.exists(path):
        return
    cmd = "find {0} -type f -executable | sort".format(path)
    if check_arch:
        cmd += " | xargs file"
    files = system(cmd).splitlines()
    if not files:
        return
    check_list = defaultdict(lambda: defaultdict(set))
    arch_fail_list = defaultdict(lambda: defaultdict(list))
    success_list = defaultdict(list)
    fail_list = defaultdict(list)
    for b in files:
        if check_arch:
            b, b_type = b.split(":")
        base_name = os.path.basename(b)
        match = RESTR.search(base_name)
        if match is None:
            # Robustness: skip files that do not follow the expected naming
            # scheme instead of crashing on `.groups()` of None.
            continue
        package, compiler, arch, opti, bin_name = match.groups()
        bin_name = bin_name.replace(".elf", "")
        if opti not in opti_list:
            continue
        if arch not in arch_list:
            continue
        if compiler not in compiler_list:
            continue
        check_list[package][bin_name].add(":".join([compiler, arch, opti]))
        if check_arch:
            # we use the command line result since it is much faster
            real_arch = get_file_type(b_type, use_str=True)
            if real_arch != arch:
                arch_fail_list[package][bin_name].append((b, arch, real_arch))
    # Full cartesian product of requested options, as "compiler:arch:opti".
    opts = [compiler_list, arch_list, opti_list]
    opts = list(itertools.product(*opts))
    opts = set(map(lambda x: ":".join(x), opts))
    for package, val in check_list.items():
        for bin_name, opt in val.items():
            left_options = opts - set(opt)
            if not left_options:
                success_list[package].append((bin_name, opt))
            else:
                if not show_opti:
                    left_options = set(
                        map(lambda x: re.sub(":O(0|1|2|3|s)", "", x),
                            left_options))
                fail_list[package].append((bin_name, left_options))
    return success_list, fail_list, arch_fail_list
def _do_add_files_search(data):
    """Collect files from the chosen folder per the dialog's filter options.

    Gathers candidates (recursively or flat), filters them by media type
    or by user-supplied extensions, caps the count by the selected maximum,
    and updates the dialog's preview, info labels and Add button.
    """
    add_folder = _add_media_window.file_chooser.get_filenames()[0]
    load_action = _add_media_window.action_select.get_active()
    search_recursively = _add_media_window.recursively_checkbox.get_active()
    use_extension = _add_media_window.use_extension_checkbox.get_active()
    user_extensions = _add_media_window.extension_entry.get_text()
    maximum_file_option = _add_media_window.maximum_select.get_active()
    if add_folder == None:
        return
    # Gather candidate files, either recursively or from the top level only.
    if search_recursively == True:
        candidate_files = []
        for root, dirnames, filenames in os.walk(add_folder):
            for filename in filenames:
                candidate_files.append(os.path.join(root, filename))
    else:
        candidate_files = []
        for filename in os.listdir(add_folder):
            candidate_files.append(add_folder + "/" + filename)
    if use_extension == False:
        # Filter by detected media type.
        if load_action == 0: # "All Files", see dialogs.py
            filtered_files = candidate_files
        else:
            filtered_files = []
            for cand_file in candidate_files:
                file_type = utils.get_file_type(cand_file)
                if load_action == 1 and file_type == "video": # 1 = "Video Files", see dialogs.py
                    filtered_files.append(cand_file)
                elif load_action == 2 and file_type == "audio": # 2 = "Audio Files", see dialogs.py
                    filtered_files.append(cand_file)
                elif load_action == 3 and file_type == "image": # 3 = "Image Files", see dialogs.py
                    filtered_files.append(cand_file)
    else:
        # Try to accept spaces, commas and periods between extensions.
        stage1 = user_extensions.replace(",", " ")
        stage2 = stage1.replace(".", " ")
        exts = stage2.split()
        filtered_files = []
        for ext in exts:
            for cand_file in candidate_files:
                if fnmatch.fnmatch(cand_file, "*." + ext):
                    filtered_files.append(cand_file)
    # This is recursive, we need upper limit always
    max_files = 29
    if maximum_file_option == 1:
        max_files = 49 # see dialogs.py
    elif maximum_file_option == 2:
        max_files = 99 # see dialogs.py
    elif maximum_file_option == 3:
        max_files = 199 # see dialogs.py
    filtered_amount = len(filtered_files)
    if filtered_amount > max_files:
        filtered_files = filtered_files[0:max_files]
    global _add_files_set
    _add_files_set = filtered_files
    # Show the capped file list in the preview text view.
    files_text = ""
    for f in filtered_files:
        files_text += f + "\n"
    text_buffer = Gtk.TextBuffer()
    text_buffer.set_text(files_text)
    _add_media_window.files_view.set_buffer(text_buffer)
    info_text = _("Files to load: ") + str(len(filtered_files))
    _add_media_window.load_info.set_text(info_text)
    if filtered_amount > max_files:
        _add_media_window.load_info_2.set_text(
            str(filtered_amount - max_files) + " / " + str(filtered_amount) +
            _(" of found files will not be loaded."))
    else:
        _add_media_window.load_info_2.set_text("")
    if len(filtered_files) > 0:
        _add_media_window.add_button.set_sensitive(True)
    else:
        _add_files_set = None
        _add_media_window.add_button.set_sensitive(False)
def __init__(self, files):
    """Build the flurry resource maps from *files* (Python 2 code).

    Assigns sequential ids to every built resource, then indexes them by
    disk path, uri and provided tag, groups them into modules, and maps
    configured package uris.
    """
    super(FlurryMapWritable, self).__init__()
    map_version = utils.generate_random_string(2)
    id = 1
    for resource_file in files:
        sys.stdout.write(".")
        resource = FlurryResourceWritable(resource_file, map_version)
        for inner_resource in resource.build():
            id = id + 1
            self.resources[id] = inner_resource
    for id, resource in self.resources.iteritems():
        sys.stdout.write(".")
        disk = resource['disk']
        self.resources_disk[disk] = id
        uri = resource['uri']
        # BUG FIX: index the uri map by `uri` — it was keyed by `disk`,
        # so resources_uri never contained uri keys at all.
        self.resources_uri[uri] = id
        if len(resource['provides']) > 0:
            tag = resource['provides']
            self.resources_tag[tag] = id
        # NOTE(review): if a resource has modules but an empty 'provides',
        # `tag` below is stale from a previous iteration (or unbound on the
        # first) — confirm every module-bearing resource also provides a tag.
        if len(resource['module']) > 0:
            for module in resource['module']:
                if not module in self.modules:
                    self.modules[module] = []
                self.modules[module].append(tag)
    # Build module resources
    for module, resources in self.modules.iteritems():
        id = id + 1
        mod = FlurryResource()
        mod.requires = resources
        mod.type = "module"
        self.resources[id] = mod.toData()
        self.resources_tag[module] = id
    for module, resources in settings.packages.iteritems():
        uri = "/rsrc/v1/" + map_version + "/r/" + utils.md5_for_string(module)[2:10] + "." + utils.get_file_type(module)
        self.packages[uri] = resources
def process_item(self, item, orig_item=None, in_media_item=False):
    """Recursively normalize a page item (Python 2 code: uses `unicode`).

    A string naming an existing file becomes a media dict (type, mime,
    dimensions, url, optional thumbnail); dicts are dispatched by "type";
    lists are processed element-wise; other values pass through unchanged.
    `in_media_item` defers thumbnail creation to the enclosing media item.
    """
    if isinstance(item, dict) and "subpage" in item:
        item["subpage"] = self.process_item(item["subpage"])
    if type(item) in (str, unicode) and os.path.exists(item) and not os.path.isdir(item):
        if not orig_item:
            orig_item = {}
        newitem = {"type": get_file_type(item), "mime": get_mimetype(item)}
        if "width" in orig_item:
            newitem["width"] = orig_item["width"]
        if "height" in orig_item:
            newitem["height"] = orig_item["height"]
        # Fill in missing image dimensions, preserving aspect ratio.
        if newitem["type"] == "image" and ("width" not in newitem or "height" not in newitem):
            width, height = get_image_size(item)
            if width and height:
                try:
                    if "width" not in newitem and "height" not in newitem:
                        newitem["width"] = width
                        newitem["height"] = height
                    elif "width" not in newitem:
                        newitem["width"] = width * newitem["height"] / height
                    else:
                        newitem["height"] = height * newitem["width"] / width
                except TypeError:
                    pass
        # Should all images should be converted to thumbnails?
        do_thumbnail = False
        if newitem["type"] == "image" and self.params["allthumbs"]:
            # If we are in a media item, do not make thumbnail now, it will
            # be done (if needed) in the media item
            if not in_media_item:
                adjust_thumb_size(newitem, self.params)
                do_thumbnail = True
        newitem["rawpath"] = item
        # Choose the public URL: under the web root, copied/thumbnailed
        # into imgs/, or relative to the target dir.
        if self.params['WEB_ROOT'] and self.params['WEB_URL'] and self.params["WEB_ROOT"] in item:
            newitem["url"] = item.replace(self.params["WEB_ROOT"], self.params["WEB_URL"])
        elif self.params["copy_images"] or do_thumbnail:
            new_name = item.replace(os.sep, "_")
            new_path = os.path.join(self.params["target_dir"], "imgs", new_name)
            if not do_thumbnail and not orig_item.get("type") == "thumbnail":
                # Copy only when missing or stale (skipped in DEBUG).
                if not DEBUG and (not os.path.exists(new_path) or
                                  (os.path.getmtime(item) > os.path.getmtime(new_path))):
                    shutil.copy(item, new_path)
                newitem["rawpath"] = new_path
            newitem["url"] = os.path.join("imgs", new_name)
            if do_thumbnail:
                newitem["url"] = make_thumbnail(newitem, orig_path=item)
        else:
            newitem["url"] = item.replace(self.params["target_dir"], "")
        if "type" in orig_item:
            newitem["type"] = orig_item["type"]
        return newitem
    elif isinstance(item, dict) and "type" in item:
        if item["type"] in ("image", "video", "thumbnail"):
            original_item = dict(item)
            processed = self.process_item(item["url"], item, in_media_item=True)
            if isinstance(processed, dict):
                item.update(processed)
            else:
                item["url"] = processed
            if item["type"] == "image" and self.params["allthumbs"]:
                item["type"] = "thumbnail"
                if not original_item.get("width") and not original_item.get("height"):
                    adjust_thumb_size(item, self.params)
            if ("popup" in item or item["type"] == "thumbnail") and "rawpath" in item:
                item["url"] = make_thumbnail(item, orig_path=original_item["url"])
                if item["type"] != "thumbnail":
                    item["fullurl"] = item["url"]
                    item["type"] = "imagegallery" if item["popup"] == "gallery" else "imagepopup"
                else:
                    item["type"] = "image"
            return item
        elif item["type"] == "stack":
            item["stack"] = self.process_items(item["stack"])
            return item
        elif item["type"] in item_processors:
            return item_processors[item["type"]](item, self.params)
        else:
            return item
    elif isinstance(item, list):
        return self.process_items(item)
    else:
        return item
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network, and output stats
    and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold
    # Load the model through `infer_network`
    model = infer_network.load_model(args.model, args.cpu_extension, args.device)
    input_shape = infer_network.get_input_shape()
    # Handle the input stream: image, video file or camera
    input_type = utils.get_file_type(args.input)
    cap = None
    im_flag = False
    total_count = 0
    last_count = 0
    start_time = 0
    if input_type == "IMAGE":
        im_flag = True
        # BUG FIX: cap was left as None in image mode, so cap.open() below
        # crashed with AttributeError. cv2.VideoCapture also reads single
        # image files.
        cap = cv2.VideoCapture(args.input)
    elif input_type == "VIDEO":
        cap = cv2.VideoCapture(args.input)
    elif input_type == "CAM":
        cap = cv2.VideoCapture(0)
    else:
        print(
            "Unrecognized image or video file format. Please provide proper path or 'CAM' for camera input"
        )
        sys.exit(1)
    # BUG FIX: re-opening the capture with args.input clobbered the webcam
    # stream in CAM mode; only (re)open file-backed inputs.
    if input_type != "CAM":
        cap.open(args.input)
    # Loop until stream is over
    while cap.isOpened():
        # Read from the video capture
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)
        if key_pressed == 27:  # ESC aborts
            break
        width = int(cap.get(3))
        height = int(cap.get(4))
        # Pre-process the image to the network's NCHW input layout
        p_frame = cv2.resize(frame, (input_shape[3], input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)
        # Start asynchronous inference for specified request
        infer_network.exec_net(model, p_frame)
        # Wait for the result
        if infer_network.wait() == 0:
            # Get the results of the inference request
            result = infer_network.get_output()
            # Extract any desired stats from the results
            out_frame, current_count = get_stats_draw_box(
                frame, result, width, height, args.prob_threshold)
            # Publish current_count/total_count/duration to MQTT:
            #   Topic "person": keys "count" and "total"
            #   Topic "person/duration": key "duration"
            if current_count > last_count:
                start_time = time.time()
                total_count = total_count + current_count - last_count
                client.publish("person", json.dumps({"total": total_count}))
            if current_count < last_count:
                duration = int(time.time() - start_time)
                client.publish("person/duration",
                               json.dumps({"duration": duration}))
            client.publish("person", json.dumps({"count": current_count}))
            last_count = current_count
        # Send the frame to the FFMPEG server
        sys.stdout.buffer.write(out_frame)
        sys.stdout.flush()
    # Write an output image if `single_image_mode`
    if im_flag:
        cv2.imwrite("test_out.jpg", out_frame)
    cap.release()
    cv2.destroyAllWindows()
    client.disconnect()
def process_item(self, item, orig_item=None, in_media_item=False):
    """Recursively normalize a page item (Python 2 code: uses `unicode`).

    Strings naming existing files become media dicts (type, mime,
    dimensions, url, optional thumbnail); dicts dispatch on "type"; lists
    are processed element-wise; other values are returned unchanged.
    `in_media_item` defers thumbnailing to the enclosing media item.
    """
    if isinstance(item, dict) and "subpage" in item:
        item["subpage"] = self.process_item(item["subpage"])
    if type(item) in (
            str, unicode) and os.path.exists(item) and not os.path.isdir(item):
        if not orig_item:
            orig_item = {}
        newitem = {"type": get_file_type(item), "mime": get_mimetype(item)}
        if "width" in orig_item:
            newitem["width"] = orig_item["width"]
        if "height" in orig_item:
            newitem["height"] = orig_item["height"]
        # Fill in missing image dimensions, preserving aspect ratio.
        if newitem["type"] == "image" and ("width" not in newitem
                                           or "height" not in newitem):
            width, height = get_image_size(item)
            if width and height:
                try:
                    if "width" not in newitem and "height" not in newitem:
                        newitem["width"] = width
                        newitem["height"] = height
                    elif "width" not in newitem:
                        newitem[
                            "width"] = width * newitem["height"] / height
                    else:
                        newitem[
                            "height"] = height * newitem["width"] / width
                except TypeError:
                    pass
        # Should all images should be converted to thumbnails?
        do_thumbnail = False
        if newitem["type"] == "image" and self.params["allthumbs"]:
            # If we are in a media item, do not make thumbnail now, it will
            # be done (if needed) in the media item
            if not in_media_item:
                adjust_thumb_size(newitem, self.params)
                do_thumbnail = True
        newitem["rawpath"] = item
        # Pick the public URL: under the web root, copied/thumbnailed into
        # imgs/, or relative to the target dir.
        if self.params['WEB_ROOT'] and self.params[
                'WEB_URL'] and self.params["WEB_ROOT"] in item:
            newitem["url"] = item.replace(self.params["WEB_ROOT"],
                                          self.params["WEB_URL"])
        elif self.params["copy_images"] or do_thumbnail:
            new_name = item.replace(os.sep, "_")
            new_path = os.path.join(self.params["target_dir"], "imgs",
                                    new_name)
            if not do_thumbnail and not orig_item.get(
                    "type") == "thumbnail":
                # Copy only when missing or stale (skipped in DEBUG).
                if not DEBUG and (
                        not os.path.exists(new_path) or
                        (os.path.getmtime(item) > os.path.getmtime(new_path))):
                    shutil.copy(item, new_path)
                newitem["rawpath"] = new_path
            newitem["url"] = os.path.join("imgs", new_name)
            if do_thumbnail:
                newitem["url"] = make_thumbnail(newitem, orig_path=item)
        else:
            newitem["url"] = item.replace(self.params["target_dir"], "")
        if "type" in orig_item:
            newitem["type"] = orig_item["type"]
        return newitem
    elif isinstance(item, dict) and "type" in item:
        if item["type"] in ("image", "video", "thumbnail"):
            original_item = dict(item)
            processed = self.process_item(item["url"], item,
                                          in_media_item=True)
            if isinstance(processed, dict):
                item.update(processed)
            else:
                item["url"] = processed
            if item["type"] == "image" and self.params["allthumbs"]:
                item["type"] = "thumbnail"
                if not original_item.get(
                        "width") and not original_item.get("height"):
                    adjust_thumb_size(item, self.params)
            if ("popup" in item
                    or item["type"] == "thumbnail") and "rawpath" in item:
                item["url"] = make_thumbnail(
                    item, orig_path=original_item["url"])
                if item["type"] != "thumbnail":
                    item["fullurl"] = item["url"]
                    item["type"] = "imagegallery" if item[
                        "popup"] == "gallery" else "imagepopup"
                else:
                    item["type"] = "image"
            return item
        elif item["type"] == "stack":
            item["stack"] = self.process_items(item["stack"])
            return item
        elif item["type"] in item_processors:
            return item_processors[item["type"]](item, self.params)
        else:
            return item
    elif isinstance(item, list):
        return self.process_items(item)
    else:
        return item
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network, and output stats
    and video.
    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold
    ### DONE: Load the model through `infer_network` ###
    infer_network.load_model(args.model, args.device, args.cpu_extension)
    net_input_shape = infer_network.get_input_shape()
    logging.debug("Loaded the model. Shape: {}".format(net_input_shape))
    ### DONE: Handle the input stream ###
    # get_file_type here returns (is_single_image, stream_source).
    is_single_image, stream = utils.get_file_type(args.input)
    logging.debug("Got the stream: {}".format(stream))
    camera = cv2.VideoCapture(stream)
    stream_width = int(camera.get(3))
    stream_height = int(camera.get(4))
    if not camera.isOpened():
        logging.error("Error opening video stream {}".format(args.input))
        exit(1)
    last_count = 0
    total_count = 0
    frames_counter = 0  # Number of frames people on screen
    contiguous_frame_counter = 0  # Number of contiguous frames with same number of people on it
    fps = int(camera.get(cv2.CAP_PROP_FPS))  # Frames per sec, was 10
    logging.debug("FPS: {}".format(fps))
    prev_count = 0
    contiguous_count_threshold = 5  # half a second as threshold as FPS = 10
    ### DONE: Loop until stream is over ###
    while camera.isOpened():
        ### DONE: Read from the video capture ###
        more_image_flag, input_frame = camera.read()
        if not more_image_flag:
            break
        key_pressed = cv2.waitKey(60)
        if key_pressed == 27:
            # ESC aborts the loop
            break
        ### DONE: Pre-process the image as needed ###
        preprocessed_frame = utils.preprocessing(input_frame,
                                                 net_input_shape[2],
                                                 net_input_shape[3])
        ### DONE: Start asynchronous inference for specified request ###
        inference_start_time = time.time()
        infer_network.exec_net(preprocessed_frame)
        ### DONE: Wait for the result ###
        if infer_network.wait() == 0:
            ### DONE: Get the results of the inference request ###
            inference_duration = time.time() - inference_start_time
            result = infer_network.get_output()
            ### DONE: Extract any desired stats from the results ###
            output_frame, current_count = utils.draw_boxes(
                input_frame, result, stream_width, stream_height,
                prob_threshold)
            ### DONE: Calculate and send relevant information on ###
            inference_message = "Inference time: {:.3f}ms".format(
                inference_duration * 1000)
            cv2.putText(output_frame, inference_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            ### current_count, total_count and duration to the MQTT server ###
            ### Topic "person": keys of "count" and "total" ###
            ### Topic "person/duration": key of "duration" ###
            frames_counter += 1
            logging.debug(
                "prev:{} , current:{} , frames:{}, cont_frames:{}, last:{} , total:{}"
                .format(prev_count, current_count, frames_counter,
                        contiguous_frame_counter, last_count, total_count))
            # Debounce: only accept a count change after it has been stable
            # for `contiguous_count_threshold` frames.
            if current_count != prev_count:
                prev_count = current_count
                contiguous_frame_counter = 0
            else:
                contiguous_frame_counter += 1
            if contiguous_frame_counter >= contiguous_count_threshold:
                if current_count > last_count:
                    total_count += (current_count - last_count)
                    frames_counter = 0  # reset for new people in frame
                    client.publish(
                        "person",
                        json.dumps({
                            "total": total_count,
                            "count": current_count
                        }))
                elif current_count < last_count:
                    duration = int(frames_counter / fps)
                    client.publish("person/duration",
                                   json.dumps({"duration": duration}))
                last_count = current_count
        ### DONE: Send the frame to the FFMPEG server ###
        sys.stdout.buffer.write(output_frame)
        sys.stdout.flush()
    ### DONE: Write an output image if `single_image_mode` ###
    if is_single_image:
        outputFileName = "out_" + os.path.basename(args.input)
        cv2.imwrite(outputFileName, output_frame)
    camera.release()
    cv2.destroyAllWindows()
    client.disconnect()