def prepare(self):
    """
    Parse the request body, register this client IP and build self.data.

    Fills: self.Runner_dict (per-IP runner table), self.data (per-id
    request info), self.mode, self.video_mode, self.video_address.

    :raises ValueError: when the body is not (possibly nested) JSON
        encoding a dict, or when it yields no process entries.
    """
    data = self.request.body.decode('utf-8')
    # The incoming JSON may be encoded more than once: keep decoding
    # while the value is still a string.  (The original open-ended
    # "while not isinstance(data, dict)" loop crashed with an opaque
    # TypeError for non-dict, non-str payloads; now checked explicitly.)
    while isinstance(data, str):
        data = json.loads(data)
    if not isinstance(data, dict):
        raise ValueError("[ERROR]:Value data less have 1 elements,but get 0 elements!")
    if self.request.remote_ip not in Runner_IP_dict:
        Runner_IP_dict.update({self.request.remote_ip: {}})
    # Per-IP controller: each client IP owns its own runner table.
    self.Runner_dict = Runner_IP_dict[self.request.remote_ip]
    self.data = {}
    self.mode = data['mode']
    self.video_mode = data['video_mode']
    self.video_address = data['video_address'] if data['video_mode'] == 'live' else ''
    for i, item in enumerate(data['id']):
        self.data.update({
            item: {
                'video_name': data['video_name'][i] if 'local' == data['video_mode'] else '',
                'key_words': data['key_words'],
                'video_mode': data['video_mode'],
                'video_address': data['video_address'][i]
                if data['video_mode'] == 'live' and self.mode == 'new' else ''
            }
        })
    if len(self.data) <= 0:
        # Fixed: the format string took two arguments but had only one
        # placeholder ("... IP" -> "... IP: {}").
        message = "[ERROR]: {} Receive data error! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        # print("[ERROR]: receive data error!")
        raise ValueError("[ERROR]:Value data less have 1 elements,but get 0 elements!")
def record(self, cut_video_info: dict):
    """
    Report one recognized clip to the MySQL database.

    :param cut_video_info: result of predict; keys read here: 'start',
        'end', 'name', 'video_path', 'mode'.
    :return: cut_video_info, unchanged.
    """
    message = "[INFO]: {} Record.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Record.")
    # NOTE(review): values are interpolated straight into the SQL string.
    # Safe only while cut_video_info comes from internal code; if any field
    # can carry user input this is an SQL-injection risk -- prefer a
    # parameterized query if SQLHold supports one.
    insert_command = f"insert into {self.mysql_table} " \
                     f"(video_id, start, end, person_name, save_file, mode) " \
                     f"values(\'{self.video_path}\', " \
                     f"\'{cut_video_info['start']}\', " \
                     f"\'{cut_video_info['end']}\', " \
                     f"\'{cut_video_info['name']}\'," \
                     f" \'{cut_video_info['video_path']}\'," \
                     f" \'{cut_video_info['mode']}\' )"
    try:
        self.mysql.execute_command(insert_command)
    except pymysql.err.IntegrityError:
        # Duplicate key: this chip was already recorded -- log and continue.
        message = "[Warning]: {} Record: {} is exists!".format(
            str(datetime.datetime.now()).split('.')[0],
            cut_video_info['video_path'])
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'warning-' + self.video_name + '.log'))
        # print("[Warning]: This chip is exists!")
    return cut_video_info
def wait_to_create(file_name: str, process_name: str, pr_id: str) -> None:
    """
    Block until file_name exists and is non-empty.

    Polls once per second; each wait cycle logs a progress line to the
    per-process run log.

    :param file_name: file whose creation we are waiting for.
    :param process_name: label used in the progress message.
    :param pr_id: process id, used to pick the log file name.
    :return: None
    """
    while True:
        if os.path.exists(file_name) and os.path.getsize(file_name) != 0:
            break
        time.sleep(1)
        timestamp = str(datetime.datetime.now()).split('.')[0]
        progress = "[INFO]: {} This process: {} is loading ... ...".format(
            timestamp, process_name)
        write_log(message=progress,
                  file_=os.path.join(LOG_DIR, 'run-' + pr_id + '.log'))
def face_recognize(self, image):
    """
    Run face recognition on a single frame.

    :param image: frame data handed to the underlying face detector.
    :return: {'mode': 'video', 'name': detector result}.
    """
    timestamp = str(datetime.datetime.now()).split('.')[0]
    write_log(message="[INFO]: {} Face recognize.".format(timestamp),
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Face recognize.")
    recognized = self.face_detector._recognize(image)
    return {'mode': 'video', 'name': recognized}
def get_address(self):
    """
    Build the FLV playback URL for this video's push stream.

    :return: "http://<host>:<port>/video/<video_name>.flv"
    """
    timestamp = str(datetime.datetime.now()).split('.')[0]
    write_log(message="[INFO]: {} Get address.".format(timestamp),
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Get address.")
    host_part = "http://" + self.host_name + ":" + self.push_port
    path_part = '/video/' + self.video_name + '.flv'
    return host_part + path_part
def get_video_len(self):
    """
    Receive the video length from the worker over the pipe, then close
    both pipe ends.

    :return: whatever the worker sent (the video length).
    """
    video_len = self._main.recv()
    self._main.close()
    self._proc.close()
    timestamp = str(datetime.datetime.now()).split('.')[0]
    write_log(
        message="[INFO]: {} Main process communicate closed.".format(timestamp),
        file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    return video_len
def initMysql(self):
    """Open the MySQL connection (SQLHold) from the settings loaded in initialize()."""
    self.mysql = SQLHold(host=self.host_name,
                         user=self.mysql_user,
                         password=self.mysql_password,
                         database=self.mysql_database,
                         port=self.mysql_port)
    message = "[INFO]: {} Mysql init succeed!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
def new_one_runner(self, data: dict):
    """
    Create, register and start one Runner for the requesting client.

    Every process is stored in self.Runner_dict under its id so it can be
    looked up and stopped later.

    :param data: {'id': process target, 'data': runner data, 'class': runner class, 'count': runner counter}
    :return: None
    """
    runner_id = data['id']  # renamed: "id" shadowed the builtin
    data['data'].update({'ip': self.request.remote_ip})
    new_runner = Runner(data)
    self.Runner_dict.update({runner_id: new_runner})
    new_runner.start()
    timestamp = str(datetime.datetime.now()).split('.')[0]
    message = "[INFO]: {} Process {} start! IP: {}".format(
        timestamp, runner_id, self.request.remote_ip)
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
def initialize(self):
    """
    Load config.json and cache all runtime settings on self.

    Creates the output root directory when it does not exist yet.
    """
    message = "[INFO]: {} Initialize config.json.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Initialize config.json.")
    # cap/mysql/video_path must start as None so release() can test them.
    self.video_path = None
    self.cap = None
    self.output_dir = None
    with open('config.json', 'r') as f:
        data = json.load(f)
    self.fr_num_frame = data['video_len']
    # Measured: roughly 1 second of audio is 31629 bytes (based on the
    # audio cutting used in this code).
    self.sr_num_frame = data['audio_len']
    self.width = data['width']
    self.height = data['height']
    self.host_name = data['host_name']
    self.push_port = data['push_port']
    self.mysql_user = data['mysql_user']
    self.mysql_password = data['mysql_passwd']
    self.mysql_database = data['mysql_database']
    self.mysql_port = data['mysql_port']
    self.mysql_table = data['mysql_table']
    self.video_root_path = data['video_root_path']
    self.image_root_path = data['image_root_path']
    self.output_root_dir = data['output_dir']
    self.code_dir = data['code_dir']
    if not os.path.exists(self.output_root_dir):
        os.mkdir(self.output_root_dir)
        message = "[INFO]: {} New dir: {}.".format(
            str(datetime.datetime.now()).split('.')[0], self.output_root_dir)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: New dir:", self.output_root_dir)
    self.api_key = data['api_key']
    self.secret_key = data['secret_key']
    self.dev_pid = data['dev_pid']
    # Feature switches; presumably 'on'/'off' strings -- TODO confirm.
    self.__fr_on = data['fr']
    self.__sr_on = data['sr']
    self.threshold = data['threshold']
def fetch_token(self):
    """
    Request an access token from the speech-API OAuth endpoint.

    Posts self.api_key / self.secret_key to self.token_url.

    :return: the access token string on success, None on failure
        (all errors are logged to error.log).
    """
    message = "[INFO]: {} Fetch token!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    params = {
        'grant_type': 'client_credentials',
        'client_id': self.api_key,
        'client_secret': self.secret_key
    }
    post_data = urlencode(params).encode('utf-8')
    req = Request(self.token_url, post_data)
    try:
        message = "[INFO]: {} Request finished!".format(
            str(datetime.datetime.now()).split('.')[0])
        write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
        f = urlopen(req)
        result_str = f.read()
    except URLError as err:
        result_str = err.reason
        message = "[ERROR]: {} Token http response reason : {}.".format(
            str(datetime.datetime.now()).split('.')[0], str(result_str))
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        print('[ERROR] Token http response reason :', str(result_str))
        # print('token http response http code : ' + str(err.code)
        # Fixed: err.reason is typically a str (or an OSError), so the code
        # below crashed on .decode()/json.loads; bail out on network error.
        return None
    # Fixed: decode only when the payload is bytes (was an unconditional
    # .decode() that raised AttributeError on str values).
    if isinstance(result_str, bytes):
        result_str = result_str.decode()
    # print(result_str)
    result = json.loads(result_str)
    # print(result)
    if 'access_token' in result and 'scope' in result:
        # self.scope falsy (SCOPE = False) skips the check.
        if self.scope and (self.scope not in result['scope'].split(' ')):
            message = "[ERROR]: {} Scope is not correct!".format(
                str(datetime.datetime.now()).split('.')[0])
            write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
            print('[ERROR]: Scope is not correct!')
        # print('SUCCESS WITH TOKEN: %s ; EXPIRES IN SECONDS: %s' % (result['access_token'], result['expires_in']))
        return result['access_token']
    else:
        message = "[ERROR]: {} Maybe API_KEY or SECRET_KEY not correct: access_token or scope not found in token response.".format(
            str(datetime.datetime.now()).split('.')[0])
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        print(
            '[ERROR]: Maybe API_KEY or SECRET_KEY not correct: access_token or scope not found in token response'
        )
        return None
def close_deal(self):
    """
    Handle a close request for the processes listed in self.data.

    :return: status dict; 'Failed' for an unknown id, otherwise the
        result of close_one_process().

    NOTE(review): both branches return inside the first loop iteration,
    so only the first id in self.data is ever handled -- confirm whether
    multi-id close requests are expected here.
    """
    for item in self.data.keys():
        if item not in self.Runner_dict.keys():
            message = "[ERROR]: {} Process {} hasn't create! IP: {}".format(
                str(datetime.datetime.now()).split('.')[0], item,
                self.request.remote_ip)
            write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
            # print("[ERROR]: {} process hasn't created!".format(item))
            return {'status': 'Failed',
                    'message': "{} process hasn't created!".format(item)}
        else:
            message = "[INFO]: {} Process {} has stopped! IP: {}".format(
                str(datetime.datetime.now()).split('.')[0], item,
                self.request.remote_ip)
            write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
            # print("[INFO]: {} process has stopped!".format(item))
            return self.close_one_process(item)
def send_chips(self):
    """
    Collect clip files that have not been delivered to the client yet.

    "code": 100 -> there are chips to send
            101 -> nothing to send

    :return: {'code': ..., 'result': list of new chip paths or '',
              'const_info': currently always ''}.
    """
    # NOTE(review): const_info is always '' here, so every
    # "'' if const_info is None else const_info" below is dead logic,
    # presumably left from an earlier version.
    const_info = ''
    # print(self.__CONSIST_TABLE)
    time_now = time.time()
    # print(time_now - self.start_time - 4)
    if not os.path.exists(chips_dir):
        return {
            "code": 101,
            "result": '',
            'const_info': '' if const_info is None else const_info
        }
    chip_list = [
        os.path.join(self.code_dir, chips_dir, item)
        for item in os.listdir(os.path.join(chips_dir))
    ]
    if len(chip_list) == 0:
        return {
            "code": 101,
            "result": '',
            'const_info': '' if const_info is None else const_info
        }
    result = []
    # self.__sended remembers every chip already delivered.
    for item in chip_list:
        if item not in self.__sended:
            self.__sended.append(item)
            result.append(item)
    message = "[INFO]: {} Send chips: {}.".format(
        str(datetime.datetime.now()).split('.')[0], ','.join(result))
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    if len(result) == 0:
        return {
            "code": 101,
            "result": '',
            'const_info': '' if const_info is None else const_info
        }
    return {
        "code": 100,
        "result": result,
        'const_info': '' if const_info is None else const_info
    }
def __init__(self, data: dict):
    """
    Build a cut server for one video (local file) or live stream.

    :param data: runner payload; reads 'video_mode', 'video_name' or
        'video_address', and the client 'ip'.
    """
    super(CutServer, self).__init__()
    self.data = data
    self.__sended = []  # chips already delivered to the client
    self.__send_count = 0
    self.key_words = []
    # cap and mysql must start as None (see release()).
    self.cap = None
    self.mysql = None
    self.mode = data['video_mode']
    # Keep only the video's base name (no path, no extension).
    self.video_name = data['video_name'].split('.')[0] if 'local' == self.mode else \
        data['video_address'].split('/')[-1].split('.')[0]
    self.initialize()
    ip_dir = os.path.join(self.output_root_dir, data['ip'])
    if not os.path.exists(ip_dir):
        os.mkdir(ip_dir)
    self.output_dir = os.path.join(ip_dir, self.video_name)
    if not os.path.exists(self.output_dir):
        os.mkdir(self.output_dir)
    else:
        # Recreate the per-video output dir from scratch.
        shutil.rmtree(self.output_dir)
        message = "[INFO]: {} remove dir: {}.".format(
            str(datetime.datetime.now()).split('.')[0], self.output_dir)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: remove dir:", self.output_dir)
        os.mkdir(self.output_dir)
        message = "[INFO]: {} New dir: {}.".format(
            str(datetime.datetime.now()).split('.')[0], self.output_dir)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: New dir:", self.output_dir)
    # Video path (local mode) or URL (live mode).
    self.video_path = os.path.join(self.video_root_path, data['video_name']) if \
        data['video_mode'] == 'local' else data['video_address']
    self.speech_detector = None
    if self.__sr_on == 'on':
        # Dedicated speech-recognition worker fed through a duplex pipe.
        self.__ad_main, self.__ad_proc = mp.Pipe(duplex=True)
        self.speech_detector = BaiduSRC(api_key=self.api_key,
                                        secret_key=self.secret_key,
                                        dev_pid=self.dev_pid,
                                        pipe=self.__ad_proc)
        self.speech_detector.start()
    self.start_time = time.time()
    self._main, self._proc = mp.Pipe(duplex=False)
def close_all_process(self):
    """
    Stop and drop every runner owned by the requesting IP.

    :return: status dict; 'Failed' when there was nothing to close.

    NOTE(review): the returned 'keys' list is computed after every entry
    was popped by close_one_process(), so it is always empty -- confirm
    whether the pre-close key list was intended.
    """
    if len(self.Runner_dict) <= 0:
        message = "[ERROR]: {} There are no processes! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        # print("[ERROR]: There are no processes!")
        return {'status': 'Failed', 'message': "No process has been created!"}
    # Iterate over a snapshot: close_one_process() mutates the dict.
    for id in list(self.Runner_dict.keys()):
        self.close_one_process(id)
    message = "[INFO]: {} All processes closed! IP: {}".format(
        str(datetime.datetime.now()).split('.')[0], self.request.remote_ip)
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    # print("[INFO]: All processes closed!")
    return {'status': 'Stop', 'message': "All processes closed!",
            "keys": list(self.Runner_dict.keys())}
def get_audio_frame(self) -> list:
    """
    Pop the next audio slice (up to self.__len_audio bytes) off the
    front of self.audio.

    Increments self.audio_count each call; the final call returns the
    remaining (possibly shorter) tail and empties the buffer.
    """
    message = "[INFO]: {} Get audio frame.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Get audio frame.")
    if len(self.audio) > self.__len_audio:
        self.audio_count += 1
        # Cut one 3-second slice.
        audio = self.audio[0:self.__len_audio]
        self.audio = self.audio[self.__len_audio:]
    else:
        self.audio_count += 1
        audio = self.audio
        # Drain the buffer (slicing from its own length leaves it empty).
        self.audio = self.audio[len(self.audio):]
    return audio
def extract_feature(self, image, img_point) -> np.ndarray:
    """
    Input a processed data, output this image's face feature.
    :param image: processed image data.
    :param img_point: face detections produced by the detector.
    :return: if there has a face, return its features, else return None.
    """
    write_log(
        message="[INFO]: {} Extract feature!".format(
            str(datetime.datetime.now()).split('.')[0]),
        file_=os.path.join(LOG_DIR, 'run.log'))
    if image is None:
        return None
    # One descriptor per detected face: align via the shape predictor,
    # then compute the recognition embedding.
    descriptors = [
        self.face_rec_model.compute_face_descriptor(
            image, self.shape_predictor(image, face.rect))
        for face in img_point
    ]
    return np.asarray(descriptors)
def get_feature(mysql: SQLHold, pr_id: str) -> dict:
    """
    Load all stored face features from the face_feature table.

    :param mysql: open SQLHold connection.
    :param pr_id: process id, used only to pick the log file name.
    :return: {name: float32 feature vector} for every row.
    """
    message = "[INFO]: {} Get feature.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + pr_id + '.log'))
    command = 'select * from face_feature'
    mysql.execute_command(command)
    result = mysql.fetchall()
    feature_dict = {}
    # Rows unpack as (id, name, comma-separated feature string).
    for i, name, item in result:
        item = np.asarray([float(val) for val in item.split(',')],
                          dtype=np.float32)
        feature_dict.update({name: item})
    message = "[INFO]: {} there are {} face feature".format(
        str(datetime.datetime.now()).split('.')[0], len(feature_dict))
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + pr_id + '.log'))
    return feature_dict
def new_deal(self):
    """
    Handle a 'new' request: spawn one Runner per requested process id.

    :return: status dict for the client; 'Initing' on success, 'Failed'
        when a duplicate is detected or some process failed to start.
    """
    message = "[INFO]: {} Client request. IP: {}".format(
        str(datetime.datetime.now()).split('.')[0], self.request.remote_ip)
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    # Number of runner processes (streams) before this request.
    last_num = len(self.Runner_dict)
    for item in self.data.keys():
        # print('data')
        if item in self.Runner_dict.keys() and self.Runner_dict[item].process.is_alive():
            # Fixed: the format string took three arguments but only had
            # two placeholders ("IP:" -> "IP: {}").
            message = "[ERROR]: {} Don't repeat the process of {}! IP: {}".format(
                str(datetime.datetime.now()).split('.')[0], item,
                self.request.remote_ip)
            write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
            # print("[ERROR]: Don't repeat the process of {}!".format(item))
            return {'status': 'Failed',
                    'message': "Don't repeat the process of {}!".format(item)}
        self.new_one_runner({'class': self.runner_class,
                             'data': self.data[item],
                             'id': item})
    # Creation succeeded iff the runner count grew by exactly len(self.data).
    if len(self.Runner_dict) - last_num == len(self.data):
        message = "[INFO]: {} New {} number of process! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], len(self.data),
            self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
        # print("[INFO]: New {} number of process!".format(len(self.data)))
        if 'live' == self.video_mode:
            return {'status': 'Initing', 'message': "New process succeed!",
                    'Live-address': self.video_address}
        else:
            return {'status': 'Initing', 'message': "New process succeed!",
                    'Live-address': address(self.Runner_dict),
                    'all_frame': get_video_len(self.Runner_dict)}
    else:
        # Fixed: this message was split by a raw newline inside the string
        # literal (a syntax error); rebuilt as a single line.
        message = "[ERROR]: {} Number of {} processes hasn't created! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0],
            len(self.data) - (len(self.Runner_dict) - last_num),
            self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        # print("[ERROR]: {} processes hasn't created!".format(len(self.data) - (len(Runner_dict) - last_num)))
        for item in self.data.keys():
            if item not in self.Runner_dict.keys():
                message = "[ERROR]: {} Process {} hasn't create! IP: {}".format(
                    str(datetime.datetime.now()).split('.')[0],
                    self.data[item]['video_address'], self.request.remote_ip)
                write_log(message=message,
                          file_=os.path.join(LOG_DIR, 'error.log'))
                # print("[ERROR]: {} hasn't create!".format(self.data[item]['video_address']))
        return {'status': 'Failed', 'message': "Process create error!"}
def split_audio(self, video_path: str, save_file: str) -> None:
    """
    Extract the audio track from a video as mono 16 kHz audio via ffmpeg.

    Also resets self.audio_count for the new track.

    :param video_path: path of the source video.
    :param save_file: path of the output audio file.  (Docstring fixed:
        it previously documented a nonexistent ``save_dir`` parameter.)
    :return: None
    """
    message = "[INFO]: {} Split audio.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Split audio.")
    self.audio_count = 0
    command = [
        "ffmpeg", "-y", "-i", video_path, "-ac", str(1), "-ar", str(16000),
        "-loglevel", "error", save_file
    ]
    # Fixed: stdin=open(os.devnull) leaked one file descriptor per call;
    # subprocess.DEVNULL needs no cleanup.
    subprocess.check_output(command, stdin=subprocess.DEVNULL, shell=False)
def image_process(self, image):
    """
    Process one image.(B, G, R)
    Input a image's path or a np.ndarray, then run the dlib face detector
    on it.
    :param image: file path or image data(3 dimension).
    :return: dlib detection result for the image.
    """
    message = "[INFO]: {} Image process!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    if isinstance(image, str):
        image = cv2.imread(image)
    # NOTE(review): cvtColor converts BGR -> RGB, but the split/merge
    # below reorders the channels right back -- the net effect is the
    # original BGR layout.  Confirm whether one of the two steps was
    # meant to be removed.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    b, g, r = cv2.split(image)
    image = cv2.merge([r, g, b])
    # Second argument 1 = upsample the image once before detecting.
    face_image = self.detector(image, 1)
    return face_image
def __init__(self, key_name, face_feature, threshold):
    """
    Load the dlib face models and register the known faces.

    :param key_name: names of the people to search for.
    :param face_feature: their 128-d feature vectors; a zero vector is
        inserted at index 0 as a "no match" sentinel.
    :param threshold: distance threshold for accepting a match.
    """
    super(FaceDetecter, self).__init__('', 'dlib', port=9998)
    currentpath = os.path.dirname(os.path.abspath(__file__))
    self.detector = dlib.cnn_face_detection_model_v1(
        currentpath + '/models/mmod_human_face_detector.dat')
    self.face_rec_model = dlib.face_recognition_model_v1(
        currentpath + '/models/face_model.dat')
    self.shape_predictor = dlib.shape_predictor(
        currentpath + '/models/face_alignment.dat')
    self.cascade = cv2.CascadeClassifier(
        currentpath + "/models/haarcascade_frontalface_alt.xml")
    message = "[INFO]: {} Fr model loaded!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    self.key_name = key_name
    self.face_feature = face_feature
    # Index 0 acts as the "unknown" entry; inference() relies on this
    # one-slot offset between face_feature and key_name.
    self.face_feature.insert(0, [0] * 128)
    self.threshold = threshold
def close_one_process(self, id: str):
    """
    Stop (if alive) and remove the runner registered under id.

    :param id: runner id to close.
    :return: status dict for the client.

    NOTE(review): id is assumed to be present in self.Runner_dict -- an
    unknown id raises KeyError.  Callers (close_deal/close_all_process)
    check membership first; confirm no other caller exists.
    """
    if len(self.Runner_dict) <= 0:
        message = "[ERROR]: {} There are no processes! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
        # print("[ERROR]: There are no processes!")
        return {'status': 'Failed', 'message': "No process has been created!"}
    if self.Runner_dict[id].process.is_alive():
        self.Runner_dict[id].close()
        self.Runner_dict.pop(id)
        message = "[INFO]: {} Process {} closed! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], id,
            self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
        print("[INFO]: {} process closed!".format(id))
        return {'status': 'Stop', 'message': 'Process close succeed!',
                "keys": list(self.Runner_dict.keys())}
    else:
        # Already dead: just drop the bookkeeping entry.
        self.Runner_dict.pop(id)
        message = "[INFO]: {} Process {} is not alive! IP: {}".format(
            str(datetime.datetime.now()).split('.')[0], id,
            self.request.remote_ip)
        write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
        print("[INFO]: {} process is not alive!".format(id))
        return {'status': 'Stop', 'message': "The remaining process is killed!"}
def release(self):
    """
    Free every resource held by this runner (capture, MySQL, SR worker).

    Failures are logged with a traceback instead of being raised, so the
    shutdown path never crashes the caller.
    """
    try:
        # cap and mysql must have been initialized to None up front,
        # otherwise the process variables cannot be communicated and an
        # error is raised.
        if self.cap is not None:
            self.cap.release()
        if self.mysql is not None:
            self.mysql.close()
        # NOTE(review): self.__ad_main only exists when speech recognition
        # was enabled in __init__; with sr 'off' this raises
        # AttributeError, which the except below swallows -- confirm
        # whether a guard was intended.
        self.__ad_main.send('stop')
        if self.speech_detector is not None:
            sr_pid = self.speech_detector.pid
            self.speech_detector.terminate()
            # NOTE(review): SIGSTOP suspends the process rather than
            # killing it -- confirm SIGKILL was not intended here.
            os.kill(sr_pid, signal.SIGSTOP)
        self.__ad_main.close()
        self.__ad_proc.close()
        message = "[INFO]: {} Release finished.".format(
            str(datetime.datetime.now()).split('.')[0])
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: Release finished.")
    except Exception as e:
        err_message = traceback.format_exc()
        message = "[ERROR]: {} Release error, we don't know this error!".format(
            str(datetime.datetime.now()).split('.')[0])
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'error-' + self.video_name + '.log'))
        write_log(message=err_message,
                  file_=os.path.join(LOG_DIR, 'error-' + self.video_name + '.log'))
def cut_video(self, cut_video_info: dict) -> dict:
    """
    Cut one clip out of self.video_path with ffmpeg.

    :param cut_video_info: dict with 'mode' ('audio' or frame-based),
        'start', 'end' and 'name'; the unit of start/end depends on mode
        (seconds for audio, frame indices otherwise).
    :return: {'video_path': path of the clip that was written}.
    """
    message = "[INFO]: {} Cut video.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Cut video.")
    save_dir = os.path.join(self.output_dir, 'chips')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
        message = "[INFO]: {} New dir: {}".format(
            str(datetime.datetime.now()).split('.')[0], save_dir)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: New dir:", save_dir)
    # Live streams are saved as MPEG-TS segments; otherwise reuse the
    # source extension (assumes a 3-letter extension, e.g. '.mp4').
    if self.mode == 'live':
        extension = '.ts'
    else:
        extension = self.video_path[-4:]
    if cut_video_info['mode'] == 'audio':
        # start/end are in seconds here.
        start = mktime_sec(cut_video_info['start'])
        end = mktime_sec(cut_video_info['end'])
        end_time = mktime_sec(cut_video_info['start'] +
                              cut_video_info['end']).split('.')[0]
    else:
        # start/end are frame indices; convert via fps.
        start = mktime_form(cut_video_info['start'], self.fps)
        end = str(
            (cut_video_info['end'] - cut_video_info['start']) / self.fps)
        end_time = mktime_form(cut_video_info['end'], self.fps).split('.')[0]
    output_file = os.path.join(
        save_dir, cut_video_info['mode'] + '#' +
        start.split('.')[0].replace(':', '-') + '#' +
        end_time.replace(':', '-') + '#' + cut_video_info['name'] + '#' +
        self.video_name + extension)
    command = [
        "ffmpeg", "-loglevel", "error", "-y", "-ss", start, "-i",
        self.video_path, "-t", end, "-c:v", "libx264", "-c:a", "aac", "-ar",
        "44100", output_file
    ]
    # shell=True would be required for a str command; a list command uses
    # shell=False.
    message = "[INFO]: {} ffmpeg.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # Fixed: stdin=open(os.devnull) leaked one file descriptor per call;
    # subprocess.DEVNULL needs no cleanup.
    subprocess.check_output(command, stdin=subprocess.DEVNULL, shell=False)
    result = {'video_path': output_file}
    # ffmpeg runs in a separate process: block until the clip file exists.
    wait_to_create(output_file, "[Cut Video]", self.video_name)
    return result
def inference(self, frame_feature) -> list:
    """
    Inference the person.
    :param frame_feature: predict face feature (one embedding per face).
    :return: name list of matched key people (may contain duplicates).
    """
    message = "[INFO]: {} Inference face feature!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    # One row per detected face; +1 column for the zero "unknown" entry
    # inserted at index 0 of self.face_feature.
    compare_result = np.zeros(shape=(len(frame_feature),
                                     len(self.key_name) + 1))
    for i, feature in enumerate(frame_feature):
        compare_result[i] = face_recognition.face_distance(
            self.face_feature, feature)
    # Zero-out distances at/above the threshold; 0 then means "rejected".
    compare_result = np.where(compare_result < self.threshold,
                              compare_result, 0)
    result = []
    for item in compare_result:
        # argmax <= 0: no candidate survived the threshold.
        # NOTE(review): argmax picks the LARGEST surviving distance, i.e.
        # the weakest accepted match -- confirm argmin over nonzero
        # entries was not intended.
        if np.argmax(item) <= 0:
            continue
        # -1 compensates for the sentinel at index 0.
        result.append(self.key_name[np.argmax(item) - 1])
    return result
def get_image_feature(self, img):
    """
    This is a factor function which are used to produce a image's face feature.
    Input a image's path or a np.ndarray, output this image's face feature.
    <-- Notice! -->
    #### If this use to get the key face feature,
    #### weather the input is str or np.ndarray data,
    #### each image only include one face.
    ##### If this use to get the frame's face feature,
    ##### 2 or more face can in image.
    <-- End! -->
    :param img: file path or image data.
    :return: if there has a face, return it's feature, else return None.
    """
    message = "[INFO]: {} Get face feature!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
    # Detect faces first, then compute one descriptor per detection.
    img_point = self.image_process(img)
    return self.extract_feature(img, img_point)
def __refresh(self):
    """
    Re-open the current video segment and re-slice its audio track.

    Resets capture, frame counters and (when speech recognition is on)
    the audio buffer plus the derived timing bookkeeping.
    """
    message = "[INFO]: {} Refresh arguments!".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Refresh arguments!")
    if 'live' == self.mode:
        # Advance to the next downloaded stream segment.
        self.video_path = self.fetch.change_file()
    if self.__fr_on == 'on':
        # capture change!
        self.cap = cv2.VideoCapture(self.video_path)
        # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # NOTE(review): debug print leftover?
        print('---', width, height)
        # Keep the configured width; scale height to preserve the source
        # aspect ratio when the stream size differs from the config.
        if not (width == self.width and self.height == height):
            self.__resize = (self.width, int(
                (self.width / width) * height))
        else:
            self.__resize = (self.width, self.height)
        self.all_frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.frame_count = 0
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.__fr_all_times = int(self.all_frames / self.fr_num_frame) + 1
    # audio change!
    if self.__sr_on == 'on':
        audio_file = os.path.join(
            self.output_dir, 'wav',
            self.video_path.split('/')[-1].split('.')[0] + '.wav')
        # NOTE(review): the trailing "if self.__sr_on == 'on' else None"
        # is redundant -- this branch already checked it.
        self.split_audio(
            self.video_path,
            save_file=audio_file) if self.__sr_on == 'on' else None
        wait_to_create(audio_file, "[Audio Cut]", self.video_name)
        with open(audio_file, 'rb') as f:
            message = "[INFO]: {} Read audio file: {}.".format(
                str(datetime.datetime.now()).split('.')[0], audio_file)
            write_log(message=message,
                      file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
            self.audio = f.read()
        # Fixed slice length; this value corresponds to 3 seconds.
        self.__len_audio = self.sr_num_frame
        self.__sr_all_times = int(len(self.audio) / self.__len_audio) + 1
        self.__audio_time = get_static_time(self.__len_audio,
                                            len(self.audio),
                                            self.all_frames, self.fps)
        self.__fr_sr_count = (int(
            self.__fr_all_times / self.__sr_all_times)) * self.fr_num_frame
        message = "[INFO]: {} Audio times: {}, Video times {}, sr count {}.".format(
            str(datetime.datetime.now()).split('.')[0], self.__sr_all_times,
            self.__fr_all_times, self.__fr_sr_count)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
def chips_deal(self):
    """
    Poll every requested runner for newly produced clips.

    :return: status dict with per-id chip lists ('chips') and per-id
        const_info ('const_info'); 'Failed' if any requested id was
        never created.
    """
    result_all = {}
    const_all = {}
    for item in self.data.keys():
        if item not in self.Runner_dict.keys():
            message = "[ERROR]: {} Process {} hasn't created! IP: {}".format(
                str(datetime.datetime.now()).split('.')[0], item,
                self.request.remote_ip)
            write_log(message=message, file_=os.path.join(LOG_DIR, 'error.log'))
            # print("[ERROR]: {} process hasn't created!".format(item))
            return{'status': 'Failed',
                   'message': "{} process hasn't created!".format(item)}
        if not self.Runner_dict[item].process.is_alive():
            # Dead runner: report a 'Finished' marker instead of chips.
            message = "[INFO]: {} Process {} has finished! IP: {}".format(
                str(datetime.datetime.now()).split('.')[0], item,
                self.request.remote_ip)
            write_log(message=message, file_=os.path.join(LOG_DIR, 'run.log'))
            # print("[INFO]: {} process has finished!".format(item))
            result = ['Finished']
            result_all.update({item: result})
        else:
            result = self.Runner_dict[item].get_chips()
            # code 101 = nothing new; code 100 = new chips available.
            if result['code'] == 101:
                const_all.update({item: result['const_info']})
                print("[INFO]: No chips!")
            elif result['code'] == 100:
                result_all.update({item: result['result']})
                const_all.update({item: result['const_info']})
    if len(result_all) <= 0:
        return {
            'status': 'Push',
            'message': 'No chips!',
            'chips': '',
            'const_info': const_all
        }
    return {
        'status': 'Push',
        'message': 'New chips!',
        'chips': result_all,
        'const_info': const_all
    }
def initialize_request(self, data: dict):
    """
    Per-request setup: MySQL, output dirs, capture/stream and face models.

    :param data: request payload; only 'key_words' is read here.
    """
    message = "[INFO]: {} Initialize request.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Initialize request.")
    key_words = data['key_words']
    # init mysql
    self.initMysql()
    # end
    # File / directory initialization.
    audio_dir = os.path.join(self.output_dir, 'wav')
    if not os.path.exists(audio_dir):
        os.mkdir(audio_dir)
        message = "[INFO]: {} New dir: {}.".format(
            str(datetime.datetime.now()).split('.')[0], audio_dir)
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: New dir:", audio_dir)
    # OpenCV / stream initialization.
    if 'live' == self.mode:
        self.fetch = FetchStream(self.video_path, self.output_dir)
        self.fetch.start_download()
    # Fix the frame read size (done inside __refresh).
    self.__refresh()
    face_feature_dict = get_feature(self.mysql, self.video_name)
    key_name, face_feature = get_face_data(face_feature_dict, key_words)
    self.face_detector = FaceDetecter(key_name=key_name,
                                      face_feature=face_feature,
                                      threshold=self.threshold)
    self.key_words = key_words
    # Tell the main process the total video length (seconds).
    self._proc.send(int(self.all_frames / self.fps))
def speech_recognize(self):
    """
    Recognize the next audio slice and match key words in the transcript.

    Sends the slice to the SR worker over self.__ad_main and scans the
    returned text for self.key_words.

    :return: {'mode': 'audio', 'name': comma-joined matched words or '',
              'start': slice start time, 'end': slice duration}
             (empty strings when the buffer is exhausted or nothing was
             recognized).
    """
    message = "[INFO]: {} Speech recognize.".format(
        str(datetime.datetime.now()).split('.')[0])
    write_log(message=message,
              file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
    # print("[INFO]: Speech recognize.")
    data = self.get_audio_frame()
    # Audio buffer exhausted: switch speech recognition off.
    if 0 == len(data):
        message = "[INFO]: {} Audio recognition finished!".format(
            str(datetime.datetime.now()).split('.')[0])
        write_log(message=message,
                  file_=os.path.join(LOG_DIR, 'run-' + self.video_name + '.log'))
        # print("[INFO]: Audio recognition finished!")
        self.__sr_on = 'off'
        return {'mode': 'audio', 'name': '', 'start': '', 'end': ''}
    self.__ad_main.send(data)
    result = self.__ad_main.recv()[0]
    # Empty transcript: nothing recognized in this slice.
    if '' == result:
        return {'mode': 'audio', 'name': '', 'start': '', 'end': ''}
    name = []
    for item in self.key_words:
        if item in result:
            name.append(item)
    name = ','.join(name) if len(name) > 0 else ''
    return {
        'mode': 'audio',
        'name': name,
        # audio_count was already incremented for this slice, hence -1.
        'start': self.__audio_time * (self.audio_count - 1),
        'end': self.__audio_time
    }