def check_face_img(face_img):
    # pose_predict: [[pitch, yaw, roll]] (pitch: nod up/down; yaw: turn left/right; roll: in-plane tilt)
    '''
        :param face_img: image matrix of the face
        :return: whether to run recognition (False: do not recognize)
    '''
    current_day = get_current_day()
    log_file = open(os.path.join(log_dir, current_day + '.txt'), 'a')

    face_img_str = base64.b64encode(msgpack_numpy.dumps(face_img))
    request = {
        "request_type": 'check_pose',
        "face_img_str": face_img_str,
        "image_id": str(time.time())
    }
    result = requests.post(angle_url, data=request)
    try:
        if result.status_code == 200:
            pose_predict = json.loads(result.content)["pose_predict"]
            if not pose_predict:  # the service failed to return a pose
                log_file.write('\t'.join(map(str, ['pose filter request'])) + '\n')
                log_file.close()
                return False
            else:
                pose_predict = msgpack_numpy.loads(base64.b64decode(pose_predict))
                if pose_predict is None:
                    log_file.write('\t'.join(map(str, ['pose filter detect'])) + '\n')
                    log_file.close()
                    return False
                pitch, yaw, roll = pose_predict[0]
                if math.fabs(pitch) < pitch_threshold and math.fabs(yaw) < yaw_threshold and \
                        math.fabs(roll) < roll_threshold:
                    log_file.write('\t'.join(map(str, ['pose not filter', str(pose_predict[0])])) + '\n')
                    log_file.close()
                    return True
                else:
                    log_file.write('\t'.join(map(str, ['pose filter threshold', str(pose_predict[0])])) + '\n')
                    log_file.close()
                    return False
        else:
            return False
    except:
        traceback.print_exc()
        log_file.close()
        return False
def verif_all_person(all_person_file, test_person_file):
    # For every test feature, print the gallery picture with the highest cosine similarity.
    # Both files hold one "<pic_path>\t<base64 msgpack feature>" record per line.
    all_person_list = open(all_person_file).read().split('\n')
    test_person_list = open(test_person_file).read().split('\n')
    for test_element in test_person_list:
        try:
            test_tmp = test_element.split('\t')
            this_similarity_list = []
            if len(test_tmp) == 2:
                test_pic_path, test_face_feature = test_tmp[0], test_tmp[1]
                test_face_feature = msgpack_numpy.loads(base64.b64decode(test_face_feature))
                for all_element in all_person_list:
                    try:
                        all_tmp = all_element.split('\t')
                        if len(all_tmp) == 2:
                            all_pic_path, all_face_feature = all_tmp[0], all_tmp[1]
                            all_face_feature = msgpack_numpy.loads(base64.b64decode(all_face_feature))
                            similarity = cosine_similarity(all_face_feature, test_face_feature)[0][0]
                            this_similarity_list.append((similarity, all_pic_path))
                        else:
                            continue
                    except:
                        traceback.print_exc()
                        continue
            else:
                continue
        except:
            traceback.print_exc()
            continue
        this_similarity_list.sort(key=lambda x: x[0])
        print test_tmp[0], this_similarity_list[-1][1]
def check_face_img(self, face_img, image_id):
    # Ask the pose service for the face angle and filter by the pose thresholds.
    '''
        :param face_img: image matrix of the face
        :param image_id: image id
        :return: whether to run recognition (False: do not recognize)
    '''
    # pose check
    current_day = get_current_day()
    log_file = open(os.path.join(log_dir, current_day + '.txt'), 'a')

    face_img_str = base64.b64encode(msgpack_numpy.dumps(face_img))
    request = {
        "request_type": 'check_pose',
        "face_img_str": face_img_str,
        "image_id": image_id,
    }
    url = "http://%s:%d/" % (check_ip, check_port)
    result = image_request(request, url)
    try:
        pose_predict = json.loads(result)["pose_predict"]
        if not pose_predict:  # the service failed to return a pose
            log_file.write('\t'.join(map(str, [image_id, 'pose filter request'])) + '\n')
            log_file.close()
            return False
        else:
            pose_predict = msgpack_numpy.loads(base64.b64decode(pose_predict))
            if pose_predict is None:
                log_file.write('\t'.join(map(str, [image_id, 'pose filter detect'])) + '\n')
                log_file.close()
                return False
            pitch, yaw, roll = pose_predict[0]
            if math.fabs(pitch) < self.pitch_threshold and \
                    math.fabs(yaw) < self.yaw_threshold and \
                    math.fabs(roll) < self.roll_threshold:
                log_file.close()
                return True
            else:
                log_file.write('\t'.join(map(str, [image_id, 'pose filter threshold'])) + '\n')
                log_file.close()
                return False
    except:
        traceback.print_exc()
        log_file.close()
        return False
def load_from_bytes(buf):
    """
    Args:
        buf: the output of `dumps`.
    """
    # Since 0.6, the default max size was set to 1MB.
    # We change it to approximately 1G.
    return msgpack_numpy.loads(buf, raw=False,
                               max_bin_len=MAX_MSGPACK_LEN,
                               max_array_len=MAX_MSGPACK_LEN,
                               max_map_len=MAX_MSGPACK_LEN,
                               max_str_len=MAX_MSGPACK_LEN)
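# A minimal round-trip sketch for load_from_bytes above; it assumes MAX_MSGPACK_LEN is
# the module-level constant referenced in the function (~1G per the comment) and that
# the matching `dumps` is msgpack_numpy.dumps. The sample array is purely illustrative.
def _load_from_bytes_example():
    import numpy as np
    import msgpack_numpy

    arr = np.arange(12, dtype=np.float32).reshape(3, 4)
    buf = msgpack_numpy.dumps(arr, use_bin_type=True)  # serialize the numpy array
    restored = load_from_bytes(buf)                    # deserialize with the raised size caps
    assert np.array_equal(arr, restored)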
def cal_sim():
    # Walk test_cluster.txt ("<id>\t<base64 msgpack feature>" per line) and collect the
    # ids whose feature is too similar (cosine > 0.85) to the last kept feature.
    sim_file = 'test_cluster.txt'
    all_content = open(sim_file, 'r').read().split('\n')
    error_num = 0
    all_num = 0
    del_num = 0
    del_list = []
    last_index = 1
    id_feature_dic = {}
    error_id = []
    for content in all_content:
        try:
            id, feature = content.split('\t')
            id_feature_dic[id] = msgpack_numpy.loads(base64.b64decode(feature))
        except:
            # `id` is not defined when the split fails, so record the raw line instead
            error_id.append(content)
            continue
    for index in range(2, len(all_content)):
        try:
            last_id, last_feature = all_content[last_index].split('\t')
            this_id, this_feature = all_content[index].split('\t')
            this_feature = msgpack_numpy.loads(base64.b64decode(this_feature))
            last_feature = msgpack_numpy.loads(base64.b64decode(last_feature))
            if this_feature.size == 256 and last_feature.size == 256:
                cos_sim = pairwise.cosine_similarity(this_feature, last_feature)[0][0]
                all_num += 1
                if cos_sim > 0.85:
                    del_num += 1
                    del_list.append(this_id)
                    print this_id, last_id, last_index, cos_sim
                else:
                    last_index = index
        except:
            error_num += 1
            continue
    return del_list
def segment_spectra_chunk(obj, id, ibm_cos):
    print(f'Segmenting spectra chunk {obj.key}')
    sp_mz_int_buf = msgpack.loads(obj.data_stream.read())

    def _first_level_segment_upload(segm_i):
        # mz bounds of this segment; mz is expected to be in column 1
        l = ds_segments_bounds[segm_i][0, 0]
        r = ds_segments_bounds[segm_i][-1, 1]
        segm_start, segm_end = np.searchsorted(sp_mz_int_buf[:, 1], (l, r))
        segm = sp_mz_int_buf[segm_start:segm_end]
        ibm_cos.put_object(Bucket=bucket,
                           Key=f'{ds_segments_prefix}/chunk/{segm_i}/{id}.msgpack',
                           Body=msgpack.dumps(segm))

    with ThreadPoolExecutor(max_workers=128) as pool:
        pool.map(_first_level_segment_upload, range(len(ds_segments_bounds)))
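# A hedged read-back sketch for one first-level segment uploaded above; it assumes the
# same `bucket`/`ds_segments_prefix` globals, a boto3-style `ibm_cos` client, and that
# `msgpack` is msgpack_numpy (or msgpack patched for numpy) as in segment_spectra_chunk.
# The helper name and its `segm_i`/`chunk_id` parameters are purely illustrative.
def read_chunk_segment(ibm_cos, segm_i, chunk_id):
    key = f'{ds_segments_prefix}/chunk/{segm_i}/{chunk_id}.msgpack'
    body = ibm_cos.get_object(Bucket=bucket, Key=key)['Body'].read()
    # same row layout as sp_mz_int_buf, with mz in column 1
    return msgpack.loads(body)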
def post(self):
    request_type = self.get_body_argument('request_type')
    if request_type == 'check_pose':
        try:
            image_id = self.get_body_argument("image_id")
            face_img_str = self.get_body_argument("face_img_str")
            print "receive image", image_id, time.time()
            face_img = msgpack_numpy.loads(base64.b64decode(face_img_str))
            start = time.time()
            pose_predict = angle_calculate_server.calculate_angle(face_img, image_id)
            end = time.time()
            pose_predict = base64.b64encode(msgpack_numpy.dumps(pose_predict))
            print 'pose predict time :', (end - start)
            self.write(json.dumps({"pose_predict": pose_predict}))
        except:
            traceback.print_exc()
            return
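# A minimal wiring sketch for the handler above, assuming post() belongs to a
# tornado.web.RequestHandler subclass (named AngleHandler here for illustration) and
# that check_port is defined elsewhere in the module; both names are assumptions.
def run_angle_server():
    import tornado.ioloop
    import tornado.web

    app = tornado.web.Application([(r"/", AngleHandler)])
    app.listen(check_port)
    tornado.ioloop.IOLoop.current().start()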
def load_data(result_file, pack_file):
    # {person_name: [(pic_name, pic_feature), ..., (pic_name, pic_feature)]}
    person_feature_dic = {}
    for line in open(result_file):
        tmp = line.rstrip().split('\t')
        if len(tmp) == 2:
            try:
                pic_path = tmp[0].split('/')
                person_name = pic_path[-2]
                pic_name = pic_path[-1]
                feature = msgpack_numpy.loads(base64.b64decode(tmp[1]))
                feature_list = person_feature_dic.get(person_name, [])
                feature_list.append((pic_name, feature))
                person_feature_dic[person_name] = feature_list
            except:
                print tmp
                continue
    msgpack_numpy.dump(person_feature_dic, open(pack_file, 'wb'))
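# A small read-back sketch for the pack file written by load_data above; it assumes
# msgpack_numpy.load is available as the counterpart of the msgpack_numpy.dump call
# (the helper name load_pack_file is illustrative).
def load_pack_file(pack_file):
    # returns {person_name: [(pic_name, feature), ...]}
    return msgpack_numpy.load(open(pack_file, 'rb'))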
def add_all_new_pic(self):
    '''
        Scan the database and add any modified records to the LSHForest.
        Runs once a minute (avoids querying the database too often while adding
        little extra delay).
        When the research institute's model is used, we can only store the features
        first and move them over directly (an extra column in the database).
    '''
    current_day = get_current_day()
    log_file = open(os.path.join(self.log_dir, current_day + '.txt'), 'a')

    start = time.time()
    add_num = 0
    all_new_pic_name = get_all_new_face()
    for feature_str, person_name in all_new_pic_name:
        face_feature = np.reshape(msgpack_numpy.loads(base64.b64decode(feature_str)),
                                  (1, self.feature_dim))
        self.add_one_pic(face_feature, person_name)
        add_num += 1
    if add_num > 0:
        end = time.time()
        current_time = get_current_time()
        log_file.write('\t'.join(map(str, [current_time, 'add_pic_num :', add_num,
                                           'Dynamic_increase_time :', (end - start)])) + '\n')
        log_file.close()
    else:
        log_file.close()
def receive(self, flags=0):
    # First frame is the topic, second frame is the serialized payload.
    topic, raw_data = self.subscriber.recv_multipart(flags=flags)
    return topic.decode(), loads(raw_data, encoding='utf-8')
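# A hedged sketch of the matching publisher side for receive() above; it assumes
# `dumps` is the serializer counterpart of the `loads` used there and that
# `self.publisher` is a ZeroMQ PUB socket. Neither is shown in the original snippet.
def send(self, topic, data, flags=0):
    # mirror receive(): topic frame first, serialized payload second
    self.publisher.send_multipart([topic.encode(), dumps(data)], flags=flags)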
"face_img_str": face_img_str, "image_id": image_id, } requestPOST = urllib2.Request(data=urllib.urlencode(request), url="http://10.160.164.26:%d/" % check_port) requestPOST.get_method = lambda: "POST" try: s = urllib2.urlopen(requestPOST).read() except urllib2.HTTPError, e: print e.code except urllib2.URLError, e: print str(e) try: pose_predict = json.loads(s)["pose_predict"] if not pose_predict: # 加载失败 print image_id, 'pose filter' return False else: pose_predict = msgpack_numpy.loads(base64.b64decode(pose_predict)) print pose_predict return True except: traceback.print_exc() return False if __name__ == '__main__': face_img = cv2.imread('xiejunping1468293619.94.png_face_0.jpg') valid_one_pic_pose(face_img, 'test')