Example #1
    def analyse_people(self, filePath):
        client = AipBodyAnalysis(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        """ Read the image """
        with open(filePath, 'rb') as fp:
            data = client.bodyAttr(fp.read())
            data = str(data)
            res = re.findall(r"'(.*?)': {'score': (0\.\d+), 'name': '(.*?)'}", data, re.S)
            del res[0]
            for r in res:
                print("{:—<20}feature: {:—<15}score: {}".format(r[0], r[2], r[1]))
            """ Optional parameters could also be passed to bodyAttr """
Example #2
    def detection(self):
        clients = AipBodyAnalysis(self.APP_ID, self.API_KEY, self.SECRET_KEY)

        image = self.get_file_content('test.jpg')
        options = {}
        # """ 带参数调用人体检测与属性识别 """
        res = clients.bodyAttr(image, options)
        data = res['person_info'][0]['attributes']
        news = {}
        for k, v in data.items():
            news[k] = v['name']
        pprint(news)
Example #3
class tylt_AipBodyAnalysis:
    def __init__(self):
        self.APP_ID = '14372361'
        self.API_KEY = 'rwIh36pOcyqV8S9rQ6BBQlxh'
        self.SECRET_KEY = 'NiHoVNuAwK1RoBRypCKo6OPQrdopWU2Y'
        self.client = AipBodyAnalysis(self.APP_ID, self.API_KEY,
                                      self.SECRET_KEY)

    def body_attr(self, image):
        return self.client.bodyAttr(image)

    @staticmethod
    def dealwith_body_attr_result(person_info_list):
        """
        处理人体属性信息并返回
        :param person_info_list: 人体属性信息列表
        :return:
        """
        response_list = []
        for person in person_info_list:
            attributes = person['attributes']
            location = {
                'left': person['location']['left'],
                'top': person['location']['top'],
                'width': person['location']['width'],
                'height': person['location']['height'],
            }
            person_info_dict = {
                'upper_wear_fg': attributes['upper_wear_fg']['name'],
                'cellphone': attributes['cellphone']['name'],
                'lower_cut': attributes['lower_cut']['name'],
                'umbrella': attributes['umbrella']['name'],
                'orientation': attributes['orientation']['name'],
                'headwear': attributes['headwear']['name'],
                'gender': attributes['gender']['name'],
                'age': attributes['age']['name'],
                'upper_cut': attributes['upper_cut']['name'],
                'glasses': attributes['glasses']['name'],
                'lower_color': attributes['lower_color']['name'],
                'bag': attributes['bag']['name'],
                'upper_wear_texture': attributes['upper_wear_texture']['name'],
                'smoke': attributes['smoke']['name'],
                'vehicle': attributes['vehicle']['name'],
                'lower_wear': attributes['lower_wear']['name'],
                'carrying_item': attributes['carrying_item']['name'],
                'upper_wear': attributes['upper_wear']['name'],
                'upper_color': attributes['upper_color']['name'],
                'occlusion': attributes['occlusion']['name'],
                'location': location,
            }
            response_list.append(person_info_dict)

        return response_list
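The hand-written dict above lists every attribute key explicitly; a more compact sketch (assuming every attribute entry has the same {'score': ..., 'name': ...} shape, using a hypothetical method name):

    @staticmethod
    def dealwith_body_attr_result_compact(person_info_list):
        """Collect every returned attribute instead of a fixed key list."""
        response_list = []
        for person in person_info_list:
            info = {key: value['name'] for key, value in person['attributes'].items()}
            info['location'] = {key: person['location'][key]
                                for key in ('left', 'top', 'width', 'height')}
            response_list.append(info)
        return response_list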
Example #4
        if i == 1:
            """ Call body keypoint recognition """
            j = client.bodyAnalysis(image)
            """ decode('utf-8') converts a UTF-8 encoded byte string to unicode;
            encode('gb2312') converts a unicode string to GB2312. """
            print(json.dumps(j, sort_keys=True, indent=4, separators=(',', ':')))
            print_div(a, j)

        if i == 2:
            """ Call body attribute recognition:
            client.bodyAttr(image) """
            """ With optional parameters: """
            options = {}
            options["type"] = "gender,age"
            """ Call body attribute recognition with parameters """
            j = client.bodyAttr(image, options)
            d = json.dumps(j, sort_keys=True, indent=4, separators=(',', ':'), ensure_ascii=False)
            print(d)

        if i == 3:
            """ Call crowd counting:
            client.bodyNum(image) """
            """ With optional parameters: """
            options = {}
            options["area"] = ""  # x1,y1,x2,y2,x3,y3...xn,yn
            options["show"] = "false"

            """ Call crowd counting with parameters """
            j = client.bodyNum(image, options)
            print(json.dumps(j, sort_keys=True, indent=4, separators=(',', ':'), ensure_ascii=False))
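The fragment above assumes that client, image, a and print_div already exist in the surrounding script; a minimal sketch of the setup it relies on (placeholder credentials and file name):

import json
from aip import AipBodyAnalysis

APP_ID = 'your-app-id'          # placeholder
API_KEY = 'your-api-key'        # placeholder
SECRET_KEY = 'your-secret-key'  # placeholder
client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)

# Read the image as raw bytes for the SDK calls
with open('test.jpg', 'rb') as fp:
    image = fp.read()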
Example #5
def check_image():
    image = get_file_content('test1.jpg')

    # ID setup
    APP_ID = '19378067'
    API_KEY = 'DAYZeonKGtfLoPXA3UFivyl7'
    SECRET_KEY = 'mGt3Lomozy1G5tueq4Pu1kkQ4xwFhdC'

    client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
    """ 调用人体关键点识别 """
    keyword = client.bodyAttr(image)
    print(keyword)
    """ 打印json格式 """
    print
    json.dumps(keyword, sort_keys=True, indent=2)

    print("person_num:%d" % (keyword['person_num']))

    draw_image = cv2.imread('test1.jpg')

    for i in range(0, keyword['person_num']):
        # Detected body position (x, y)
        x1 = keyword['person_info'][i]['location']['left']
        y1 = keyword['person_info'][i]['location']['top']

        x2 = keyword['person_info'][i]['location']['width'] + x1
        y2 = keyword['person_info'][i]['location']['height'] + y1

        # Round the coordinates
        x1 = round(x1)
        x2 = round(x2)
        y1 = round(y1)
        y2 = round(y2)

        # Draw the body bounding box on the image
        print("draw_image")

        print("( %d , %d )" % (x1, y1))
        print("( %d , %d )" % (x2, y2))

        # Draw the rectangle
        cv2.rectangle(draw_image, (int(x1), int(y1)), (int(x2), int(y2)),
                      (0, 255, 0), 4)

        face_mask = keyword['person_info'][i]['attributes']['face_mask'][
            'name']
        mask1 = '无口罩'
        mask2 = '戴口罩'
        mask3 = '不确定'

        if face_mask == mask1:
            mask = "No mask"
        elif face_mask == mask2:
            mask = "wear mask"
        else:
            mask = "uncertain"

        print(face_mask)
        person_name = 'person' + str(i) + mask
        print(person_name)

        # Annotate with text
        cv2.putText(draw_image, person_name, (int(x1), int(y1)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

    while True:
        cv2.imshow('person_detect', draw_image)
        if cv2.waitKey(1) & 0xFF == ord('w'):
            cv2.imwrite("test1.jpg", draw_image)
            break
        if cv2.getWindowProperty('person_detect', cv2.WND_PROP_AUTOSIZE) < 1:
            break
    cv2.destroyAllWindows()
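Example #5 (and Example #7 below) call get_file_content without defining it; the helper is presumably the same one shown in Example #6:

def get_file_content(filePath):
    # Read the image file as raw bytes for the SDK call
    with open(filePath, 'rb') as fp:
        return fp.read()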
Example #6
API_KEY = 'TxGkNH961xrF41oxeP86pdKr'
SECRET_KEY = 'IKAacd4z9CoPp06RaWlwgYqe3g5TfelN'

client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)


# Read the image
def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()


image = get_file_content('example.jpg')

# Optional parameters
options = {}
options['type'] = 'gender,age,headwear,glasses,orientation'

# Call body attribute recognition
t1 = time.time()
details = client.bodyAttr(image, options)
# print(details['person_info'][0]['attributes']['gender']['name'])
# print(details['person_info'][0]['attributes']['glasses']['name'])
# print(details['person_info'][0]['attributes']['age']['name'])
# print(details['person_info'][0]['attributes']['headwear']['name'])
# print(details['person_info'][0]['attributes']['orientation']['name'])
for i in ['gender', 'glasses', 'age', 'headwear', 'orientation']:
    print(details['person_info'][0]['attributes'][i]['name'])
t2 = time.time()
print(t2 - t1)
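The loop above only reads the first detected person; a small sketch (same response layout) that covers every person in the result:

for person in details.get('person_info', []):
    for key in ['gender', 'glasses', 'age', 'headwear', 'orientation']:
        print(key, person['attributes'][key]['name'])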
Example #7
def check_image():

    image = get_file_content('test1.jpg')

    #ID setup
    APP_ID = '19103828'
    API_KEY = 'vxYkTAqGYoWe4XRonMUeCvtH'
    SECRET_KEY = 'etnQGZksMjPFU7GcGUIwtVOL7MBUK0Pa'

    client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
    """ 调用人体关键点识别 """
    keyword = client.bodyAttr(image)
    """ 打印json格式 """
    print(json.dumps(keyword, sort_keys=True, indent=2))

    print("person_num:%d" % (keyword['person_num']))

    draw_image = cv2.imread('test1.jpg')

    for i in range(0, keyword['person_num']):
        # Detected body position (x, y)
        x1 = keyword['person_info'][i]['location']['left']
        y1 = keyword['person_info'][i]['location']['top']

        x2 = keyword['person_info'][i]['location']['width'] + x1
        y2 = keyword['person_info'][i]['location']['height'] + y1

        # Round the coordinates
        x1 = round(x1)
        x2 = round(x2)
        y1 = round(y1)
        y2 = round(y2)

        # Draw the body bounding box on the image
        print("draw_image")

        print("( %d , %d )" % (x1, y1))
        print("( %d , %d )" % (x2, y2))

        # Draw the rectangle
        cv2.rectangle(draw_image, (int(x1), int(y1)), (int(x2), int(y2)),
                      (0, 255, 0), 4)

        face_mask = keyword['person_info'][i]['attributes']['face_mask'][
            'name']
        mask1 = '无口罩'
        mask2 = '戴口罩'
        mask3 = '不确定'

        if face_mask == mask1:
            mask = "No mask"
        elif face_mask == mask2:
            mask = "wear mask"
        else:
            mask = "uncertain"

        print(face_mask)
        person_name = str(i) + mask

        # Annotate with text
        cv2.putText(draw_image, person_name, (int(x1), int(y1)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

    while True:
        # cv2.imshow('person_detect', draw_image)  # show the image in a window
        # if cv2.waitKey(1) & 0xFF == ord('w'):  # press 'w' to save the image and close the window
        cv2.imwrite("test1.jpg", draw_image)
        break
    cv2.destroyAllWindows()
Example #8
    def detection(self):
        self.start_detect = False
        encode_image = self.image_encode()
        print (type(encode_image))
        image = cv2.imread(self.path_to_save_image)
        # Initialize the AipBodyAnalysis client
        client = AipBodyAnalysis(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        # Receive the detection result
        result = client.bodyAttr(encode_image)
        print (result)
        self.rec_count += 1
        num = result["person_num"]
        if result["person_num"] == 0:
            rospy.loginfo("Start Turning robot")
            self.turn_robot(1.5)
            self.angle_count += 1.5
            self.take_photo_signal = True

        if result["person_num"] > 1:
            for i in range(num):
                person_info = result["person_info"][i]
                point1 = (person_info["location"]["left"], person_info["location"]["top"])
                point2 = (person_info["location"]["left"]+person_info["location"]["width"], person_info["location"]["top"]+person_info["location"]["height"])
                cv2.rectangle(image, point1, point2, (0,0,255), 2)
            cv2.imwrite(self.path_to_save_result, image)
            rospy.loginfo("Start Turning robot")
            self.turn_robot(1.5)
            self.angle_count += 1.5
            self.take_photo_signal = True

        if result["person_num"] == 1:
            person_info = result["person_info"][0]
            person_x = person_info["location"]["left"] + person_info["location"]["width"]/2
            person_y = person_info["location"]["top"] + person_info["location"]["height"]/2
            point1 = (person_info["location"]["left"], person_info["location"]["top"])
            point2 = (person_info["location"]["left"]+person_info["location"]["width"], person_info["location"]["top"]+person_info["location"]["height"])
            
            cv2.rectangle(image, point1, point2, (0,0,255), 2)
            cv2.imwrite(self.path_to_save_result, image)
            
            if person_x-320 > 80:
                self.turn_robot(-1.1)
            if person_x -320 < -80:
                self.turn_robot(1.1)
            person_area = person_info["location"]["width"] * person_info["location"]["height"]
            print ("person area {}".format(person_area))
            print ("person width {}".format(person_info["location"]["width"]))
            print ("person height {}".format(person_info["location"]["height"]))
            if (person_area >= 13000):
                self.forward_robot(0.5)
                rospy.sleep(2)
                self.find_person = True
                rospy.loginfo("Person Target Reached!") 
                msg = String()
                msg.data = "Person Target Reached!"
                self.pub_talkback.publish(msg)
        if self.rec_count == 8:
            self.find_person = True
            rospy.loginfo("Person Not Found")
            msg = String()
            msg.data = "Person Not Found!"
            self.pub_talkback.publish(msg)

    def detection(self):
        self.start_detect = False
        encode_image = self.image_encode()
        print(type(encode_image))
        image = cv2.imread(self.path_to_save_image)
        # Initialize the AipBodyAnalysis client
        client = AipBodyAnalysis(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        # Receive the detection result
        result = client.bodyAttr(encode_image)
        print(result)
        num = result["person_num"]
        if result["person_num"] == 0:
            rospy.loginfo("Start Turning robot")
            self.turn_robot(1.5)
            self.angle_count += 1.5
            self.take_photo_signal = True

        if result["person_num"] > 1:
            self.pub_speech.publish(
                "I can see more than one person, I have found the target person"
            )
            self.pub_speech.publish("Please stand in front of me and lead me")
            for i in range(num):
                person_info = result["person_info"][i]
                point1 = (person_info["location"]["left"],
                          person_info["location"]["top"])
                point2 = (person_info["location"]["left"] +
                          person_info["location"]["width"],
                          person_info["location"]["top"] +
                          person_info["location"]["height"])
                cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
            cv2.imwrite(self.path_to_save_result, image)
            self.find_person = True
            msg = Result()
            msg.mission_type = "person"
            msg.result = "success"
            self.pub_control.publish(msg)

        if result["person_num"] == 1:
            person_info = result["person_info"][0]
            person_x = person_info["location"][
                "left"] + person_info["location"]["width"] / 2
            person_y = person_info["location"][
                "top"] + person_info["location"]["height"] / 2
            point1 = (person_info["location"]["left"],
                      person_info["location"]["top"])
            point2 = (person_info["location"]["left"] +
                      person_info["location"]["width"],
                      person_info["location"]["top"] +
                      person_info["location"]["height"])

            cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
            cv2.imwrite(self.path_to_save_result, image)

            if person_x - 320 > 80:
                self.turn_robot(-1.1)
            if person_x - 320 < -80:
                self.turn_robot(1.1)
            person_area = person_info["location"]["width"] * person_info[
                "location"]["height"]
            print("person area {}".format(person_area))
            print("person width {}".format(person_info["location"]["width"]))
            print("person height {}".format(person_info["location"]["height"]))
            if (person_area >= 13000):
                self.forward_robot(0.5)
                rospy.sleep(2)
                self.find_person = True
                rospy.loginfo("Person Target Reached!")
                control_msg = Result()
                speech_msg = String()
                control_msg.mission_type = "person"
                control_msg.result = "success"
                speech_msg.data = "I have reached the target person"
                self.pub_speech.publish(speech_msg)
                self.pub_control.publish(control_msg)
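Both detection variants repeat the same rectangle arithmetic; a small helper sketch (hypothetical name) that either could call:

import cv2

def draw_person_box(image, location, color=(0, 0, 255), thickness=2):
    # location is the 'location' dict from person_info: left/top/width/height
    point1 = (int(location["left"]), int(location["top"]))
    point2 = (int(location["left"] + location["width"]),
              int(location["top"] + location["height"]))
    cv2.rectangle(image, point1, point2, color, thickness)
    return point1, point2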
Example #10
class Guest_Recognition():
    def __init__(self):

        rospy.init_node('receptionist_image')
        # rospy.on_shutdown(self.cleanup)
        self.rate = rospy.Rate(1)

        APP_ID = '18721308'
        API_KEY = 'lNQGdBNazTPv8LpSP4x0GQlI'
        SECRET_KEY = 'nW8grONY777n4I2KvpOVuKGDNiY03omI'
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.bridge = CvBridge()
        self.image_type = "BASE64"
        self.filepath = "/home/lurejewel/catkin_ws/src/receptionist_image/receptionist_image_core/pictures/image.jpg"
        # self.filepath = "/home/tianyili/robocup/src/image.jpg"

        self.FindGuest = 1
        self.FindJohn = 2
        self.FindSofa = 3

        self.guest = Guest()

        self.john = user()
        self.john.user_id = 'john'
        self.john.facenum = 0

        self.guest1 = user()
        self.guest1.user_id = 'guest1'
        self.guest1.facenum = 0
        self.groupIdList = 'reception_group'

        self.options_body = {}
        self.options_body["type"] = "gender,age"
        self.options_face = {}
        self.options_face["face_field"] = "age,gender"
        self.options_face["max_face_num"] = 3
        self.options_face["face_type"] = "LIVE"
        #
        self.boxes = BoundingBoxes()
        self.human_boxes = BoundingBoxes()
        self.sofa = BoundingBox()
        self.seat = RegionOfInterest()
        # Work state
        self.workstate = 0
        # Publishers
        self.control_pub = rospy.Publisher("/control", reception, queue_size=1)
        self.roi_pub = rospy.Publisher('/image/roi',
                                       RegionOfInterest,
                                       queue_size=1)  # turn robot
        self.seat_pos = rospy.Publisher('/image/found_seat_pos',
                                        RegionOfInterest,
                                        queue_size=1)  # turn robot
        self.seat_pub = rospy.Publisher('/image/found_seat',
                                        Bool,
                                        queue_size=1)  # to arm
        self.introduce = rospy.Publisher('/image/found_john',
                                         Bool,
                                         queue_size=1)  # turn robot
        # Subscribers
        self.control_sub = rospy.Subscriber("/control",
                                            reception,
                                            self.controlCallback,
                                            queue_size=1)
        self.sofa_sub = rospy.Subscriber("/darknet_ros/bounding_boxes",
                                         BoundingBoxes,
                                         self.sofaCallback,
                                         queue_size=1)
        self.img_sub = rospy.Subscriber("/usb_cam/image_raw",
                                        Image,
                                        self.imgCallback,
                                        queue_size=1)

        self.time = time.time()
        self.ROI = RegionOfInterest()

    def controlCallback(self, msg):

        if msg.NowTask == msg.GuestRecognition and msg.FinishState == False:
            print("===Start to find guest===")
            self.workstate = self.FindGuest

        elif msg.NowTask == msg.Introducing and msg.FinishState == False:
            print("===Start to find John===")
            self.workstate = self.FindJohn
            # self.can_speak(self.groupIdList)

        elif msg.NowTask == msg.Serving and msg.FinishState == False:
            print("===Start to find Sofa===")
            self.workstate = self.FindSofa

        else:
            # Other states: do nothing
            # TODO: DoorDetection
            self.workstate = 0

    # Sofa detection
    def sofaCallback(self, msg):

        if self.workstate != self.FindSofa:
            return

        self.boxes = msg.bounding_boxes
        self.sofa = None
        self.human_boxes = []
        t = time.time()
        # Check the targets in the field of view
        for box in self.boxes:
            if box.Class == 'bed':
                self.sofa = box
                break
        for box in self.boxes:
            if box.Class == 'person':
                self.human_boxes.append(box)
        # Determine the empty seat
        if self.sofa is not None:
            sofa_width = self.sofa.xmax - self.sofa.xmin
            hum_in_sofa = []
            if len(self.human_boxes) != 0:
                # Average width occupied by a single person
                hum_width = 0.0
                flag = 0
                for human in self.human_boxes:
                    # The person sits within the sofa
                    if (human.xmax < self.sofa.xmax) and (human.xmin >
                                                          self.sofa.xmin):
                        flag += 1
                        hum_width += human.xmax - human.xmin
                        hum_in_sofa.append(human)
                # Decide which case applies
                if flag == 0:
                    # Nobody seated: point at the whole sofa
                    self.seat.x_offset = self.sofa.xmin
                    self.seat.width = self.sofa.xmax - self.sofa.xmin
                    self.seat.y_offset = self.sofa.ymin
                    self.seat.height = self.sofa.ymax - self.sofa.ymin
                    if t - self.time > 3:
                        self.time = t
                        self.seat_pos.publish(self.seat)
                elif flag == 1:
                    # Exactly one person seated
                    dis_R = self.sofa.xmax - hum_in_sofa[0].xmax
                    dis_L = hum_in_sofa[0].xmin - self.sofa.xmin
                    if dis_R >= dis_L:
                        self.seat.x_offset = hum_in_sofa[0].xmax
                        self.seat.width = self.sofa.xmax - hum_in_sofa[0].xmax
                        self.seat.y_offset = self.sofa.ymin
                        self.seat.height = self.sofa.ymax - self.sofa.ymin
                        if t - self.time > 3:
                            self.time = t
                            self.seat_pos.publish(self.seat)
                    else:
                        self.seat.x_offset = self.sofa.xmin
                        self.seat.width = hum_in_sofa[0].xmin - self.sofa.xmin
                        self.seat.y_offset = self.sofa.ymin
                        self.seat.height = self.sofa.ymax - self.sofa.ymin
                        if t - self.time > 3:
                            self.time = t
                            self.seat_pos.publish(self.seat)
                else:
                    # Sort the person boxes (ordering by one edge is enough)
                    hum_width = hum_width / flag
                    for i in range(len(hum_in_sofa)):
                        for j in range(0, len(hum_in_sofa) - 1):
                            if hum_in_sofa[j].xmax > hum_in_sofa[j + 1].xmax:
                                data = copy.deepcopy(hum_in_sofa[j + 1])
                                hum_in_sofa[j + 1] = hum_in_sofa[j]
                                hum_in_sofa[j] = data
                    # Check the horizontal gap between adjacent people
                    STATE = False
                    for i in range(len(hum_in_sofa) - 1):
                        if hum_in_sofa[i + 1].xmin - hum_in_sofa[
                                i].xmax < hum_width:
                            continue
                        else:
                            self.seat.x_offset = hum_in_sofa[i].xmax
                            self.seat.width = (hum_in_sofa[i + 1].xmin -
                                               hum_in_sofa[i].xmax)
                            self.seat.y_offset = hum_in_sofa[i].ymin
                            self.seat.height = (hum_in_sofa[i].ymax -
                                                hum_in_sofa[i].ymin)
                            if t - self.time > 3:
                                self.time = t
                                self.seat_pos.publish(self.seat)
                            STATE = True
                    # No free seat
                    if not STATE:
                        self.seat.x_offset = 0
                        self.seat.width = 0
                        self.seat.y_offset = 0
                        self.seat.height = 0
                        if t - self.time > 3:
                            self.time = t
                            self.seat_pos.publish(self.seat)
            else:
                self.seat.x_offset = self.sofa.xmin
                self.seat.width = self.sofa.xmax - self.sofa.xmin
                self.seat.y_offset = self.sofa.ymin
                self.seat.height = self.sofa.ymax - self.sofa.ymin
                if t - self.time > 3:
                    self.time = t
                    self.seat_pos.publish(self.seat)
        else:
            self.seat.x_offset = 0
            self.seat.width = 0
            self.seat.y_offset = 0
            self.seat.height = 0
            if t - self.time > 3:
                self.time = t
                self.seat_pos.publish(self.seat)
                print(self.seat.width / 2 + self.seat.x_offset)
        if 300 < self.seat.width / 2 + self.seat.x_offset < 340:
            data = Bool()
            data.data = True
            self.seat_pub.publish(data)
        else:
            data = Bool()
            data.data = False
            self.seat_pub.publish(data)

    # Convert image data for the body API
    def msgtobody(self, image_msg):

        # Save the frame to disk and return its raw bytes
        cv2.imwrite(self.filepath, image_msg)
        with open(self.filepath, 'rb') as fp:
            return fp.read()

    # Open a file and return base64-encoded data
    def fileopen(self, filepath):

        with open(filepath, 'rb') as fp:
            imgjson = base64.b64encode(fp.read())
            data = str(imgjson).encode('utf-8')
            return data

    # Check whether the face is already known
    def judger(self, result):

        if result['error_code'] == 0:
            result_sc = result['result']['user_list'][0]['score']
            # print(result_sc,type(result_sc))
            if result_sc > 80:
                # return result['result']['user_list'][0]['user_id']
                return True
            else:
                return False
        else:
            return False

    # Search the face library; groupIdList is the user group name
    def face_search(self, filepath, groupIdList='reception_group'):

        image = self.fileopen(filepath)
        imageType = "BASE64"
        result = self.client_face.search(image, imageType, groupIdList)
        print(result)  # print the full response
        print(result['result']['user_list'][0]['user_id'])
        return self.judger(result)
        # if result['error_code'] == 0:
        #     judger(result)
        # elif result['result'] == None:
        #     return False

    def imgCallback(self, image_msg):

        if self.workstate == self.FindGuest:
            try:
                cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            except CvBridgeError as e:
                print(e)
            self.find_people(cv_image)

        if self.workstate == self.FindJohn:
            try:
                cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            except CvBridgeError as e:
                print(e)
            self.can_speak(cv_image, self.groupIdList)

    """寻找人体并返回未知人员ROI"""

    def find_people(self, image):

        t = time.time()
        segment_data = self.msgtobody(image)
        result = self.client_body.bodyAttr(segment_data, self.options_body)
        if 'person_num' in result:
            person_num = result['person_num']
            # self.objpos_pub.publish()
        else:
            person_num = 0
        print("The number of person is " + str(person_num))
        if person_num == 0:
            self.ROI.x_offset = 0
            self.ROI.y_offset = 0
            self.ROI.width = 0
            self.ROI.height = 0
            if t - self.time > 3:
                self.time = t
                self.roi_pub.publish(self.ROI)
        for num in range(0, int(person_num)):
            # Publish the ROI for anyone who is not recognised
            cv2.imwrite(self.filepath, image)
            if not self.face_search(self.filepath):
                location = result['person_info'][num]['location']
                print(location)
                # print('upper_color:',upper_color,' upper_wear_fg:',upper_wear_fg)
                self.ROI.x_offset = int(location['left'])
                self.ROI.y_offset = int(location['top'])
                self.ROI.width = int(location['width'])
                self.ROI.height = int(location['height'])
                if t - self.time > 3:
                    self.time = t
                    self.roi_pub.publish(self.ROI)
                    print(self.ROI)
                    msg = reception()
                    msg.NowTask = msg.GuestRecognition
                    msg.NextTask = msg.Requesting
                    msg.FinishState = True
                    self.control_pub.publish(msg)

    # Check whether this is the person to be introduced
    """Decide whether the person can be introduced"""

    def can_speak(self, image, groupIdList='reception_group'):

        t = time.time()
        # result = self.face_search(filepath,groupIdList)
        #     # a known person can be introduced
        # if result['score']>80:
        #     self.introduce.publish(True)
        cv2.imwrite(self.filepath, image)
        result = self.face_search(self.filepath, groupIdList)
        # A known person can be introduced
        # if result:

        if t - self.time > 3:
            self.time = t
            self.introduce.publish(result)
            print('result:', result)

    def face_add(self, filepath, image_msg, groupid, userid):  # add to the face library: path, group, user

        image = self.fileopen(filepath)
        imageType = "BASE64"
        result = self.client_face.addUser(image, imageType, groupid, userid)
        if result['error_code'] == 0:
            print("Face added successfully")
        else:
            print("Failed to add face")
            print(result)

    def get_user_facenum(self, userid):

        if userid == 'john':
            return self.john
        else:
            return self.guest1

    def add_user(self, file_name, image_msg):

        cv2.imwrite(file_name, image_msg)
        # The logic below needs the raw search response (face_search() only returns a bool)
        search_result = self.client_face.search(self.fileopen(file_name),
                                                self.image_type,
                                                'reception_group')
        curusr = user()

        # New face
        if search_result['error_code'] == 222207:
            userid = 'john'
            self.john.facenum += 1
            curusr = self.john

        elif search_result['error_code'] == 0:
            if search_result['result']['user_list'][0]['score'] < 80:
                # ---------------------------------------
                num = len(search_result['result']['user_list'][0])
                if num == 0:
                    userid = 'john'
                    self.john.facenum += 1
                    curusr = self.john
                elif num == 1:
                    userid = 'guest1'
                    self.guest1.facenum += 1
                    curusr = self.guest1

            # Existing face
            else:
                userid = search_result['result']['user_list'][0]['user_id']
                curusr = self.get_user_facenum(userid)
        # Track how many face images were added; keep adding until the cap of 20, otherwise do nothing
        if curusr.facenum == 0 and curusr.user_id:
            self.face_add(file_name, image_msg, 'reception_group',
                          curusr.user_id)
            print("curusr --------------------------")
            # print(curusr.user_id , curusr.facenum)
        else:
            print('wrong')
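The nested-loop bubble sort and gap scan in sofaCallback can be written with Python's built-in sort; a compact equivalent sketch (same inputs: the hum_in_sofa boxes and the average person width hum_width):

hum_in_sofa.sort(key=lambda box: box.xmax)
for left, right in zip(hum_in_sofa, hum_in_sofa[1:]):
    gap = right.xmin - left.xmax
    if gap >= hum_width:
        print("free seat between", left.xmax, "and", right.xmin)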
Example #11
import os
from aip import AipBodyAnalysis
""" 你的 APPID AK SK """
APP_ID = '11741060'
API_KEY = 'uXS3628lLLRGVGYNnqzNjksc'
SECRET_KEY = 'EU4Pykb3RbQllrulScrGizb6IYsmZcYy'

client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
""" 读取图片 """
filename = '2.jpg'
filepath = os.path.abspath('.') + '\\' + filename

with open(filepath, 'rb') as f:  # TODO
    img = f.read()
""" 百度手势识别SDK未开放 """
r = client.bodyAttr(img)  # 人流量统计
print(r)
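The comment in the snippet above originally labelled the bodyAttr call as crowd counting (人流量统计); if a crowd count is actually wanted, the SDK's bodyNum call (used in Example #4) is the one to use. A minimal sketch with the same client and image:

r = client.bodyNum(img)
print(r.get('person_num'))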
    def detection(self):
        self.start_detect = False
        encode_image = self.image_encode()
        print(type(encode_image))
        image = cv2.imread(self.path_to_save_image)
        # Initialize the AipBodyAnalysis client
        client = AipBodyAnalysis(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        # Receive the detection result
        result = client.bodyAttr(encode_image)
        #print (result)
        num = result["person_num"]
        area = []
        point = []
        person_x = 0
        height = 0
        try:
            if result["person_num"] == 0:
                print("person_num =", result["person_num"])
                rospy.loginfo("Start Turning robot")
                self.turn_robot(0.7)
                self.angle_count += 0.7
                #self.find_person = False
                #self.take_photo_signal = True

            if result["person_num"] > 1:
                print("person_num =", result["person_num"])
                self.speech_pub.publish(
                    "I can see more than one person, I have found the target person"
                )
                self.speech_pub.publish(
                    "Please stand in front of me and lead me")
                for i in range(num):
                    person_info = result["person_info"][i]
                    point1 = (person_info["location"]["left"],
                              person_info["location"]["top"])
                    point2 = (person_info["location"]["left"] +
                              person_info["location"]["width"],
                              person_info["location"]["top"] +
                              person_info["location"]["height"])
                    cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
                    tmpPoint = (point1[0] + point2[0]) / 2
                    tmpArea = (point1[0] - point2[0]) * (point1[1] - point2[1])
                    point.append(tmpPoint)
                    area.append(tmpArea)
                tmpArea = area[0]
                person_x = point[0]
                for i in range(len(area)):
                    if tmpArea < area[i]:
                        person_x = point[i]
                        tmpArea = area[i]
                        height = result["person_info"][i]["location"]["top"]
                cv2.imwrite(self.path_to_save_result, image)
                #self.find_person = True
                #self.find_person = True
                #msg = String()
                #msg.data = "person_target_found"
                #self.pub_result.publish(msg)

            if result["person_num"] == 1:
                print("person_num =", result["person_num"])
                person_info = result["person_info"][0]
                person_x = person_info["location"][
                    "left"] + person_info["location"]["width"] / 2
                person_y = person_info["location"][
                    "top"] + person_info["location"]["height"] / 2
                point1 = (person_info["location"]["left"],
                          person_info["location"]["top"])
                point2 = (person_info["location"]["left"] +
                          person_info["location"]["width"],
                          person_info["location"]["top"] +
                          person_info["location"]["height"])
                height = person_info["location"]["top"]
                cv2.rectangle(image, point1, point2, (0, 0, 255), 2)
                cv2.imwrite(self.path_to_save_result, image)
                #self.find_person = True
        except KeyError:
            print("NO PERSON DETECTED")
            pass

        print("========================")
        print("area =", area)
        print("point =", point)
        print("person_x =", person_x)
        print("height =", height)
        print("========================")
        if person_x - 320 > 80:
            self.turn_robot(-0.5)
            print("turn right")

        elif person_x - 320 < -80:
            self.turn_robot(0.5)
            print("turn left")

        else:
            print("Start going forward")
            self.is_stop_turn = True
            self.find_person = True
            '''
            while self.is_go_forward:
                print("go_forward")
                self.forward_robot(0.45)
            self.find_person = True
            rospy.loginfo("Person Target Reached!")
            control_msg = String()
            speech_msg = String()
            control_msg.data = "person_target_found"
            speech_msg.data = "I have reached the target person"
            self.speech_pub.publish(speech_msg)
            self.pub_result.publish(control_msg)
            self.is_stop_turn = False
            '''
Example #13
class Guest_Recognition:
    def __init__(self):
        APP_ID = '18721308'
        API_KEY = 'lNQGdBNazTPv8LpSP4x0GQlI'
        SECRET_KEY = 'nW8grONY777n4I2KvpOVuKGDNiY03omI'
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.image_type = "BASE64"
        msg = reception_image()
        self.guest = Guest()
        # self.name = None
        # self.age = None
        # self.image = None
        # self.gender = None
        # self.drink = None

        self.options_body = {}
        self.options_body["type"] = "gender,upper_color,upper_wear_fg"
        self.options_face = {}
        self.options_face["face_field"] = "age,gender"
        self.options_face["max_face_num"] = 3
        self.options_face["face_type"] = "LIVE"

        #ros params
        self.sub_image_raw_topic_name = None
        self.sub_destination_topic_name = None
        self.pub_gender_recognition_topic_name = None
        self.pub_object_position2d = None
        # self.depth_img                          = np.array((480,640))
        self.get_params()

        # Publishers
        self.objpos_pub = rospy.Publisher('/image/object_position2d',
                                          String,
                                          queue_size=1)

        # Subscribers
        self.control_sub = rospy.Subscriber("/control", reception,
                                            self.controlCallback)
        self.speech_sub = rospy.Subscriber('/speech/check_door', String,
                                           self.find_people)

    # --------------
    def get_params(self):
        self.sub_image_raw_topic_name = rospy.get_param(
            'sub_image_raw_topic_name', '/camera/rgb/image_raw')
        # self.sub_depth_image_topic_name        = rospy.get_param('sub_depth_image_topic_name',        '/camera/depth/image_raw')
        self.pub_gender_recognition_topic_name = rospy.get_param(
            'pub_gender_recognition_topic_name',
            '/kamerider_image/gender_recognition')
        self.objpos_pub = rospy.get_param('objpos_pub',
                                          '/image/object_position2d')
        # Define publishers and subscribers; topic names come from the ROS param server
        rospy.Subscriber(self.sub_image_raw_topic_name, Image,
                         self.imageCallback)
        # rospy.Subscriber(self.sub_depth_image_topic_name, Image, self.depthCallback)

        self.pub_result = rospy.Publisher(
            self.pub_gender_recognition_topic_name, String, queue_size=1)
        # self.pub_person_pos = rospy.Publisher(self.pub_person_pos_topic_name, Pose,queue_size=1)
        self.objpos_pub = rospy.Publisher('/image/object_position2d',
                                          String,
                                          queue_size=1)

    def depthCallback(self, msg):
        bridge = CvBridge()
        try:
            cv_image = bridge.imgmsg_to_cv2(msg, '16UC1')
            self.depth_img = cv_image
        except CvBridgeError as e:
            print(e)


# -------------------------------------

# Convert image data for the body API

    def msgtobody(self, image_msg, file_name='image_body.png'):
        # Save the frame to disk and return its raw bytes
        cv2.imwrite(file_name, image_msg)
        with open(file_name, 'rb') as fp:
            return fp.read()

    # Search the face library; groupIdList is the user group name
    def face_search(filepath, groupIdList):
        image = fileopen(filepath)
        imageType = "BASE64"
        result = client.search(image, imageType, groupIdList)
        # print(result)  # print the full response
        print(result['result']['user_list'][0]['user_id'])

    # Check whether the face is already known
    def judger(result):
        result_sc = result['result']['user_list'][0]['score']
        if result_sc > 80:
            return result['result']['user_list'][0]['user_id']
        else:
            return False

    """ 调用人脸视频比对人脸库 """

    def face_comparision(window_name='image', camera_idx=0):
        cv2.namedWindow(window_name)
        # Video source: a saved video file or a USB camera
        cap = cv2.VideoCapture(camera_idx)

        while cap.isOpened():
            ok, frame = cap.read()  # read one frame
            if not ok:
                break
            c = cv2.waitKey(100)  # press q to quit
            if c == ord('q'):
                break
            cv2.imshow(window_name, frame)
            base64_data = frame2base64(frame)
            image = str(base64_data, 'utf-8')
            imageType = "BASE64"

            result = client.search(image, imageType, 'test_group')

            if result['error_code'] == 0:
                judger(result)

        cap.release()
        cv2.destroyAllWindows()

    def find_people():
        # msg_nav=msh
        # bridge = CvBridge()
        # try:
        #     cv_image = bridge.imgmsg_to_cv2(msg, 'bgr8')
        # except CvBridgeError as e:
        #     print (e)
        cap = cv2.VideoCapture(0)
        cnt = 0
        while cap.isOpened():
            ret, imgraw0 = cap.read()
            if ret:
                if cnt % 10 == 0:
                    imgraw = cv2.resize(imgraw0, (512, 512))
                    cv2.imwrite(filepath, imgraw)
                    with open(filepath, "rb") as fp:
                        segment_data = fp.read()
                    result = client_body.bodyAttr(segment_data, options_body)
                    # print(result)

                    # Parse the location info
                    if 'person_num' in result:
                        person_num = result['person_num']
                    else:
                        person_num = 0

                    for num in range(0, int(person_num)):
                        print(num)
                        location = result['person_info'][num]['location']
                        attrs = result['person_info'][num]['attributes']
                        print('upper_color:', attrs['upper_color']['name'],
                              ' upper_wear_fg:', attrs['upper_wear_fg']['name'])

                        A = (int(location['left']), int(location['top']))
                        B = (int(location['left']) + int(location['width']),
                             int(location['top']))
                        C = (int(location['left']) + int(location['width']),
                             int(location['top']) + int(location['height']))
                        D = (int(location['left']),
                             int(location['top']) + int(location['height']))

                        cv2.waitKey(1000)
                        # self.objpos_pub.publish()
                cnt += 1

            cv2.waitKey(0)

    def imageCallback(self, msg):
        if self.take_photo_signal:
            print("[INFO] Start to take photo")
            bridge = CvBridge()
            self.take_photo_signal = False
            try:
                cv_image = bridge.imgmsg_to_cv2(msg, 'bgr8')
                cv2.imwrite(self.path_to_save_image, cv_image)
            except CvBridgeError as e:
                print(e)
            self.detection()

    # Wave detection
    # image: input cv2 image
    # gender: whether to also check gender
    def detectWave(self, image, gender=False):
        #if time.time()-self.time < self.tau:
        #     return None
        # self.time = time.time()
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        point_t = []
        # At least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # Check each person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # Wrist above the elbow
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # Wrist above the elbow
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))

            # Someone is waving
            if len(loaction) > 0:
                # Female detection
                # ---------- gender check ----------
                if gender:
                    options = {}
                    options["type"] = "gender"
                    # Only people who waved are checked
                    for locate in loaction:
                        img = image[locate[2]:locate[3], locate[0]:locate[1]]
                        img = self.msgtobody(img, "try__.png")
                        result = self.client_body.bodyAttr(img, options)
                        try:
                            result['person_info'][0]['attributes'][
                                'gender']['name'] == "女性"
                        except:
                            continue
                        # If female, return her location immediately
                        if result['person_info'][0]['attributes'][
                                'gender']['name'] == "女性":
                            loc = []
                            loc.append(locate[0])
                            loc.append(locate[1])
                            loc.append(locate[2])
                            loc.append(locate[3])
                            return locate
                # Otherwise return the first waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate

        return None
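The wrist-above-elbow test in detectWave is written out twice, once per arm; a small helper sketch (hypothetical name, same body_parts keys) that captures the same heuristic:

def is_waving(parts):
    # Image y grows downward, so a smaller y means higher in the frame
    right_up = (parts['right_wrist']['y'] < parts['right_elbow']['y']
                and parts['right_wrist']['x'] < parts['right_shoulder']['x'])
    left_up = (parts['left_wrist']['y'] < parts['left_elbow']['y']
               and parts['left_wrist']['x'] > parts['left_shoulder']['x'])
    return right_up or left_up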
Example #14
class BodyCheck:
    def __init__(self):
        self.time = time.time()
        APP_ID = '18889374'
        API_KEY = 'pUNweNaSK4rWz57vGs9KpuW1'
        SECRET_KEY = 'ru5LqWM0lrcVYBh9cjd32fy951nagqcA'
        self.imageType = "BASE64"
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.bridge = CvBridge()
        ############## person data
        self.filepath = "/home/dell/img/"
        self.option_face = {}
        self.option_body = {}
        self.option_face["face_field"] = "age,gender,glasses,race"
        self.option_face["max_face_num"] = 1
        self.option_body["type"] = "upper_wear,upper_color"

        ############## tracking data
        self.roi = None

        ############## topic names
        # Subscribers and publishers
        self.sub_image_name = rospy.get_param('~image_raw_topic_name',
                                              '/usb_cam/image_raw')
        # Publishers
        self.pub_pos_name = rospy.get_param('~object_view_topic_name', 'roi')
        self.pub_img_name = rospy.get_param('~image_test_topic_name',
                                            '/image/test')
        self.pub_fet_name = rospy.get_param('~feature_topic_name',
                                            '/image/feature')
        ############## publishers
        self.img_pub = rospy.Publisher(self.pub_img_name, Image)
        self.roi_pub = rospy.Publisher(self.pub_pos_name, RegionOfInterest)
        self.fet_pub = rospy.Publisher(self.pub_fet_name, Description)

        self.img_sub = rospy.Subscriber(self.sub_image_name, Image,
                                        self.imgCallback)
        print("============================================================")

    # Convert image data for the body API
    def msgtobody(self, image_msg, file_name='image_body.png'):
        # Save the frame to disk and return its raw bytes
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            return fp.read()

    # Convert image data for the face API (base64)
    def msgtoface(self, image_msg, file_name='image_faces.png'):
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            data = base64.b64encode(fp.read())
            # python2.7
            data = str(data).encode('utf-8')
            return data

    # Wave detection: returns the [xmin, xmax, ymin, ymax] box of one waving person
    def detectWave(self, image, gender=False):
        print("============================================================")
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        # At least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # Check each person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # Wrist above the elbow
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # Wrist above the elbow
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
            if len(loaction) > 0:
                # Return the location of the first waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate
        return None

    # Image callback: publish the waving person's position
    def imgCallback(self, image):
        #try:
        #cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        #except CvBridgeError as e:
        #print(e)

        cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        position = self.detectWave(cv_image)
        if position is not None:
            msg = Description()
            msg.hair_style = "unknowm"
            msg.pose = "unknowm"
            # Feature detection: face and body
            img_body = cv_image[position[2]:position[3],
                                position[0]:position[1]]
            face = self.msgtoface(img_body)
            result1 = self.client_face.detect(face, self.imageType,
                                              self.option_face)
            print(result1["result"]["face_list"][0]["gender"]["type"])
            print(result1["result"]["face_list"][0]["glasses"]["type"])
            print(result1["result"]["face_list"][0]["race"]["type"])
            print(str(result1["result"]["face_list"][0]["age"]))
            data = result1["result"]["face_list"][0]
            # gender + glasses + skin colour + age
            msg.gender = data["gender"]["type"]
            msg.glasses = data["glasses"]["type"]
            msg.skin_color = data["race"]["type"]
            msg.age = str(data["age"])

            # colour + clothing
            body = self.msgtobody(img_body)
            result2 = self.client_body.bodyAttr(body, self.option_body)
            print(
                result2["person_info"][0]["attributes"]["upper_wear"]["name"])
            data = result2["person_info"][0]["attributes"]
            # red, orange, yellow, green, blue, purple, pink, black, white, gray, brown
            color = data["upper_color"]["name"]
            if color == "红":
                msg.clothes_color = "red"
            elif color == "橙":
                msg.clothes_color = "orange"
            elif color == "黄":
                msg.clothes_color = "yellow"
            elif color == "绿":
                msg.clothes_color = "green"
            elif color == "蓝":
                msg.clothes_color = "blue"
            elif color == "紫":
                msg.clothes_color = "purple"
            elif color == "粉":
                msg.clothes_color = "pink"
            elif color == "黑":
                msg.clothes_color = "black"
            elif color == "白":
                msg.clothes_color = "white"
            elif color == "灰":
                msg.clothes_color = "gray"
            else:
                msg.clothes_color = "brown"

            type_ = data["upper_wear"]["name"]
            if type_ == "长袖":
                msg.clothes = "Coat"
            else:
                msg.clothes = "Short"
            self.fet_pub.publish(msg)

        # If a waving person was found
        try:
            cv2.rectangle(cv_image, (position[1], position[3]),
                          (position[0], position[2]), (0, 0, 255))
            roi = RegionOfInterest()
            roi.x_offset = position[0]
            roi.y_offset = position[2]
            roi.width = position[1] - position[0]
            roi.height = position[3] - position[2]
            self.roi = roi
            self.roi_pub.publish(roi)
            print("One Wave!")
        except:
            pass
        cv_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
        self.img_pub.publish(cv_image)
Example #15
from aip import AipBodyAnalysis
import matplotlib.pyplot as plt  # plt for displaying images
import matplotlib.image as mpimg  # mpimg for reading images
import numpy as np
""" 你的 APPID AK SK """
APP_ID = '22952720'
API_KEY = 'inMk4SrpYUxPmSUGmqGw89Ow'
SECRET_KEY = '7ecoXWQBLwwEjwp83Od0jV5EZM166oG7'

client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
""" 读取图片 """


def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()


image = get_file_content('post_img.jpg')
""" 调用人体检测与属性识别 """

resp = client.bodyAttr(image)
# print(resp)
if resp.get('person_info'):
    attributes = resp['person_info'][0]['attributes']
    print('Recognition succeeded:\nupper wear: {}\nlower wear: {}\nage: {}\ncellphone: {}'.format(
        attributes['upper_wear']['name'], attributes['lower_wear']['name'],
        attributes['age']['name'], attributes['cellphone']['name']))
else:
    print('Recognition failed')
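On failure the SDK returns an error payload instead of person_info; a hedged sketch of a guard (the error_code check mirrors the judger method in Example #10):

resp = client.bodyAttr(image)
if 'error_code' in resp:
    # e.g. invalid image or quota exceeded; error_msg describes the problem
    print('Request failed:', resp.get('error_code'), resp.get('error_msg'))
elif resp.get('person_num', 0) > 0:
    print('Detected', resp['person_num'], 'person(s)')
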
class BodyCheck:
    def __init__(self):
        self.time = time.time()
        APP_ID = '18889374'
        API_KEY = 'pUNweNaSK4rWz57vGs9KpuW1'
        SECRET_KEY = 'ru5LqWM0lrcVYBh9cjd32fy951nagqcA'
        self.image_type = "BASE64"
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.client_body.setConnectionTimeoutInMillis(2000)
        self.client_body.setSocketTimeoutInMillis(2000)
        self.client_face.setConnectionTimeoutInMillis(2000)
        self.client_face.setSocketTimeoutInMillis(2000)
        self.bridge = CvBridge()
        self.ispub = False
        ############## person data
        self.filepath = "/home/qian/catkin_ws/src/fare_src/kamerider_image/kamerider_image_api/imgfile/"
        ############## tracking data
        self.roi = None
        self.depth_array = None
        self.target_pos = Pose()

        ############## topic names
        # Subscribers and publishers
        self.check_gender = rospy.get_param('~check_gender', 'False')

        if isinstance(self.check_gender, str):
            self.check_gender = self.check_gender.lower() == 'true'

        ############## publishers
        self.img_pub = rospy.Publisher("/image/test", Image, queue_size=1)
        self.roi_pub = rospy.Publisher("roi", RegionOfInterest, queue_size=1)
        self.img_sub = rospy.Subscriber("/usb_cam/image_raw",
                                        Image,
                                        self.imgCallback,
                                        queue_size=1)
        self.word_pub = rospy.Publisher("/xfwords", String, queue_size=1)
        self.face_pub = rospy.Publisher("/start_recognize_faces",
                                        String,
                                        queue_size=1)

        print("============================================================")

    # Convert image data for the body API
    def msgtobody(self, image_msg, file_name='image_body.png'):
        # Save the frame to disk and return its raw bytes
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            return fp.read()

    # Convert image data for the face API (base64)
    def msgtoface(self, image_msg, file_name='image_faces.png'):
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            data = base64.b64encode(fp.read())
            # python2.7
            data = str(data).encode('utf-8')
            return data

    # Wave detection: returns the [xmin, xmax, ymin, ymax] box of one waving person
    def detectWave(self, image, gender=False):
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        point_t = []
        # At least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # Check each person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # Wrist above the elbow
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # Wrist above the elbow
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # Wrist on the outer side
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))

            # Someone is waving
            if len(loaction) > 0:
                # Female detection
                # ---------- gender check ----------
                if gender:
                    options = {}
                    options["type"] = "gender"
                    # Only people who waved are checked
                    for locate in loaction:
                        img = image[locate[2]:locate[3], locate[0]:locate[1]]
                        img = self.msgtobody(img, "image_face.png")
                        result = self.client_body.bodyAttr(img, options)
                        try:
                            result['person_info'][0]['attributes'][
                                'gender']['name'] == "女性"
                        except:
                            continue
                        # If female, return her location immediately
                        if result['person_info'][0]['attributes'][
                                'gender']['name'] == "女性":
                            loc = []
                            loc.append(locate[0])
                            loc.append(locate[1])
                            loc.append(locate[2])
                            loc.append(locate[3])
                            return locate
                # Otherwise return the first waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate
        return None

    # Image callback: publish the waving person's position
    def imgCallback(self, image):
        num = 0
        try:
            cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        except CvBridgeError as e:
            print(e)

    # If a waving person was found
        roi = RegionOfInterest()
        try:
            position = self.detectWave(cv_image, self.check_gender)
            cv2.rectangle(cv_image, (position[1], position[3]),
                          (position[0], position[2]), (0, 0, 255))
            roi.x_offset = position[0]
            roi.y_offset = position[2]
            roi.width = position[1] - position[0]
            roi.height = position[3] - position[2]
            self.roi = roi
            self.roi_pub.publish(roi)
            if self.ispub == False:
                stringq = "I can tell one wave. Now I will recognize people. "
                self.word_pub.publish(stringq)
                stringq = "ok "
                self.face_pub.publish(stringq)
                self.ispub = True
            rospy.loginfo("One Wave!")
            num = 1
        except:
            self.roi = roi
        if num == 0:
            self.roi_pub.publish(roi)
        cv_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
        self.img_pub.publish(cv_image)