Example #1
# requires: from aip import AipBodyAnalysis; from PIL import Image; import time
def bdcut(path):
    try:
        pic = Image.open(path)
        if pic.mode == "P": pic = pic.convert('RGB')  # some images are in P (palette) mode and break after base64 encoding
        (wf, hf) = pic.size
        BD_VIP = '否'  # '否' means "no": free (non-VIP) account
        BD_AI_client = AipBodyAnalysis('替换appid', '替换apikey', '替换secrekey')  # replace with your own appid / apikey / secretkey
        with open(path, 'rb') as fp:
            x_nose = int(BD_AI_client.bodyAnalysis(fp.read())["person_info"][0]['body_parts']['nose']['x'])  # x coordinate of the nose
        if BD_VIP == '否':
            time.sleep(0.4)  # free accounts are limited to QPS = 2
        else:
            time.sleep( 1/int(1.1*BD_VIP) )
        if x_nose + 1/3*hf > wf:  # decide where the nose sits in the image
            x_left = wf - 2/3*hf  # align the crop to the right edge
        elif x_nose - 1/3*hf < 0:
            x_left = 0  # align the crop to the left edge
        else:
            x_left = x_nose - 1/3*hf  # centre the crop on the nose and expand to both sides
        fixed_pic = pic.crop((x_left, 0, x_left + 2/3*hf, hf))
        fixed_pic.save(path.replace('fanart', 'poster'), quality=95)
        print('[+] Baidu AI crop succeeded')
    except:
        imagecut = 1
        print('[-] ' + path + ' AI analysis failed; skipping AI and cropping directly.\n')
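The snippet always cuts a window that is 2/3 of the image height wide (a 2:3 poster), sliding it so the nose stays centred unless that would push the window past an edge. A minimal, self-contained sketch of just that arithmetic (no API call; the numbers are made up):

def poster_window(x_nose, width, height):
    # return (left, top, right, bottom) of a crop 2/3*height wide,
    # centred on x_nose and clamped to the image borders
    crop_w = 2 / 3 * height
    if x_nose + crop_w / 2 > width:      # nose too far right: stick to the right edge
        x_left = width - crop_w
    elif x_nose - crop_w / 2 < 0:        # nose too far left: stick to the left edge
        x_left = 0
    else:                                # otherwise centre the window on the nose
        x_left = x_nose - crop_w / 2
    return (x_left, 0, x_left + crop_w, height)

print(poster_window(x_nose=900, width=1000, height=600))  # (600.0, 0, 1000.0, 600)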
class BaiduAIP(object):
    def __init__(self):
        self.client = AipBodyAnalysis(cfg.APP_ID, cfg.API_KEY, cfg.SECRET_KEY)

    def bodyAnalysis(self, img_jpg):
        retval, buffer = cv2.imencode('.jpg', img_jpg)  # encode the OpenCV image as JPEG bytes
        result = self.client.bodyAnalysis(buffer.tobytes())  # the SDK base64-encodes the buffer internally
        return result
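A possible way to drive this wrapper from an OpenCV frame; the cfg module holding APP_ID / API_KEY / SECRET_KEY is assumed to exist as in the class above, and the file name is only illustrative:

import cv2

frame = cv2.imread('person.jpg')          # placeholder image path
aip_client = BaiduAIP()
resp = aip_client.bodyAnalysis(frame)
print(resp.get('person_num'), 'person(s) detected')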
Example #3
def fanHuiTuPianGuanDianX_Y(url):  # get the body key-point response from Baidu AI; the argument is an image path
    """ Your APPID / AK / SK """
    APP_ID = '22952436'
    API_KEY = 'a5UrzVfjQHyuK0GSCXk8QoQH'
    SECRET_KEY = '8wguEEmbNTnMfAOOOigMr1cM1SZXvq1c'
    client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)

    def get_file_content(filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()

    image = get_file_content(url)
    return client.bodyAnalysis(image)
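The returned dict holds person_num plus a person_info list whose body_parts map each key point to x, y and score (a full sample response is quoted in Example #10). A small sketch of reading the first person's nose position; the file name is a placeholder:

resp = fanHuiTuPianGuanDianX_Y('person.jpg')
if resp.get('person_num', 0) > 0:
    nose = resp['person_info'][0]['body_parts']['nose']
    print('nose at (%.1f, %.1f), score %.2f' % (nose['x'], nose['y'], nose['score']))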
Example #4
class BaiDuAPI(object):
    # constructor / initialiser
    def __init__(self, filePath):
        target = configparser.ConfigParser()
        target.read(filePath, encoding='utf-8-sig')

        # note: the credentials come from module-level constants,
        # not from the config file read above
        self.client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)

    """ Read the image """

    def get_file_content(self, photoPath):
        with open(photoPath, 'rb') as fp:
            return fp.read()

    """ 主函数 """

    def file_main(self, photoPath):

        # read the raw bytes; the SDK base64-encodes the image internally,
        # so calling base64.b64encode here would double-encode it
        #base64_data = base64.b64encode(photoPath)
        img = self.get_file_content('{}'.format(photoPath))

        """ Call body key-point detection """
        # only one person's key points are handled here:
        # if the picture contains several people, only one person's joints
        # are annotated; this could be extended to cover everyone
        #------

        response = self.client.bodyAnalysis(img)
        print(response)

        result = response['person_info'][0]['body_parts']
        jo = joint.Joint(result)
        jo.xunhun(photoPath)
        print(result)
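A hypothetical call sequence for this class; the ini path and image path are placeholders, and the joint module comes from the original project:

api = BaiDuAPI('baidu.ini')
api.file_main('person.jpg')  # prints the raw response, draws the joints via joint.Joint, then prints body_parts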
Example #5
def bodypart(filename):

    #    APP_ID = 'd90755ad2f2047dbabb12ad0adaa0b03'
    #    API_KEY = 'b7b0f4d01f7f4aef9b5453f6558c23b1'
    #    SECRET_KEY = '6ad666162ef24213b5bde7bdd904fcbe'

    client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)

    # """ 读取图片 """
    image = get_file_content(filename)

    # """ 调用人体关键点识别 """
    para = client.bodyAnalysis(image)
    # print(para)
    # time.sleep(2)
    return para
Example #6
def face_center(filename, model):
    app_id = config.getInstance().conf.get("face", "appid")
    api_key = config.getInstance().conf.get("face", "key")
    app_secret = config.getInstance().conf.get("face", "secret")
    client = AipBodyAnalysis(app_id, api_key, app_secret)
    with open(filename, 'rb') as fp:
        img = fp.read()
    result = client.bodyAnalysis(img)
    if 'error_code' in result:
        raise ValueError(result['error_msg'])
    print('[+]Found person      ' + str(result['person_num']))
    # use the nose x-coordinate as the centre point (keep the right-most person)
    maxRight = 0
    maxTop = 0
    for person_info in result["person_info"]:
        x = int(person_info['body_parts']['nose']['x'])
        top = int(person_info['location']['top'])
        if x > maxRight:
            maxRight = x
            maxTop = top
    return maxRight, maxTop
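A sketch of how face_center's result could drive a 2:3 centre crop like the one in Example #1; the credentials still come from the config section above, the file names are placeholders, and the unused model argument is passed only to satisfy the signature:

from PIL import Image

x, top = face_center('sample-fanart.jpg', model=None)
img = Image.open('sample-fanart.jpg')
w, h = img.size
crop_w = 2 / 3 * h
left = min(max(x - crop_w / 2, 0), w - crop_w)  # clamp the window to the image
img.crop((left, 0, left + crop_w, h)).save('sample-poster.jpg', quality=95)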
Example #7
def select_power():
    i = input('''
                     Select the function you need

                    1. Body key-point detection
                    2. Body attribute recognition
                    3. Crowd counting
    ''')
    return int(i)
if __name__ == "__main__":    
    while(1):
        a=input('Enter the path of an image (under 4 MB): ')
        image = get_file_content(a)

        i=select_power()
        if(i==1):
            """ 调用人体关键点识别 """
            j=client.bodyAnalysis(image)
            """decode(’utf-8’)表示将utf-8编码的字符串转换成unicode编码。
            encode(‘gb2312’),表示将unicode编码的字符串转换成gb2312编码。"""
            print(json.dumps(j,sort_keys=True,indent=4,separators=(',',':')))
            print_div(a,j)

        if(i==2):
            """ 调用人体属性识别
            client.bodyAttr(image);"""
            """ 如果有可选参数 """
            options = {}
            options["type"] = "gender,age"
            """ 带参数调用人体属性识别 """
            j=client.bodyAttr(image, options)
            d=json.dumps(j,sort_keys=True,indent=4,separators=(',',':'),ensure_ascii=False)
            print(d)
Example #8
class Guest_Recognition:
    def __init__(self):
        APP_ID = '18721308'
        API_KEY = 'lNQGdBNazTPv8LpSP4x0GQlI'
        SECRET_KEY = 'nW8grONY777n4I2KvpOVuKGDNiY03omI'
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.image_type = "BASE64"
        msg = reception_image()
        self.guest = Guest()
        # self.name = None
        # self.age = None
        # self.image = None
        # self.gender = None
        # self.drink = None

        self.options_body = {}
        self.options_body["type"] = "gender,upper_color,upper_wear_fg"
        self.options_face = {}
        self.options_face["face_field"] = "age,gender"
        self.options_face["max_face_num"] = 3
        self.options_face["face_type"] = "LIVE"

        #ros params
        self.sub_image_raw_topic_name = None
        self.sub_destination_topic_name = None
        self.pub_gender_recognition_topic_name = None
        self.pub_object_position2d = None
        # self.depth_img                          = np.array((480,640))
        self.get_params()

        # publishers
        self.objpos_pub = rospy.Publisher('/image/object_position2d',
                                          String,
                                          queue_size=1)

        # subscribers
        self.control_sub = rospy.Subscriber("/control", reception,
                                            self.controlCallback)
        self.speech_sub = rospy.Subscriber('/speech/check_door', String,
                                           self.find_people)

    # --------------
    def get_params(self):
        self.sub_image_raw_topic_name = rospy.get_param(
            'sub_image_raw_topic_name', '/camera/rgb/image_raw')
        # self.sub_depth_image_topic_name        = rospy.get_param('sub_depth_image_topic_name',        '/camera/depth/image_raw')
        self.pub_gender_recognition_topic_name = rospy.get_param(
            'pub_gender_recognition_topic_name',
            '/kamerider_image/gender_recognition')
        self.objpos_pub = rospy.get_param('objpos_pub',
                                          '/image/object_position2d')
        # create the publishers and subscribers; topic names come from the ROS param server
        rospy.Subscriber(self.sub_image_raw_topic_name, Image,
                         self.imageCallback)
        # rospy.Subscriber(self.sub_depth_image_topic_name, Image, self.depthCallback)

        self.pub_result = rospy.Publisher(
            self.pub_gender_recognition_topic_name, String, queue_size=1)
        # self.pub_person_pos = rospy.Publisher(self.pub_person_pos_topic_name, Pose,queue_size=1)
        self.objpos_pub = rospy.Publisher('/image/object_position2d',
                                          String,
                                          queue_size=1)

    def depthCallback(self, msg):
        bridge = CvBridge()
        try:
            cv_image = bridge.imgmsg_to_cv2(msg, '16UC1')
            self.depth_img = cv_image
        except CvBridgeError as e:
            print(e)


# -------------------------------------

# convert an OpenCV image into bytes for the body-analysis API

    def msgtobody(self, image_msg, file_name='image_body.png'):
        # write the frame to disk and read the raw bytes back (the SDK base64-encodes internally)
        cv2.imwrite(file_name, image_msg)
        with open(file_name, 'rb') as fp:
            return fp.read()

    # search the face library; groupIdList is the user-group name
    # (written as a module-level helper in the original: no self, uses a global client)
    def face_search(filepath, groupIdList):
        image = fileopen(filepath)
        imageType = "BASE64"
        result = client.search(image, imageType, groupIdList)
        # print(result)  # print the full response
        print(result['result']['user_list'][0]['user_id'])

    # decide whether the face is already known
    def judger(result):
        result_sc = result['result']['user_list'][0]['score']
        if result_sc > 80:
            return result['result']['user_list'][0]['user_id']
        else:
            return False

    """ 调用人脸视频比对人脸库 """

    def face_comparision(window_name='image', camera_idx=0):
        cv2.namedWindow(window_name)
        # the video source can be a saved clip or a live USB camera
        cap = cv2.VideoCapture(camera_idx)

        while cap.isOpened():
            ok, frame = cap.read()  # read one frame
            if not ok:
                break
            c = cv2.waitKey(100)  # press q to quit
            if c == ord('q'):
                break
            cv2.imshow(window_name, frame)
            base64_data = frame2base64(frame)
            image = str(base64_data, 'utf-8')
            imageType = "BASE64"

            result = client.search(image, imageType, 'test_group')

            if result['error_code'] == 0:
                judger(result)

        cap.release()
        cv2.destroyAllWindows()

    def find_people(self, msg):
        # msg_nav=msh
        # bridge = CvBridge()
        # try:
        #     cv_image = bridge.imgmsg_to_cv2(msg, 'bgr8')
        # except CvBridgeError as e:
        #     print (e)
        cnt = 0
        filepath = 'find_people.png'  # staging file for the API call (assumed; not defined in the original snippet)
        cap = cv2.VideoCapture(0)
        while cap.isOpened():
            ret, imgraw0 = cap.read()
            if ret:
                if cnt % 10 == 0:
                    imgraw = cv2.resize(imgraw0, (512, 512))
                    cv2.imwrite(filepath, imgraw)
                    with open(filepath, "rb") as fp:
                        segment_data = fp.read()
                    result = self.client_body.bodyAttr(segment_data, self.options_body)
                    # print(result)

                    # parse the location information
                    if 'person_num' in result:
                        person_num = result['person_num']
                    else:
                        person_num = 0

                    for num in range(0, int(person_num)):
                        print(num)
                        person = result['person_info'][num]
                        location = person['location']
                        upper_color = person['attributes']['upper_color']['name']
                        upper_wear_fg = person['attributes']['upper_wear_fg']['name']
                        print('upper_color:', upper_color, ' upper_wear_fg:',
                              upper_wear_fg)

                        # corners of the person's bounding box
                        A = (int(location['left']), int(location['top']))
                        B = (int(location['left']) + int(location['width']),
                             int(location['top']))
                        C = (int(location['left']) + int(location['width']),
                             int(location['top']) + int(location['height']))
                        D = (int(location['left']),
                             int(location['top']) + int(location['height']))

                        cv2.waitKey(1000)
                        # self.objpos_pub.publish()
                cnt += 1

            cv2.waitKey(0)

    def imageCallback(self, msg):
        if self.take_photo_signal:
            print("[INFO] Start to take photo")
            bridge = CvBridge()
            self.take_photo_signal = False
            try:
                cv_image = bridge.imgmsg_to_cv2(msg, 'bgr8')
                cv2.imwrite(self.path_to_save_image, cv_image)
            except CvBridgeError as e:
                print(e)
            self.detection()

    # wave detection
    # image: the input OpenCV image
    # gender: whether to also filter by gender
    def detectWave(self, image, gender=False):
        #if time.time()-self.time < self.tau:
        #     return None
        # self.time = time.time()
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        point_t = []
        # at least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # check each detected person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # wrist above the elbow (image y grows downward)
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # wrist above the elbow (image y grows downward)
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))

            # someone is waving
            if len(loaction) > 0:
                # prefer a female person
                # ---------- gender check ----------
                if gender:
                    options = {}
                    options["type"] = "gender"
                    # only consider people who are waving
                    for locate in loaction:
                        img = image[locate[2]:locate[3], locate[0]:locate[1]]
                        img = self.msgtobody(img, "try__.png")
                        result = self.client_body.bodyAttr(img, options)
                        try:
                            result['person_info'][0]['attributes'][
                                'gender'] == "女性"
                        except:
                            continue
                        # if female, return her position immediately
                        if result['person_info'][0]['attributes'][
                                'gender'] == "女性":
                            loc = []
                            loc.append(locate[0])
                            loc.append(locate[1])
                            loc.append(locate[2])
                            loc.append(locate[3])
                            return locate
                # otherwise return the first (arbitrary) waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate

        return None
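The wave test relies on image coordinates growing downward: elbow['y'] > wrist['y'] means the wrist is above the elbow, and the extra x comparison checks that the wrist is outside the shoulder. The same rule, restated as a self-contained function over made-up key points:

def is_waving(parts):
    # parts: one entry of person_info[n]['body_parts'] from bodyAnalysis
    right_up = (parts['right_elbow']['y'] > parts['right_wrist']['y'] and
                parts['right_wrist']['x'] < parts['right_shoulder']['x'])
    left_up = (parts['left_elbow']['y'] > parts['left_wrist']['y'] and
               parts['left_wrist']['x'] > parts['left_shoulder']['x'])
    return right_up or left_up

sample = {  # right wrist raised above the elbow and outside the shoulder
    'right_elbow':    {'x': 300, 'y': 420},
    'right_wrist':    {'x': 250, 'y': 350},
    'right_shoulder': {'x': 330, 'y': 380},
    'left_elbow':     {'x': 480, 'y': 430},
    'left_wrist':     {'x': 470, 'y': 480},
    'left_shoulder':  {'x': 440, 'y': 380},
}
print(is_waving(sample))  # True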
Example #9
class BodyCheck:
    def __init__(self):
        self.time = time.time()
        APP_ID = '18889374'
        API_KEY = 'pUNweNaSK4rWz57vGs9KpuW1'
        SECRET_KEY = 'ru5LqWM0lrcVYBh9cjd32fy951nagqcA'
        self.imageType = "BASE64"
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.bridge = CvBridge()
        ############## person data
        self.filepath = "/home/dell/img/"
        self.option_face = {}
        self.option_body = {}
        self.option_face["face_field"] = "age,gender,glasses,race"
        self.option_face["max_face_num"] = 1
        self.option_body["type"] = "upper_wear,upper_color"

        ############## tracking data
        self.roi = None

        ############## topic names
        # subscribers and publishers
        self.sub_image_name = rospy.get_param('~image_raw_topic_name',
                                              '/usb_cam/image_raw')
        # publishers
        self.pub_pos_name = rospy.get_param('~object_view_topic_name', 'roi')
        self.pub_img_name = rospy.get_param('~image_test_topic_name',
                                            '/image/test')
        self.pub_fet_name = rospy.get_param('~feature_topic_name',
                                            '/image/feature')
        ############## publishers
        self.img_pub = rospy.Publisher(self.pub_img_name, Image)
        self.roi_pub = rospy.Publisher(self.pub_pos_name, RegionOfInterest)
        self.fet_pub = rospy.Publisher(self.pub_fet_name, Description)

        self.img_sub = rospy.Subscriber(self.sub_image_name, Image,
                                        self.imgCallback)
        print("============================================================")

    # convert an OpenCV image into bytes for the body-analysis API
    def msgtobody(self, image_msg, file_name='image_body.png'):
        # write the frame to disk and read the raw bytes back (the SDK base64-encodes internally)
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            return fp.read()

    # convert an OpenCV image into base64 for the face API
    def msgtoface(self, image_msg, file_name='image_faces.png'):
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            data = base64.b64encode(fp.read())
            # python2.7
            data = str(data).encode('utf-8')
            return data

    # wave detection: returns the bounding box (x1, x2, y1, y2) of one waving person
    def detectWave(self, image, gender=False):
        print("============================================================")
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        # at least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # check each detected person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # wrist above the elbow (image y grows downward)
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # wrist above the elbow (image y grows downward)
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
            if len(loaction) > 0:
                # return the position of one (arbitrary) waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate
        return None

    # image callback: publishes the waving person's position
    def imgCallback(self, image):
        #try:
        #cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        #except CvBridgeError as e:
        #print(e)

        cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        position = self.detectWave(cv_image)
        if position is not None:
            msg = Description()
            msg.hair_style = "unknown"
            msg.pose = "unknown"
            # feature detection: face and body
            img_body = cv_image[position[2]:position[3],
                                position[0]:position[1]]
            face = self.msgtoface(img_body)
            result1 = self.client_face.detect(face, self.imageType,
                                              self.option_face)
            print(result1["result"]["face_list"][0]["gender"]["type"])
            print(result1["result"]["face_list"][0]["glasses"]["type"])
            print(result1["result"]["face_list"][0]["race"]["type"])
            print(str(result1["result"]["face_list"][0]["age"]))
            data = result1["result"]["face_list"][0]
            # gender + glasses + skin colour + age
            msg.gender = data["gender"]["type"]
            msg.glasses = data["glasses"]["type"]
            msg.skin_color = data["race"]["type"]
            msg.age = str(data["age"])

            # colour + clothing
            body = self.msgtobody(img_body)
            result2 = self.client_body.bodyAttr(body, self.option_body)
            print(
                result2["person_info"][0]["attributes"]["upper_wear"]["name"])
            data = result2["person_info"][0]["attributes"]
            # red / orange / yellow / green / blue / purple / pink / black / white / gray / brown (the API returns Chinese colour names)
            color = data["upper_color"]["name"]
            if color == "红":
                msg.clothes_color = "red"
            elif color == "橙":
                msg.clothes_color = "orange"
            elif color == "黄":
                msg.clothes_color = "yellow"
            elif color == "绿":
                msg.clothes_color = "green"
            elif color == "蓝":
                msg.clothes_color = "blue"
            elif color == "紫":
                msg.clothes_color = "purple"
            elif color == "粉":
                msg.clothes_color = "pink"
            elif color == "黑":
                msg.clothes_color = "black"
            elif color == "白":
                msg.clothes_color = "white"
            elif color == "灰":
                msg.clothes_color = "gray"
            else:
                msg.clothes_color = "brown"

            type_ = data["upper_wear"]["name"]
            if type_ == "长袖":  # "长袖" means long sleeves
                msg.clothes = "Coat"
            else:
                msg.clothes = "Short"
            self.fet_pub.publish(msg)

        # if a waving person was found, draw and publish the ROI
        try:
            cv2.rectangle(cv_image, (position[1], position[3]),
                          (position[0], position[2]), (0, 0, 255))
            roi = RegionOfInterest()
            roi.x_offset = position[0]
            roi.y_offset = position[2]
            roi.width = position[1] - position[0]
            roi.height = position[3] - position[2]
            self.roi = roi
            self.roi_pub.publish(roi)
            print("One Wave!")
        except:
            pass
        cv_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
        self.img_pub.publish(cv_image)
Example #10
class BodyClient(object):
    # APP_ID = '44eab39f0ed44844b94f487a6e88fdbc'  # 'd90755ad2f2047dbabb12ad0adaa0b03'
    # API_KEY = '55e735d6888b46908915f3533a6b7442'  # '22f1025b88f54494bcf5d980697b4b83 '
    # SECRET_KEY = '41213cbdaffa483d9ed9a59a24157d4b'  # '4a4b41139c204905be1db659d751355f'
    APP_ID = '18176250'  # 'd90755ad2f2047dbabb12ad0adaa0b03'
    API_KEY = 'jgfFvfLmKWBTuuIFdFgF4YXI'  # '22f1025b88f54494bcf5d980697b4b83 '
    SECRET_KEY = 'NCnPnPOcSRexlxoz9fI75fUmEGl3H15f'  # '4a4b41139c204905be1db659d751355f'

    def __init__(self):
        self.client = AipBodyAnalysis(BodyClient.APP_ID, BodyClient.API_KEY, BodyClient.SECRET_KEY)

    def _body_seg(self,filename):
        image = get_file_content(filename)
        res = self.client.bodySeg(image)
        labelmap = base64.b64decode(res['labelmap'])
        nparr_labelmap = np.frombuffer(labelmap, np.uint8)
        labelmapimg = cv2.imdecode(nparr_labelmap, 1)
        im_new_labelmapimg = np.where(labelmapimg == 1, 255, labelmapimg)
        return im_new_labelmapimg

    def _canny_points(self,img,rect=None):
        if rect:
            x,y,w,h = rect
        else:
            x,y=0,0
            h,w = img.shape[:2]
        body = img[y:y + h, x:x + w]
        edges = cv2.Canny(body, 10, 100)
        edgesmat = np.mat(edges)
        points = [(j + x, i + y) for i in range(h) for j in range(w) if edgesmat[i, j] == 255]
        return points

    def body_seg_points(self,filename):
        img_seg=self._body_seg(filename)
        return self._canny_points(img_seg)

    def body_seg(self,filename):
        img_seg = self._body_seg(filename)
        h, w = img_seg.shape[:2]
        return self._canny_points(img_seg),w,h

    '''
    {'person_num': 2, 'person_info': [{'body_parts': {'left_hip': {'y': 549.78125, 'x': 423.34375, 'score': 0.8641700744628906}, 'top_head': {'y': 295.46875, 'x': 394.0, 'score': 0.8867737650871277}, 'right_mouth_corner': {'y': 344.375, 'x': 384.21875, 'score': 0.8865712285041809}, 'neck': {'y': 363.9375, 'x': 394.0, 'score': 0.8912984728813171}, 'left_shoulder': {'y': 383.5, 'x': 442.90625, 'score': 0.8800243139266968}, 'left_knee': {'y': 657.375, 'x': 433.125, 'score': 0.8804177045822144}, 'left_ankle': {'y': 755.1875, 'x': 423.34375, 'score': 0.8549085855484009}, 'left_mouth_corner': {'y': 344.375, 'x': 403.78125, 'score': 0.8695278763771057}, 'right_elbow': {'y': 442.1875, 'x': 305.96875, 'score': 0.9053295850753784}, 'right_ear': {'y': 324.8125, 'x': 374.4375, 'score': 0.8913755416870117}, 'nose': {'y': 324.8125, 'x': 394.0, 'score': 0.8767616748809814}, 'left_eye': {'y': 315.03125, 'x': 403.78125, 'score': 0.8842508792877197}, 'right_eye': {'y': 315.03125, 'x': 384.21875, 'score': 0.872444748878479}, 'right_hip': {'y': 549.78125, 'x': 374.4375, 'score': 0.8706536293029785}, 'left_wrist': {'y': 491.09375, 'x': 462.46875, 'score': 0.8681846857070923}, 'left_ear': {'y': 324.8125, 'x': 413.5625, 'score': 0.8833358883857727}, 'left_elbow': {'y': 432.40625, 'x': 491.8125, 'score': 0.8757244944572449}, 'right_shoulder': {'y': 383.5, 'x': 345.09375, 'score': 0.8604100942611694}, 'right_ankle': {'y': 755.1875, 'x': 364.65625, 'score': 0.883700966835022}, 'right_knee': {'y': 657.375, 'x': 364.65625, 'score': 0.8726198673248291}, 'right_wrist': {'y': 491.09375, 'x': 335.3125, 'score': 0.8524751663208008}}, 'location': {'height': 522.3967895507812, 'width': 213.4878540039062, 'top': 279.5125427246094, 'score': 0.9985131025314331, 'left': 288.0614013671875}}, {'body_parts': {'left_hip': {'y': 539.0, 'x': 413.25, 'score': 0.2676204741001129}, 'top_head': {'y': 741.5, 'x': 524.625, 'score': 0.0297189150005579}, 'right_mouth_corner': {'y': 478.25, 'x': 463.875, 'score': 0.009633682668209076}, 'neck': {'y': 852.875, 'x': 332.25, 'score': 0.01634016819298267}, 'left_shoulder': {'y': 377.0, 'x': 423.375, 'score': 0.0272684283554554}, 'left_knee': {'y': 650.375, 'x': 423.375, 'score': 0.3098172545433044}, 'left_ankle': {'y': 751.625, 'x': 433.5, 'score': 0.4415453672409058}, 'left_mouth_corner': {'y': 478.25, 'x': 463.875, 'score': 0.01229123119264841}, 'right_elbow': {'y': 427.625, 'x': 494.25, 'score': 0.08809270709753036}, 'right_ear': {'y': 680.75, 'x': 750, 'score': 0.02279716171324253}, 'nose': {'y': 488.375, 'x': 453.75, 'score': 0.02511453814804554}, 'left_eye': {'y': 488.375, 'x': 443.625, 'score': 0.02269705384969711}, 'right_eye': {'y': 751.625, 'x': 750, 'score': 0.02191649936139584}, 'right_hip': {'y': 539.0, 'x': 372.75, 'score': 0.1868444383144379}, 'left_wrist': {'y': 488.375, 'x': 474.0, 'score': 0.3365231156349182}, 'left_ear': {'y': 893.375, 'x': 403.125, 'score': 0.007937739603221416}, 'left_elbow': {'y': 437.75, 'x': 484.125, 'score': 0.1944440901279449}, 'right_shoulder': {'y': 377.0, 'x': 433.5, 'score': 0.02875573188066483}, 'right_ankle': {'y': 751.625, 'x': 423.375, 'score': 0.1604309529066086}, 'right_knee': {'y': 670.625, 'x': 372.75, 'score': 0.1398747861385345}, 'right_wrist': {'y': 488.375, 'x': 474.0, 'score': 0.07319473475217819}}, 'location': {'height': 539.47509765625, 'width': 126.1507263183594, 'top': 458.58251953125, 'score': 0.00636356882750988, 'left': 622.8492431640625}}], 'log_id': 1953527121404955486}
    '''
    def _body_part(self,filename):
        image = get_file_content(filename)
        para = self.client.bodyAnalysis(image)
        if DEBUG:
            print(para)
        person_num=para.get('person_num',0)
        if person_num < 1:
            raise NoBodyException("No person detected in file %s; details: %s" % (filename, para))
        person=para['person_info'][0]
        # score = person['location']['score']
        # if score < 0.5:
        #     raise NoBodyException()
        loc = person['location']
        x_left = int(loc['left'])
        y_top = int(loc['top'])
        w = int(loc['width'])
        h = int(loc['height'])
        return person['body_parts'],(x_left,y_top,w,h)

    #top_head, left_ankle, right_ankle
    def body_points(self,filename):
        parts,rect = self._body_part(filename)
        points = {k: (v['x'], v['y']) for k, v in parts.items()}
        return points,rect

    def process_body(self,filename):
        img = cv2.imread(filename)
        height, width = img.shape[:2]
        body_points, rect = self.body_points(filename)

        x_left, y_top, w, h = rect
        new_rect=expand_rect(x_left,y_top,w,h,width,height)

        img_seg = self._body_seg(filename)
        # cv2.imshow("seg",img_seg)
        outline_points = self._canny_points(img_seg, new_rect)
        # print(outline_points)
        return body_points, outline_points, rect
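A hypothetical driver for BodyClient; the image path is a placeholder, and helpers such as get_file_content, expand_rect, DEBUG and NoBodyException come from the original module and are assumed to be in scope:

client = BodyClient()
try:
    key_points, outline_points, (x, y, w, h) = client.process_body('person.jpg')
    print('person box:', x, y, w, h)
    print('top of head at:', key_points['top_head'])
    print('edge points from the segmentation:', len(outline_points))
except NoBodyException as err:
    print(err)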
class BodyCheck:
    def __init__(self):
        self.time = time.time()
        APP_ID = '18889374'
        API_KEY = 'pUNweNaSK4rWz57vGs9KpuW1'
        SECRET_KEY = 'ru5LqWM0lrcVYBh9cjd32fy951nagqcA'
        self.image_type = "BASE64"
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.client_body.setConnectionTimeoutInMillis(2000)
        self.client_body.setSocketTimeoutInMillis(2000)
        self.client_face.setConnectionTimeoutInMillis(2000)
        self.client_face.setSocketTimeoutInMillis(2000)
        self.bridge = CvBridge()
        self.ispub = False
        ############## person data
        self.filepath = "/home/qian/catkin_ws/src/fare_src/kamerider_image/kamerider_image_api/imgfile/"
        ############## tracking data
        self.roi = None
        self.depth_array = None
        self.target_pos = Pose()

        ############## topic names
        # subscribers and publishers
        self.check_gender = rospy.get_param('~check_gender', 'False')

        if (type(self.check_gender) == type('text')):
            self.check_gender = False

        ############## publishers
        self.img_pub = rospy.Publisher("/image/test", Image, queue_size=1)
        self.roi_pub = rospy.Publisher("roi", RegionOfInterest, queue_size=1)
        self.img_sub = rospy.Subscriber("/usb_cam/image_raw",
                                        Image,
                                        self.imgCallback,
                                        queue_size=1)
        self.word_pub = rospy.Publisher("/xfwords", String, queue_size=1)
        self.face_pub = rospy.Publisher("/start_recognize_faces",
                                        String,
                                        queue_size=1)

        print("============================================================")

    # convert an OpenCV image into bytes for the body-analysis API
    def msgtobody(self, image_msg, file_name='image_body.png'):
        # write the frame to disk and read the raw bytes back (the SDK base64-encodes internally)
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            return fp.read()

    # convert an OpenCV image into base64 for the face API
    def msgtoface(self, image_msg, file_name='image_faces.png'):
        cv2.imwrite(self.filepath + file_name, image_msg)
        with open(self.filepath + file_name, 'rb') as fp:
            data = base64.b64encode(fp.read())
            # python2.7
            data = str(data).encode('utf-8')
            return data

    # wave detection: returns the bounding box (x1, x2, y1, y2) of one waving person
    def detectWave(self, image, gender=False):
        print("CHECK")
        data = self.msgtobody(image, "image_body.png")
        # ---------- wave detection ----------
        result = self.client_body.bodyAnalysis(data)
        wave = []
        loaction = []
        point_t = []
        # at least one person detected
        if result['person_num'] > 0:
            id_ = -1
            # check each detected person
            for info in result['person_info']:
                id_ += 1
                keypoint = info['body_parts']
                # wrist above the elbow (image y grows downward)
                if keypoint['right_elbow']['y'] > keypoint['right_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['right_wrist']['x'] < keypoint[
                            'right_shoulder']['x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))
                # wrist above the elbow (image y grows downward)
                elif keypoint['left_elbow']['y'] > keypoint['left_wrist']['y']:
                    # wrist outside the shoulder
                    if keypoint['left_wrist']['x'] > keypoint['left_shoulder'][
                            'x']:
                        wave.append(id_)
                        loc = []
                        loc.append(int(info['location']['left']))
                        loc.append(
                            int(info['location']['left'] +
                                info['location']['width']))
                        loc.append(int(info['location']['top']))
                        loc.append(
                            int(info['location']['top'] +
                                info['location']['height']))
                        loaction.append(copy.deepcopy(loc))

            # someone is waving
            if len(loaction) > 0:
                # prefer a female person
                # ---------- gender check ----------
                if gender:
                    options = {}
                    options["type"] = "gender"
                    # only consider people who are waving
                    for locate in loaction:
                        img = image[locate[2]:locate[3], locate[0]:locate[1]]
                        img = self.msgtobody(img, "image_face.png")
                        result = self.client_body.bodyAttr(img, options)
                        try:
                            result['person_info'][0]['attributes'][
                                'gender'] == "女性"
                        except:
                            continue
                        # if female, return her position immediately
                        if result['person_info'][0]['attributes'][
                                'gender'] == "女性":
                            loc = []
                            loc.append(locate[0])
                            loc.append(locate[1])
                            loc.append(locate[2])
                            loc.append(locate[3])
                            return locate
                # otherwise return the first (arbitrary) waving person
                locate = loaction[0]
                loc = []
                loc.append(locate[0])
                loc.append(locate[1])
                loc.append(locate[2])
                loc.append(locate[3])
                return locate
        return None

    # image callback: publishes the waving person's position
    def imgCallback(self, image):
        num = 0
        try:
            cv_image = self.bridge.imgmsg_to_cv2(image, "bgr8")
        except CvBridgeError as e:
            print(e)

    # if a waving person was found, publish the ROI
        roi = RegionOfInterest()
        try:
            position = self.detectWave(cv_image, self.check_gender)
            cv2.rectangle(cv_image, (position[1], position[3]),
                          (position[0], position[2]), (0, 0, 255))
            roi.x_offset = position[0]
            roi.y_offset = position[2]
            roi.width = position[1] - position[0]
            roi.height = position[3] - position[2]
            self.roi = roi
            self.roi_pub.publish(roi)
            if self.ispub == False:
                stringq = "I can tell one wave. Now I will recognize people. "
                self.word_pub.publish(stringq)
                stringq = "ok "
                self.face_pub.publish(stringq)
                self.ispub = True
            rospy.loginfo("One Wave!")
            num = 1
        except:
            self.roi = roi
        if num == 0:
            self.roi_pub.publish(roi)
        cv_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
        self.img_pub.publish(cv_image)
Example #12
class BaiduApiUtil:

	def __init__(self, configPath, configSection):
		keyInfo = fu.readConfig(configPath, configSection)
		self.appid = keyInfo['appid']
		self.api_key = keyInfo['api_key']
		self.secret_key = keyInfo['secret_key']
		self.client = AipBodyAnalysis(self.appid, self.api_key, self.secret_key)
		self.filename = None
		self.picture = None
		self.picture_size = None
		self.picture_format = None

	def upload(self, filename):
		self.picture = fu.readPicture(filename)
		self.filename = filename.split('.')[0]
		img = Image.open(filename)
		self.picture_size = img.size
		self.picture_format = img.format

	def getAccessToken(self, configPath, configSection):
		keyInfo = fu.readConfig(configPath, configSection)
		host = keyInfo['addr'] % (keyInfo['grant_type'], keyInfo['client_id'], keyInfo['client_secret'])
		response = requests.post(host)
		if response.status_code != 200:
			print('Error Happened When Acquiring Access Token')
			return -1
		content = demjson.decode(response.text)
		if 'error' in content.keys():
			print('Invalid API Key or Secret Key')
			return -1
		return content['refresh_token']

	def getBodyAnalysis(self):
		response = self.client.bodyAnalysis(self.picture)
		if 'error_code' in response.keys():
			print(response['error_msg'])
			exit(-1)
		return response


	def getBodySeg(self):
		result = self.client.bodySeg(self.picture, {'type':'labelmap'})
		# foreground = base64.b64decode(result['foreground'])
		labelmap = base64.b64decode(result['labelmap'])
		# scoremap = base64.b64decode(result['scoremap'])

		# nparr_foreground = np.fromstring(foreground, np.uint8)
		# foregroundimg = cv2.imdecode(nparr_foreground, 1)
		# foregroundimg = cv2.resize(foregroundimg, self.picture_size, interpolation=cv2.INTER_NEAREST)
		# im_new_foreground = np.where(foregroundimg == 1, 10, foregroundimg)
		# cv2.imwrite(self.filename + '-foreground.png', im_new_foreground)

		nparr_labelmap = np.frombuffer(labelmap, np.uint8)
		labelmapimg = cv2.imdecode(nparr_labelmap, 1)
		labelmapimg = cv2.resize(labelmapimg, self.picture_size, interpolation=cv2.INTER_NEAREST)
		im_new_labelmapimg = np.where(labelmapimg == 1, 255, labelmapimg)
		cv2.imwrite(self.filename + '-labelmap.png', im_new_labelmapimg)

		# nparr_scoremap = np.fromstring(scoremap, np.uint8)
		# scoremapimg = cv2.imdecode(nparr_scoremap, 1)
		# scoremapimg = cv2.resize(scoremapimg, self.picture_size, interpolation=cv2.INTER_NEAREST)
		# im_new_scoremapimg = np.where(scoremapimg == 1, 255, scoremapimg)
		# cv2.imwrite(self.filename + '-scoremap.png', im_new_scoremapimg)
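A possible end-to-end call for BaiduApiUtil, assuming fu.readConfig can load appid / api_key / secret_key from the named section; the config path, section name and image path are illustrative only:

util = BaiduApiUtil('keys.ini', 'baidu-aip')
util.upload('person.jpg')
print(util.getBodyAnalysis()['person_num'])
util.getBodySeg()  # writes person-labelmap.png next to the input image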