Example No. 1
def init():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    os.system("sudo alsactl --file data/conf/asound.state restore")  # 加载音量设置
    os.system("sudo amixer set Capture 70%")  # 设置话筒
    os.system("alsactl --file data/conf/asound.state restore")  # 加载音量设置
    os.system("amixer set Capture 70%")  # 设置话筒

    # Set directory permissions, especially for the pi user
    os.system("sudo chmod -R 0777 data")
    os.system("sudo chmod -R 0777 runtime")
    os.system("sudo chmod -R 0777 /music/")

    bak = "data/conf/configBAK.yaml"
    cfg = "config.yaml"
    if not os.path.exists(cfg) or os.path.getsize(cfg) < 10:
        os.system('sudo cp -f %s %s ' % (bak, cfg))
        os.system('sudo chown pi.pi %s' % cfg)
        os.system('sudo chmod 0666 %s' % cfg)
        print("修复配置文件 %s ,以前配置丢失。 " % cfg)

    # If the screen is configured not to start, do not start the front-end app either
    from package.mylib import mylib
    mojing = mylib.getConfig()["LoadModular"]["Screen"]
    if mojing is False:
        global tasks
        tasks = tasks[1:]
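
As a side note, the configuration-repair step above can also be done without shelling out. The following is a minimal sketch, not project code, using shutil and os.chmod under the assumption that the process already has write access (the sudo chown step is omitted).

import os
import shutil

def restore_config(bak="data/conf/configBAK.yaml", cfg="config.yaml"):
    # Copy the backup over the config when it is missing or suspiciously small,
    # mirroring the os.system() calls in init() above.
    if not os.path.exists(cfg) or os.path.getsize(cfg) < 10:
        shutil.copyfile(bak, cfg)
        os.chmod(cfg, 0o666)
        print("Restored configuration file %s; the previous configuration was lost." % cfg)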
Example No. 2
    def main(self):
        # Fetch the configuration
        if self.query['op'] == 'getconfig':
            config_set = os.path.abspath('./data/conf/config_set.yaml')
            conf_set = mylib.yamlLoad(config_set)
            ret_arr = {
                'code': 20000,
                'message': 'Configuration data fetched successfully',
                'data': {
                    'config': self.config,
                    'setconfig': conf_set
                }
            }
            return json.dumps(ret_arr)

        elif self.query['op'] == 'setconfig':
            data = self.query['data']
            data = json.loads(data)

            conf_set = mylib.getConfig()
            conf_set.update(data)
            mylib.saveConfig(conf_set)
            ret_arr = {
                'code': 20000,
                'message': 'Configuration saved; restart for the changes to take effect!',
                'data': {
                    'error': '0000'
                }
            }

            return json.dumps(ret_arr)
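
For orientation, here is a hedged sketch of the query dict this handler appears to expect; the key names are taken from the method body above, while the nested payload values are placeholders borrowed from Example No. 1.

import json

# 'setconfig': 'data' carries a JSON-encoded dict that gets merged into the saved config.
query_set = {
    'op': 'setconfig',
    'data': json.dumps({'LoadModular': {'Screen': False}})  # placeholder payload
}

# 'getconfig': no payload; the handler returns the current config plus config_set.yaml.
query_get = {'op': 'getconfig'}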
Example No. 3
    def __init__(self):
        self.faceconfig = mylib.getConfig()['ApiConfig']['FaceRecognition']
        if self.faceconfig == 'Baidu':
            from module.ImageRecognition.faceBaidu import faceBaidu
            face = faceBaidu()
            # Expose the Baidu implementation's methods on this wrapper
            self.IsFace = face.IsFace
            self.IsSameFace = face.IsSameFace
Example No. 4
def IsFace(imgfile):
    """ 人脸识别,文件imgfile是包含人脸图片吗? 是返回True 其它返回Flase 默认分值大于80才会返回True"""
    with open(imgfile, 'rb') as f:
        faceimg = base64.b64encode(f.read()).decode("utf-8")
        BDAip = mylib.getConfig()['BDAip']
        BDFace = face.AipFace(BDAip['APP_ID'], BDAip['API_KEY'], BDAip['SECRET_KEY'])
        result = BDFace.detect(faceimg, 'BASE64')
        if result and result['error_msg'] == 'SUCCESS':
            return result['result']['face_list'][0]['face_probability'] >= 0.8
    return False
Example No. 5
def IsSameFace(img1, img2):
    """ 人脸对比 文件img1,文件img2 是同一个人吗? 是返回True 其它返回 Flase """
    BDAip = mylib.getConfig()['BDAip']
    BDFace = face.AipFace(BDAip['APP_ID'], BDAip['API_KEY'], BDAip['SECRET_KEY'])
    with open(img1, 'rb') as f1, open(img2, 'rb') as f2:
        img1 = base64.b64encode(f1.read()).decode('utf-8')
        img2 = base64.b64encode(f2.read()).decode('utf-8')
    result = BDFace.match([
        {'image': img1, 'image_type': 'BASE64'},
        {'image': img2, 'image_type': 'BASE64'}])
    if isinstance(result, dict) and 'result' in result:
        return result['result']['score'] >= 80
    return False
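
A minimal usage sketch combining the two helpers above into a verification flow; the image paths are placeholders, and the import follows the module layout shown in Example No. 3.

from module.ImageRecognition.faceBaidu import faceBaidu  # import path as used in Example No. 3

def verify_user(capture_path, registered_path):
    # Sketch only: reject the capture if Baidu sees no face in it,
    # then compare it against the registered photo (match score threshold 80).
    face = faceBaidu()
    if not face.IsFace(capture_path):
        return False
    return face.IsSameFace(capture_path, registered_path)

# verify_user('data/tmp/capture.jpg', 'data/faces/alice.jpg')  # placeholder paths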
Example No. 6
    def showBindNav(self):
        ''' Show the QR code for binding a user '''
        if self.netStatus and self.showBind:
            self.config = mylib.getConfig()
            if self.u_list is False:
                clientid = self.config['httpapi'] + '/xiaocx/dev/' + self.config['MQTT']['clientid']
                nav_json = {
                    "event": "open",
                    "size": {
                        "width": 380,
                        "height": 380
                    },
                    "url": "desktop/Public/bind_user.html?qr=" + clientid
                }
                data = {'type': 'nav', 'data': nav_json}
                self.send(MsgType.Text, Receiver='Screen', Data=data)
                self.showBind = False
Example No. 7
    def BDVoicerecognition(self, data):
        BDAip = mylib.getConfig()['BDAip']
        APP_ID = BDAip['APP_ID']
        API_KEY = BDAip['API_KEY']
        SECRET_KEY = BDAip['SECRET_KEY']
        client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
        # client.setConnectionTimeoutInMillis = 5000  # connection timeout in milliseconds
        # client.setSocketTimeoutInMillis = 5000  # socket (data transfer) timeout in milliseconds

        logging.info('Recognizing speech...')
        try:
            bdResult = client.asr(speech=data, options={'dev_pid': 1536, 'cuid': VoiceRecognition.CUID})
        except Exception as e:
            logging.error('Network failure! %s' % e)
            return None  # bdResult is undefined here, so bail out instead of raising NameError below
        logging.debug('Speech recognition returned')
        text = ''

        if bdResult['err_msg'] == 'success.':  # recognition succeeded
            for t in bdResult['result']:
                text += t
            logging.info(text)
            return text

        elif bdResult['err_no'] == 3301:  # audio quality too poor
            text = 'I could not hear what you said clearly'
            logging.info(text)
            return

        elif bdResult['err_no'] == 3302:  # authentication failed
            text = 'Authentication failed; please contact the developers.'
            logging.warning(text)
            return

        elif bdResult['err_no'] in (3304, 3305):  # request quota exceeded
            text = 'Request quota exceeded; please contact the developers.'
            logging.warning(text)
            return

        text = 'Speech recognition error, code {}'.format(bdResult['err_no'])
        logging.error(text)
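
For context, a hedged sketch of how raw audio might be fed into this method; 'record.pcm' is a placeholder path, and the recognizer argument stands in for an instance of the class that owns BDVoicerecognition (dev_pid 1536 selects Baidu's Mandarin model).

def recognize_file(recognizer, pcm_path='record.pcm'):
    # Sketch only: Baidu ASR expects 16 kHz, 16-bit mono PCM by default.
    with open(pcm_path, 'rb') as f:
        audio = f.read()
    text = recognizer.BDVoicerecognition(audio)
    if text:
        print('Recognized:', text)
    return text  # None means failure or a non-recoverable error code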
Example No. 8
    def FromCaptureGetFaceImg(self, picfile, showFocus=False, timeOut=10):
        """ 打开摄像头,拍摄一张人脸照片并保存到picfile 
        timeOut超时失败退出时间。默认10秒
        参数showFocus显示聚焦框,并且只从聚焦框内取图
        仅在注册时才需要置为True
        返回值: 如果拍到人脸照片 并通过百度打分80以上则返回True 超时或失败返回False
        """
        CAMERA = mylib.getConfig()['CAMERA']
        if not CAMERA['enable']:
            logging.warning("The camera is disabled in the configuration")
            return False

        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            logging.error('No camera available')
            return False

        lastTime = time.time()
        cv2.namedWindow(' ', cv2.WINDOW_AUTOSIZE)
        # cv2.namedWindow(' ', cv2.WINDOW_NORMAL)
        # cv2.setWindowProperty(' ', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # full screen

        ret, frame = cap.read()
        x, y = self.centerWindowPos(frame.shape[1], frame.shape[0])
        cv2.moveWindow(' ', x, y)  # center the window on screen
        classfier = cv2.CascadeClassifier(
            "data/opencv/haarcascade_frontalface_default.xml")
        if classfier.empty():
            logging.error('Failed to load the face cascade classifier')
            return False
        startTime = time.time()
        while time.time() - startTime <= timeOut:
            ret, frame = cap.read()
            flip = int(CAMERA['flip'])
            if flip >= 0:
                cv2.flip(frame, flip, frame)
            bakup = frame.copy()
            if showFocus:  # draw a 320x320 focus frame and only grab the image inside it
                Xstart = int((640 - 320) / 2)
                Ystart = int((480 - 320) / 2)
                Xend = Xstart + 320
                Yend = Ystart + 320
                (xsFocus, ysFocus) = (Xstart, Ystart)
                cv2.rectangle(frame, (Xstart, Ystart), (Xend, Yend),
                              (0, 255, 0), 2)
                grayImg = frame[Ystart:Yend, Xstart:Xend]
                grayImg = cv2.cvtColor(grayImg, cv2.COLOR_BGR2GRAY)
            else:
                grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faceRects = classfier.detectMultiScale(grayImg,
                                                   scaleFactor=1.20,
                                                   minNeighbors=5,
                                                   minSize=(96, 96),
                                                   maxSize=(256, 256))
            for faceRect in faceRects:
                x, y, w, h = faceRect
                if showFocus:
                    x += xsFocus
                    y += ysFocus
                Xstart, Ystart = (x - 30, y - 30)
                Xend, Yend = (x + w + 30, y + h + 30)
                cv2.rectangle(frame, (Xstart, Ystart), (Xend, Yend),
                              (0, 0, 255), 1)
                faceOnly = bakup[Ystart:Yend, Xstart:Xend]
                # cv2.imshow('face only', faceOnly)  # show the cropped face
                cv2.imwrite(picfile, faceOnly)
                if self.IsFace(picfile):
                    cap.release()
                    cv2.destroyAllWindows()
                    return True
            fps = 'FPS:{}'.format(round(1 / (time.time() - lastTime)))
            lastTime = time.time()
            cv2.putText(frame, fps, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 255), 2)
            cv2.imshow(' ', frame)  # show each frame
            key = cv2.waitKey(1)
            if key == 27:  # press ESC to quit
                break
        cap.release()
        cv2.destroyAllWindows()
        return False
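
A short, hedged example of how this capture routine might be driven during registration; the camera argument stands in for an instance of the class that defines FromCaptureGetFaceImg, and the output path is a placeholder.

def register_face(camera, out_path='data/tmp/newface.jpg'):
    # Sketch only: show the focus frame during registration and allow a little extra time.
    if camera.FromCaptureGetFaceImg(out_path, showFocus=True, timeOut=15):
        print('Face image saved to %s and confirmed by the Baidu check' % out_path)
        return out_path
    return None  # timed out, camera disabled, or no confident face found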
Example No. 9
# -*- coding: UTF-8 -*-
import json

from package.model import model
from package.mylib import mylib

config = mylib.getConfig()


class data():
    """数据库接口"""
    def __init__(self):
        database = r'data/config.db'
        self.db = model(database)

    # Close the database
    def close(self):
        self.db.close()

    def search_list(self, lists, key):
        for item in lists:
            if item['id'] == key:
                return item
        return False

    # Get the user list data
    def user_list_get(self, field=True):
        res = self.db.table('user_list').field(field).sel()
        return res

    # Get a single user's record