Example #1
    def callback(ch, method, properties, body):
        user_id = start_time = None  # keep defined for the error log below
        try:
            print(" [x] %r:%r" % (method.routing_key, body))
            # message body is b"<user_id>*<start_time>"
            user_id, start_time = body.decode().split('*')
            end_time = (
                datetime.datetime.strptime(start_time, "%Y-%m-%d_%H:%M") +
                datetime.timedelta(seconds=forward_secs)
            ).strftime('%Y-%m-%d_%H:%M')
            fixed_score, score, score_list = predict_attention(
                user_id, start_time, end_time)
            logging(
                f"{start_time}~{end_time}:\nmean - {fixed_score}\norigin_mean - {score}\nopenface_gru - {score_list[0]}\nopenface_lstm - {score_list[1]}\nopenpose_gru - {score_list[2]}\nopenpose_lstm - {score_list[3]}",
                f"logs/{user_id}/{start_time}_{end_time}")

            if score > 0:
                # TODO: the first list element is assumed to identify the target
                # user for the 'server_presult' relay
                sio.emit('server_presult',
                         [user_id, f"{start_time} ~ {end_time}*{format(fixed_score, '.4f')}"],
                         namespace='/record_video')
                # redis_db1.set(user_id, f"{start_time} ~ {end_time}*{format(fixed_score, '.4f')}")

        except Exception:
            logging(
                f"[run_ai_factory.py][callback|id:{user_id}|stime:{start_time}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                f"{traceback.format_exc()}", f"logs/error.log")
Example #2
    async def on_connect(self, sid, environ):
        user_id = None  # keep defined for the error log below
        try:
            dict_qs = parse.parse_qs(environ['QUERY_STRING'])
            if 'uid' in dict_qs:
                user_id = dict_qs['uid'][0]

                if user_id in users_db:
                    await sio.save_session(sid, {'user_id': user_id}, self.namespace)

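                    # Redis maps user_id -> active recording sid; refuse a second
                    # concurrent recording for the same user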
                    rc = redis.Redis(connection_pool=redis_pool)
                    if rc.get(user_id) is not None:
                        await self.emit('warning', {"msg": "already have one recording", "gohome": False}, room=sid,
                                        namespace=self.namespace)
                    else:
                        rc.set(user_id, sid)
                        await self.emit('connect_succeed', None, room=sid,
                                        namespace=self.namespace)
                elif user_id == '_pserver':
                    # '_pserver' is the reserved id used by the prediction-result relay client
                    await sio.save_session(sid, {'user_id': user_id}, self.namespace)
                else:
                    await sio.disconnect(sid)

        except Exception as e:
            logging(f"[websocket|connect][user_id{user_id}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                    f"{traceback.format_exc()}",
                    f"logs/error.log")
Example #3
    async def on_recv(self, sid, data):
        """Listen for incoming messages and use socketio to send messages to all clients."""
        chunk_num = config["h5record_video"].getint("chunk_num")
        session = await sio.get_session(sid, self.namespace)
        rc = redis.Redis(connection_pool=redis_pool)
        user_id = session.get('user_id')

        print(f"{user_id} recv data length: {len(data[2])}")
        if data is not None:
            try:
                if user_id not in rc.keys():
                    return

                start_record_time = data[0]
                cur_count = int(data[1])
                video_data = data[2]

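                # persist this chunk as <input_dir>/<sid>.<start_record_time>_<NNN>.<user_id>,
                # where NNN is the zero-padded chunk index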
                with open(os.path.join(config['video_format'].get('input_dir'),
                                       f"{sid}.{start_record_time}_%03d.{user_id}") % cur_count, 'wb') as f:
                    f.write(video_data)

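                # final chunk received: publish "<user_id>*<start_record_time>" so the
                # prediction worker can start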
                if cur_count == chunk_num:
                    pub("prediction", 'topic', 'predict.start', f"{user_id}*{start_record_time}")
                    # redis_db0.set(f"{user_id}*{start_record_time}",
                    #               "{session.get('fps')}")

            except Exception as e:
                logging(f"[websocket|recv][user_id|{user_id}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                        f"{traceback.format_exc()}",
                        f"logs/error.log")
Example #4
    async def post(self, *args, **kwargs):
        seller_id = self.current_user  # assumed: the seller is the authenticated user
        house_type = self.get_argument('type', None)
        area = self.get_argument('area', None)
        price = self.get_argument('price', None)
        street = self.get_argument('street', None)
        county_id = self.get_argument('county_id', None)
        city_id = self.get_argument('city_id', None)
        floors = self.get_argument('floors', None)
        layout_id = self.get_argument('layout_id', None)
        nearby_station_id = self.get_argument('nearby_station_id', None)

        try:
            # column list assumed to match the request arguments gathered above
            await self.execute(
                "INSERT INTO house (`seller_id`,`type`,`area`,`price`,`street`,"
                "`county_id`,`city_id`,`floors`,`layout_id`,`nearby_station_id`) "
                "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                seller_id, house_type, area, price, street,
                county_id, city_id, floors, layout_id, nearby_station_id,
            )
        except Exception:
            logging(
                f"[HouseRegisterHandler][{datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:{traceback.format_exc()}",
                f"logs/error/{datetime.now().strftime('%Y-%m-%d')}")
        self.write('ok')

        return await self.finish()
Example #5
    async def post(self, *args, **kwargs):
        mode = self.get_argument('mode', None)
        user_id = self.get_argument('user_id', None)

        if mode == 'check':
            row = await self.queryone(
                "SELECT COUNT(id) FROM seller where id=%s", user_id)
            self.write({'id_exist': row['COUNT(id)']})

        else:
            pwd = self.get_argument('user_password', None)
            company_name = self.get_argument('company_name', None)
            tel1 = self.get_argument('tel1', None)

            try:
                # note: company_name and tel1 are collected above but not persisted here
                await self.execute(
                    "INSERT INTO seller (id,pwd) "
                    "VALUES (%s,%s)",
                    user_id,
                    pwd,
                )
            except Exception:
                logging(
                    f"[RegisterHandler][{datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:{traceback.format_exc()}",
                    f"logs/error/{datetime.now().strftime('%Y-%m-%d')}")
            self.write('ok')

        return await self.finish()
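Example #6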
def open_pose_task(output_6pfs_fp, user_id, duration, start_time):
    # openpose feature extraction
    common_util.mkdir(os.path.join(pose_feature_dirpath, user_id))
    openpose_6fps_output_fp = os.path.join(pose_feature_dirpath, user_id,
                                           duration + '.csv')
    try:
        pose_feature_extraction_videos(output_6pfs_fp, openpose_6fps_output_fp)
    except Exception as e:
        logging(
            f"[process_procedure_all.py][pose_feature_extraction_videos|id:{user_id}|stime:{start_time}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
            f"{traceback.format_exc()}",
            f"logs/error.log")
Example #7
    async def on_server_presult(self, sid, res):
        user_id = None  # keep defined for the error log below
        try:
            session = await sio.get_session(sid, self.namespace)
            user_id = session['user_id']

            rc = redis.Redis(connection_pool=redis_pool)
            # `res` is assumed to be the [target_user_id, "time_range*score"] pair
            # emitted by the prediction worker
            target_user_id, payload = res
            record_time, score = payload.split('*')
            if user_id == '_pserver':
                # deliver the result to the target user's active sid stored in Redis
                await self.emit('show_min_res', {"record_time": record_time, "score": score},
                                room=rc.get(target_user_id),
                                namespace=self.namespace)

        except Exception as e:
            logging(
                f"[websocket|on_server_presult][user_id{user_id}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                f"{traceback.format_exc()}",
                f"logs/error.log")
Example #8
    async def on_disconnect(self, sid):
        user_id = None  # keep defined for the error log below
        try:
            session = await sio.get_session(sid, self.namespace)
            user_id = session['user_id']
            rc = redis.Redis(connection_pool=redis_pool)

            # clear the user's Redis entry only if this sid is the active recording
            # (assumes the pool uses decode_responses=True so the stored sid is a str)
            online_sid = rc.get(user_id)
            if online_sid is not None and online_sid == sid:
                rc.delete(user_id)
            await sio.disconnect(sid)

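            # remove any video chunks left behind by this connection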
            for f in glob.glob(os.path.join(config['video_format'].get('input_dir'), f"{sid}.*")):
                os.remove(f)

        except Exception as e:
            logging(
                f"[websocket|disconnect][user_id{user_id}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                f"{traceback.format_exc()}",
                f"logs/error.log")
Example #9
    async def on_get_hour_score(self, sid, last_hour_time):
        """
        The client requests the mean score for the previous hour.
        :param last_hour_time: time string for the previous hour
        :return:
        """
        user_id = None  # keep defined for the error log below
        try:
            session = await sio.get_session(sid, self.namespace)
            user_id = session['user_id']

            # a 'force' marker in the request bypasses the hour-rollover check below
            force_flag = 'force' in last_hour_time
            if force_flag:
                last_hour_time = last_hour_time.replace('force', '')
            last_hour = last_hour_time[-2:]
            # per-minute results are written to logs/<user_id>/ by the prediction worker
            results = sorted(glob.glob(f"logs/{user_id}/{last_hour_time}:*"))
            means = []

            # compute the hourly mean once the hour has rolled over (or when forced)
            if force_flag or (results and results[-1][-5:-3] != last_hour):
                cur_hour = (datetime.datetime.strptime(last_hour_time, "%Y-%m-%d_%H") + datetime.timedelta(
                    hours=1)).strftime('%Y-%m-%d_%H')
                for i in results:
                    with open(i, 'r') as f:
                        lines = f.readlines()
                        mean_val = float(lines[1].replace('mean - ', '').replace('\n', ''))
                        if mean_val != 0.0:
                            means.append(mean_val)
                if len(means) > 0:
                    await self.emit('show_hour_res', {"record_time": f"{last_hour_time}:00 ~ {cur_hour}:00",
                                                      "score": f"{format(np.array(means).mean(), '.4f')}"}, room=sid)
                else:
                    await self.emit('show_hour_res',
                                    {"record_time": f"{last_hour_time}:00 ~ {cur_hour}:00", "score": f"0"}, room=sid)
            else:
                await self.emit('client_get_hour_score', None, room=sid)

        except Exception as e:
            logging(
                f"[websocket|on_server_presult][user_id{user_id}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
                f"{traceback.format_exc()}",
                f"logs/error.log")
def main(openface_engineer_output_fp, openpose_engineer_output_fp,
         no_openpose_feature):
    video_name = os.path.join(
        os.path.basename(os.path.dirname(openface_engineer_output_fp)),
        os.path.basename(openface_engineer_output_fp))
    print(f"start predict attetion:{video_name}")

    try:

        set_sess_cfg()
        # parser config
        config_file = "config.ini"
        cp = ConfigParser()
        cp.read(config_file)
        batch_size = cp["DEFAULT"].getint("batch_size")
        # parse weights file path
        output_weights_name = cp["DEFAULT"].get("output_weights_name")
        time_steps = cp["DEFAULT"].getint("time_steps")
        bidirect = cp["DEFAULT"].getboolean("bidirect")
        input_dim_openface = cp["DEFAULT"].getint("input_dim_openface")
        input_dim_openpose = cp["DEFAULT"].getint("input_dim_openpose")

        # pretrained model directories, keyed by (feature, model type); each directory
        # name encodes batch size, seed, units and layer count
        model_dirs_map = {
            ('openface', 'lstm'): ['bs16_seed2040_unitis16_layer3'],
            ('openface', 'gru'): ['bs16_seed2040_unitis128_layer1'],
            ('openpose', 'lstm'): ['bs16_seed2040_unitis32_layer2'],
            ('openpose', 'gru'): ['bs16_seed2040_unitis512_layer1'],
        }
        input_dims = {'openface': input_dim_openface, 'openpose': input_dim_openpose}

        prob_array_list = []
        for featrue_name in ['openface', 'openpose']:
            if no_openpose_feature and featrue_name == 'openpose':
                continue
            for modle_mode in ['gru', 'lstm']:
                print("** load model **")
                model_dirs = model_dirs_map[(featrue_name, modle_mode)]
                for model_dir in model_dirs:
                    weights_path = os.path.join('pretrained_models',
                                                featrue_name, modle_mode,
                                                model_dir, output_weights_name)
                    params = model_dir.split('_')
                    str_layers = params[-1]
                    str_units = params[-2]
                    num_layers = int(str_layers.split('layer')[1])
                    num_units = int(str_units.split('unitis')[1])
                    units_layers = [num_units] * num_layers
                    # model_factory = ModelFactory()
                    model_factory = ModelFactory(predict=True)
                    model_fun = getattr(model_factory,
                                        f'get_model_{modle_mode}')
                    model = model_fun(
                        TIME_STEPS=time_steps,
                        INPUT_DIM=input_dims[featrue_name],
                        weights_path=weights_path,
                        CuDNN=False,
                        bidirect=bidirect,
                        units=units_layers,
                    )
                    print("** load test generator **")
                    test_sequence = FeatruesSequence(
                        features_name=featrue_name,
                        features_subdir=video_name,
                        batch_size=batch_size)

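                    # use multi_gpu_model when more than one device is listed in
                    # CUDA_VISIBLE_DEVICES, otherwise predict on the single model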
                    gpus = len(
                        os.getenv("CUDA_VISIBLE_DEVICES", "0").split(","))
                    if gpus > 1:
                        print(f"** multi_gpu_model is used! gpus={gpus} **")
                        model_predict = multi_gpu_model(model, gpus)
                    else:
                        model_predict = model

                    print("** make prediction **")
                    prob_array = model_predict.predict_generator(
                        test_sequence,
                        max_queue_size=8,
                        workers=4,
                        use_multiprocessing=False,
                        verbose=1)
                    prob_array = np.squeeze(np.clip(prob_array, 0, 1))
                    prob_array_list.append(prob_array)
                    # df_results = pd.DataFrame()
                    # df_results['files'] = sorted(os.listdir(os.path.join('data/features/engineered', featrue_name, video_name)))
                    # df_results['probs'] = prob_array
                    # save_dir = os.path.join('results', video_name)
                    # if not os.path.exists(save_dir):
                    #     os.makedirs(save_dir, exist_ok=True)
                    # df_results.to_csv(os.path.join(save_dir, f'{featrue_name}_{modle_mode}.csv'), index=False)

        # prob_array_mean = np.mean(np.array(prob_array_list), axis=0)
        prob_array_mean = np.mean(prob_array_list, axis=0)
        # df_results_mean = pd.DataFrame()
        # df_results_mean['files'] = df_results['files']
        # df_results_mean['probs'] = prob_array_mean
        # df_results_mean.to_csv(os.path.join('results', video_name, 'results_mean.csv'), index=False)
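        # linearly stretch scores in the (0.2, 0.8) band to the full 0-1 range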
        fixed_prob_mean = prob_array_mean
        if 0.8 > prob_array_mean > 0.2:
            fixed_prob_mean = (prob_array_mean - 0.2) / 0.6

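        # pad with zeros so callers can always index the two openpose entries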
        if no_openpose_feature:
            prob_array_list.extend([0, 0])

        return (fixed_prob_mean, prob_array_mean, prob_array_list)
    except Exception as e:
        logging(
            f"[prediction_ensemble.py][main|:{video_name}|:{openface_engineer_output_fp}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
            f"{traceback.format_exc()}", f"logs/error.log")
def predict_attention(user_id, start_time, end_time):
    no_openpose_flag = None
    chunks = sorted(glob.glob(f"{config['video_format'].get('input_dir')}/*{start_time}*.{user_id}"))
    duration = f"{start_time}_{end_time}"

    # convert to 6fps
    try:
        common_util.mkdir(os.path.join(video_6fps_dirpath, user_id))
        output_6pfs_fp = os.path.join(video_6fps_dirpath, user_id, f"{duration}.mp4")
        cv_set_video_fps_res(chunks, output_6pfs_fp)
    except Exception as e:
        logging(
            f"[process_procedure_all.py][cv_set_video_fps_res|id:{user_id}|stime:{start_time}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
            f"{traceback.format_exc()}",
            f"logs/error.log")

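    # the raw uploaded chunks are no longer needed once the 6 fps video exists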
    for chunk in chunks:
        os.remove(chunk)

    # openpose task
    openpose_6fps_output_fp = os.path.join(pose_feature_dirpath, user_id,
                                           duration + '.csv')
    openpose_p = multiprocessing.Process(target=open_pose_task, args=(output_6pfs_fp, user_id, duration, start_time))
    openpose_p.start()

    # openface feature extraction
    common_util.mkdir(os.path.join(face_feature_dirpath, user_id))
    openface_6fps_output_fp = os.path.join(face_feature_dirpath, user_id,
                                           duration + '.csv')
    try:
        face_feature_extraction_videos(output_6pfs_fp, openface_6fps_output_fp)
    except Exception as e:
        logging(
            f"[process_procedure_all.py][face_feature_extraction_videos|id:{user_id}|stime:{start_time}][{datetime.datetime.now().strftime('%Y-%m-%d_%I:%M:%S')}]:"
            f"{traceback.format_exc()}",
            f"logs/error.log")

    if min_input_size_on:
        # skip prediction when OpenFace produced no csv or too few successfully tracked frames
        if not os.path.exists(openface_6fps_output_fp):
            return 0, 0, (0, 0, 0, 0)
        df_6fps_face_features = pd.read_csv(openface_6fps_output_fp)
        if df_6fps_face_features[df_6fps_face_features[' success'] == 1].shape[0] < face_min_input_size:
            return 0, 0, (0, 0, 0, 0)

    split_csv(openface_6fps_output_fp, split_step, start_cut, end_cut)

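    # wait for the OpenPose worker before checking whether its features are usable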
    openpose_p.join()

    if min_input_size_on:
        no_openpose_flag = (not os.path.exists(openpose_6fps_output_fp)
                            or pd.read_csv(openpose_6fps_output_fp).shape[0] < pose_min_input_size)
    else:
        no_openpose_flag = (not os.path.exists(openpose_6fps_output_fp)
                            or pd.read_csv(openpose_6fps_output_fp).shape[0] == 0)

    if not no_openpose_flag:
        split_csv(openpose_6fps_output_fp, split_step, start_cut, end_cut)

    # openface feature engineer
    common_util.mkdir(os.path.join(face_engineer_dirpath, user_id))
    openface_engineer_output_fp = os.path.join(face_engineer_dirpath, user_id,
                                               duration + '.csv')
    face_feature_engineer(openface_6fps_output_fp, openface_engineer_output_fp)

    # openpose feature engineer
    common_util.mkdir(os.path.join(pose_engineer_dirpath, user_id))
    openpose_engineer_output_fp = os.path.join(pose_engineer_dirpath, user_id,
                                               duration + '.csv')
    if not no_openpose_flag:
        pose_feature_engineer(openpose_6fps_output_fp, openpose_engineer_output_fp)

    # predict attention
    return prediction_ensemble.main(openface_engineer_output_fp, openpose_engineer_output_fp, no_openpose_flag)