def error_result_wrapper(frame_id, status, filter_passed):
    result_wrapper = gabriel_pb2.ResultWrapper()
    result_wrapper.frame_id = frame_id
    result_wrapper.status = status
    result_wrapper.filter_passed = filter_passed

    return result_wrapper
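
A minimal usage sketch for the helper above; the frame id is illustrative, and the WRONG_INPUT_FORMAT status value is an assumption (only SUCCESS appears elsewhere in these examples):

# Hypothetical call site: flag frame 42 as carrying an unusable payload.
wrapper = error_result_wrapper(
    frame_id=42,
    status=gabriel_pb2.ResultWrapper.Status.WRONG_INPUT_FORMAT,
    filter_passed=True)
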
Example #2
    def _wrap_LEGO_state(frame_id: int,
                         status: gabriel_pb2.ResultWrapper.Status,
                         lego_state: instruction_proto.LEGOState,
                         update_cnt: int,
                         img_guidance: Optional[np.ndarray] = None,
                         txt_guidance: Optional[str] = None) \
            -> gabriel_pb2.ResultWrapper:

        result = gabriel_pb2.ResultWrapper()
        result.frame_id = frame_id
        result.status = status

        engine_fields = instruction_proto.EngineFields()
        engine_fields.update_count = update_cnt
        engine_fields.lego.CopyFrom(lego_state)

        result.engine_fields.Pack(engine_fields)

        if img_guidance is not None:
            img_result = gabriel_pb2.ResultWrapper.Result()
            img_result.payload_type = gabriel_pb2.PayloadType.IMAGE
            _, img = cv2.imencode('.jpg', img_guidance)
            img_result.payload = img.tobytes()
            result.results.append(img_result)

        if txt_guidance is not None:
            txt_result = gabriel_pb2.ResultWrapper.Result()
            txt_result.payload_type = gabriel_pb2.PayloadType.TEXT
            txt_result.payload = txt_guidance.encode('utf-8')
            result.results.append(txt_result)

        return result
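
On the consumer side, the engine_fields Any field packed above can be restored with the standard protobuf Unpack call. A sketch, assuming `result` is a wrapper returned by _wrap_LEGO_state and the same instruction_proto module is available:

# Hypothetical consumer of the wrapper built above.
engine_fields = instruction_proto.EngineFields()
result.engine_fields.Unpack(engine_fields)
lego_state = engine_fields.lego          # the LEGOState copied in above
update_cnt = engine_fields.update_count
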
Example #3

def gen_text_result(text):
    result_wrapper = gabriel_pb2.ResultWrapper()

    result = gabriel_pb2.ResultWrapper.Result()
    result.payload_type = gabriel_pb2.PayloadType.TEXT
    result.engine_name = ActivityRecognitionEngine.ENGINE_NAME
    result.payload = text.encode(encoding="utf-8")

    result_wrapper.results.append(result)
    return result_wrapper
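
A quick round-trip check of the wrapper built above; the guidance string is purely illustrative:

wrapper = gen_text_result("Raise your left hand.")
assert wrapper.results[0].payload_type == gabriel_pb2.PayloadType.TEXT
print(wrapper.results[0].payload.decode("utf-8"))  # "Raise your left hand."
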
Example #4
    async def _engine_comm(self):
        await self.wait_for_start()
        while self.is_running():
            # Forward the next client input to the engine process.
            from_client, address = await self._input_queue.get()
            self._conn.send_bytes(from_client.input_frame.SerializeToString())
            result_wrapper = gabriel_pb2.ResultWrapper()

            # Block until the engine signals that its result is ready.
            await self._result_ready.wait()

            result_wrapper.ParseFromString(self._conn.recv_bytes())
            await self.send_result_wrapper(
                address, from_client.source_name, from_client.frame_id,
                result_wrapper, return_token=True)

            self._result_ready.clear()
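
The loop above parks on self._result_ready (evidently an asyncio.Event) until some other task signals that the engine's reply is on the pipe. A self-contained sketch of that wait/set/clear handshake, independent of the surrounding class:

import asyncio

async def consumer(ready: asyncio.Event):
    await ready.wait()        # parked, like _engine_comm, until a result is signalled
    print("result available")
    ready.clear()             # re-arm for the next frame

async def producer(ready: asyncio.Event):
    await asyncio.sleep(0.1)  # stand-in for the engine producing a result
    ready.set()

async def main():
    ready = asyncio.Event()
    await asyncio.gather(consumer(ready), producer(ready))

asyncio.run(main())
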
Example #5
    def _serialize_to_pb(self, headers, instruction, engine_fields):
        engine_fields.update_count += 1
        if 'clear_color' in headers:
            engine_fields.ribloc.clear_color = headers['clear_color']
        result_wrapper = gabriel_pb2.ResultWrapper()
        result_wrapper.engine_fields.Pack(engine_fields)

        if 'image' in instruction and instruction['image'] is not None:
            result = gabriel_pb2.ResultWrapper.Result()
            result.payload_type = gabriel_pb2.PayloadType.IMAGE
            result.engine_name = config.ENGINE_NAME
            result.payload = util.cv_image2raw(instruction['image'])
            result_wrapper.results.append(result)

        if 'speech' in instruction and instruction['speech'] is not None:
            result = gabriel_pb2.ResultWrapper.Result()
            result.payload_type = gabriel_pb2.PayloadType.TEXT
            result.engine_name = config.ENGINE_NAME
            result.payload = instruction['speech'].encode(encoding="utf-8")
            result_wrapper.results.append(result)

        result_wrapper.status = gabriel_pb2.ResultWrapper.Status.SUCCESS
        return result_wrapper
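
A hypothetical call from within the same engine class, assuming engine_fields is an already-populated instruction protobuf; the dict contents are illustrative only:

# Speech-only guidance, no image payload, no header overrides.
instruction = {'image': None, 'speech': 'Align the plate with the guide'}
wrapper = self._serialize_to_pb({}, instruction, engine_fields)
assert wrapper.status == gabriel_pb2.ResultWrapper.Status.SUCCESS
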
Example #6
def _result_without_update(engine_fields):
    result_wrapper = gabriel_pb2.ResultWrapper()
    result_wrapper.engine_fields.Pack(engine_fields)
    return result_wrapper
Example #7
def create_result_wrapper(status):
    result_wrapper = gabriel_pb2.ResultWrapper()
    result_wrapper.status = status
    return result_wrapper
    def handle(self, from_client):
        if from_client.payload_type != gabriel_pb2.PayloadType.IMAGE:
            return cognitive_engine.wrong_input_format_error(
                from_client.frame_id)

        engine_fields = cognitive_engine.unpack_engine_fields(
            instruction_pb2.EngineFields, from_client)

        result_wrapper = gabriel_pb2.ResultWrapper()
        result_wrapper.frame_id = from_client.frame_id
        result_wrapper.status = gabriel_pb2.ResultWrapper.Status.SUCCESS

        # cv2.imdecode expects unsigned image bytes, so use uint8 here
        img_array = np.asarray(bytearray(from_client.payload), dtype=np.uint8)
        img = cv2.imdecode(img_array, -1)

        if max(img.shape) != IMAGE_MAX_WH:
            resize_ratio = float(IMAGE_MAX_WH) / max(img.shape[0],
                                                     img.shape[1])
            img = cv2.resize(img, (0, 0),
                             fx=resize_ratio,
                             fy=resize_ratio,
                             interpolation=cv2.INTER_AREA)

        frame_time = current_milli_time()
        self.state['is_playing'] = self.ball_trace.is_playing(
            frame_time) and self.seen_opponent

        ## check if two frames are too close
        if (self.prev_frame_info is not None
                and frame_time - self.prev_frame_info['time'] < 80):
            logger.info("two frames too close!")
            return complete_result_wrapper(result_wrapper, engine_fields)

        ## find table
        rtn_msg, objects = pingpong_cv.find_table(img, O_IMG_HEIGHT,
                                                  O_IMG_WIDTH)
        if rtn_msg['status'] != 'success':
            logger.info(rtn_msg['message'])
            return complete_result_wrapper(result_wrapper, engine_fields)

        img_rotated, mask_table, rotation_matrix = objects

        current_frame_info = {
            'time': frame_time,
            'img': img,
            'img_rotated': img_rotated,
            'mask_ball': None
        }

        ## in case we don't have a good "previous" frame, process the current one
        # and return
        mask_ball = None
        ball_stat = None
        if (self.prev_frame_info is None
                or frame_time - self.prev_frame_info['time'] > 300):
            logger.info("previous frame not good")
            rtn_msg, objects = pingpong_cv.find_pingpong(
                img, None, mask_table, None, rotation_matrix)
            if rtn_msg['status'] != 'success':
                logger.info(rtn_msg['message'])
            else:
                mask_ball, ball_stat = objects
            self.ball_trace.insert((frame_time, ball_stat))
            current_frame_info['mask_ball'] = mask_ball
            self.prev_frame_info = current_frame_info
            return complete_result_wrapper(result_wrapper, engine_fields)

        ## now we do have an okay previous frame
        rtn_msg, objects = pingpong_cv.find_pingpong(
            img, self.prev_frame_info['img'], mask_table,
            self.prev_frame_info['mask_ball'], rotation_matrix)
        if rtn_msg['status'] != 'success':
            logger.info(rtn_msg['message'])
        else:
            mask_ball, ball_stat = objects
        self.ball_trace.insert((frame_time, ball_stat))
        current_frame_info['mask_ball'] = mask_ball

        ## determine where the ball was hit to
        self.state['ball_position'] = self.ball_trace.leftOrRight()

        ## find the relative position (left or right) of the opponent
        rtn_msg, objects = pingpong_cv.find_opponent(
            img_rotated, self.prev_frame_info['img_rotated'], O_IMG_HEIGHT)
        if rtn_msg['status'] != 'success':
            self.seen_opponent = False
            logger.info(rtn_msg['message'])
            self.prev_frame_info = current_frame_info
            return complete_result_wrapper(result_wrapper, engine_fields)
        self.seen_opponent = True
        opponent_x = objects
        # a simple averaging over history
        self.opponent_x = self.opponent_x * 0.7 + opponent_x * 0.3
        self.state['opponent_position'] = (
            "left" if self.opponent_x < O_IMG_WIDTH * 0.58 else "right")

        t = time.time()
        if self.state['is_playing']:
            if self.state['opponent_position'] == "left":
                if ((t - self.last_played_t < 3
                     and self.last_played == "right")
                        or (t - self.last_played_t < 1)):
                    return complete_result_wrapper(result_wrapper,
                                                   engine_fields)

                speech = "right"
                self.last_played_t = t
                self.last_played = speech
                result_with_update(result_wrapper, engine_fields, speech)

            elif self.state['opponent_position'] == "right":
                if ((t - self.last_played_t < 3 and self.last_played == "left")
                        or (t - self.last_played_t < 1)):
                    return complete_result_wrapper(result_wrapper,
                                                   engine_fields)

                speech = "left"
                self.last_played_t = t
                self.last_played = speech
                result_with_update(result_wrapper, engine_fields, speech)

        return complete_result_wrapper(result_wrapper, engine_fields)
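
complete_result_wrapper and result_with_update are not defined in this snippet. Based on how the other examples pack engine_fields and append TEXT payloads, they plausibly behave like the sketch below; the names are taken from the calls above, but the bodies are assumptions, not the actual helpers:

def complete_result_wrapper(result_wrapper, engine_fields):
    # Assumed behavior: pack the (possibly updated) engine fields and return
    # the finished wrapper.
    result_wrapper.engine_fields.Pack(engine_fields)
    return result_wrapper


def result_with_update(result_wrapper, engine_fields, speech):
    # Assumed behavior: bump the update counter and attach the speech guidance
    # as a TEXT payload, mirroring the other examples in this section.
    engine_fields.update_count += 1
    result = gabriel_pb2.ResultWrapper.Result()
    result.payload_type = gabriel_pb2.PayloadType.TEXT
    result.payload = speech.encode("utf-8")
    result_wrapper.results.append(result)
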