Code Example #1
    def to_protobuf(self):
        """Convert the current object to protobuf.

        Returns:
            google.type.latlng_pb2.LatLng: The current point as a protobuf.
        """
        return latlng_pb2.LatLng(latitude=self.latitude, longitude=self.longitude)
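For orientation, a minimal usage sketch of the method above, assuming the enclosing class is google.cloud.datastore.helpers.GeoPoint (as the later examples suggest); the coordinates are placeholder values:

from google.cloud.datastore.helpers import GeoPoint

# Build a point and convert it to the google.type.LatLng protobuf message.
point = GeoPoint(latitude=59.3293, longitude=18.0686)
latlng_pb = point.to_protobuf()
print(latlng_pb.latitude, latlng_pb.longitude)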
Code Example #2
def _make_pb_entity():
    from google.cloud.vision_v1.proto import geometry_pb2
    from google.cloud.vision_v1.proto import image_annotator_pb2
    from google.type import latlng_pb2

    description = 'testing 1 2 3'
    locale = 'US'
    mid = 'm/w/45342234'
    score = 0.390625

    entity_annotation = image_annotator_pb2.EntityAnnotation(
        mid=mid,
        locale=locale,
        description=description,
        score=score,
        bounding_poly=geometry_pb2.BoundingPoly(
            vertices=[
                geometry_pb2.Vertex(x=1, y=2),
            ],
        ),
        locations=[
            image_annotator_pb2.LocationInfo(
                lat_lng=latlng_pb2.LatLng(latitude=1.0, longitude=2.0),
            ),
        ],
    )
    return entity_annotation
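A quick sketch of how the fixture above can be inspected; the nested LocationInfo carries the LatLng, and the values 1.0 and 2.0 are exactly representable doubles, so direct comparison holds:

entity = _make_pb_entity()
location = entity.locations[0].lat_lng
assert location.latitude == 1.0
assert location.longitude == 2.0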
Code Example #3
    def test_to_protobuf(self):
        from google.type import latlng_pb2

        lat = 0.0001
        lng = 20.03
        geo_pt = self._make_one(lat, lng)
        result = geo_pt.to_protobuf()
        geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
        self.assertEqual(result, geo_pt_pb)
Code Example #4
    def test_geo_point(self):
        from google.type import latlng_pb2
        from google.cloud.datastore.helpers import GeoPoint

        pb = self._makePB()
        lat = 9.11
        lng = 3.337
        geo_pt = GeoPoint(latitude=lat, longitude=lng)
        geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
        self._call_fut(pb, geo_pt)
        self.assertEqual(pb.geo_point_value, geo_pt_pb)
Code Example #5
    def test_geo_point(self):
        from google.type import latlng_pb2
        from google.cloud.datastore.helpers import GeoPoint

        lat = 42.42
        lng = 99.0007
        geo_pt = GeoPoint(latitude=lat, longitude=lng)
        geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
        name, value = self._call_fut(geo_pt)
        self.assertEqual(name, 'geo_point_value')
        self.assertEqual(value, geo_pt_pb)
Code Example #6
    def test_geo_point(self):
        from google.type import latlng_pb2
        from google.cloud.proto.datastore.v1 import entity_pb2
        from google.cloud.datastore.helpers import GeoPoint

        lat = -3.14
        lng = 13.37
        geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
        pb = entity_pb2.Value(geo_point_value=geo_pt_pb)
        result = self._call_fut(pb)
        self.assertIsInstance(result, GeoPoint)
        self.assertEqual(result.latitude, lat)
        self.assertEqual(result.longitude, lng)
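Examples #4 through #6 exercise conversions handled by the datastore helpers; the _makePB / _call_fut fixtures refer to the helpers under test. A self-contained sketch of the same round trip, using only the protobuf types shown in this example:

from google.type import latlng_pb2
from google.cloud.proto.datastore.v1 import entity_pb2

# Wrap a LatLng in a datastore Value and read the coordinates back.
value_pb = entity_pb2.Value(
    geo_point_value=latlng_pb2.LatLng(latitude=-3.14, longitude=13.37))
assert value_pb.geo_point_value.latitude == -3.14
assert value_pb.geo_point_value.longitude == 13.37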
Code Example #7
                    'c': []
                }
            }
        },
        'key9': True,
        'key10': False,
        'key11': 11.123
    },
    'timestamp_key': dt,
    'geo_point_key': GeoPoint(-20.2, +160.5),
    'null_key': None
}
geo_point_value = latlng_pb2.LatLng(latitude=-20.2, longitude=+160.5)

EXAMPLE_DICT_DEFAULT_VALUES = {
    'bool_key': False,
    'string_key': u'',
    'int32_key': 0,
    'int64_key': 0,
    'double_key': 0.0,
    'float_key': 0.0,
    'enum_key': example_pb2.ExampleEnumModel.ENUM0,
    'bytes_key': b'',
    'null_key': None,
    'map_string_string': {},
    'map_string_int32': {},
    'string_array_key': [],
Code Example #8
        def set_model_pb_value(model_pb, prop_name, value, is_nested=False):
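            # Recursively copies a plain Python value (scalar, list, dict, datetime,
            # GeoPoint or None) onto the protobuf field "prop_name" of model_pb,
            # handling nested message types, repeated fields and Struct values.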
            model_pb_class = model_pb.__class__

            if isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        # Handle nested models
                        if model_pb_class.DESCRIPTOR.fields_by_name[
                                prop_name].message_type:
                            field = model_pb_class.DESCRIPTOR.fields_by_name[
                                prop_name]

                            # Dynamically import nested model from a corresponding file
                            nested_model_name = field.message_type.name
                            nested_model_module = get_python_module_for_field(
                                field=field)
                            nested_model_class = getattr(
                                nested_model_module, nested_model_name)

                            # Instantiate an instance of nested field Protobuf class
                            item_pb = nested_model_class()
                            set_model_pb_value(item_pb,
                                               prop_name,
                                               item,
                                               is_nested=True)

                            getattr(model_pb, prop_name).append(item_pb)
                    elif isinstance(model_pb, struct_pb2.Struct):
                        try:
                            model_pb[prop_name]
                        except ValueError:
                            model_pb.update({prop_name: []})

                        model_pb[prop_name].append(item)
                    else:
                        getattr(model_pb, prop_name).append(item)
            elif isinstance(value, dict):
                # We assume it's a referenced protobuf type if it doesn't have an "update()" method;
                # google.protobuf.Struct and Map types have "update()" methods, so we can treat
                # them as plain dictionaries.
                if is_nested:
                    for key, value in six.iteritems(value):
                        set_model_pb_value(model_pb, key, value)
                elif isinstance(model_pb, struct_pb2.Struct):
                    model_pb.update({prop_name: value})
                else:
                    field = model_pb_class.DESCRIPTOR.fields_by_name[prop_name]
                    is_nested_model_type = (bool(
                        field.message_type) and not hasattr(
                            getattr(model_pb, prop_name, {}), 'update'))

                    if is_nested_model_type:
                        # Custom type definition potentially defined in different file
                        field = model_pb_class.DESCRIPTOR.fields_by_name[
                            prop_name]

                        # Dynamically import nested model from a corresponding file
                        nested_model_name = field.message_type.name
                        nested_model_module = get_python_module_for_field(
                            field=field)
                        nested_model_class = getattr(nested_model_module,
                                                     nested_model_name)

                        item_pb = nested_model_class()
                        set_model_pb_value(item_pb,
                                           prop_name,
                                           value,
                                           is_nested=True)

                        getattr(model_pb, prop_name).CopyFrom(item_pb)
                    else:
                        getattr(model_pb, prop_name).update(dict(value))
            elif isinstance(value, datetime):
                getattr(model_pb, prop_name).FromDatetime(value)
            elif value is None:
                # NULL type
                setattr(model_pb, prop_name, 0)
            elif isinstance(value, GeoPoint):
                item_pb = latlng_pb2.LatLng(latitude=value.latitude,
                                            longitude=value.longitude)
                getattr(model_pb, prop_name).CopyFrom(item_pb)
            elif isinstance(model_pb, struct_pb2.Struct):
                model_pb.update({prop_name: value})
            else:
                setattr(model_pb, prop_name, value)
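A focused sketch of the GeoPoint branch above, reusing the LocationInfo message from Code Example #2 purely as an illustrative target field (it is not part of the original code):

from google.type import latlng_pb2
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.cloud.datastore.helpers import GeoPoint

value = GeoPoint(latitude=58.41, longitude=15.62)
location_pb = image_annotator_pb2.LocationInfo()
# Same pattern as the GeoPoint branch: build a LatLng, then CopyFrom into the message field.
location_pb.lat_lng.CopyFrom(
    latlng_pb2.LatLng(latitude=value.latitude, longitude=value.longitude))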
Code Example #9
    def detect_intent_stream(self):
        """ Send streaming audio to dialogflow and publish response """
        if self.disable_audio:
            return
        self.end_of_dialog = False
        requests = self.audio_stream_request_generator()
        responses = self.session_client.streaming_detect_intent(
            requests=requests)
        rospy.loginfo('=' * 10 + " %s " + '=' * 10, self.project_id)
        try:
            for response in responses:
                rospy.loginfo('Intermediate transcript: "{}".'.format(
                    response.recognition_result.transcript))
                response.recognition_result.transcript = response.recognition_result.transcript.replace(
                    "Lidköping", "Linköping")
                self.transcript_pub.publish(
                    response.recognition_result.transcript)
        except exceptions.OutOfRange as exc:
            rospy.logerr(
                "Dialogflow exception. Out of audio quota? "
                "No internet connection (%s)", exc)
            return

        if self.cancel_stream_intent:
            return

        # pylint: disable=undefined-loop-variable
        query_result = response.query_result
        query_result.query_text = query_result.query_text.replace(
            "Lidköping", "Linköping")
        if query_result.intent.end_interaction:
            self.end_of_dialog = True

        self.query_text_pub.publish(String(data=query_result.query_text))

        rospy.loginfo('-' * 10 + " %s " + '-' * 10, self.project_id)
        rospy.loginfo('Query text: {}'.format(query_result.query_text))
        rospy.loginfo('Detected intent: {} (confidence: {})\n'.format(
            query_result.intent.display_name,
            query_result.intent_detection_confidence))
        rospy.loginfo('Fulfillment text: {}\n'.format(
            query_result.fulfillment_text))

        if query_result.intent.display_name == "developer.linkopingMode":
            self.query_params = dialogflow.QueryParameters(
                geo_location=latlng_pb2.LatLng(latitude=58.4106611,
                                               longitude=15.6198244),
                contexts=[
                    dialogflow.Context(lifespan_count=100,
                                       name="projects/" + self.project_id +
                                       "/agent/sessions/" + self.session_id +
                                       "/contexts/linkoping")
                ])
        elif query_result.intent.display_name == "developer.bergMode":
            self.query_params = dialogflow.QueryParameters(
                geo_location=latlng_pb2.LatLng(latitude=58.48548532662494,
                                               longitude=15.530466246782007),
                contexts=[
                    dialogflow.Context(lifespan_count=100,
                                       name="projects/" + self.project_id +
                                       "/agent/sessions/" + self.session_id +
                                       "/contexts/berg")
                ])

        self.publish_response(query_result)
Code Example #10
    def __init__(self):
        rospy.init_node('dialogflow_node')

        self.project_id = "folke-jkih"
        self.session_id = str(uuid.uuid4())
        self.language = rospy.get_param('~default_language', 'sv')
        self.disable_audio = rospy.get_param('~disable_audio', False)

        time_before_start = rospy.get_param('~time_before_start', 0.8)
        self.save_audio_requests = rospy.get_param('~save_audio_requests',
                                                   True)

        self.session_client = dialogflow.SessionsClient()

        self.query_params = dialogflow.QueryParameters(
            geo_location=latlng_pb2.LatLng(latitude=58.4106611,
                                           longitude=15.6198244),
            contexts=[
                dialogflow.Context(lifespan_count=100,
                                   name="projects/" + self.project_id +
                                   "/agent/sessions/" + self.session_id +
                                   "/contexts/linkoping")
            ])

        self.audio_chunk_queue = deque(
            maxlen=int(time_before_start * 31.25)
        )  # 512-sample chunks at 16 kHz arrive at 16000 / 512 = 31.25 chunks per second

        # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
        audio_encoding = dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16
        sample_rate_hertz = 16000
        self.audio_config = dialogflow.InputAudioConfig(
            audio_encoding=audio_encoding,
            language_code=self.language,
            sample_rate_hertz=sample_rate_hertz,
            single_utterance=True)

        self.query_result_pub = rospy.Publisher('response',
                                                Response,
                                                queue_size=2)
        self.query_text_pub = rospy.Publisher('query_text',
                                              String,
                                              queue_size=2)
        self.transcript_pub = rospy.Publisher('transcript',
                                              String,
                                              queue_size=2)
        self.fulfillment_pub = rospy.Publisher('fulfillment_text',
                                               String,
                                               queue_size=10)
        self.is_listening_pub = rospy.Publisher('is_listening',
                                                Bool,
                                                queue_size=2,
                                                latch=True)
        self.is_waiting_for_hot_word = rospy.Publisher('waiting_for_hot_word',
                                                       Bool,
                                                       queue_size=2,
                                                       latch=True)
        self.volume = 0
        self.is_talking = False
        self.is_in_dialog = False
        self.detected_wake_word = False
        self.head_visible = False
        self.waiting_for_wake_word = False
        self.cancel_stream_intent = False
        self.skip_audio = False
        rospy.wait_for_service('/qt_robot/audio/play')
        self.audio_play_srv = rospy.ServiceProxy('/qt_robot/audio/play',
                                                 audio_play)
        rospy.wait_for_service('/qt_robot/speech/config')
        self.speech_config_srv = rospy.ServiceProxy('/qt_robot/speech/config',
                                                    speech_config)

        rospy.Subscriber('text', String, self.text_callback)
        rospy.Subscriber('is_talking', Bool, self.is_talking_callback)
        rospy.Subscriber('event', Event, self.event_callback)
        rospy.Subscriber('head_visible', Bool, self.head_visible_callback)
        rospy.Subscriber('detected_wake_word', String,
                         self.detected_wake_word_callback)
        rospy.Subscriber('end_of_conversation', EmptyMsg,
                         self.end_of_conversation_callback)

        if not self.disable_audio:
            rospy.Subscriber('sound', AudioData, self.audio_callback)
            rospy.Subscriber('volume', UInt16, self.volume_callback)

        self.list_intents_service = rospy.Service('list_intents', Empty,
                                                  self.handle_list_intents)

        self.list_context_service = rospy.Service('list_context', Empty,
                                                  self.handle_list_context)

        self.clear_context_service = rospy.Service('clear_context', Empty,
                                                   self.handle_clear_context)