Code example #1
def recognize(pub):
    with cloud_speech.beta_create_Speech_stub(
            make_channel('speech.googleapis.com', 443)) as service:
        # For streaming audio from the microphone, there are three threads.
        # First, a thread that collects audio data as it comes in
        # print("entered make_channel")
        with record_audio(RATE, CHUNK) as buffered_audio_data:
            # print("entered record_audio")

            # Second, a thread that sends requests with that data
            # requests is a generator
            # when we have a new_context, we should make a new generator
            requests = request_stream(buffered_audio_data, RATE)
            # Third, a thread that listens for transcription responses
            recognize_stream = service.StreamingRecognize(
                requests, DEADLINE_SECS)

            # Exit things cleanly on interrupt
            # signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())

            # Now, put the transcription responses to use.
            try:
                # listen_print_loop(recognize_stream, pub)
                dump_output(recognize_stream, pub)
                recognize_stream.cancel()
            except face.CancellationError:
                # This happens because of the interrupt handler
                pass
            print("end of session")
Code example #2
File: mic_parser.py  Project: SungwooPark/Spectre
    def run(self):
        """
        Run speech recognition script
        """
        with cloud_speech.beta_create_Speech_stub(
                self.make_channel('speech.googleapis.com', 443)) as service:
            # For streaming audio from the microphone, there are three threads.
            # First, a thread that collects audio data as it comes in
            with self.record_audio(self.RATE,
                                   self.CHUNK) as buffered_audio_data:
                # Second, a thread that sends requests with that data
                requests = self.request_stream(buffered_audio_data, self.RATE)
                # Third, a thread that listens for transcription responses
                recognize_stream = service.StreamingRecognize(
                    requests, self.DEADLINE_SECS)

                # Exit things cleanly on interrupt
                #signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())

                # Now, put the transcription responses to use.
                try:
                    while True:
                        self.listen_print_loop(recognize_stream)
                except face.CancellationError:
                    # This happens because of the interrupt handler
                    pass
                finally:
                    # Always tear the stream down, even if the loop exits.
                    recognize_stream.cancel()
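Likewise, record_audio is a context manager that none of these examples define. A plausible PyAudio-based sketch; the blocking read loop is a simplification, since the "three threads" comments suggest the original helper buffers audio on its own thread:

import contextlib

import pyaudio


@contextlib.contextmanager
def record_audio(rate, chunk):
    """Opens the default microphone and yields an iterator of audio chunks."""
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16, channels=1, rate=rate,
        input=True, frames_per_buffer=chunk,
    )

    def chunk_generator():
        while True:
            yield audio_stream.read(chunk)

    try:
        yield chunk_generator()
    finally:
        # Release the microphone no matter how the caller exits.
        audio_stream.stop_stream()
        audio_stream.close()
        audio_interface.terminate()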
Code example #3
def main(input_uri, encoding, sample_rate, language_code='en-US'):
    service = cloud_speech.beta_create_Speech_stub(
        make_channel('speech.googleapis.com', 443))
    # The method and parameters can be inferred from the proto from which the
    # grpc client lib was generated. See:
    # https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1beta1/cloud_speech.proto
    response = service.SyncRecognize(
        cloud_speech.SyncRecognizeRequest(
            config=cloud_speech.RecognitionConfig(
                # There are a bunch of config options you can specify. See
                # https://goo.gl/KPZn97 for the full list.
                encoding=encoding,  # one of LINEAR16, FLAC, MULAW, AMR, AMR_WB
                sample_rate=sample_rate,  # the rate in hertz
                # See https://g.co/cloud/speech/docs/languages for a list of
                # supported languages.
                language_code=language_code,  # a BCP-47 language tag
            ),
            audio=cloud_speech.RecognitionAudio(uri=input_uri, )),
        DEADLINE_SECS)

    # Print the recognition result alternatives and confidence scores.
    for result in response.results:
        print('Result:')
        for alternative in result.alternatives:
            print(u'  ({}): {}'.format(alternative.confidence,
                                       alternative.transcript))
Code example #4
def main(input_uri, encoding, sample_rate, language_code='en-US'):
    service = cloud_speech.beta_create_Speech_stub(
            make_channel('speech.googleapis.com', 443))
    # The method and parameters can be inferred from the proto from which the
    # grpc client lib was generated. See:
    # https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1beta1/cloud_speech.proto
    response = service.SyncRecognize(cloud_speech.SyncRecognizeRequest(
        config=cloud_speech.RecognitionConfig(
            # There are a bunch of config options you can specify. See
            # https://goo.gl/KPZn97 for the full list.
            encoding=encoding,  # one of LINEAR16, FLAC, MULAW, AMR, AMR_WB
            sample_rate=sample_rate,  # the rate in hertz
            # See https://g.co/cloud/speech/docs/languages for a list of
            # supported languages.
            language_code=language_code,  # a BCP-47 language tag
        ),
        audio=cloud_speech.RecognitionAudio(
            uri=input_uri,
        )
    ), DEADLINE_SECS)

    # Print the recognition result alternatives and confidence scores.
    for result in response.results:
        print('Result:')
        for alternative in result.alternatives:
            print(u'  ({}): {}'.format(
                alternative.confidence, alternative.transcript))
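Both SyncRecognize examples expect the audio to be addressed by a URI, such as a Cloud Storage object. A hypothetical command-line wrapper for main; the flags and defaults below are illustrative and not part of the original samples:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Transcribe an audio file.')
    parser.add_argument('input_uri', help='e.g. gs://my-bucket/audio.raw')
    parser.add_argument('--encoding', default='LINEAR16',
                        help='LINEAR16, FLAC, MULAW, AMR, or AMR_WB')
    parser.add_argument('--sample_rate', type=int, default=16000,
                        help='Sample rate of the audio, in hertz')
    parser.add_argument('--language_code', default='en-US',
                        help='BCP-47 language tag')
    args = parser.parse_args()
    main(args.input_uri, args.encoding, args.sample_rate, args.language_code)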
Code example #5
def main(input_uri, encoding, sample_rate, language_code='en-US'):
    channel = make_channel('speech.googleapis.com', 443)
    service = cloud_speech_pb2.beta_create_Speech_stub(channel)
    # The method and parameters can be inferred from the proto from which the
    # grpc client lib was generated. See:
    # https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1beta1/cloud_speech.proto
    operation = service.AsyncRecognize(
        cloud_speech_pb2.AsyncRecognizeRequest(
            config=cloud_speech_pb2.RecognitionConfig(
                # There are a bunch of config options you can specify. See
                # https://goo.gl/KPZn97 for the full list.
                encoding=encoding,  # one of LINEAR16, FLAC, MULAW, AMR, AMR_WB
                sample_rate=sample_rate,  # the rate in hertz
                # See https://g.co/cloud/speech/docs/languages for a list of
                # supported languages.
                language_code=language_code,  # a BCP-47 language tag
            ),
            audio=cloud_speech_pb2.RecognitionAudio(uri=input_uri, )),
        DEADLINE_SECS)

    # Print the longrunning operation handle.
    print(operation)

    # Construct a long running operation endpoint.
    service = operations_pb2.beta_create_Operations_stub(channel)

    name = operation.name

    while True:
        # Give the server a few seconds to process.
        print('Waiting for server processing...')
        time.sleep(1)
        operation = service.GetOperation(
            operations_pb2.GetOperationRequest(name=name), DEADLINE_SECS)

        if operation.error.message:
            print('\nOperation error:\n{}'.format(operation.error))

        if operation.done:
            break

    response = cloud_speech_pb2.AsyncRecognizeResponse()
    operation.response.Unpack(response)
    # Print the recognition result alternatives and confidence scores.
    for result in response.results:
        print('Result:')
        for alternative in result.alternatives:
            print(u'  ({}): {}'.format(alternative.confidence,
                                       alternative.transcript))
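Because the operation name returned by AsyncRecognize is durable, the polling loop does not have to run in the same process that started the request. A small sketch built only from the calls already used above; fetch_async_result is a hypothetical helper name, not part of the original sample:

def fetch_async_result(channel, name):
    """Looks up an AsyncRecognize operation by name and returns its result.

    Returns None while the operation is still running.
    """
    operations = operations_pb2.beta_create_Operations_stub(channel)
    operation = operations.GetOperation(
        operations_pb2.GetOperationRequest(name=name), DEADLINE_SECS)

    if not operation.done:
        return None
    if operation.error.message:
        raise RuntimeError(operation.error.message)

    # The result is packed in a google.protobuf.Any and must be unpacked.
    response = cloud_speech_pb2.AsyncRecognizeResponse()
    operation.response.Unpack(response)
    return response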
Code example #6
def main():
    with cloud_speech.beta_create_Speech_stub(
            make_channel('speech.googleapis.com', 443)) as service:
        # For streaming audio from the microphone, there are three threads.
        # First, a thread that collects audio data as it comes in
        with record_audio(RATE, CHUNK) as buffered_audio_data:
            # Second, a thread that sends requests with that data
            requests = request_stream(buffered_audio_data, RATE)
            # Third, a thread that listens for transcription responses
            recognize_stream = service.StreamingRecognize(
                requests, DEADLINE_SECS)

            # Exit things cleanly on interrupt
            signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())

            # Now, put the transcription responses to use.
            try:
                listen_print_loop(recognize_stream)

                recognize_stream.cancel()
            except face.CancellationError:
                # This happens because of the interrupt handler
                pass
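listen_print_loop is the consumer side of the stream and is not reproduced in any of these examples. A minimal sketch assuming the v1beta1 StreamingRecognizeResponse fields (error, results, is_final, alternatives); a real implementation would also decide when to stop listening, for example on a spoken keyword or after a final result:

def listen_print_loop(recognize_stream):
    """Iterates over streaming responses, printing interim and final results."""
    for response in recognize_stream:
        if response.error.message:
            raise RuntimeError('Server error: ' + response.error.message)
        for result in response.results:
            for alternative in result.alternatives:
                marker = 'final' if result.is_final else 'interim'
                print(u'({}) {}'.format(marker, alternative.transcript))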
Code example #7
def create_new_service():
    channel = make_channel('speech.googleapis.com', 443)
    service = cloud_speech.beta_create_Speech_stub(channel)
    return service
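Finally, make_channel itself never appears in these snippets. One plausible implementation using the google-auth library; the scope constant and module layout are assumptions, and samples of this era achieved the same thing with the grpc.beta implementations API:

import google.auth
import google.auth.transport.grpc
import google.auth.transport.requests

# Assumed scope; the Speech API accepts the broad cloud-platform scope.
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'


def make_channel(host, port):
    """Creates a secure gRPC channel using application default credentials."""
    credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])
    http_request = google.auth.transport.requests.Request()
    target = '{}:{}'.format(host, port)
    return google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, target)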