def handler(conn, addr, watson_settings, credentials, profile):
    print(addr, 'connected')
    with silence_stdout():
        pipeline = get_pipeline(watson_settings, credentials, addr)
    responder = nodes.Responder(conn)
    pipeline.append(responder)
    if not profile:
        pipeline = [n for n in pipeline if not isinstance(n, nodes.Profiler)]
    fluteline.connect(pipeline)
    fluteline.start(pipeline)
    try:
        print(addr, 'ready')
        responder.put({'state': 'ready'})
        while True:
            incoming = conn.recv(TCP_INPUT_BUFFER_SIZE)
            if not incoming:
                break
            pipeline[0].input.put(incoming)
    finally:
        fluteline.stop(pipeline)
        print(addr, 'disconnected')
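# A minimal sketch of how handler() could be driven, assuming a plain
# socket/threading server. The real server module (and how it loads
# watson_settings/credentials) is not shown here, so serve_forever, HOST
# and PORT are illustrative names only.
import socket
import threading


def serve_forever(host, port, watson_settings, credentials, profile=False):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((host, port))
        server.listen()
        while True:
            conn, addr = server.accept()
            # One pipeline (and one thread) per client connection.
            threading.Thread(
                target=handler,
                args=(conn, addr, watson_settings, credentials, profile),
                daemon=True,
            ).start()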
def main():
    args = parse_arguments()
    settings = {
        'inactivity_timeout': -1,  # Don't kill me after 30 seconds
        'interim_results': True,
        'timestamps': True,
    }
    nodes = [
        watson_streaming.utilities.MicAudioGen(),
        watson_streaming.Transcriber(settings, args.credentials),
        # watson_streaming.utilities.Printer(),
        IBMWatsonAdapter(),
        DeepTaggerModule(),
        Printer(),
    ]
    fluteline.connect(nodes)
    fluteline.start(nodes)
    try:
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        fluteline.stop(nodes)
def main():
    nodes = [
        RandomNumberGenerator(),
        Max(),
        Printer(),
    ]
    fluteline.connect(nodes)
    fluteline.start(nodes)
    time.sleep(5)
    fluteline.stop(nodes)
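# One possible implementation of the three nodes above, as a sketch only.
# It assumes fluteline exposes Producer/Consumer base classes with
# produce()/consume() hooks; the .output queue mirrors the .input/.output
# attributes used elsewhere in these examples. Check the fluteline docs if
# your version names these hooks differently.
import random

import fluteline


class RandomNumberGenerator(fluteline.Producer):
    def produce(self):
        # Called repeatedly while the pipeline runs; emit one number per call.
        self.output.put(random.random())


class Max(fluteline.Consumer):
    def __init__(self):
        super().__init__()
        self.biggest = float('-inf')

    def consume(self, item):
        # Forward an item only when it sets a new maximum.
        if item > self.biggest:
            self.biggest = item
            self.output.put(item)


class Printer(fluteline.Consumer):
    def consume(self, item):
        print(item)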
def main():
    args = parse_arguments()
    settings = {
        'interim_results': True,
    }
    nodes = [
        watson_streaming.utilities.FileAudioGen(args.audio_file),
        watson_streaming.Transcriber(settings, args.credentials),
        watson_streaming.utilities.Printer(),
    ]
    fluteline.connect(nodes)
    fluteline.start(nodes)
    try:
        with contextlib.closing(wave.open(args.audio_file)) as f:
            # getnframes() counts whole frames (one frame spans all channels),
            # so the duration in seconds is simply frames / framerate.
            wav_length = f.getnframes() / f.getframerate()
        # Sleep till the end of the file + some seconds slack
        time.sleep(wav_length + 5)
    finally:
        fluteline.stop(nodes)
def main():
    args = parse_arguments()
    settings = {
        'inactivity_timeout': -1,  # Don't kill me after 30 seconds
        'interim_results': True,
    }
    nodes = [
        watson_streaming.utilities.MicAudioGen(),
        watson_streaming.Transcriber(settings, args.credentials),
        slu.Interpret(),
    ]
    fluteline.connect(nodes)
    fluteline.start(nodes)
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        fluteline.stop(nodes)
def main():
    args = parse_arguments()
    settings = {
        'inactivity_timeout': -1,  # Don't kill me after 30 seconds
        'interim_results': True,
    }
    nodes = [
        watson_streaming.utilities.MicAudioGen(),
        watson_streaming.Transcriber(
            settings, args.credentials, None, None, args.parameters),
        watson_streaming.utilities.Printer(),
    ]
    fluteline.connect(nodes)
    fluteline.start(nodes)
    try:
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        pass
    finally:
        fluteline.stop(nodes)
def test_sanity(self):
    transcriber = watson_streaming.Transcriber(
        settings={'interim_results': True},
        apikey=self.apikey,
        hostname=self.hostname,
    )
    file_audio_gen = watson_streaming.utilities.FileAudioGen(AUDIO_PATH)
    pipeline = [file_audio_gen, transcriber]
    fluteline.connect(pipeline)
    fluteline.start(pipeline)
    while True:
        result = transcriber.output.get()
        if 'results' in result:
            transcript = result['results'][0]['alternatives'][0]['transcript']
            expected = 'several tornadoes'
            if transcript.startswith(expected):
                break
            else:
                raise AssertionError("Didn't get expected transcript")
    fluteline.stop(pipeline)
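# The test method above expects self.apikey, self.hostname and AUDIO_PATH to
# exist. A minimal sketch of the surrounding test class, assuming the
# credentials come from environment variables; the variable names and the
# audio path are illustrative, not necessarily what the project uses.
import os
import unittest

AUDIO_PATH = 'tests/audio_sample.wav'  # hypothetical path to a known recording


class TranscriberTest(unittest.TestCase):
    def setUp(self):
        self.apikey = os.environ['WATSON_APIKEY']      # assumed env var name
        self.hostname = os.environ['WATSON_HOSTNAME']  # assumed env var name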
    [('once', 3.4, 4), ('upon', 4.2, 4.6), ('on', 4.3, 4.8)],
]

# create a fake list of incoming transcription result dicts from watson
fake_updates_data = []
result_index = 0
for update in fake_updates_raw:
    data = {
        'result_index': result_index,
        'results': [{'alternatives': [{'timestamps': update}]}],
    }
    fake_updates_data.append(data)

nodes = [
    FakeIBMWatsonStreamer(fake_updates_data),
    IBMWatsonAdapter(),
]

tic = time.perf_counter()  # wall-clock timer; time.clock() was removed in Python 3.8
fluteline.connect(nodes)
fluteline.start(nodes)
print(time.perf_counter() - tic, "seconds")
time.sleep(1)
fluteline.stop(nodes)
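# FakeIBMWatsonStreamer is not defined above. A sketch of one possible
# implementation: a producer that replays the prerecorded Watson-style result
# dicts and then idles. It assumes the same fluteline Producer base class and
# .output queue as the other examples; this is an illustrative test double,
# not necessarily the project's actual one.
import time

import fluteline


class FakeIBMWatsonStreamer(fluteline.Producer):
    def __init__(self, updates):
        super().__init__()
        self.updates = list(updates)

    def produce(self):
        # Emit the canned result dicts one by one, then idle.
        if self.updates:
            self.output.put(self.updates.pop(0))
        else:
            time.sleep(0.01)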