class MultiColorLed:
    Config = namedtuple('Config', ['channels', 'pattern'])

    OFF = Config(channels=lambda color: Leds.rgb_off(), pattern=None)
    ON = Config(channels=Leds.rgb_on, pattern=None)
    BLINK = Config(channels=Leds.rgb_pattern, pattern=Pattern.blink(500))
    BLINK_3 = BLINK
    BEACON = BLINK
    BEACON_DARK = BLINK
    DECAY = BLINK
    PULSE_SLOW = Config(channels=Leds.rgb_pattern, pattern=Pattern.breathe(500))
    PULSE_QUICK = Config(channels=Leds.rgb_pattern, pattern=Pattern.breathe(100))

    def _update(self, state, brightness):
        with self._lock:
            if state is not None:
                self._state = state
            if brightness is not None:
                self._brightness = brightness

            color = (int(255 * self._brightness), 0, 0)
            if self._state.pattern:
                self._leds.pattern = self._state.pattern
            self._leds.update(self._state.channels(color))

    def __init__(self, channel):
        self._lock = threading.Lock()
        self._brightness = 1.0  # Read and written atomically.
        self._state = self.OFF
        self._leds = Leds()

    def close(self):
        self._leds.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    @property
    def brightness(self):
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        if value < 0.0 or value > 1.0:
            raise ValueError('Brightness must be between 0.0 and 1.0.')
        self._update(state=None, brightness=value)

    def _set_state(self, state):
        self._update(state=state, brightness=None)

    state = property(None, _set_state)
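A minimal usage sketch for the driver above, assuming the namedtuple, threading, and aiy.leds imports that the class relies on; note that the channel argument is accepted but unused by this implementation.

# Minimal usage sketch; assumes the imports the class above relies on.
from collections import namedtuple
import threading
import time

from aiy.leds import Leds, Pattern

with MultiColorLed(channel=0) as led:
    led.state = MultiColorLed.ON          # solid color at current brightness
    time.sleep(1)
    led.brightness = 0.2                  # re-applies the current state, dimmed
    led.state = MultiColorLed.PULSE_SLOW  # breathe with a 500 ms period
    time.sleep(3)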
def wakeup(self):
    from aiy.board import Board, Led
    from aiy.leds import (Leds, Pattern, Color)

    self._wakeup = True
    with Board() as board:
        with Leds() as leds:
            while self._wakeup:
                board.led.state = Led.ON
                leds.pattern = Pattern.breathe(1000)
                leds.update(Leds.rgb_pattern(Color.BLUE))
                time.sleep(1)
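A sketch of how wakeup() might be driven, assuming it lives on a small controller object whose only shared state is the _wakeup flag; the Indicator name and structure are made up for illustration.

# Hypothetical controller sketch: wakeup() runs on a daemon thread and stops
# when the _wakeup flag is cleared. Only the flag and thread handle are assumed.
import threading
import time

class Indicator:
    wakeup = wakeup  # the method defined above

    def start(self):
        self._thread = threading.Thread(target=self.wakeup, daemon=True)
        self._thread.start()

    def stop(self):
        self._wakeup = False  # wakeup() leaves its loop on the next pass
        self._thread.join()

indicator = Indicator()
indicator.start()
time.sleep(5)   # breathe blue for about five seconds
indicator.stop()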
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    leds = Leds()
    leds.pattern = Pattern.breathe(4000)
    leds.update(Leds.rgb_on((0, 8, 0)))

    pygame.init()
    pygame.mixer.init()
    mix = alsaaudio.Mixer()
    mix.setvolume(30)

    # Collect all files from the jukebox store.
    all_files = []
    for (dirpath, dirnames, filenames) in walk('/home/pi/jukidbox_store'):
        all_files.extend([path.join(dirpath, file) for file in filenames])

    while True:
        leds.update(Leds.rgb_on((0, 8, 0)))
        try:
            with Board() as board:
                while True:
                    print('Press button to start.')
                    board.button.wait_for_press()

                    done = threading.Event()
                    board.button.when_pressed = done.set

                    print('Playing...')
                    leds.update(Leds.rgb_pattern(Color.PURPLE))

                    # Pick a random file and loop it until the button is pressed.
                    file = numpy.random.choice(all_files)
                    print(file)
                    pygame.mixer.music.load(file)
                    pygame.mixer.music.play(-1)

                    while pygame.mixer.music.get_busy():
                        if done.is_set():
                            leds.update(Leds.rgb_on((32, 0, 0)))
                            pygame.mixer.music.stop()
                        time.sleep(0.5)

                    print("Finished ..")
                    leds.update(Leds.rgb_on((0, 8, 0)))
        except Exception as e:
            print(e)
            leds.update(Leds.rgb_on(Color.YELLOW))
            time.sleep(2)
def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    with Board() as board:
        # board.led.state = Led.ON
        with Leds() as leds:
            while True:
                if hints:
                    logging.info('Say something, e.g. %s.' % ', '.join(hints))
                else:
                    logging.info('Say something.')
                text = client.recognize(language_code=args.language,
                                        hint_phrases=hints)
                if text is None:
                    logging.info('You said nothing.')
                    continue

                logging.info('You said: "%s"' % text)
                text = text.lower()
                if 'turn on the light' in text:
                    board.led.state = Led.ON
                elif 'turn off the light' in text:
                    board.led.state = Led.OFF
                elif 'blink the light' in text:
                    board.led.state = Led.BLINK
                elif 'goodbye' in text:
                    break
                elif 'happy' in text:
                    leds.pattern = Pattern.blink(50)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    audio.play_wav('laugh.wav')
                elif 'creep' in text:
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                elif 'cheer' in text:
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    audio.play_wav('people-cheering.wav')
def main():
    # Exit early if an entry has already been made today.
    check_for_entry_today()
    print("not journaled today")

    # Callback to run when the button is pressed.
    board.button.when_pressed = journal

    print('waiting for press')
    leds.pattern = Pattern.breathe(2000)
    leds.update(Leds.rgb_pattern(Color.YELLOW))

    # board.button.wait_for_press(60 * 15)  # 15 minutes
    board.button.wait_for_press(15)  # 15 seconds

    # If there was no press, turn the LED off and exit.
    print('no press, exiting...')
    board.led.state = Led.OFF
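main() above relies on module-level board and leds objects plus two helpers that are not shown; a sketch of what they could look like, with the journal directory and both helper bodies treated as assumptions.

# Hypothetical module-level setup assumed by main(); the journal directory and
# helper bodies are illustrative, not taken from the original program.
import os
from datetime import date

from aiy.board import Board, Led
from aiy.leds import Leds, Pattern, Color

board = Board()
leds = Leds()
JOURNAL_DIR = os.path.expanduser('~/journal')  # assumed location

def check_for_entry_today():
    """Exit quietly if a file named after today's date already exists."""
    if os.path.exists(os.path.join(JOURNAL_DIR, '%s.txt' % date.today())):
        raise SystemExit(0)

def journal():
    """Button callback: start recording a new entry (stubbed here)."""
    print('button pressed, starting journal entry...')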
def alarm(done, leds):
    print("alarm thread")
    intensity = 0
    start = time.monotonic()
    duration = 0
    while not done.is_set():
        # Ramp the intensity up over roughly 70 iterations, capped at 1.0.
        if intensity < 1:
            intensity += (5. / 70.)
        if intensity > 1:
            intensity = 1

        set_volume(intensity * MAX_VOLUME)
        # `map` here is an Arduino-style rescale helper, not Python's builtin:
        # as intensity rises, the breathe period shortens from 1000 ms to 100 ms.
        leds.pattern = Pattern.breathe(map(intensity, 0., 1., 1000., 100.))
        leds.update(Leds.rgb_pattern((0, 0, intensity * MAX_BRIGHTNESS)))

        duration = time.monotonic() - start
        print('Alarm [Press button to stop] %.02fs, intensity: %.02f' %
              (duration, intensity))
        play_wav(ALARM_SOUND_PATH)
        time.sleep(SLEEP_TIME)
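The helpers and constants alarm() assumes are not shown; a minimal sketch of them, where the names, the constant values, and the amixer-based volume control are all assumptions rather than the original implementation.

# Hypothetical helpers assumed by alarm(); values and the amixer call are illustrative.
import subprocess

MAX_VOLUME = 100        # percent, assumed
MAX_BRIGHTNESS = 255    # LED channel maximum, assumed
SLEEP_TIME = 1          # seconds between alarm iterations, assumed

def map(value, in_min, in_max, out_min, out_max):
    """Arduino-style linear rescale; intentionally shadows the builtin here."""
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

def set_volume(volume):
    """Set the ALSA master volume to an integer percentage."""
    subprocess.call(['amixer', '-q', 'sset', 'Master', '%d%%' % int(volume)])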
def main():
    with Leds() as leds:
        print('Windows Up')
        tuned_servo.min()
        # blueLED1.blink(.2, .2)  # risk of servo burning if kept
        # blueLED2.blink(.2, .2)
        leds.pattern = Pattern.blink(500)
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Windows Down')
        tuned_servo.max()
        interior.on()
        yellowLED.on()
        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.YELLOW))

        # Fade from yellow to red.
        for i in range(32):
            color = Color.blend(Color.RED, Color.YELLOW, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        # leds.update({
        #     1: Leds.Channel(Leds.Channel.PATTERN, 64),
        #     2: Leds.Channel(Leds.Channel.OFF, 128),
        #     3: Leds.Channel(Leds.Channel.ON, 128),
        #     4: Leds.Channel(Leds.Channel.PATTERN, 64),
        # })
        time.sleep(5)
        leds.update(Leds.rgb_off())

        tuned_servo.close()
        yellowLED.close()
        interior.close()
        blueLED2.close()
def main():
    with Board() as board:
        with Leds() as leds:
            # Init volume and brightness.
            set_volume(0)
            leds.pattern = Pattern.breathe(750)
            leds.update(Leds.rgb_pattern(Color.BLACK))

            done = threading.Event()
            board.button.when_pressed = done.set

            alarm_thread = threading.Thread(target=alarm, args=(done, leds),
                                            daemon=True)
            alarm_thread.start()

            if done.wait(timeout=TIMEOUT_LIMIT):
                set_volume(MAX_VOLUME)
                leds.update(Leds.rgb_on(Color.GREEN))
                print('GOOD MORNING!')
                play_wav(GOOD_MORNING_SOUND_PATH)
            else:
                print('Timed out.')
def button():
    with Leds() as leds:
        with Board() as board:
            st_play = True
            while True:
                leds.pattern = Pattern.breathe(3000)
                if st_play:
                    leds.update(Leds.rgb_pattern(Color.GREEN))
                else:
                    leds.update(Leds.rgb_pattern(Color.BLUE))

                board.button.wait_for_press()
                if st_play:
                    send_cmd("STOP")
                    print("> STOP")
                else:
                    send_cmd("PLAY")
                    print("> PLAY")

                board.led.state = Led.ON
                board.button.wait_for_release()
                board.led.state = Led.OFF
                st_play = not st_play
leds.pattern = Pattern.blink(500)

print('RGB: Blink RED for 5 seconds')
leds.update(Leds.rgb_pattern(RED))
time.sleep(5)

print('RGB: Blink GREEN for 5 seconds')
leds.update(Leds.rgb_pattern(GREEN))
time.sleep(5)

print('RGB: Blink BLUE for 5 seconds')
leds.update(Leds.rgb_pattern(BLUE))
time.sleep(5)

print('Set breathe pattern: period=1000ms (1Hz)')
leds.pattern = Pattern.breathe(1000)

print('RGB: Breathe RED for 5 seconds')
leds.update(Leds.rgb_pattern(RED))
time.sleep(5)

print('RGB: Breathe GREEN for 5 seconds')
leds.update(Leds.rgb_pattern(GREEN))
time.sleep(5)

print('RGB: Breathe BLUE for 5 seconds')
leds.update(Leds.rgb_pattern(BLUE))
time.sleep(5)

print('RGB: Increase RED brightness for 3.2 seconds')
for i in range(32):
    leds.update(Leds.rgb_on((8 * i, 0, 0)))
    time.sleep(0.1)
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)
        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)
        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)
        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)
        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)
        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)
        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)
        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)
        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)
        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)
        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)
        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)
        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)
        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
        })
        time.sleep(5)

        print('Done')
def listen_me():
    global text, duration

    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # Load the Vokaturi library.
    Vokaturi.load("/home/pi/lib/piZero.so")

    # Initialize the Cloud Speech, Natural Language, and Text-to-Speech clients.
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()
    tts_client = texttospeech.TextToSpeechClient()

    # Pre-synthesize short spoken reactions.
    pos_wavs = []
    neut_wavs = []
    neg_wavs = []
    intro_wavs = []
    pos_wavs.append(text_to_audio(tts_client, 'Really?', '0.wav'))
    pos_wavs.append(text_to_audio(tts_client, 'Wow!', '1.wav'))
    pos_wavs.append(text_to_audio(tts_client, 'Amazing!', '2.wav'))
    pos_wavs.append(text_to_audio(tts_client, 'Haha', '3.wav'))
    neut_wavs.append(text_to_audio(tts_client, 'Hmm', '10.wav'))
    neut_wavs.append(text_to_audio(tts_client, 'I see', '11.wav'))
    neut_wavs.append(text_to_audio(tts_client, 'And then?', '12.wav'))
    neut_wavs.append(text_to_audio(tts_client, 'Okay', '13.wav'))
    neg_wavs.append(text_to_audio(tts_client, 'Oh no', '4.wav'))
    neg_wavs.append(text_to_audio(tts_client, 'Hang in there', '5.wav'))
    neg_wavs.append(text_to_audio(tts_client, "That's too bad", '6.wav'))
    intro_wavs.append(text_to_audio(tts_client, "I'm listening. Tell me about it.", 'intro0.wav'))
    intro_wavs.append(text_to_audio(tts_client, 'What happened?', 'intro1.wav'))

    play_wav(random.choice(intro_wavs))

    logging.basicConfig(level=logging.INFO)
    with Board() as board:
        while True:
            print('Say something.')
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()
                while text is None:
                    # Recognize speech as text.
                    text = client.recognize(language_code='ko-KR')
                duration = time.monotonic() - start

            # Record to a file while waiting for recognition to finish.
            record_file(AudioFormat.CD, filename=args.filename, wait=wait,
                        filetype='wav')
            print(text)
            print('Recorded: %.02f seconds' % duration)

            # Phrases that end the conversation.
            if text in ['Thanks for listening', 'Thanks for listening to me',
                        'Assistant', 'μκ°', 'μ κ°']:
                return

            # Text sentiment analysis.
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment
            print('Text sentiment analysis*********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))

            ##################### Thresholds: tune as needed ####################
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1

            # Text sentiment analysis alone is enough when the score is decisive.
            if (sentiment.score < neg_standard or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")
            else:
                # Sentiment analysis of the recorded audio.
                print('Audio sentiment analysis*********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                # print(" sample rate %.3f Hz" % sample_rate)
                # print("Allocating Vokaturi sample array...")
                buffer_length = len(samples)
                print(" %d samples, %d channels" % (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

                # print("Creating VokaturiVoice...")
                voice = Vokaturi.Voice(sample_rate, buffer_length)
                # print("Filling VokaturiVoice with samples...")
                voice.fill(buffer_length, c_buffer)
                # print("Extracting emotions from VokaturiVoice...")
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # print("Neutral: %.3f" % emotionProbabilities.neutrality)
                    # print("Happy: %.3f" % emotionProbabilities.happiness)
                    # print("Sad: %.3f" % emotionProbabilities.sadness)
                    # print("Angry: %.3f" % emotionProbabilities.anger)
                    # print("Fear: %.3f" % emotionProbabilities.fear)
                    # Ignore fear in the overall score.
                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness + emotionProbabilities.anger)
                    if wave_score > 0 and sentiment.score > 0.4:
                        print('@@@positive')
                        emotion = True
                    elif wave_score < 0 and sentiment.score < 0.4:
                        print('@@@negative')
                        emotion = False
                    # When the text score and the wave score disagree, stay
                    # neutral (emotion = None).

            # React from here.
            with Leds() as leds:
                if emotion is True:
                    play_wav(random.choice(pos_wavs))
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                elif emotion is False:
                    play_wav(random.choice(neg_wavs))
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
                    # play_wav('people-cheering.wav')
                else:
                    # Neutral reaction.
                    play_wav(random.choice(neut_wavs))
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
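The text_to_audio helper used above is not shown; a plausible sketch built on the same-era Cloud Text-to-Speech client created in the snippet, where the voice selection and LINEAR16 encoding are assumptions rather than the original implementation.

# Hypothetical sketch of the text_to_audio helper used above; assumes an older
# google-cloud-texttospeech release that exposes `types` and `enums`.
from google.cloud import texttospeech

def text_to_audio(tts_client, text, filename):
    synthesis_input = texttospeech.types.SynthesisInput(text=text)
    voice = texttospeech.types.VoiceSelectionParams(
        language_code='ko-KR',
        ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
    audio_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.LINEAR16)
    response = tts_client.synthesize_speech(synthesis_input, voice, audio_config)
    with open(filename, 'wb') as out:
        out.write(response.audio_content)  # LINEAR16 (WAV) bytes
    return filename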
def listen_me():
    global text, duration

    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # Load the Vokaturi library.
    Vokaturi.load("/home/pi/lib/piZero.so")

    # Initialize the Cloud Speech and Natural Language clients.
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()

    logging.basicConfig(level=logging.INFO)
    with Board() as board:
        while True:
            print('Say something.')
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()
                while text is None:
                    # Recognize speech as text.
                    text = client.recognize(language_code='ko-KR')
                duration = time.monotonic() - start

            # Record to a file while waiting for recognition to finish.
            record_file(AudioFormat.CD, filename=args.filename, wait=wait,
                        filetype='wav')
            print(text)
            print('Recorded: %.02f seconds' % duration)

            # Phrases that end the conversation.
            if text in ['Thanks for listening', 'Thanks for listening to me',
                        'Assistant', 'μκ°', 'μ κ°']:
                return

            # Text sentiment analysis.
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment
            print('Text sentiment analysis*********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))

            ##################### Thresholds: tune as needed ####################
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1

            # Text sentiment analysis alone is enough when the score is decisive.
            if (sentiment.score < neg_standard or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")
            else:
                # Sentiment analysis of the recorded audio.
                print('Audio sentiment analysis*********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                # print(" sample rate %.3f Hz" % sample_rate)
                # print("Allocating Vokaturi sample array...")
                buffer_length = len(samples)
                print(" %d samples, %d channels" % (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

                # print("Creating VokaturiVoice...")
                voice = Vokaturi.Voice(sample_rate, buffer_length)
                # print("Filling VokaturiVoice with samples...")
                voice.fill(buffer_length, c_buffer)
                # print("Extracting emotions from VokaturiVoice...")
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # print("Neutral: %.3f" % emotionProbabilities.neutrality)
                    # print("Happy: %.3f" % emotionProbabilities.happiness)
                    # print("Sad: %.3f" % emotionProbabilities.sadness)
                    # print("Angry: %.3f" % emotionProbabilities.anger)
                    # print("Fear: %.3f" % emotionProbabilities.fear)
                    # Ignore fear in the overall score.
                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness + emotionProbabilities.anger)
                    if wave_score > 0:
                        print('@@@positive')
                        emotion = True
                    else:
                        print('@@@negative')
                        emotion = False

            # When the text analysis is ambiguous and the wave analysis fails
            # (usually because the utterance was too short), ask again.
            if emotion is None:
                print('please say again')
                # A neutral reaction could also be played here.
                continue

            # React from here.
            with Leds() as leds:
                if emotion is True:
                    # tts.say('I am glad to hear that.')
                    # tts.say('Really? Wow.')
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                else:
                    # tts.say('I am sorry to hear that.')
                    # tts.say('Oh no. Hang in there.')
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
def record_journal_entry():
    # LED controller used to signal progress while we start up.
    leds = Leds()

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    heading = ""
    file_path = ""
    try:
        paths = gen_paths()
        heading = paths["heading"]
        file_path = paths["file_path"]
    except:
        print(">>> there was an error setting the path...\n"
              ">>> saving dirty entry locally.")
        logging.warning('Unable to get the location. Using default paths.')
        date = str(datetime.now())
        heading = date + "\n\n\n"
        file_path = os.getcwd() + "/je_error_dump_%s.txt" % date

    with Board() as board:
        with open(file_path, 'w') as dump:
            dump.write(heading)
            print('>>> please tell me about your day')
            while True:
                leds.pattern = Pattern.breathe(2000)
                leds.update(Leds.rgb_pattern(Color.RED))
                text = client.recognize(
                    language_code=args.language,
                    hint_phrases=hints,
                    punctuation=True,
                )
                # recognize() returns None when it hits a pause in speech.
                if text is None:
                    continue
                logging.info(' You said: "%s"' % text)
                print("+ %s" % text)
                dump.write(text + " ")

                if 'new line' in text.lower():
                    dump.write('\n\n')
                    logging.info('\n\n')
                elif 'cancel cancel cancel' in text.lower():
                    board.led.state = Led.OFF
                    exit(0)
                elif 'goodbye' in text.lower():
                    break

        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.GREEN))
        logging.info('>>> wrapping and saving journal entry')

        # try:
        #     with open(file_path) as file:
        #         lines = file.readlines()
        #         print("read the lines")
        #     with open(file_path, 'w') as wrapper:
        #         size = 70
        #         for line in lines:
        #             print("+" + line)
        #             if len(line) > size:
        #                 collated = collate(line, size)
        #                 for short in collated:
        #                     wrapper.write(short)
        #                     wrapper.write('\n')
        #             else:
        #                 wrapper.write(line)
        # except:
        #     logging.error('There was an error wrapping %s' % file_path)

        time.sleep(3)
        board.led.state = Led.OFF
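The commented-out wrapping code above references a collate() helper that is not shown; a minimal sketch using the standard textwrap module, where the name and the 70-character width come from the snippet but the implementation itself is an assumption.

# Hypothetical sketch of the collate() helper referenced in the commented-out
# wrapping code; implemented here with the standard textwrap module.
import textwrap

def collate(line, size):
    """Split one long line into chunks no wider than `size` characters."""
    return textwrap.wrap(line, width=size)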