def say(self, phrase):
    """Speak *phrase* aloud via the pico2wave TTS engine.

    Renders the text to libs/tts/response.wav in the configured
    language, waits for synthesis to finish, then plays the file.
    """
    wav_path = fullpath('libs/tts/response.wav')
    command = ["pico2wave", "--wave", wav_path, '-l', self.language, phrase]
    synth = subprocess.Popen(command)
    synth.wait()
    audio.play(wav_path)
def handle_input(self, keypress):
    """Handle a keypress on the inventory grid.

    Returns True if this scene consumed the keypress.
    """
    handled = False
    if keypress.has_action(key_input.Action.BACK) or keypress.has_action(
            key_input.Action.INVENTORY_CLOSE):
        handled = True
        self.hide()
    if keypress.has_action(key_input.Action.DIRECTION):
        handled = True
        audio.play("click3", replace=True)
        # Clamp the cursor to the grid bounds on every move.
        if keypress.has_action(key_input.Action.UP):
            self.cursorpos[1] = max(self.cursorpos[1] - 1, 0)
        if keypress.has_action(key_input.Action.DOWN):
            self.cursorpos[1] = min(self.cursorpos[1] + 1, self.size[1] - 1)
        if keypress.has_action(key_input.Action.LEFT):
            self.cursorpos[0] = max(self.cursorpos[0] - 1, 0)
        if keypress.has_action(key_input.Action.RIGHT):
            self.cursorpos[0] = min(self.cursorpos[0] + 1, self.size[0] - 1)
    if keypress.has_action(key_input.Action.ACCEPT):
        handled = True
        # Slots are laid out column-major: index = x * height + y.
        itempos = self.cursorpos[0] * self.size[1] + self.cursorpos[1]
        items = self.parent.world.entity_component(
            self.parent.world.tags.player, c.Inventory).contents
        if itempos < len(items):
            # Open the options menu for the selected item.
            self.game.set_focus(
                self.add_child_scene(InventoryOptions, items[itempos]))
    return handled
def run(self):
    """Run the word experiment: play 24 word stimuli, record a spoken
    response after each, and save the recordings at the end."""
    # set up presentation window color, and size
    bgcolor = 'black'
    txtcolor = 'white'
    self.win = visual.Window(fullscr=True, color=bgcolor)
    #self.win = visual.Window((1200, 900), color=bgcolor)  # temporary presentation window setup, exchange for line above when running actual experiment
    self.text = visual.TextStim(self.win, color=txtcolor)
    # Preload the 24 word stimuli (1-based file names).
    words = [
        audio.read(self.stimuli_prefix + str(i + 1) + '.wav')
        for i in range(24)
    ]
    recordings = []
    # Standby marker while the spoken instructions play.
    self.text.text = '||'
    self.text.draw()
    self.win.flip()
    audio.play(audio.read(self.instructions_folder + self.mode + '.wav'),
               wait=True)
    key = event.waitKeys(keyList=['return'])
    for word in words:
        self.text.text = '+'
        self.text.draw()
        self.win.flip()
        audio.play(word, wait=True)
        self.text.text = '-'
        self.text.draw()
        self.win.flip()
        # Record for the stimulus duration (samples at 44.1 kHz) plus 1 s.
        # Bug fix: was `len(word) / 44100` — integer division under
        # Python 2 truncated the duration; the sibling run() in this
        # file already divides by 44100.0.
        recordings += [audio.record((len(word) / 44100.0) + 1, wait=True)]
    for i in range(len(words)):
        audio.write(self.log_prefix + str(i + 1) + '.wav', recordings[i])
    self.win.close()
def time_signal():
    """Announce the current time: play the chime, then speak the time."""
    # TODO: raise an exception when the wav file is missing
    global now
    audio.play(main.AUDIO_URL + "Init/master.wav")
    announcement = str(now.hour % 12) + '時' + str(now.minute) + "分です"
    jtalk.jtalk(announcement)
def test_trial(self, trial):
    """Run one test trial: show the picture, optionally play audio, then
    collect a spoken or keyboard response.

    Returns the trial dict augmented with response fields.
    """
    # present instruction trial
    self.image.image = self.stimuli_folder + trial['Picture']
    self.image.draw()
    self.win.callOnFlip(self.clock.reset)
    self.isi.complete()
    self.win.flip()
    if trial['trialAudio'] != '':
        audio.play(self.instructions[trial['trialAudio']], wait=True)
    if trial['answer_type'] == 'spoken':
        # Spoken answer: record 25 s and save it under the picture name
        # (extension stripped via [:-4]).
        audio.write(self.log_prefix + '_' + trial['Picture'][:-4] + '.wav',
                    audio.record(25, wait=True))
    else:
        # Keyboard answer: escape aborts the experiment, otherwise score
        # accuracy against the expected key.
        keys = event.waitKeys(keyList=['escape'] + trial['keyboard'].split(' '),
                              timeStamped=self.clock)
        trial['keypress'], trial['RT'] = keys[0]
        if trial['keypress'] == 'escape':
            core.quit()
        if trial['keypress'] == trial['key']:
            trial['ACC'] = 1
        else:
            trial['ACC'] = 0
    self.win.callOnFlip(self.isi.start,
                        float(trial['ITI']) / 1000 - self.frame_dur)
    # flip buffer again and start ISI timer
    self.win.flip()
    return trial
def play_file(name, delay=80, reflect=0.5):
    """Play the sound file *name* through a reverb effect.

    The AudioFrame is allocated up front because allocation is not
    allowed inside an interrupt.
    """
    buf = audio.AudioFrame()
    with open(name) as wav:
        source = from_file(wav, buf)
        audio.play(reverb(source, delay, reflect))
def __init__(self, item, **kwargs):
    """Menu scene listing the actions available for an inventory *item*."""
    super().__init__(**kwargs)
    self.world = self.parent.parent.world
    self.item = item
    self.options = []
    self.visible = True
    # Available options depend on the item's components; throw/drop
    # always apply.
    if self.world.has_component(item, c.UseEffect):
        self.options.append("use")
    if self.world.has_component(item, c.Explosive):
        self.options.append("prime")
    self.options.append("throw")
    self.options.append("drop")
    self.size = len(self.options)
    inv_slot_size = constants.TILE_SIZE*constants.MENU_SCALE
    # Place the menu to the right of the inventory slot column.
    self.pos = (40 + inv_slot_size*2 + 12*constants.MENU_SCALE,
                self.game.height/2 - inv_slot_size*3)
    image_bottom = self.pos[1]+inv_slot_size*1.5
    # One animated position per option row, stacked below the item image.
    self.options_pos = [
        DynamicPos((self.pos[0],
                    image_bottom + (10 + i*12)*constants.MENU_SCALE),
                   speed=20)
        for i in range(self.size)
    ]
    self.cursorpos = 0
    audio.play("snap2", replace=True)
    self.widgets = []
    if self.world.has_component(self.item, c.Describable):
        # Show the item's name and description beside the option list.
        text_x = constants.TILE_SIZE * constants.MENU_SCALE * 1.6
        describe = self.world.entity_component(self.item, c.Describable)
        self.widgets.extend((
            wgt.Text(renderer=self.game.renderer,
                     size=10*constants.MENU_SCALE,
                     text=describe.name,
                     offset=(text_x, 0)),
            wgt.Text(renderer=self.game.renderer,
                     size=5*constants.MENU_SCALE,
                     text=describe.desc,
                     offset=(text_x, 15*constants.MENU_SCALE)),
        ))
def play_snd(fname):
    """Play about one second of the raw sound file *fname*, then stop."""
    buf = audio.AudioFrame()
    with open(fname, 'rb') as sndfile:
        # Keep the file open while the generator streams from it.
        audio.play(frames_from_file(sndfile, buf), wait=False)
        sleep(1000)
        audio.stop()
    del buf
def main():
    """Start the voice timer: greet, speak time/weather/news, then start
    the clock thread."""
    if 0:  # debug banner; flip to 1 to enable startup logging
        print ("boice timer active")
        print (datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
        print (os.getcwd())
        print ("\n")
    url = AUDIO_URL + "Init/init_voice.wav"
    audio.play(url)
    # For now, just speak the current time.
    b = datetime.now()
    say_text = str(b.hour%12) + '時' + str(b.minute) + "分です"
    jtalk.jtalk(say_text)
    # Speak the weather.
    wea.say(2)
    # Speak the news.
    new = news_file.NewsClass()
    new.say_news("ねとらぼ",1)
    # threads start
    t=threading.Timer(1,clock_func.clock)
    t.start()
def activeListen(self, use_local=None):
    """Record one utterance from the user and return its transcription.

    Plays a high beep, records until silence/threshold, transcribes with
    the local or cloud STT engine, plays a low beep, and returns the
    recognized text ('' on failure or silence).
    """
    self.stop_itunes()
    if use_local is None:
        use_local = self.use_local
    audio.play(fullpath('static/beep_hi.wav'))
    if self.prompt:
        # Text-prompt mode: read from stdin instead of the microphone.
        self.userinput = raw_input("YOU: ")
    else:
        self.wav, energies = audio.record(
            verbose=True,
            threshold=self.threshold,
            emptyFrames=20
        )
        # NOTE(review): debug artifact — overwrites test.wav on every call.
        with open('test.wav', 'wb') as fp:
            fp.write(self.wav)
        if sum(energies) / len(energies) < self.threshold:
            # Average energy too low: treat as silence.
            self.undo_itunes()
            return ""
        try:
            if use_local:
                self.userinput = self.localSTT.transcribe(self.wav)
            else:
                self.userinput = self.cloudSTT.transcribe(self.wav)
        except:
            print "Something went wrong"
            return ''
    print "YOU:", self.userinput
    audio.play(fullpath('static/beep_lo.wav'))
    self.undo_itunes()
    return self.userinput
def make_softer(audio_file):
    """Play *audio_file* and load its samples as floats.

    NOTE(review): despite the name, the visible portion only plays the
    file and reads its samples — the attenuation step is not shown here;
    the function may be truncated in this excerpt.
    """
    # the following function will play the .wav file
    audio.play(audio_file)
    # storing the audio as a list of floats
    samples = audio.read_wav(audio_file)
def test_tts():
    """Smoke-test BasicTTS by synthesizing and playing a few phrases,
    both via a rendered file and via the direct say() path."""
    engine = BasicTTS()
    phrases = ['早上好!', '即将为您播放周杰伦的《简单爱》。', '早上好!',
               '北京今天晴转多云,微风,空气质量优']
    for phrase in phrases:
        wav = engine.tts(phrase, ext='wav', speaker=6, volume=10)
        play(wav)
        engine.say(phrase, ext='wav', speaker=6, volume=10)
def play_snd(fname, num_times):
    """Play the raw sound file *fname* to completion, *num_times* times."""
    buf = audio.AudioFrame()
    for _ in range(num_times):
        # Reopen the file each pass so playback restarts from the top.
        with open(fname, 'rb') as sndfile:
            audio.play(frames_from_file(sndfile, buf), wait=True)
    del buf
    audio.stop()
def callback(keyword):
    """Play a feedback chime: 'ding' for hotword 1, 'dong' otherwise.

    keyword: hotword index reported by the detector.
    """
    # Hoisted: the resources directory was previously computed twice via
    # duplicated os.path.dirname(os.path.abspath(__file__)) calls.
    resources = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'snowboy/resources')
    ding = os.path.join(resources, 'ding.wav')
    dong = os.path.join(resources, 'dong.wav')
    if keyword == 1:
        play(ding)
    else:
        play(dong)
def show_wave(name, frame, duration=1500):
    """Scroll the wave's *name* while playing *frame* on repeat.

    Polls button A every 100 ms for ~7.5 s and aborts playback (clearing
    the display) if it is pressed.
    """
    display.scroll(name + " wave", wait=False, delay=100)
    audio.play(repeated_frame(frame, duration), wait=False)
    for _ in range(75):
        sleep(100)
        if button_a.is_pressed():
            display.clear()
            audio.stop()
            break
def show_wave(name, frame, duration=1500):
    """Display *name* while the given audio frame plays repeatedly.

    Checks button A every 100 ms for about 7.5 seconds; pressing it
    clears the display and stops the audio early.
    """
    display.scroll(name + " wave", wait=False, delay=100)
    audio.play(repeated_frame(frame, duration), wait=False)
    ticks = 0
    while ticks < 75:
        sleep(100)
        ticks += 1
        if button_a.is_pressed():
            display.clear()
            audio.stop()
            break
def process(self, **args):
    """Tick explosive fuses and resolve explosions.

    On each initiative tick, primed explosives count down; at fuse <= 1
    they start blinking, at fuse <= 0 they gain an Explode component.
    Exploding entities die and damage everything in a square radius
    around their (possibly carried) tile position.
    """
    if self.world.get_system(InitiativeSystem).tick:
        for entity, explosive in self.world.get_component(c.Explosive):
            if explosive.primed:
                explosive.fuse -= 1
                if explosive.fuse <= 1 and self.world.has_component(
                        entity, c.Render):
                    # Warn the player: the bomb is about to go off.
                    self.world.entity_component(entity, c.Render).blinking = True
                if explosive.fuse <= 0:
                    self.world.add_component(entity, c.Explode())
    for entity, explode in self.world.get_component(c.Explode):
        if self.world.has_component(entity, c.Bomber):
            self.world.remove_component(entity, c.Bomber)
        self.world.add_component(entity, c.Dead())
        iterentity = entity
        while self.world.has_component(iterentity, c.Stored):
            # Getting carrier entity
            iterentity = self.world.entity_component(iterentity,
                                                     c.Stored).carrier
        if self.world.has_component(iterentity, c.TilePosition):
            # Damaging things around it
            pos = self.world.entity_component(iterentity, c.TilePosition)
            for x in range(pos.x - explode.radius, pos.x + explode.radius + 1):
                for y in range(pos.y - explode.radius,
                               pos.y + explode.radius + 1):
                    if not self.world.get_system(GridSystem).on_grid((x, y)):
                        continue
                    for target_entity in self.world.get_system(
                            GridSystem).get_entities_at((x, y)):
                        if target_entity == entity:
                            continue
                        # Destructible-but-healthless things die outright;
                        # everything in range takes explosion damage.
                        if self.world.has_component(
                                target_entity, c.Destructible
                        ) and not self.world.has_component(
                                target_entity, c.Health):
                            self.world.add_component(target_entity, c.Dead())
                        self.world.create_entity(
                            c.Damage(target_entity, explode.damage))
            # Shake the camera and play the boom, attenuated by distance.
            dist_to_player = dist(
                pos,
                self.world.entity_component(self.world.tags.player,
                                            c.TilePosition))
            if dist_to_player < 10:
                self.game.camera.shake(40 - dist_to_player * 3)
                audio.play("explosion", 0.6 - dist_to_player * 0.05)
def render_speech(text, file='/tmp/temp.mp3', voice='Joanna', engine='standard', text_type='text'):
    """Synthesize *text* with AWS Polly, save it as mp3, and play it.

    text: plain text, or SSML markup when text_type == 'ssml' (the
        <speak> wrapper is added here).
    file: output mp3 path.
    voice / engine: Polly voice id and engine variant.
    text_type: 'text' or 'ssml', passed straight to Polly.
    """
    if text_type == 'ssml':
        speech_text = "<speak>" + text + "</speak>"
    else:
        speech_text = text
    speech = polly.synthesize_speech(Text=speech_text,
                                     OutputFormat='mp3',
                                     TextType=text_type,
                                     VoiceId=voice,
                                     Engine=engine)
    # Fix: the original called writer.close() inside the with-block,
    # which is redundant — the context manager already closes the file.
    with open(file, 'wb') as writer:
        writer.write(speech['AudioStream'].read())
    audio.play(file)
def update(self, gamemap, stats):
    """Per-frame Mario update: falling/jumping, horizontal movement and
    map scrolling, collision, animation, and blitting.

    gamemap: the level map (ground tests and scrolling).
    stats: game statistics; lives are decremented on falling offscreen.
    """
    # Update and then blit
    if self.rect.top > self.screen_rect.bottom + 80:
        # Has Mario fallen offscreen?
        stats.decrement_lives()
        self.settings.game_active = False
        self.settings.game_status = "Reset"
        return
    if not self.jumping and not gamemap.object_touching_ground(self):
        self.gravity.perform(self)
    if self.jumping:
        if gamemap.object_touching_ground(self):
            audio.play(0)  # jump sound only when leaving the ground
        self.jump()
    elif gamemap.object_touching_ground(self):
        # Bug fix: this branch was previously
        #   `elif self.jumping and gamemap.object_touching_ground(self)`
        # (unreachable right after `if self.jumping`) followed by
        #   `elif self.gamemap.object_touching_ground:`
        # — a truthy method *reference* that was never called, so it
        # fired whenever Mario wasn't jumping, even mid-air.
        # Landed and not jumping: reset jump state.
        self.jump_start = False
        self.jump_speed = 0
    if self.moving_left:
        if self.dir == 1 and self.speed != 0:
            self.turn()
        elif self.dir == 0:
            self.dir = -1
        self.accelerate()
        if self.rect.x > 0:
            self.rect.x += self.dir * self.speed
    elif self.moving_right:
        if self.dir == -1 and self.speed != 0:
            self.turn()
        elif self.dir == 0:
            self.dir = 1
        self.accelerate()
        # Past the screen midpoint the map scrolls instead of Mario moving.
        if self.rect.centerx >= self.screen_rect.width / 2:
            self.gamemap.scroll(self.speed)
        else:
            self.rect.x += self.dir * self.speed
    else:
        self.decelerate()
        if self.rect.x > 0 and self.rect.right < self.screen_rect.width:
            self.rect.x += self.dir * self.speed
        if self.rect.centerx >= self.screen_rect.width / 2:
            self.gamemap.scroll(self.speed)
    self.rect.y -= self.jump_speed
    self.gamemap.collide(self)
    self.animate()
    self.blitme()
def mutter():
    """Occasionally play a random ambient remark matching the hour.

    A 1-100 roll is taken each call; the remark plays only when the roll
    is divisible by a per-period divisor (smaller divisor = chattier).
    Deduplicated: the original repeated the same print/lookup/play body
    in four near-identical branches.
    """
    global now
    # Random integer in 1..100.
    rand_int = random.randint(1, 100)
    # Pick (log label, audio subfolder, chattiness divisor) per bucket.
    if 1 <= now.hour and now.hour <= 4:
        # middle of the night
        period, folder, divisor = "真夜中", "other/midnight", 3
    elif 18 <= now.hour:
        # evening through night
        period, folder, divisor = "夕方", "other/evening", 5
    elif 5 <= now.hour and now.hour <= 9:
        # morning
        period, folder, divisor = "朝", "other/morning", 5
    else:
        # daytime
        period, folder, divisor = "昼", "other/daytime", 6
    if rand_int % divisor == 0:
        print(period)
        url = filer.GetFileName(main.AUDIO_URL + folder)
        audio.play(url)
def main():
    """Look up each word given on the command line (or prompted for) and
    play the wav clip recorded for its location in the text."""
    init()
    if len(argv) > 1:
        words = argv[1:]
    else:
        words = raw_input('gogogo: ').split()
    for word in words:
        location = findWord(word)
        if -1 in location:
            # Word not found: report, shut audio down, and bail out.
            print '%s is not in the text' % word
            audio.terminate()
            exit()
        # Clip filename is built from the two-part location tuple
        # (presumably paragraph and word index — confirm against findWord).
        fn = '%d.%03d.wav' % location
        audio.play(path.join(recordDir, fn))
    audio.terminate()
def play():
    """Receive 32-byte audio frames over radio channel 90 and play them.

    The generator yields a silent frame whenever nothing was received;
    pressing A+B together ends the stream.
    """
    display.clear()
    radio.on()
    radio.config(channel=90, queue=12)
    # Fix: removed the unused local `count = -1` left over in the original.

    def gen():
        recvd = audio.AudioFrame()
        empty = audio.AudioFrame()
        while True:
            # A full radio payload is exactly one 32-byte audio frame.
            if radio.receive_bytes_into(recvd) == 32:
                yield recvd
            else:
                yield empty
            if button_a.is_pressed() and button_b.is_pressed():
                return

    audio.play(gen())
def message_trial(self, trial):
    """Show a title screen, play the instruction audio, and wait for a key.

    Returns the trial dict with 'keypress' and 'RT' filled in; pressing
    escape quits the experiment.
    """
    # present instruction trial
    self.title.text = trial['title']
    self.title.draw()
    self.win.callOnFlip(self.clock.reset)
    self.win.flip()
    audio.play(audio.read(self.instructions_folder + 'ravens.wav'), wait=True)
    keys = event.waitKeys(keyList=['escape'] + trial['keyboard'].split(','),
                          timeStamped=self.clock)
    trial['keypress'], trial['RT'] = keys[0]
    if trial['keypress'] == 'escape':
        core.quit()
    self.expclock.reset()
    return trial
def _connect_wifi(self):
    """Connect to WiFi with pixel progress indicators and spoken feedback.

    On failure, plays the failure sound and returns to the launcher when
    it finishes.
    """
    dp.drawPixel(0, 0, 0x0044BB)
    dp.flush()
    if not wifi.status():
        audio.play('/cache/system/wifi_connecting.mp3')
        wifi.connect()
        # Second blue pixel: connection attempt in progress.
        dp.drawPixel(1, 0, 0x0044BB)
        dp.flush()
        wifi.wait()
        if not wifi.status():
            # Still offline: red bar, then hand control to the launcher
            # once the failure announcement finishes.
            dp.drawLine(0, 0, 1, 0, 0xFF0000)
            dp.flush()
            audio.play('/cache/system/wifi_failed.mp3',
                       on_finished=system.launcher)
def process(self, **args):
    """Resolve Bump intents: move into empty tiles or attack blockers.

    Entities with MyTurn spend their turn moving or attacking; all Bump
    components are cleared at the end of the pass.
    """
    for entity, (pos, bump, _) in self.world.get_components(
            c.TilePosition, c.Bump, c.MyTurn):
        bumppos = (pos.x + bump.x, pos.y + bump.y)
        if not self.world.get_system(GridSystem).on_grid(bumppos):
            continue
        targetent = self.world.get_system(GridSystem).get_blocker_at(bumppos)
        if targetent == 0:
            # Nothing in the way: just move and consume the turn.
            self.world.get_system(GridSystem).move_entity(entity, bumppos)
            self.world.remove_component(entity, c.MyTurn)
        else:
            if self.world.has_component(
                    targetent, c.Health) and self.world.has_component(
                        entity, c.Attack):
                if entity == self.world.tags.player or targetent == self.world.tags.player:
                    # The player must be involved for damage to be inflicted in a bump.
                    # This is so that AI don't attack each other when trying to move.
                    damage = self.world.entity_component(entity,
                                                         c.Attack).damage
                    self.world.create_entity(
                        c.Damage(targetent, damage,
                                 burn=self.world.has_component(
                                     entity, c.FireElement),
                                 freeze=self.world.has_component(
                                     entity, c.IceElement)))
                    if entity == self.world.tags.player:
                        # Feedback only for the player's own hits.
                        self.game.camera.shake(5)
                        audio.play("punch", 0.5)
                    if self.world.has_component(entity, c.Bomber):
                        # Bombers detonate when they attack.
                        self.world.add_component(entity, c.Explode())
                        self.world.remove_component(entity, c.Bomber)
                    self.world.remove_component(entity, c.MyTurn)
    for entity, _ in self.world.get_component(c.Bump):
        self.world.remove_component(entity, c.Bump)
def process(self, **args):
    """Apply queued Damage messages to their targets.

    Handles health loss and death, kill counting, burn/freeze side
    effects, item destruction, explosive priming, and fly-wizard state
    flips. Each Damage message entity is deleted once processed.
    """
    for message_entity, damage in self.world.get_component(c.Damage):
        if self.world.has_component(damage.target, c.Health):
            targethealth = self.world.entity_component(damage.target, c.Health)
            targethealth.current -= damage.amount
            if damage.target == self.world.tags.player:
                # Screen shake scales with how hard the player was hit.
                self.game.camera.shake(5 + damage.amount * 2)
                audio.play("ow", 0.4)
            if targethealth.current <= 0:
                self.world.add_component(damage.target, c.Dead())
                if damage.target != self.world.tags.player:
                    self.game.parent.kills += 1
                    if self.world.has_component(self.world.tags.player,
                                                c.SpeedOnKill):
                        # Kill bonus: speed the player up.
                        self.game.speed_entity(self.world.tags.player, 1)
            # Elemental side effects don't apply to same-element targets.
            if damage.burn and not self.world.has_component(
                    damage.target, c.FireElement):
                self.world.add_component(damage.target, c.Burning(5))
            if damage.freeze and not self.world.has_component(
                    damage.target, c.IceElement):
                self.world.add_component(damage.target, c.Frozen())
        if self.world.has_component(
                damage.target, c.Item) and not self.world.has_component(
                    damage.target, c.Explosive):
            # Non-explosive items are destroyed by any damage.
            self.world.add_component(damage.target, c.Dead())
        if self.world.has_component(damage.target, c.Explosive):
            # Damaging an explosive lights its fuse.
            self.world.entity_component(damage.target,
                                        c.Explosive).primed = True
        if self.world.has_component(damage.target, c.AIFlyWizard):
            # Toggle the fly wizard between angry and normal when hit.
            if self.world.entity_component(damage.target,
                                           c.AIFlyWizard).state == "angry":
                next_state = "normal"
            else:
                next_state = "angry"
            self.world.get_system(AIFlyWizardSystem).change_state(
                damage.target, next_state)
        self.world.delete_entity(message_entity)
def handle_input(self, keypress):
    """Handle keys on the item-options menu (use/prime/throw/drop).

    Returns True if the keypress was consumed.
    """
    handled = False
    if keypress.has_action(key_input.Action.DOWN):
        handled = True
        self.cursorpos = min(self.cursorpos + 1, self.size-1)
        audio.play("click2", replace=True)
    if keypress.has_action(key_input.Action.UP):
        handled = True
        self.cursorpos = max(self.cursorpos - 1, 0)
        audio.play("click2", replace=True)
    if keypress.has_action(key_input.Action.BACK):
        handled = True
        self.remove_scene()
        audio.play("drop", replace=True)
    if keypress.has_action(key_input.Action.ACCEPT):
        handled = True
        audio.play("snap1", replace=True)
        selection = self.options[self.cursorpos]
        if selection == "use":
            self.remove_scene()
            use = self.world.entity_component(self.item, c.UseEffect)
            # Effects are (method-name, *args) tuples dispatched on the
            # grandparent scene.
            if self.world.has_component(self.world.tags.player,
                                        c.WeakPotions):
                for effect in use.effects:  # Weak potions for mecha
                    getattr(self.parent.parent, effect[0])(
                        self.world.tags.player, int(effect[1]/2), *effect[2:])
            else:
                for effect in use.effects:  # Normal effect
                    getattr(self.parent.parent, effect[0])(
                        self.world.tags.player, *effect[1:])
            if self.world.entity_component(self.item, c.Item).consumable:
                # Consumables vanish after use.
                self.world.entity_component(
                    self.world.tags.player,
                    c.Inventory).contents.remove(self.item)
                self.world.delete_entity(self.item)
        if selection == "prime":
            self.world.entity_component(self.item, c.Explosive).primed = True
        if selection == "throw":
            # Hand control to the throw-targeting scene.
            self.visible = False
            throw_scene = self.parent.add_child_scene(ThrowOptions, self.item)
            self.game.set_focus(throw_scene)
            self.remove_scene()
        if selection == "drop":
            # Drop the item at the player's feet.
            self.remove_scene()
            self.world.entity_component(
                self.world.tags.player,
                c.Inventory).contents.remove(self.item)
            self.world.remove_component(self.item, c.Stored)
            pos = self.world.entity_component(self.world.tags.player,
                                              c.TilePosition)
            self.world.add_component(self.item, c.TilePosition(pos.x, pos.y))
    if keypress.has_action(key_input.Action.INVENTORY_CLOSE):
        self.remove_scene()
    return handled
def countdown():
    """Draw a Minecraft clock face and run a 30-second audio countdown.

    Moves the player first so the face is visible.
    """
    import time
    import audio
    import mc_clock
    pos = mc.player.getTilePos()
    # Shift the player 50 blocks along z — NOTE(review): confirm z (not
    # y) is the intended axis here.
    mc.player.setTilePos(pos.x, pos.y, pos.z+50)
    mc_clock.init(mc)
    mc_clock.draw_face(pos)
    time.sleep(3)
    audio.play("mc_keynote/countdown.wav")
    for secs in range(30):
        # Tick: light the second marker in gold, wait, then clear it.
        mc_clock.draw_secs(secs, block.GOLD_BLOCK.id)
        time.sleep(1)
        mc_clock.draw_secs(secs, block.AIR.id)
    #audio.play("bang.wav")
    mc_clock.clear_face()
def run(self):
    """Run the experiment: read the trial list, play each stimulus,
    record a response after each, and save recordings at the end.

    Escape between trials aborts the loop; whatever was recorded so far
    is still written out.
    """
    # set up presentation window color, and size
    bgcolor = 'black'
    txtcolor = 'white'
    self.win = visual.Window(fullscr=True, color=bgcolor)
    #self.win = visual.Window((1200, 900), color=bgcolor)  # temporary presentation window setup, exchange for line above when running actual experiment
    self.text = visual.TextStim(self.win, color=txtcolor)
    with open(self.trials_fname, 'rU') as trial_file:
        # read trial structure
        trials = list(csv.DictReader(trial_file, delimiter='\t'))
    # preload stimuli
    stimuli = [audio.read(self.stimuli_folder + trial['stimulus'])
               for trial in trials]
    recordings = []
    # Standby marker while the spoken instructions play.
    self.text.text = '||'
    self.text.draw()
    self.win.flip()
    audio.play(audio.read(self.instructions_folder + self.mode + '.wav'),
               wait=True)
    key = event.waitKeys(keyList=['return'])
    self.win.flip()
    for stimulus in stimuli:
        self.text.text = '+'
        self.text.draw()
        self.win.flip()
        audio.play(stimulus, wait=True)
        self.text.text = '-'
        self.text.draw()
        self.win.flip()
        # Record for the stimulus duration (samples at 44.1 kHz) plus 1 s.
        recordings += [audio.record((len(stimulus) / 44100.0) + 1,
                                    wait=True)]
        keys = event.getKeys(['escape'])
        if 'escape' in keys:
            break
    for i in range(len(recordings)):
        audio.write(self.log_prefix + trials[i]['stimulus'], recordings[i])
    self.win.close()
def present_trial(self, trial):
    """Present one trial: draw an image or text, play optional audio,
    then either collect a keypress or wait out the presentation time.

    Returns the trial dict with response fields filled in where a
    keyboard response was collected.
    """
    if trial['stim1'] != '':
        self.image.image = self.stimuli_folder + trial['stim1'] + trial[
            'stimFormat']
        self.image.draw()
    elif trial['text'] != '':
        self.text.text = trial['text']
        self.text.draw()
    self.win.callOnFlip(self.clock.reset)
    self.isi.complete()
    self.win.flip()
    if trial['trialAudio'] != '':
        #self.instructions[trial['trialAudio']].play()
        audio.play(self.instructions[trial['trialAudio']], wait=False)
    if trial['keyboard'] != '':
        keys = event.waitKeys(keyList=['escape'] + trial['keyboard'].split(' '),
                              timeStamped=self.clock)
        trial['keypress'], trial['RT'] = keys[0]
        if trial['keypress'] == 'escape':
            core.quit()
        if trial['keypress'] == trial['key']:
            trial['ACC'] = 1
        else:
            trial['ACC'] = 0
        if trial['trialAudio'] != '':
            # A keypress cuts off any still-playing trial audio.
            #self.instructions[trial['trialAudio']].stop()
            audio.stop()
    else:
        # No response required: hold for the fixed presentation time,
        # or failing that for the audio duration (samples at 44.1 kHz).
        if trial['presTime'] != '':
            core.wait(float(trial['presTime']) / 1000 - self.frame_dur)
        elif trial['trialAudio'] != '':
            core.wait(
                len(self.instructions[trial['trialAudio']]) / 44100.0)
            #core.wait(self.instructions[trial['trialAudio']].getDuration())
    self.win.callOnFlip(self.isi.start,
                        float(trial['ITI']) / 1000 - self.frame_dur)
    # flip buffer again and start ISI timer
    self.win.flip()
    return trial
def h_play(self, key):
    """Plays currently selected track

    Also applies filters, if any are selected.
    """
    app = self.parentApp
    if not app.current_track:
        app.notify('No track selected')
        return
    # Un-highlight the queued tracks in the track-list widget.
    for filename in app.current_track_nos:
        track_no = app._filenames_list.index(filename) + 1
        try:
            self.get_widget('track-list').values.remove(track_no)
        except ValueError:
            # Already absent from the visible list; nothing to do.
            pass
    self.get_widget('track-list').value = []
    app.notify('Applying filters...')
    track = filters.apply(app.current_track, self.parentApp.filters)
    # Trim to the configured maximum length before playback.
    track = track[:app._track_length]
    # Presumably converts track length (ms) to slider units (s) —
    # confirm against the position widget's expectations.
    self.get_widget('position').entry_widget.out_of = len(track) / 1000
    self.get_widget('position').display()
    audio.play(track, notifier=self.update_slider)
    app.notify('Playing!')
    self.set_status('Playing')
def post_request(url, data):
    """POST *data* to *url* and play the quack matching the reply."""
    print("Request sent to", url, " with data : ", data)
    response = requests.post(url=url, data=data)
    if response.status_code != 200:
        print("Something went terribly wrong.")
        return
    print("Server answers : ", response.text)
    # Pick the sound according to the server's verdict.
    if "yes" in response.text:
        play('sons/coin_coin_fort.wav')
    elif "Non existant" in response.text:
        play('sons/coin_coin_inexistant_fort.wav')
    else:
        play('sons/coin_coin_faux_fort.wav')
def detected_callback():
    """Hotword callback: chime, record a command, and dispatch it.

    Handlers are tried in order: rapiro robot actions, then Hass (home
    assistant) commands, then the Tuling chatbot as fallback. The
    detector is restarted when handling completes.
    """
    global detector
    global mic
    global config
    global stt
    global tuling
    global rap
    # Stop detection while we handle this utterance.
    detector.terminate()
    print("hotword detected")
    play('./resources/ding.wav')
    # os.system("aplay resources/ding.wav")
    # os.system("arecord -d 4 -r 16000 -c 1 -t wav -f S16_LE record.wav")
    mic.activeListenToAllOptions()
    res = stt.recognize()
    if (res['err_no']):
        # Speech recognition failed; log the error response.
        print(res)
    else:
        text = res['result'][0]
        print(text)
        action = rap.isValid(text)
        if (action):
            print('handled by rapiro')
            rap.do(action)
        elif (Hass.isValid(text)):
            print('handled by hass')
            res = Hass.handle(text, config)
            stt.synthesis('已执行' + res)
            #stt.say()
            play('./speak.mp3')
        else:
            print('no handler matched , use default tuling')
            res = tuling.answer(text)
            stt.synthesis(res)
            #stt.say()
            play('./speak.mp3')
    # Resume hotword detection.
    detector.start(detected_callback=detected_callback,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
elif pattern[x + y * 4] == 3: if counter % 2 == 0: color = settings['color_on'] else: color = settings['color_off'] display.drawPixel(x, y, color) display.flush() return interval # initiate WiFi connection for the Text-to-Speach interface if not wifi.status(): print("Connecting to WiFi, please wait.") audio.play('/cache/system/wifi_connecting.mp3') wifi.connect() if not wifi.wait(): audio.play('/cache/system/wifi_failed.mp3') print("Unable to connect to the WiFi network.") else: connected = True audio.play('/cache/system/wifi_connected.mp3') print("Connected to the WiFi network.") sndmixer.begin(1) synth = sndmixer.synth() sndmixer.waveform(synth, 0) sndmixer.volume(synth, 0) # Adding callbacks
def beh(obst, level, user, frame):
    """Checkpoint behaviour: when the user touches this obstacle, play
    the checkpoint sound and move the level spawn here.

    NOTE(review): `x` and `y` are not defined in this function — they
    must come from an enclosing/module scope; confirm they hold this
    checkpoint's coordinates.
    """
    if obst.get_rect().colliderect(user.get_rect()):
        if level.spawn != [x, y]:
            # Only announce the first touch of a new checkpoint.
            audio.play(audio.sfx_cp)
        level.spawn = [x, y]
def play_audio():
    """ play audio from the pi connected to the server """
    # NOTE(review): "record_one" looks like a clip id known to the audio
    # backend rather than a file path — confirm.
    play("record_one")
def play_data(data):
    """Sonify *data*: weight each value, offset the base notes, and play
    the result as a short note sequence with a constant C baseline."""
    weighted = map(operator.mul, data, WEIGHTS)
    notes = map(operator.add, BASE_NOTES, weighted)
    # Python 2: map() returns a list, so list concatenation works here.
    notes = [audio.NOTES['C']] + notes  # So we always have a baseline
    print notes
    audio.play(notes, duration=.2)
def play(self):
    """Play this object's raw sample buffer."""
    audio.play(self.samples)
def say(self, phrase):
    """Speak *phrase* using Google TTS in the configured language."""
    mp3_path = fullpath("libs/tts/response.mp3")
    gtts.gTTS(text=phrase, lang=self.language).save(mp3_path)
    audio.play(mp3_path)
def play_file(name, pin=None, return_pin=None):
    """Stream the sound file *name* to the audio system.

    The AudioFrame is allocated up front, as allocation cannot happen
    inside an interrupt.
    """
    buf = audio.AudioFrame()
    with open(name) as wav:
        audio.play(audio_generator(wav, buf), pin=pin,
                   return_pin=return_pin)
def ring(self):
    """Sound the alarm wav."""
    audio.play(fullpath('static/alarm.wav'))
def play(evt):
    """Play the recorded clip for the current selection.

    NOTE(review): `p` and `w` come from an enclosing scope — presumably
    the current paragraph index and a 1-based word index; confirm.
    """
    fn = os.path.join(recordDir, "%d.%03d.wav" % (p, w - 1))
    audio.play(fn)
def startTransmitting():
    """Start streaming the continuous transmitter signal at rate Fs."""
    audio.play(ContinuousTransmitter(), Fs)
def play_audio():
    """Play the clip named "record_one" through the audio backend."""
    play("record_one")
def xmit(self, samples):
    """Transmit *samples* through the audio output at self.samplerate.

    Bug fix: the original played the outer-scope name `samples_tx`
    instead of the `samples` parameter, so the argument was ignored.
    """
    audio.play(np.array(samples), self.samplerate)
# --- Waveform demo -------------------------------------------------------
# NOTE(review): `frame` and `triangle` are defined earlier in the file
# (outside this excerpt); the arithmetic assumes 32-sample AudioFrames —
# confirm.

# Build a square wave: first half low (8), second half high (248).
square = audio.AudioFrame()
HALF = len(square)//2
for i in range(HALF):
    square[i] = 8
    square[i+HALF] = 248
show_wave("Square", square)
sleep(1000)

# Reuse `frame` as a descending sawtooth (252, 244, ..., 4).
for i in range(len(frame)):
    frame[i] = 252-i*8
show_wave("Sawtooth", frame)
del frame

#Generate a waveform that goes from triangle to square wave, reasonably smoothly.
frames = [ None ] * 32
for i in range(32):
    frames[i] = frame = audio.AudioFrame()
    for j in range(len(triangle)):
        # Blend weight shifts from all-triangle (i=0) to all-square (i=31);
        # >>5 renormalizes the 32x weighting.
        frame[j] = (triangle[j]*(32-i) + square[j]*i)>>5

def repeated_frames(frames, count):
    """Yield each frame in *frames*, *count* times in a row."""
    for frame in frames:
        for i in range(count):
            yield frame

display.scroll("Ascending wave", wait=False)
audio.play(repeated_frames(frames, 60))
# micro:bit demo: button A prints successive multiples of 13,
# button B shows a happy face and plays a happy sound.
from microbit import *
import audio

print("Hello!")

multiple = 1  # initialise the counter

while True:
    if button_a.was_pressed():
        # get the next multiple of the thirteen
        result = multiple * 13  # Calculate the result
        print("13 times " + str(multiple) + " is " + str(result))  # print the multiplication
        multiple = multiple + 1  # increment the multiple
    if button_b.was_pressed():
        # Say Hello
        display.show(Image.HAPPY)
        audio.play(Sound.HAPPY)
        display.clear()
    sleep(10)  # a 10 millisecond delay
def audioOut(output, loopback_Fs, loopback_Fc, upsample_factor, mask_noise):
    """Post-process *output* for audio loopback and play it.

    The signal is shaped by processOutput (sample rate, carrier,
    upsampling, optional mask noise) before being sent to the speakers.
    """
    processed = processOutput(output, loopback_Fs, loopback_Fc,
                              upsample_factor, mask_noise)
    audio.play(processed, loopback_Fs)