def on_message(self, mosclient, userdata, msg):
    # msg.payload is bytes under Python 3; decode it before splitting,
    # otherwise str(msg.payload) yields "b'...'" and the comparisons fail.
    messageparts = msg.payload.decode("utf-8").split("/")
    if len(messageparts) == 3 and messageparts[1] == "Sound":
        full_command = messageparts[2]
        self.app_log.info("Message received on mqtt: " + full_command)
        if full_command == "time":
            audio = Audio.Audio()
            audio.say_time("12")
        elif full_command == "morning+":
            audio = Audio.Audio()
            audio.say_good_greeting()
        elif full_command == "inspiration":
            audio = Audio.Audio()
            audio.say_inspiration()
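# A minimal sketch of wiring the callback above into paho-mqtt (1.x-style
# constructor). The broker host, the subscription, and the "handler" object
# are assumptions for illustration; the source only shows the callback,
# which expects payloads of the form "<prefix>/Sound/<command>".
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.on_message = handler.on_message  # handler: instance of the class above
client.connect("localhost", 1883)
client.subscribe("#")  # the callback filters on the payload, not the topic
client.loop_forever()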
def __init__(self, window, window_title, video_source=0, customer=None, database_ref=None):
    self.window = window
    self.window.title(window_title)
    self.video_source = video_source
    self.fr = fr.FacialRecognition()
    self.audioFile = None
    self.current_order = None
    self.gg = gg.adios()
    self.ac = Audio_clean.Audio()
    self.database_ref = database_ref
    self.id_list = []
    # Customer Info
    self.customer_detected = customer
    self.customer_label_text = tkinter.StringVar()
    self.customer_label_text.set("customer")
    # Current Order Transcribe
    self.current_order_transcribe_text = tkinter.StringVar()
    self.current_order_transcribe_text.set("customer_current_order")
    # open video source (by default this will try to open the computer webcam)
    self.vid = MyVideoCapture(self.video_source)
    # Create a canvas that can fit the above video source size
    self.canvas = tkinter.Canvas(window, width=self.vid.width, height=self.vid.height)
    self.canvas.pack()
    # Button that lets the user record the order
    self.btn_text = tkinter.StringVar()
    self.btn_text.set("Start Order")
    self.btn_snapshot = tkinter.Button(window, textvariable=self.btn_text, width=50, command=self.record)
    self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)
    # Customer Label
    self.customer_label = tkinter.Label(window, textvariable=self.customer_label_text)
    self.customer_label.pack(anchor=tkinter.CENTER, expand=True)
    # Current Order Label
    self.customer_order_transcribe_label = tkinter.Label(window, textvariable=self.current_order_transcribe_text)
    self.customer_order_transcribe_label.pack(anchor=tkinter.CENTER, expand=True)
    # After it is called once, the update method will be automatically called every delay milliseconds
    self.delay = 15
    self.update()
    self.window.mainloop()
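# A minimal usage sketch. The class name "App" and the window title are
# assumptions for illustration; the source only shows the __init__ above.
# App(tkinter.Tk(), "Order Kiosk", video_source=0)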
def __init__(self):
    self.pywindow = pygame.display.set_mode((900, 600), 0, 32)  # window size
    self.image = Image.Image()              # image assets
    self.botany = Botany.Botany()           # plant class
    self.zombie = Zombie.Zombie()           # zombie class
    self.collision = Collision.Collision()  # collision class
    self.audio = Audio.Audio()              # audio class
    self.Sunny_Collect = []                 # coordinates of collected suns
    self.Sunny_Collect_x = 0
    self.Sunny_Collect_y = 0
    self.Botany_Type = 0                    # plant type
    self.Botany_Animation_Type = 0          # plant animation type
    self.Zombie_Animation_Type = 0          # zombie animation type
    self.Stop_image = self.image.game_pause_nor  # initial pause icon
    self.Zombie_image_list = 0              # zombie image-list index
    self.SunFlower_image_list = 16          # sunflower image-list index (indexed back to front just as a test; harmless)
    self.Peashooter_image_list = 12         # peashooter list index
    self.ZombieAttack_image_list = 0        # zombie-eating-plant index
    self.FootballZombie_image_list = 0        # football (armored) zombie image index
    self.FootballZombieAttack_image_list = 0  # football zombie eating-plant image index
    self.FootballZombieDie_image_list = 0     # football zombie death image index
    self.evillaugh = pygame.mixer.Sound(self.audio.evillaugh)  # zombie rampage sound effect
    self.Zombie_Go_ballistic_audio = 0      # rampage sound-effect control variable
    self.scream = pygame.mixer.Sound(self.audio.scream)        # zombie victory sound effect
    self.buttonclick = pygame.mixer.Sound(self.audio.buttonclick)  # button sound effect
    self.Zdie = pygame.mixer.Sound(self.audio.groan4)          # zombie death sound effect
    self.ZWon = 0                           # controls the post-victory sound effect
def __init__(self, mode, sample_list=None):
    self.cv = CvDriver()
    self.midi = MidiHandler(self._note_on_callback,
                            self._note_off_callback,
                            self._message_callback)
    self.audio = Audio(0, 2, 2, 48000)  # positional args per the project's own Audio class
    self.library = SampleLibrary(sample_list)
    self._mode = mode
    self._recording_note = None  # MIDI note currently being recorded, if any
    self._held_midi_notes = []   # notes currently held down
    self.verbose = False
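# A minimal usage sketch. The class name "Sampler", the mode string, and the
# sample paths are assumptions for illustration; the source only shows __init__.
# sampler = Sampler(mode="play", sample_list=["kick.wav", "snare.wav"])
# sampler.verbose = True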
def init():
    global globs
    os.system("mode 200,60")  # resize the Windows console
    os.system("color 5E")     # set console colors
    globs.dungeon = Dungeon.genDungeon()
    globs.player = Player.Player()
    globs.audio = Audio.Audio()
    globs.event("You have entered pony dungeon in the quest for the sword of awesomeness!")
    globs.event("Press h at any time to view a help menu.")
    globs.event("Press q at any time to quit the game.")
def loadFile(self, file_path=None, num_frames_total=None):
    if file_path:
        self.setPath(file_path=file_path)
    if 'file_path' in self.a_info:
        self.reader = imageio.get_reader(self.a_info['file_path'], 'ffmpeg')
        self.meta_data = self.reader.get_meta_data()
        self.sampling_rate = self.meta_data['fps']
        if num_frames_total is not None:
            self.num_frames_total = num_frames_total
        else:
            self.num_frames_total = self.calcNumFramesTotal()
        try:
            self.audio = Audio(self.a_info['file_path'])
            self.audio.name = self.name
        except Exception:
            # Fall back to a silent track matching the video duration.
            print("Issue loading audio for {}".format(self.a_info['file_path'].encode('utf-8')))
            self.audio = Audio(sampling_rate=16000)
            self.audio.x = np.zeros(int(np.ceil(self.audio.sampling_rate * self.getDuration())))
def playGameInit(data):
    # Synchronize song and game: an obstacle must fall the full distance
    # to the player in exactly data.songGameOffset seconds.
    dist = Player.y + Obstacle.Obstacle.size
    Obstacle.Obstacle.vy = dist / (data.songGameOffset * data.fps)
    # play song
    data.song = Audio(data.song)
    data.song.getBeats()
    audioThread = threading.Thread(target=playSong, args=(data,))
    audioThread.start()
    data.playStartTime = time.time()
    if data.gameMode == "Rhythm Mode":
        rhythmModeInit(data)
    elif data.gameMode == "Groove Mode":
        grooveModeInit(data)
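# Worked example of the sync math above (numbers are illustrative):
# with dist = Player.y + Obstacle.size = 400 px, songGameOffset = 2 s and
# fps = 30, vy = 400 / (2 * 30) ≈ 6.67 px per frame, so an obstacle spawned
# on a beat reaches the player exactly 2 s later, in time with the song.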
def read_settings(self):
    try:
        # Check whether the database file has changed; if so, re-read it.
        filemodifiedtime = time.ctime(os.path.getmtime(self.db.get_file_path()))
        if filemodifiedtime == self.file_modified_time_at_last_read:
            # database has not changed since the last time we read it
            return
        if self.db.get_value("confirmationbeep") == "on" and self.started:
            audio = Audio.Audio()
            audio.play_beep()
        self.file_modified_time_at_last_read = time.ctime(os.path.getmtime(self.db.get_file_path()))
        self.db.load_settings()
        app_log.info("settings reloaded")
    except Exception as e:
        app_log.exception('Exception: %s', e)
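# A minimal polling sketch (the instance name and interval are illustrative):
# calling read_settings() repeatedly is cheap because the mtime check above
# returns early when the database file has not changed.
# while True:
#     settings.read_settings()
#     time.sleep(5)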
from Audio import Audio

# Avoid rebinding the Audio class name to an instance of itself.
audio = Audio()
while True:
    test = input("Say: ")
    audio.cria_audio(test)  # cria_audio: "create audio" (Portuguese)
import pyaudio
import Audio
import socket
import thread
import base64
from Tkinter import *

# globals
audio = Audio.Audio(CHANNELS=1, FORMAT=pyaudio.paInt16, RATE=44100, CHUNK=32, NOFFRAMES=0.2)
sock_server = socket.socket()
sock_server.connect(("127.0.0.1", 23))  # server ip

# get name and pass from gui, register with the server and check if successful
def reg(x):
    def commitreg():
        name = namevar.get()
        password = passwordvar.get()
        datatosend = "reg|" + base64.b64encode(name) + "|" + base64.b64encode(password)
        sock_server.send(datatosend)
        answer = sock_server.recv(1024)
        regGUI.destroy()
        # 'is' compares identity, not string content; use == for the check
        if answer[:7] == "SUCCEED":
            pass  # TODO add after reg
        else:
            reg("prob")
if not robot_connected:
    Robot = move.Robot()
    Robot.connect()
    robot_connected = True
print("Odometry localization")
Robot.get_position(output_odo)

# --------------------------- 4.3 Acoustic Localization ------------------ #
if DEBUG:
    choice = 'n'
else:
    choice = input(ROBOT_ACO)
if choice == 'y':
    print("Acoustic localization")
    out_au = output_au.replace('/', '/' + str(loop_counter) + '_')
    Au = Audio.Audio(input_au, out_au, N_CHANNELS, 3, RATE, CHUNK)
    frames = Au.play_and_record_long()
    Au.save_wav_files(frames)

# --------------------------- 5. Make Robot Move -------------------- #
if DEBUG:
    choice = 'n'
else:
    choice = input(ROBOT_MOVE)
if choice == 'y':
    if not robot_connected:
        Robot = move.Robot()
        Robot.connect()
        robot_connected = True
    if commands == '':
        (times, commands) = move.read_file(Robot, input_mov)
    if loop_counter > max(times.keys()):
        try:
            # p = multiprocessing.Process(target=self.rek.listen, args=(source,))
            # res = pool.apply_async(self.rek.listen, [source])
            self.audio = self.rek.listen(source, timeout=0, phrase_time_limit=5)
        except Exception as e:
            print('something timed out, ' + str(e))
        # To keep the recording, write it out as a WAV file:
        # with open("Recording1.wav", "wb") as f:
        #     f.write(self.audio.get_wav_data())

    def get_adios(self):
        # Transcribe the recorded audio with Google's recognizer.
        # 'en-EN' is not a valid locale tag; 'en-US' is.
        return self.rek.recognize_google(self.audio, language='en-US')

if __name__ == "__main__":
    adddd = Audio.Audio()
    aids = adios()
    aids.record()
    a = adddd.getOrder(aids.get_adios())
    print(a)
def CreateFromVideoAndAudio(video_path=None,
                            audio_path=None,
                            video_object=None,
                            audio_object=None,
                            output_path=None,
                            clip_to_video_length=True,
                            return_vid=True,
                            codec='libx264',
                            bitrate=None,
                            **kwargs):
    assert not (video_path and video_object), \
        "provided both video path and object to CreateFromVideoAndAudio"
    assert not (audio_path and audio_object), \
        "provided both audio path and object to CreateFromVideoAndAudio"
    assert output_path, "Must provide output path for CreateFromVideoAndAudio"
    if video_path:
        video_object = Video(video_path)
    if audio_path:
        audio_object = Audio(path=audio_path)
    output_path = output_path.encode(sys.getfilesystemencoding()).strip()
    make_sure_dir_exists(output_path)

    # Prefer the stereo signal; fall back to mono if none is available.
    audio_sig = audio_object.getStereo()
    audio_sampling_rate = audio_object.getStereoSamplingRate()
    is_stereo = True
    if audio_sig is None:
        is_stereo = False
        audio_sig = audio_object.getSignal()
        audio_sampling_rate = audio_object.sampling_rate
        n_audio_samples_sig = len(audio_sig)
    else:
        n_audio_samples_sig = audio_sig.shape[1]
    print("stereo is {}".format(is_stereo))

    audio_duration = audio_object.getDuration()
    video_duration = video_object.getDuration()
    if clip_to_video_length:
        n_audio_samples_in_vid = int(math.ceil(video_duration * audio_sampling_rate))
        if n_audio_samples_in_vid < n_audio_samples_sig:
            # Audio is longer than the video: trim it.
            if is_stereo:
                audio_sig = audio_sig[:, :int(n_audio_samples_in_vid)]
            else:
                audio_sig = audio_sig[:int(n_audio_samples_in_vid)]
        elif n_audio_samples_in_vid > n_audio_samples_sig:
            # Audio is shorter: pad stereo with silence; tile mono, then trim.
            nreps = math.ceil(truediv(n_audio_samples_in_vid, n_audio_samples_sig))
            if is_stereo:
                audio_sig = np.concatenate(
                    (audio_sig,
                     np.zeros((2, n_audio_samples_in_vid - n_audio_samples_sig))),
                    axis=1)
            else:
                audio_sig = np.tile(audio_sig, int(nreps))
                audio_sig = audio_sig[:int(n_audio_samples_in_vid)]

    if is_stereo:
        reshapex = np.transpose(audio_sig)
        audio_clip = MPYAudioArrayClip(reshapex, fps=audio_sampling_rate)  # from a numeric array
    else:
        # Duplicate the mono channel so the clip has shape (n_samples, 2).
        reshapex = audio_sig.reshape(len(audio_sig), 1)
        reshapex = np.concatenate((reshapex, reshapex), axis=1)
        audio_clip = MPYAudioArrayClip(reshapex, fps=audio_sampling_rate)  # from a numeric array

    video_clip = video_object.getMPYClip()
    video_clip = video_clip.set_audio(audio_clip)
    if bitrate is None:
        MPYWriteVideoFile(video_clip, output_path, codec=codec, write_logfile=False)
    else:
        MPYWriteVideoFile(video_clip, output_path, codec=codec, write_logfile=False, bitrate=bitrate)
    if return_vid:
        return Video(output_path)
    else:
        return True
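# A minimal usage sketch (the file paths are illustrative):
# vid = CreateFromVideoAndAudio(video_path="clip.mp4", audio_path="track.wav",
#                               output_path="out/combined.mp4")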