def main(): """ Initializes PyGame and pgs4a and starts the game loop. """ # Variables. screen = None # Init PyGame. pygame.init() pygame.font.init() mixer.init() if android: # Init pgs4a and map Android's back button to PyGame's escape key. android.init() android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE) # Get the device's screen size and request a surface of that size. screen_size = (pygame.display.Info().current_w, pygame.display.Info().current_h) screen = pygame.display.set_mode(screen_size) else: screen = pygame.display.set_mode((1024, 768), pygame.FULLSCREEN | pygame.HWSURFACE) pygame.display.set_caption("Super HUGS Revolution 98") pygame.mouse.set_visible(False) # Create the game object and start the main game loop. game = Game(screen) game.game_loop() # Cleanly terminate PyGame. database.scores.close() pygame.quit()
def main():
    """Initialize the game, show the start screen, then run rounds forever.

    Each screen method (startScreen/play/gameOver) returns False when the
    player wants to quit, in which case the whole process exits.
    """
    pygame.init()
    mixer.init()
    screen = pygame.display.set_mode([480, 700])
    if android:
        # Map the hardware back button to Escape and enable tilt input.
        android.init()
        android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
        android.accelerometer_enable(True)
    game = Game(screen, "images")

    def _abort_if_quit(ret):
        # Previously this quit-and-exit sequence was copy-pasted three times.
        # The loose `== False` comparison is kept deliberately (0 also quits).
        if ret == False:
            pygame.quit()
            sys.exit(1)

    _abort_if_quit(game.startScreen())
    while 1:
        game.init()
        # A round grants three lives; pre-create one ball per life.
        for life in range(3):
            game.balls.append(game.createBall())
        _abort_if_quit(game.play())
        _abort_if_quit(game.gameOver())
def __init__(self, display_w, display_h):
    """Initialize PyGame subsystems and core engine state.

    display_w, display_h: window size in pixels.
    """
    pygame.init()
    mixer.init()
    font.init()
    # check if the mixer was successfully initialized
    if mixer.get_init() is None:
        print("Failed to initialize the audio mixer module.")
    if font.get_init() is None:
        print("Failed to initialize the font module.")
    self.fps = 120  # target frames per second
    self.world = None  # currently active world (none loaded yet)
    self.gui = Gui(self)
    # Create the screen display with 32 bits per pixel and the HWSURFACE flag.
    self.display = pygame.display.set_mode((display_w, display_h), pygame.HWSURFACE, 32)
    self.delta_time = 0.0  # seconds elapsed during the previous frame
    self.debug = False
    self.paused = False
    self.print_fps = False
    self.worlds = list()  # all loaded worlds
    self.game = None
def pronounce(self, what):
    """Fetch Google-TTS audio for the source or target text and play it.

    what: 'src' speaks the input box, 'target' speaks the output box.
    Long text is split into <=100-char chunks; the returned MP3 fragments
    are concatenated into one spool file and played as a whole.
    """
    ie = 'UTF-8'
    if what == 'src':
        tl = self.language_code[self.src_lang.get().encode('utf8')]
        whole_text = self.input_box.get('1.0', END).strip()
    elif what == 'target':
        tl = self.language_code[self.target_lang.get().encode('utf8')]
        whole_text = self.output_box.get('1.0', END).strip()
    # Split the original text into small chunks and make separate requests
    # to Google.  All the returned mp3 files will be concatenated and played.
    total_length = len(whole_text)
    max_chunk_length = 100
    # total number of chunks (integer ceiling division, Python 2 semantics)
    total = total_length / max_chunk_length + 1 if total_length % max_chunk_length else total_length / max_chunk_length
    # BUG FIX: the responses are binary MP3 data, so the spool file must be
    # opened in *binary* mode; text mode ('r+') corrupts the audio on
    # platforms that translate line endings.
    temp_mp3 = tempfile.TemporaryFile(mode='r+b')
    for i in xrange(total):
        if i == total - 1:
            q = whole_text.encode('utf8')
        else:
            q = whole_text[:max_chunk_length].encode('utf8')
            whole_text = whole_text[max_chunk_length:]
        data = {'ie': ie, 'tl': tl, 'q': q, 'total': total, 'idx': str(i)}
        data = urllib.urlencode(data)
        url = 'https://translate.google.cn/translate_tts'
        header = {'User-Agent': 'Mozilla/5.0'}
        req = urllib2.Request(url, data, header)
        temp_mp3.write(urllib2.urlopen(req).read())
    temp_mp3.seek(0)
    mixer.init()
    mixer.music.load(temp_mp3)
    mixer.music.play()
def initPygame(rate=22050, bits=16, stereo=True, buffer=1024):
    """If you need a specific format for sounds you need to run this init
    function. Run this *before creating your visual.Window*.

    The format cannot be changed once initialised or once a Window has been
    created.

    If a Sound object is created before this function is run it will be
    executed with default format (signed 16bit stereo at 22KHz).

    For more details see pygame help page for the mixer.
    """
    global Sound, audioDriver
    Sound = SoundPygame
    audioDriver = 'n/a'
    if stereo == True:
        stereoChans = 2
    else:
        # NOTE(review): pygame uses 1 for mono; 0 channels looks suspicious
        # -- confirm against the pygame.mixer.init signature.
        stereoChans = 0
    if bits == 16:
        bits = -16  # for pygame bits are signed for 16bit, signified by the minus
    mixer.init(rate, bits, stereoChans, buffer)  # defaults: 22050Hz, 16bit, stereo
    sndarray.use_arraytype("numpy")
    setRate, setBits, setStereo = mixer.get_init()
    if setRate != rate:
        # BUG FIX: corrected the 'poossible' typo in the warning message.
        logging.warn('Requested sound sample rate was not possible')
    if setBits != bits:
        logging.warn('Requested sound depth (bits) was not possible')
    if setStereo != 2 and stereo == True:
        logging.warn('Requested stereo setting was not possible')
def test_get_init__returns_exact_values_used_for_init(self): return # fix in 1.9 - I think it's a SDL_mixer bug. # TODO: When this bug is fixed, testing through every combination # will be too slow so adjust as necessary, at the moment it # breaks the loop after first failure configs = [] for f in FREQUENCIES: for s in SIZES: for c in CHANNELS: configs.append ((f,s,c)) print (configs) for init_conf in configs: print (init_conf) f,s,c = init_conf if (f,s) == (22050,16):continue mixer.init(f,s,c) mixer_conf = mixer.get_init() import time time.sleep(0.1) mixer.quit() time.sleep(0.1) if init_conf != mixer_conf: continue self.assertEquals(init_conf, mixer_conf)
def todo_test_pre_init__keyword_args(self):
    """Verify mixer.pre_init accepts keyword arguments and that a following
    mixer.init() honors them.  Sample size is compared by magnitude because
    not every signedness is supported on every system."""
    # Fails on Mac; probably older SDL_mixer
    ## Probably don't need to be so exhaustive. Besides being slow the repeated
    ## init/quit calls may be causing problems on the Mac.
    ## configs = ( {'frequency' : f, 'size' : s, 'channels': c }
    ##             for f in FREQUENCIES
    ##             for s in SIZES
    ##             for c in CHANNELS )
    configs = [{'frequency': 44100, 'size': 16, 'channels': 1}]
    for kw_conf in configs:
        mixer.pre_init(**kw_conf)
        mixer.init()
        mixer_conf = mixer.get_init()
        self.assertEquals(
            # Not all "sizes" are supported on all systems.
            (mixer_conf[0], abs(mixer_conf[1]), mixer_conf[2]),
            (kw_conf['frequency'], abs(kw_conf['size']), kw_conf['channels'])
        )
        mixer.quit()
def playBell(name, t): mixer.init() print name sound = mixer.Sound(name) sound.play() time.sleep(t) mixer.quit()
def test_get_raw_more(self):
    """ test the array interface a bit better. """
    import platform
    IS_PYPY = 'PyPy' == platform.python_implementation()
    # The CPython C-API calls used below do not exist on PyPy.
    if IS_PYPY:
        return
    from ctypes import pythonapi, c_void_p, py_object
    try:
        # Python 3 spelling of the C-API constructor...
        Bytes_FromString = pythonapi.PyBytes_FromString
    except:
        # ...falling back to the Python 2 name.
        Bytes_FromString = pythonapi.PyString_FromString
    Bytes_FromString.restype = c_void_p
    Bytes_FromString.argtypes = [py_object]
    mixer.init()
    try:
        samples = as_bytes('abcdefgh')  # keep byte size a multiple of 4
        snd = mixer.Sound(buffer=samples)
        raw = snd.get_raw()
        self.assertTrue(isinstance(raw, bytes_))
        # get_raw() must hand back a *copy* of the buffer, not the original...
        self.assertNotEqual(snd._samples_address, Bytes_FromString(samples))
        # ...with identical contents.
        self.assertEqual(raw, samples)
    finally:
        mixer.quit()
def __init__(self):
    """Configure logging, bring up the audio mixer, and load the animal playlist."""
    mixer.init()
    mixer.set_num_channels(8)  # allow up to eight simultaneous effects
    logging.basicConfig(filename="SoundPlayer.log", level=logging.DEBUG)
    self.audio_root = "../../sound_effects/animals"
    self.playlist = read_playlist(self.audio_root)
    # Cache of loaded Sound objects.
    self.sounds = {}
def play(self, song_thread):
    """Play the generated MIDI file output<id>.mid to completion, then join
    *song_thread*.  Ctrl-C fades the music out and exits."""
    music_file = "output" + str(self.id) + ".mid"
    clock = time.Clock()
    freq = 44100     # audio CD quality
    bitsize = -16    # unsigned 16 bit
    channels = 2     # 1 is mono, 2 is stereo
    buffer = 1024    # number of samples
    mixer.init(freq, bitsize, channels, buffer)
    # optional volume 0 to 1.0
    mixer.music.set_volume(0.8)
    try:
        try:
            mixer.music.load(music_file)
            print "Music file %s loaded!" % music_file
        except error:
            print "File %s not found! (%s)" % (music_file, get_error())
            return
        mixer.music.play()
        # check if playback has finished, polling at 30 fps
        while mixer.music.get_busy():
            clock.tick(30)
    except KeyboardInterrupt:
        # if user hits Ctrl/C then exit
        # (works only in console mode)
        # modify to work on GUI
        mixer.music.fadeout(1000)
        mixer.music.stop()
        print 'lolno'
        raise SystemExit
    song_thread.join()
def Giris(self, nesne):
    """On the first call, play the intro track; always (re)bind the button."""
    if self.kontrol == False:
        # One-shot: only start the music the first time through.
        mixer.init()
        mixer.music.load('Dosyalar/resim.mp3')
        mixer.music.play()
        self.kontrol = True
    self.dugme.bind(on_press=self.Tikla)
def Konusma(self):
    """Record one utterance from the microphone and return the recognized
    Turkish text.

    Plays a prompt sound first.  On any failure, prints a diagnostic; an
    unexpected error also replays the prompt and rebinds the button.
    Returns None when recognition fails.
    """
    try:
        mixer.init()
        mixer.music.load('Dosyalar/x.wav')
        mixer.music.play()
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
        konusma = r.recognize_google(audio, language="tr")
        print(konusma)
        return konusma
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `except Exception` keeps those deliverable.
        print('Kelime Algılanamadı')
        mixer.init()
        mixer.music.load('Dosyalar/x.wav')
        mixer.music.play()
        self.dugme.bind(on_press=self.Tikla)
def text_to_speech():
    """Speak the editor text from the start through the cursor via gTTS.

    Alternates between temp.mp3 and temp2.mp3 so the file still locked by
    the mixer from a previous playback can be skipped and removed later.
    """
    s = textPad.get('1.0', textPad.index(INSERT))
    try:
        tts = gTTS(text=s, lang='en')
    except:
        tkinter.messagebox.showinfo("Error", "An error occured make sure you have connected to internet!!")
        # BUG FIX: bail out here -- previously execution fell through and
        # died with a NameError because `tts` was never assigned.
        return
    try:
        tts.save("temp.mp3")
        mixer.init()
        mixer.music.load("temp.mp3")
        mixer.music.play()
        # Best effort: clean up the alternate file from a previous run.
        try:
            os.remove("temp2.mp3")
        except:
            pass
    except:
        # temp.mp3 is still locked by the mixer; fall back to temp2.mp3.
        tts.save("temp2.mp3")
        mixer.music.load("temp2.mp3")
        mixer.music.play()
        os.remove("temp.mp3")
def send(self, info):
    """Send *info* to the Tuling chatbot, speak the reply via the Baidu TTS
    REST API, then wait for the next user utterance."""
    url_tuling = self.apiurl + 'key=' + self.key + '&' + 'info=' + str(info)
    tuling = urllib2.urlopen(url_tuling)
    re = tuling.read()  # NOTE: local name shadows the stdlib `re` module
    re_dict = json.loads(re)
    text = re_dict['text']
    text = text.encode('utf-8')
    #print '回答是: ', text
    # Text to speech (Baidu TTS returns base64-encoded MP3 in 'retData').
    url = 'http://apis.baidu.com/apistore/baidutts/tts?text='+str(text)+'&ctp=1&per=0'
    # NOTE(review): binary MP3 data written to a text-mode handle -- confirm
    # this is safe on the target platform.
    f1 = open('test.mp3', 'w')
    req = urllib2.Request(url)
    req.add_header("apikey", " f6805eda3bd2a46de02ab4afa7a506a0")
    resp = urllib2.urlopen(req)
    content = resp.read()
    result = json.loads(content, 'utf-8')
    answer = result['retData']
    decoded_content = base64.b64decode(answer)
    f1.write(decoded_content)
    f1.close()
    print '- ', text
    mixer.init()
    mixer.music.load('test.mp3')
    mixer.music.play()
    time.sleep(2)
    self.get()
def main():
    """Run the tower demo: spawn staggered enemies and loop until quit."""
    mixer.init(44100)
    music = mixer.Sound("sounds/test.wav")
    #music.play(loops=-1)
    pygame.display.set_caption("Simple Tower Enemy")
    screen.blit(background, bRect)

    # Nine enemies, each starting 100px further off-screen than the last.
    enemies = [Enemy(index * (-100)) for index in range(1, 10)]
    allSprites = pygame.sprite.Group(enemies)

    clock = pygame.time.Clock()
    keepGoing = True
    while keepGoing:
        clock.tick(100)
        pygame.mouse.set_visible(False)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keepGoing = False
        allSprites.clear(screen, background)
        allSprites.update()
        allSprites.draw(screen)
        pygame.display.flip()

    # return mouse cursor
    pygame.mouse.set_visible(True)
def updateDisplay(self, msg):
    """ Receives data from the worker thread and updates the control. """
    self.mpc.Pause()
    mixer.init()
    mixer.music.load("campana.mp3")
    mixer.music.play()
    t = msg.data
    # Sentinel: 'adios' means this window should close itself.
    if t.strip() == 'adios':
        self.cerrar_form(self)
    # Messages are formatted "<patient>^<speciality>".
    if '^' in t:
        lcPaciente, lcEspecialidad = t.split('^')
        lcPacienteDecode = lcPaciente.decode('latin-1')
        # Call the patient by voice
        #comando_de_voz = "espeak -s140 -v 'es-la'+f2 '%s'" % (lcPacienteDecode)
        # NOTE(review): "espeapeak" may be a typo for "espeak" -- confirm.
        comando_de_voz = "espeapeak -p 80 -s 120 -v mb-vz1 '%s'" % (lcPacienteDecode)
        os.system(comando_de_voz)
        ultimo = lcPacienteDecode + '-' + lcEspecialidad
        if len(ultimo) > 0:
            self.list_ctrl_llamados.InsertStringItem(self.pos, ultimo)
            self.pos = 0
        self.text_ctrl_turno.Value = lcPacienteDecode.strip()
        self.text_ctrl_especialidad.Value = lcEspecialidad.strip()
        time.sleep(1)
        self.mpc.Pause()
        self.mpc.SetProperty('volume', 0)
    # NOTE(review): dangling triple-quote below opens a string/comment block
    # that continues beyond this chunk -- confirm against the full file.
    '''
def sample(args):
    """Sample text from a trained char-RNN checkpoint, synthesize it with
    gTTS, and play the resulting MP3 for its full duration."""
    # Restore the model configuration and vocabulary saved during training.
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            ts = model.sample(sess, chars, vocab, args.n, args.prime, args.sample)
            print("Sampled Output\n")
            print(ts)
            print("Converting Text to Speech")
            tts = gTTS(text=ts, lang='en-uk')
            tts.save("ts.mp3")
            # Read the MP3 duration so we can block until playback finishes.
            audio = MP3("ts.mp3")
            audio_length = audio.info.length
            print("Speaker is Getting Ready")
            mixer.init()
            mixer.music.load('ts.mp3')
            mixer.music.play()
            # Clip length plus a small safety margin.
            time.sleep(audio_length+5)
def main(file_path=None):
    """Play an audio file as a buffered sound sample

    Option argument: the name of an audio file (default data/secosmic_low.wav
    """
    if file_path is None:
        file_path = os.path.join(main_dir, 'data', 'secosmic_lo.wav')

    # Request an 11025 Hz mixer; raises an exception on failure.
    mixer.init(11025)

    sound = mixer.Sound(file_path)

    print ('Playing Sound...')
    channel = sound.play()
    # Poll once per second until the channel goes idle.
    while channel.get_busy():
        print (' ...still going...')
        time.wait(1000)
    print ('...Finished')
def __init__(self):
    """Build the main window: audio, the phrase CSV, buttons, the
    category/phrase labels, and the countdown clock display."""
    tk.Tk.__init__(self)
    self.title("kalambury 0.1")
    self.wm_iconbitmap("icon.ico")
    mixer.init(44100)
    self.clock = mixer.Sound("Ticking-clock.wav")
    self.alarm = mixer.Sound("alarm.wav")
    self.timer = None  # handle of the scheduled countdown callback
    # --- load phrases from CSV ---
    with open('hasla.txt') as csvfile:
        reader = csv.reader(csvfile, delimiter=';')
        self.data = list(reader)
    #print self.data
    # --- header ---
    tk.Label(self, text='Kalambury by Mariusz').grid(column=0, row=0, columnspan=3)
    # --- buttons: draw a phrase / start the 60 s countdown ---
    tk.Button(self, text="Losuj", height=8, width=15, bg="pink",
              command=self.losuj).grid(column=0, row=3)
    tk.Button(self, text="Czas start", height=8, width=15, bg="pink",
              command=lambda: self.countdown(60)).grid(column=2, row=3)
    # --- category and phrase labels ---
    self.category = tk.StringVar()
    tk.Label(self, textvariable=self.category, wraplength=500, justify=tk.LEFT,
             bg="blue", fg='ivory', font=LARGE_FONT, width=50,
             height=5).grid(column=0, row=1, columnspan=3)
    self.subject = tk.StringVar()
    tk.Label(self, textvariable=self.subject, justify=tk.LEFT, bg="blue",
             fg='ivory', font=LARGE_FONT, width=50,
             height=5).grid(column=0, row=2, columnspan=3)
    # --- clock display ---
    self.time = tk.StringVar()
    self.time.set("00:00")
    tk.Label(self, textvariable=self.time, justify=tk.LEFT, bg="blue",
             fg='ivory', font=LARGE_FONT, width=10,
             height=5).grid(column=1, row=3)
    # --- misc: show an initial phrase ---
    self.losuj()
def playSound(nom): """Joue un son une fois""" global volumeGlobal, volumeSons if not mixer.get_init() : mixer.init() son = mixer.Sound('Sons/' + nom + '.ogg') son.set_volume(float(volumeGlobal)*float(volumeSound)/10000) son.play()
def __init__(self):
    """Reset game state and load the ASCII-art screens used by the UI."""
    self.clearStatus()
    self.room = ""
    self.visibility = 0
    self.inventory = []

    # Graphics: each lib/ file is an ASCII-art screen stored as a list of
    # lines.  The original code opened each file with the same copy-pasted
    # loop and never closed the handles; _load_art fixes both.
    self.xairete = self._load_art("lib/xairete")   # xairete (Goodbye)
    self.splash = self._load_art("lib/splash")     # the splash (title) screen
    self.skull = self._load_art("lib/skull")       # the skull
    self.eurekas = self._load_art("lib/eurekas")   # the ending graphics

    mixer.init()

@staticmethod
def _load_art(path):
    """Read *path* and return its lines with the trailing character (the
    newline) stripped from each, matching the original [:-1] behaviour."""
    with open(path, "r") as handle:
        return [line[:-1] for line in handle]
def GET(self, cmd):
    """Handle a playback-control HTTP GET; cmd is one of play/pause/next/stop.

    Returns the command string that was handled.
    """
    mixer.init(44100)
    mixer.music.set_volume(1.0)
    # NOTE(review): `p` is a local copy of the module-level `paused` flag and
    # the assignments below never write back, so the toggled pause state
    # appears to be lost between requests -- confirm intent.
    p = paused
    # I feel it would have been too robust or unsuitable
    # to use a Command pattern in this scenario
    if (cmd == "play"):
        # Start the first song of the stored playlist, if any.
        songloc = list(mydb.getPlaylist())
        if (songloc):
            mixer.music.load(songloc[0].loc)
            mixer.music.play(0)
    if (cmd == "pause"):
        # Toggle between paused and playing.
        if (p):
            mixer.music.unpause()
            p = False
        else:
            mixer.music.pause()
            p = True
    if (cmd == "next"):
        # Drop the current head of the playlist and play the next entry.
        delfirst = mydb.delFirstPl()
        nextsong = list(mydb.getPlaylist())
        if (nextsong):
            mixer.music.load(nextsong[0].loc)
            mixer.music.play(0)
    if (cmd == "stop"):
        mixer.music.stop()
    return cmd
def __init__(self):
    """Start looping the interruption track; tracks volume and play state."""
    mixer.init()
    self.track = interruption
    self.volume = 0
    mixer.music.load(self.track)
    mixer.music.play(-1)  # -1: loop indefinitely
    self.playing = True
def __init__(self, settings):
    """Parse command-line settings, set up the curses screen, the colour
    palette and particle glyphs, and start the fire audio if available.

    settings: dict of flag -> string value (-r speed, -s scale, -i intensity,
    -w horizontal offset, -h height offset).
    """
    self.speed = int(settings['-r']) if '-r' in settings else 20
    self.scale = float(settings['-s']) if '-s' in settings else 1.0
    self.screen = curses.initscr()
    self.START_INTENSITY = int(settings['-i']) if '-i' in settings else self.MAX_INTENSITY
    self.START_OFFSET = int(settings['-w']) if '-w' in settings else 0
    self.START_HEIGHT = int(settings['-h']) if '-h' in settings else 0
    self.screen.clear()
    self.screen.nodelay(1)  # make getch() non-blocking
    curses.curs_set(0)      # hide the cursor
    curses.start_color()
    curses.use_default_colors()
    for i in range(0, curses.COLORS):
        curses.init_pair(i, i, -1)

    def color(r, g, b):
        # Map an RGB triple onto the 6x6x6 xterm-256 colour cube.
        return (16 + r // 48 * 36 + g // 48 * 6 + b // 48)

    # Heat palette: a black->red ramp followed by a red->yellow ramp.
    self.heat = [color(16 * i, 0, 0) for i in range(0, 16)] + [color(255, 16 * i, 0) for i in range(0, 16)]
    self.particles = [ord(i) for i in (' ', '.', '*', '#', '@')]
    assert (len(self.particles) == self.NUM_PARTICLES)
    self.resize()
    self.volume = 1.0
    if pygame_available:
        # Preferred backend: loop the crackle via pygame.
        mixer.init()
        music.load('fire.wav')
        music.play(-1)
    elif pyaudio_available:
        # Fallback: stream the sound ourselves on a worker thread.
        self.loop = True
        self.lock = threading.Lock()
        t = threading.Thread(target=self.play_fire)
        t.start()
def play_songfile(self):
    """Loop self.song_file through the mixer, blocking while it plays."""
    print('playing song...')
    mixer.init()
    mixer.music.load(self.song_file)
    mixer.music.play(-1)  # -1: loop indefinitely
    # Block while the music plays.  Sleep briefly between polls instead of
    # the previous hot `continue` loop, which pinned a CPU core at 100%.
    import time
    while mixer.music.get_busy():
        time.sleep(0.1)
def setup():
    """Seed randomness, build the sample playfield, then run the generative
    music loop forever (play, mutate, advance, print, repeat)."""
    random.seed()
    mixer.init()
    #screen = pygame.display.set_mode ((640, 480), 0, 32)
    samples = get_samples("./samples")
    init_playfield(samples)
    tick()
    print_playfield()
    pygame.init ()
    #screen.fill ((100, 100, 100))
    #pygame.display.flip ()
    #pygame.key.set_repeat (500, 30)
    #mixer.init(11025)
    #mixer.init(44100)
    #sample = samples[random.randint(0,len(samples)-1)]
    #print("playing sample:",sample)
    #sound = mixer.Sound(sample)
    #channel = sound.play()
    # Main generative loop -- never exits normally, so the pygame.quit()
    # below is effectively unreachable.
    while True:
        play_sounds()
        #for i in range(2):
        mutate_playfield()
        tick()
        print_playfield()
        #time.wait(int((1000*60)/80)) # 128bpm
        time.wait(50)
    #while channel.get_busy(): #still playing
    #    print("  ...still going...")
    #    time.wait(1000)
    #print("...Finished")
    pygame.quit()
def send(self, info):
    """Send *info* to the Tuling chatbot, convert the reply to speech with
    the Baidu TTS API, play it, then wait for the next utterance."""
    url_tuling = self.apiurl + 'key=' + self.key + '&' + 'info=' + info
    tuling = urllib2.urlopen(url_tuling)
    re = tuling.read()  # NOTE: local name shadows the stdlib `re` module
    re_dict = json.loads(re)
    text = re_dict['text']
    text = text.encode('utf-8')
    #text = '你好啊,希望你今天过的快乐'
    #print(chardet.detect(text))
    # Baidu TTS returns base64-encoded MP3 data in the 'retData' field.
    url = 'http://apis.baidu.com/apistore/baidutts/tts?text='+text+'&ctp=1&per=0'
    # NOTE(review): binary MP3 payload written to a text-mode handle --
    # confirm this is safe on the target platform.
    f1 = open('test.mp3', 'w')
    req = urllib2.Request(url)
    req.add_header("apikey", " f6805eda3bd2a46de02ab4afa7a506a0")
    resp = urllib2.urlopen(req)
    content = resp.read()
    result = json.loads(content, 'utf-8')
    decoded_content = base64.b64decode(result['retData'])
    f1.write(decoded_content)
    f1.close()
    print '- ', text
    mixer.init()
    mixer.music.load('test.mp3')
    mixer.music.play()
    self.get()
def main(argv):
    """Initialize audio with 20 mixing channels, load resources, run the app."""
    mixer.init(44100)
    mixer.set_num_channels(20)
    load_res("./res")
    App().MainLoop()
def moteur(freq, temps):
    """Retrigger the <freq>Hz tone `temps` times, one burst every 0.1 s."""
    mixer.init()
    fichier = "Sons\\{}.wav".format(str(freq) + "Hz")
    mixer.music.load(fichier)
    for _ in range(temps):
        mixer.music.play()
        sleep(0.1)
# File menu: open a track or quit the application.
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="File", menu=submenu)
submenu.add_command(label="Open", command=browseFile)
submenu.add_command(label="Exit", command=root.destroy)


def about_us():
    """Show the application's about box."""
    tkinter.messagebox.showinfo(
        "Muzzic", "Created by Aravind M Krishnan , NIT Puducherry")


# Help menu (reuses the `submenu` name; the File menu is already attached).
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=submenu)
submenu.add_command(label="About Us", command=about_us)

mixer.init()  # Initializing the mixer

root.title("Muzzic")

# Left pane: the playlist.
leftFrame = Frame(root)
leftFrame.pack(side=LEFT)
playlistbox = Listbox(leftFrame)
playlistbox.pack()

# Right pane: playback controls.
rightFrame = Frame(root)
rightFrame.pack()
topFrame = Frame(rightFrame)
topFrame.pack()
# GUI toolkit, themed widgets, dialogs, the audio backend and MP3 metadata.
from tkinter import ttk
from ttkthemes import themed_tk as tk
import tkinter.messagebox
from tkinter import filedialog
from pygame import mixer
from mutagen.mp3 import MP3
import time
import threading

# Themed root window.
root = tk.ThemedTk()
root.get_themes()
root.set_theme("radiance")

mixer.init()  # initializing the mixer

root.title("Melody")
root.iconbitmap(r'images/melody.ico')

# Status bar pinned to the bottom of the window.
# NOTE(review): "Wlcome" looks like a typo in this user-facing text.
statusbar = Label(root, text="Wlcome To Melody", relief=SUNKEN, anchor=W, font="Times 15 italic")
statusbar.pack(side=BOTTOM, fill=X)

# Labels showing the loaded file name and the track's total length.
filelabel = ttk.Label(root, text='Welcome')
filelabel.pack()
lengthlabel = ttk.Label(root, text='Total Length - --:--')
lengthlabel.pack(pady=5)
import os
from tkinter import *
import tkinter.messagebox
from tkinter import filedialog
from pygame import mixer

root = Tk()
# buffer=1 keeps playback latency to a minimum.
mixer.init(buffer=1)

# Menu bar with a single File submenu.
menubar = Menu(root)
root.configure(menu=menubar)
SubMenu = Menu(menubar, tearoff=0)


def browse_file():
    """Ask the user for an audio file, load it into the mixer, and update
    the status label."""
    global file
    file = filedialog.askopenfilename()
    mixer.music.load(file)
    status['text'] = f"Loaded file: {os.path.basename(file)}"


menubar.add_cascade(label="File", menu=SubMenu)
SubMenu.add_command(label="Open", command=browse_file)
SubMenu.add_command(label="Exit", command=root.quit)


def about_us():
    """Show the project credits in a message box."""
    project_credit = "tkinter/pygame learning project\nProject by @buildwithpython on YouTube"
    tkinter.messagebox.showinfo('About', project_credit)
import cv2 import os from keras.models import load_model import numpy as np from pygame import mixer mixer.init() sound = mixer.Sound('alarm.wav') face = cv2.CascadeClassifier( 'haar cascade files\haarcascade_frontalface_alt.xml') leye = cv2.CascadeClassifier( 'haar cascade files\haarcascade_lefteye_2splits.xml') reye = cv2.CascadeClassifier( 'haar cascade files\haarcascade_righteye_2splits.xml') lbl = ['Close', 'Open'] model = load_model('models/cnn.h5') path = os.getcwd() cap = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_COMPLEX_SMALL count = 0 score = 0 thicc = 2 rpred = [99] lpred = [99] while (True): ret, frame = cap.read() height, width = frame.shape[:2]
def __init__(self, master=None): self.cur_path = os.getcwd() + '\\downloads' self.button_src = os.getcwd() + '\\button' self.filelist = [] self.playlist = [] self.count = 0 self.ispause = False self.isloop_play = False self.israndom_play = False self.nowplaying = str() mixer.init() self.window = master self.window.geometry("960x720") self.window.title("mp3 player") self.window.resizable(False, False) #self.window.configure(background = "#367B34") self.play_png = photoconverter(self.button_src + "\\play.png", 144, 147) self.pause_png = photoconverter(self.button_src + "\\pause.png", 150, 150) self.not_loop_play_png = photoconverter( self.button_src + "\\not_loop.png", 195, 160) self.loop_play_png = photoconverter(self.button_src + "\\is_loop.png", 195, 160) self.not_random_play_png = photoconverter( self.button_src + "\\not_random.png", 194, 185) self.random_play_png = photoconverter( self.button_src + "\\is_random.png", 194, 185) self.previous_song_png = photoconverter( self.button_src + "\\previous_song.png", 210, 200) self.next_song_png = photoconverter( self.button_src + "\\next_song.png", 125, 131) self.nowplaying_png = photoconverter(self.button_src + "\\title.png", 957, 228) self.back_png = photoconverter(self.button_src + "\\back.png", 369, 295) self.slider_png = photoconverter( self.button_src + "\\volume_lever.png", 99, 54) self.player_background = photoconverter( self.button_src + "\\background.png", 1440, 1080) self.background = Canvas(self.window, width=960, height=960) self.background.pack(fill="both", expand=True) self.background.create_image(480, 360, image=self.player_background) files = os.listdir(self.cur_path) self.filelist = ['downloads\\' + x for x in files] self.filelist.remove('downloads\\.gitignore') self.playlist = [x for x in self.filelist] self.label_text = StringVar() self.volume = 30 self.button1 = Button(self.background, image=self.pause_png, command=self.pause, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", 
highlightthickness=0) self.background.create_window(240, 612, window=self.button1) self.button2 = Button(self.background, image=self.not_loop_play_png, command=self.loop_play, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", highlightthickness=0) self.background.create_window(720, 565, window=self.button2) self.button3 = Button(self.background, image=self.not_random_play_png, command=self.random_play, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", highlightthickness=0) self.background.create_window(535, 595, window=self.button3) self.button4 = Button(self.background, image=self.previous_song_png, command=self.previous_song, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", highlightthickness=0) self.background.create_window(90, 630, window=self.button4) self.button5 = Button(self.background, image=self.next_song_png, command=self.next_song, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", highlightthickness=0) self.background.create_window(380, 598, window=self.button5) self.nowplaying_label = Label(self.background, textvariable=self.label_text, bg="#FFFFFF") self.nowplaying_label.config(font=("Arial", 16), width=40) self.background.create_window(330, 100, window=self.nowplaying_label) self.back_button = Button(self.background, image=self.back_png, command=self.Goback, relief=FLAT, bg="#347B36", bd=0, activebackground="#347B36", highlightthickness=0) self.background.create_window(835, 410, window=self.back_button) self.slider_y = 680 self.background.create_image(860, 680, image=self.slider_png, tags="slider") self.background.bind("<B1-Motion>", self.drag) self.background.bind("<ButtonRelease-1>", self.release) mixer.music.load(self.playlist[self.count]) mixer.music.set_volume(self.volume / 100) mixer.music.play(loops=0) self.nowplaying = self.playlist[self.count] self.nowplaying = self.nowplaying.replace(".mp3", "") self.label_text.set(self.nowplaying.replace("downloads\\", "")) t = threading.Thread(target=self.play) 
t.setDaemon(True) t.start() self.window.protocol("WM_DELETE_WINDOW", self.stop) self.window.mainloop()
# along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Do pretty much everything to do with I/O""" import os, random from math import atan2, pi import Image import pygame from pygame import mixer, sprite from vg import config from vg.utils import id_generator, yaml_load from vg.misc import GameEscape, GameQuit, CycleJump pygame.init() mixer.init(config.AUDIO_FREQ, config.AUDIO_BITS, config.AUDIO_CHANNELS) def stretch(im, size, filter=Image.NEAREST): im.load() im = im._new(im.im.stretch(size, filter)) return im def echo_event(e): print "got event %s" % e for k in dir(e): print "%10s: %s" % (k, getattr(e, k)) class Flash:
from random import uniform, randint from time import time, sleep from threading import Thread from pygame import mixer mixer.init(frequency=22050, size=-16, channels=2, buffer=512) # minimises delays from bluetooth import * splat_sound = mixer.Sound('splat.wav') fly_sound = mixer.Sound('fly.wav') flying = (0.5, 10.0) hiding = (0.5, 4.0) landing = (0.5, 2.0) FLY_FLYING = -1 FLY_HIDING = -2 event_due = -1 fly_volume = -1 fly_location = FLY_FLYING def noisy_fly(): global fly_volume if fly_volume == -1: fly_sound.play(loops=-1)
def __init__(self):
    """Bring the mixer up at half volume with an empty song queue."""
    mixer.init()
    mixer.music.set_volume(0.5)
    self.pstate = 0        # playback state flag
    self.song_number = 0   # index of the current song
    self.songs = []        # queued song paths
) # создаем объект, который выделяет лицо прямоугольником predictor = dlib.shape_predictor( dat_path + 'shape_predictor_68_face_landmarks.dat' ) # загрузка(шаблона) данных обучения для точек на лице facerec = dlib.face_recognition_model_v1( dat_path + 'dlib_face_recognition_resnet_model_v1.dat' ) # загрузка данных обучения нейросети resnet video_capture = cv2.VideoCapture(0) # подключение камеры video_capture.set(3, 360) # задаем размеры кадра камеры 160x120 video_capture.set(4, 240) # 360x240 img_path = ['p_D.jpg', 'p_N.jpg', 'p_I.jpg', 'p_V.jpg'] #'p_V.jpg' fdi = [] audio = ['u_D.mp3', 'u_N.mp3', 'u_I.mp3', 'u_V.mp3', 'u_U.mp3'] names = ['Даня', 'Никита', 'Белла', 'Вадимчик'] ind_u = 0 mixer.init() for im in img_path: img = cv2.imread(mp3_pic_path + im) detections = detector(img, 1) # ф-ция выделяет лицо в прямоугольник for k, d in enumerate( detections): # цикл по всем найденным на изображении лицам shape = predictor(img, d) #возвращает координаты точек на лице face_descriptor_img = facerec.compute_face_descriptor( img, shape) # получаем 128 дискрипторов лица fdi.append(face_descriptor_img) ind_d, ind_u, ind_p, j = 0, 0, 0, 0 while (1): # цикл обработки nn кадров ret, frame = video_capture.read() # запуск камеры detections = detector(frame, 1) # ф-ция выделяет лицо в прямоугольник face_descriptor_frame = 0 if len(detections) == 0:
import cv2 import os from keras.models import load_model import numpy as np from pygame import mixer import time mixer.init() #Sound to alert the driver sound = mixer.Sound('alarm.wav') #files for extracted features. face = cv2.CascadeClassifier( 'haar cascade files\haarcascade_frontalface_alt.xml') leye = cv2.CascadeClassifier( 'haar cascade files\haarcascade_lefteye_2splits.xml') reye = cv2.CascadeClassifier( 'haar cascade files\haarcascade_righteye_2splits.xml') lbl = ['Close', 'Open'] #Classifier model = load_model('models/cnncat2.h5') path = os.getcwd() cap = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_COMPLEX_SMALL count = 0 score = 0 thicc = 2 rpred = [99] lpred = [99] while (True):
def phy_ex():
    """Play the physical-exercise prompt and block ten seconds for it."""
    mixer.init()
    mixer.music.load("physical.mp3")
    mixer.music.play()
    # Give the clip time to finish before returning.
    time.sleep(10)
subtype='PCM_24') w_str = get_window_contents() curses.curs_set(1) curses.endwin() print w_str.rstrip() print sys.exit() # important to make sure the mixer is setup with the # same sample rate as the audio. Otherwise the playback will # sound too slow/fast/awful mixer.init(frequency=jukebox.sample_rate) channel = mixer.Channel(0) # go through the playback list, start playing each beat, display the progress # and wait for the playback to complete. Playback happens on another thread # in the pygame library, so we have to wait for the beat's duration. for v in jukebox.play_vector: beat_to_play = jukebox.beats[v['beat']] snd = mixer.Sound(buffer=beat_to_play['buffer']) channel.queue(snd) how_long_this_took = display_playback_progress(v)
def alert():
    """Play the alarm sound once and block for a second while it starts."""
    mixer.init()
    # Renamed the local from `alert` -- it used to shadow this function's
    # own name, which breaks any recursive or re-entrant use.
    alert_sound = mixer.Sound('alarm.wav')
    alert_sound.play()
    time.sleep(1)
def drink_water():
    """Play the drink-water reminder and block ten seconds for it."""
    mixer.init()
    mixer.music.load("water.mp3")
    mixer.music.play()
    # Allow the clip to finish before returning.
    time.sleep(10)
# wav, mp3, ogg, flv, (need ffmpeg supports ->)mp4, wma, aac #sound = AudioSegment.from_file("sound.m4a", "m4a") path = u".\\sound\\sound.mp3" print( path ) # for windows #pip install playsound import playsound playsound.playsound(path, True) """ p = vlc.MediaPlayer(path) p.play() """ """ mixer.init() mixer.music.load(path) mixer.music.play() """ """ #for linux sound = AudioSegment.from_mp3(path) #チャンネル数(1:mono, 2:stereo) channel_count = sound.channels print( 'チャンネル数:' + str(channel_count ) ) #サンプルレート(Hz) frames_per_second = sound.frame_rate print( 'サンプルレート:' + str(frames_per_second ) )
def eyes_ex():
    """Play the eye-exercise prompt and block ten seconds for it."""
    mixer.init()
    mixer.music.load("eyes.mp3")
    mixer.music.play()
    # Allow the clip to finish before returning.
    time.sleep(10)
def play_music(file):
    """Load *file* into the mixer and start playback (returns immediately)."""
    mixer.init()
    mixer.music.load(file)
    mixer.music.play()
# CREATE MENUBAR
menubar = Menu(root)
root.config(menu=menubar)

# CREATE SUBMENU (File: open a track or quit)
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label='FILE', menu=submenu)
submenu.add_command(label='Open', command=browse_file)
submenu.add_command(label='Exit', command=root.destroy)

# CREATE SUBMENU (Help; note this rebinds the `submenu` name -- the File
# menu is already attached to the menubar, so it keeps working)
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label='HELP', menu=submenu)
submenu.add_command(label='About Us', command=about_us)

mixer.init()  # initializing mixer

# SET SIZE OF APP
# root.geometry('300x300')

# SET TITLE OF APP
root.title('PLAY NOW')

# SET ICON FOR APP
root.iconbitmap('icon.ico')

# for playList
# listbox
playlist = Listbox(leftframe)
playlist.pack(pady=5)
#button
def alarmaStop():
    """Attempt to stop the alarm sound."""
    mixer.init()
    sound = mixer.Sound('alarma1.wav')
    # NOTE(review): this stops a brand-new Sound object, not the one started
    # elsewhere (e.g. in alarmaE), so the playing alarm likely keeps
    # sounding.  Confirm whether mixer.stop() or a shared module-level
    # Sound instance was intended.
    sound.stop()
def song_init(cls):
    """Scan the mp3/ directory and cache a sorted list of its file names."""
    mixer.init()  # bring up the audio mixer
    Func_Class.path_dir = 'mp3/'  # root directory holding the mp3 files
    Func_Class.file_list = os.listdir(Func_Class.path_dir)
    # Keep the track list in alphabetical order.
    Func_Class.file_list.sort()
' .|||||||||||||||||||||||||||||||| !||||||||||||||||||||||||||||||i. !|||||||||||||||||||||||||||||||||||||||||||||||||| ' ) print( ' . !|||||||||||||||||||||||||||||| !||||||||||||||||||||||||||||||i. .!||||||||||||||||||||||||||||||||||||||||||||| ' ) print( ' !|||||||||||||||||||||||||||| !||||||||||||||||||||||||||||||i. .!|||||||||||||||||||||||||||||||||||||||| ' ) print( ' .!||||||||||||||||||||||| .!||||||||||||||||||||||||||||||i. .!!|||||||||||||||||||||||||||||| ' ) import pygame.mixer as m import time m.init(47000) m.music.load("AC.wav") m.music.play() m.music.play(1) time.sleep(10) # 音楽の再生時間 m.music.stop() # 再生の終了 break elif result_text == 'WA' or result_text == 'CE' or result_text == 'TLE' \ or result_text == 'MLE' or result_text == 'RE' or result_text == 'OLE' \ or result_text == 'IE': print("\r【{result_text}】".format(result_text=result_text), end="") break else: print("\r【{result_text}】".format(result_text=result_text), end="") # ------------------------------judge part end-----------------------------
def alarmaE():
    """Start the alarm sound effect ('alarma1.wav')."""
    mixer.init()
    # Construct and fire the sound in one expression; no reference is
    # kept, matching the original behavior.
    mixer.Sound('alarma1.wav').play()
def enError():
    """Announce an error in English: print a notice and play the clip.

    Blocks for three seconds so the audio can finish before returning.
    """
    print("An error has occurred.")
    mixer.init()
    mixer.music.load(audioErrEn)  # English error audio, path set at module level
    mixer.music.play()
    time.sleep(3)  # give the clip time to play out
def sound():
    """Load and start the notification sound ('sound.mp3')."""
    track = 'sound.mp3'
    mixer.init()
    mixer.music.load(track)
    mixer.music.play()
# Scan every contour found in the current frame; large contours count as
# motion.  (`cnts`, `frame`, `status_list`, `times`, `count` come from the
# enclosing script scope — this block runs inside the capture loop.)
for contour in cnts:
    # Ignore small blobs: only contours with area >= 10000 px are motion.
    if cv2.contourArea(contour) < 10000:
        continue
    status = 1  # motion seen in this frame
    (x, y, w, h) = cv2.boundingRect(contour)
    # Draw a green box around the moving object.
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
# Record the frame status and keep only the last two entries so the
# 0->1 / 1->0 transitions below can be detected.
status_list.append(status)
status_list = status_list[-2:]
if status_list[-1] == 1 and status_list[-2] == 0:
    # Motion just started: timestamp it (datetime.now() is the Python
    # timestamp function) and play the alert sound.
    times.append(datetime.now())
    mixer.init()  # sound
    mixer.music.load("a.mp3")  # sound
    mixer.music.play()  # sound
    # ----- capture a snapshot of the motion frame -----
    # save_path was hard-coded here previously; files are now written to
    # the working directory as "<count>motion.jpg".
    filname = str(count) + "motion.jpg"
    count += 1
    cv2.imwrite(filename=filname, img=frame)
    img_new = cv2.imread(filname, cv2.IMREAD_GRAYSCALE)
    img_new = cv2.imshow("Captured Image", img_new)
    # ----- end snapshot -----
if status_list[-1] == 0 and status_list[-2] == 1:
    # Motion just stopped: timestamp the end of the event.
    times.append(datetime.now())
def __init__(self, world, reset_callback=None, reward_callback=None,
             observation_callback=None, info_callback=None,
             done_callback=None, shared_viewer=True):
    """Build a multi-agent environment around *world*.

    Configures per-agent gym action/observation spaces, the MASpace
    wrappers, rendering viewers, and the bullet sound effect.

    Parameters: *world* supplies agents and dimensions (``dim_p``
    physical, ``dim_c`` communication); the ``*_callback`` arguments are
    scenario hooks invoked by reset/step; *shared_viewer* selects one
    shared render window vs. one per agent.
    """
    self.ob_rms = None
    self.world = world
    self.agents = self.world.policy_agents
    # set required vectorized gym env property
    self.n = len(world.policy_agents)
    # scenario callbacks
    self.reset_callback = reset_callback
    self.reward_callback = reward_callback
    self.observation_callback = observation_callback
    self.info_callback = info_callback
    self.done_callback = done_callback
    # environment parameters
    self.discrete_action_space = True
    # if true, action is a number 0...N, otherwise action is a one-hot
    # N-dimensional vector
    self.discrete_action_input = True  # False
    # if true, even if the action is continuous, action will be performed
    # discretely
    self.force_discrete_action = world.discrete_action if hasattr(
        world, 'discrete_action') else False
    # if true, every agent has the same reward
    self.shared_reward = world.collaborative if hasattr(
        world, 'collaborative') else False
    # configure spaces
    self.action_space = []
    self.observation_space = []
    obs_shapes = []
    self.agent_num = len(self.agents)
    for agent in self.agents:
        total_action_space = []
        # physical action space
        if self.discrete_action_space:
            # dim_p*2 directional moves (+/- per axis) plus 2 extra
            # actions (see the 8-action comment below: rot/shoot etc.)
            u_action_space = spaces.Discrete((world.dim_p) * 2 + 2)  ##
        else:
            u_action_space = spaces.Box(low=-agent.u_range,
                                        high=+agent.u_range,
                                        shape=(world.dim_p, ),
                                        dtype=np.float32)
        if agent.movable:
            total_action_space.append(u_action_space)
        # communication action space
        if self.discrete_action_space:
            c_action_space = spaces.Discrete(world.dim_c)
        else:
            c_action_space = spaces.Box(low=0.0, high=1.0,
                                        shape=(world.dim_c, ),
                                        dtype=np.float32)
        if not agent.silent:
            total_action_space.append(c_action_space)
        # total action space
        if len(total_action_space) > 1:
            # all action spaces are discrete, so simplify to
            # MultiDiscrete action space
            if all([
                    isinstance(act_space, spaces.Discrete)
                    for act_space in total_action_space
            ]):
                act_space = MultiDiscrete(
                    [[0, act_space.n - 1]
                     for act_space in total_action_space])
            else:
                act_space = spaces.Tuple(total_action_space)
            self.action_space.append(act_space)
        else:
            self.action_space.append(total_action_space[0])
        # observation space: dimension probed by calling the scenario's
        # observation callback once per agent
        obs_dim = len(observation_callback(agent, self.world))
        obs_shapes.append((obs_dim, ))
        self.observation_space.append(
            spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim, ),
                       dtype=np.float32))
        agent.action.c = np.zeros(self.world.dim_c)
    # simpified for non-comm game
    # self.action_spaces = MASpace(tuple(Box(low=-1., high=1., shape=(1,)) for _ in range(self.agent_num)))
    # self.observation_spaces = MASpace(tuple(Discrete(1) for _ in range(self.agent_num)))
    # action has 8 values:
    # nothing, +forcex, -forcex, +forcey, -forcey, +rot, -rot, shoot
    self.action_spaces = MASpace(
        tuple(
            Box(low=0., high=1., shape=((world.dim_p) * 2 + 2, ))
            for _ in range(self.agent_num)))  ##
    self.observation_spaces = MASpace(
        tuple(
            Box(low=-np.inf, high=+np.inf, shape=obs_shape)
            for obs_shape in obs_shapes))
    self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
    self.action_range = [0., 1.]
    # rendering: one shared viewer, or one viewer per agent
    self.shared_viewer = shared_viewer
    if self.shared_viewer:
        self.viewers = [None]
    else:
        self.viewers = [None] * self.n
    # preload the gunshot sound used during rendering; the sound files
    # live next to the installed gym_fortattack package
    mixer.init()
    soundFiles = gym_fortattack.__file__[:-11] + 'envs/Game/'
    # bulletFile = os.path.realpath(__file__)[:-13]+'Game/bullet.mp3'
    mixer.music.load(soundFiles + 'bullet.mp3')
    # print(gym_fortattack.__file__)
    # time.sleep(5)
    self.prevShot, self.shot = False, False  # used for rendering
    self._reset_render()
def get_sound(self, level):
    """Return the mixer Sound for this piece's color at *level*.

    Looks the filename up in the module-level SOUND_FILES table, keyed
    first by ``self.color_id`` and then by *level*.
    """
    mixer.init()
    filename = SOUND_FILES[self.color_id][level]
    return mixer.Sound('music/' + filename)
def __init__(self):
    """Initialize the pygame mixer so sounds can be played later."""
    mixer.init()
def alert1():
    """Play a single warning beep ('beep-07.wav')."""
    mixer.init()
    # Fire-and-forget: no reference to the Sound is kept, exactly as in
    # the original implementation.
    mixer.Sound('beep-07.wav').play()
def initializeParameters(self):
    """Load application parameters and reset per-session game state.

    Reads the AP install path from ``./.pathToAP``, loads user
    parameters via the external ``reader`` module (each entry is a
    ``name=value`` string, stored as an int when possible), then sets
    the e-writing defaults, shuffles the picture word list, parks the
    mouse cursor in the window corner, and initializes the mixer.
    """
    # The first line of ./.pathToAP is the AP installation root.
    with open('./.pathToAP', 'r') as textFile:
        self.pathToAP = textFile.readline()
    sys.path.append(self.pathToAP)
    from reader import reader
    reader = reader()
    reader.readParameters()
    parameters = reader.getParameters()
    # Each item looks like "name=value"; numeric values become ints,
    # everything else is stored as the raw string.
    for item in parameters:
        try:
            setattr(self, item[:item.find('=')],
                    int(item[item.find('=') + 1:]))
        except ValueError:
            setattr(self, item[:item.find('=')],
                    item[item.find('=') + 1:])
    # (A legacy hand-rolled parser for 'ewritingParameters' used to live
    # here; it was already commented out and has been removed.)
    # NOTE(review): the defaults below are assigned *after* the loop
    # above, so they override any same-named values loaded from the
    # parameter file — confirm this is intended.
    self.textSize = 80
    self.checkTime = 8000
    self.colorGrat = 'lime green'    # color for correct answers
    self.colorNiest = 'indian red'   # color for wrong answers
    self.ileLuk = 1                  # number of letter gaps in a word
    self.maxPoints = 2
    self.sex = 'D'
    self.ownWord = ''
    self.flaga = 0
    self.PicNr = 0
    self.result = 0
    # Build and shuffle the list of picture-word exercises.
    self.WordsList = os.listdir(self.pathToAP +
                                'multimedia/ewriting/pictures')
    shuffle(self.WordsList)
    self.poczatek = True    # "start" flag
    self.czyBack = False    # "went back" flag
    self.numberOfPresses = 1
    # Park the mouse cursor in the bottom-right corner of the window.
    self.mouseCursor = PyMouse()
    self.mousePosition = self.winWidth - 8, self.winHeight - 8
    self.mouseCursor.move(*self.mousePosition)
    mixer.init()