def main():
    """
    Main body, responsible for running the show: plays a start chime, then
    turns key presses into notes (key down starts a note, key up stops it).

    :return: None (loops forever processing pygame events)
    """
    # short start-up chime; Note_B3 is assumed to be a module-level sample buffer — TODO confirm
    start_chime = sa.play_buffer(Note_B3, 1, 2, 44100)
    time.sleep(.2)
    start_chime.stop()
    # called with no arguments this disables pygame key repeat
    pygame.key.set_repeat()
    playing = {}  # maps pygame key code -> currently sounding play object
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                current_key = event.unicode
                if current_key == '':
                    # non-printable key: abandon the rest of this event batch
                    break
                try:
                    note = key_lookup.get(current_key)  # None when the key is unmapped
                    print(current_key)
                    note = sa.play_buffer(note, 1, 2, 44100)
                    playing[event.key] = note
                except TypeError:
                    # play_buffer rejects the None returned for an unmapped key
                    break
            elif event.type == pygame.KEYUP:
                # stop and forget the note held by this key, if any
                while event.key in playing:
                    playing.get(event.key).stop()
                    del playing[event.key]
            else:
                # NOTE(review): any other event type abandons this batch — confirm intent
                break
def _white_noise_pass(banner):
    """Play state.white_noise once to completion (blocking); logs playback errors."""
    try:
        print(banner)
        state.white_noise_play = simpleaudio.play_buffer(
            state.white_noise.raw_data,
            num_channels=state.white_noise.channels,
            bytes_per_sample=state.white_noise.sample_width,
            sample_rate=state.white_noise.frame_rate)
        state.white_noise_play.wait_done()
        state.white_noise_play = None
    except Exception as e:
        logging.error(f"Error on audio playback: {e}")


def play_white_noise():
    """Loop white-noise playback for as long as state.is_sleeping stays set.

    Sets state.is_sleeping, then on each pass either waits while a playback is
    already in flight (state.white_noise_play is not None) or plays the noise
    buffer once to completion.

    Fixes: the two duplicated playback bodies are shared in _white_noise_pass
    (keeping each path's original banner text), and the bare ``except:`` —
    which also swallowed KeyboardInterrupt/SystemExit — is narrowed to
    ``except Exception``.
    """
    print("play_white_noise")
    state.is_sleeping = True
    while state.is_sleeping:
        try:
            if state.white_noise_play is not None:
                print("white noise is already playing")
                time.sleep(1)
            else:
                _white_noise_pass("~~ white noise loop ~~")
        except Exception:
            # reached e.g. when state.white_noise_play is not yet readable;
            # fall back to starting playback directly, as the original did
            _white_noise_pass("starting white noise")
def run(self):
    """Start playback of self.content and set self.event once the estimated
    duration has elapsed (16-bit mono at 16 kHz)."""
    sa.play_buffer(self.content, 1, 2, 16000)
    # 2 bytes/sample at 16000 Hz -> 32000 bytes per second of audio
    duration = len(self.content) / 32000
    time.sleep(duration)
    self.event.set()
def associate_morph_tone(exp, trial, houses_tones, faces_tones, time_waiting):
    """Play, for one trial, the tone associated with the upcoming morph.

    Detects from the trial's 'name' factor whether the morph is a face
    (noise1/noise2) or a house (noise4/noise5), picks a random pure or 80%
    tone from the matching list, plays it to completion, and waits
    time_waiting around the playback.

    :return: (image_noise, tone_name). tone_name is None when the morph name
        matches neither group — previously that path raised NameError at the
        return because tone_name was never bound.
    """
    sample_rate = 44100  # 44100 samples per second
    expyriment.stimuli.BlankScreen(colour=BLACK).present()  # clear screen
    exp.clock.wait(time_waiting)
    image_noise = trial.get_factor('name')[-10:]  # get noise of the morph
    tone_name = None  # fix: defined even when no branch below matches
    if image_noise == 'noise1.jpg' or image_noise == 'noise2.jpg':
        # the morph is a face, so we play randomly a low or 80% low tone
        tone, tone_name = random.choice(faces_tones)  # unzip to save the name picked
        play_tone = sa.play_buffer(tone, 1, 2, sample_rate)
        play_tone.wait_done()
        exp.clock.wait(time_waiting)
    elif image_noise == 'noise4.jpg' or image_noise == 'noise5.jpg':
        # the morph is a house, so we play randomly a high or 80% high tone
        tone, tone_name = random.choice(houses_tones)  # unzip to save the name picked
        play_tone = sa.play_buffer(tone, 1, 2, sample_rate)
        play_tone.wait_done()
        exp.clock.wait(time_waiting)
    return image_noise, tone_name
def plotHz():
    """Design a second-order resonator, play its filtered output, and plot
    the frequency response plus the impulse response."""
    # Bandwidth
    bb = 0.05
    # Pole radius R follows from the bandwidth
    R = 1-bb/2
    # Resonance frequency, normalised (0.5 corresponds to the Nyquist frequency)
    f = 0.1
    # Pole angle theta follows from the resonance frequency
    theta = 2*np.pi*f
    # Filter denominator
    A = [1, -2*R*np.cos(theta), R**2]
    # Filter numerator
    B = [(1-R**2)*np.sin(theta)]
    # unit impulse of length 100
    Xi = np.append([1], np.zeros(100-1 ))
    # NOTE(review): X is not defined in this function — presumably a
    # module-level input signal; confirm it exists, otherwise this raises
    # NameError (perhaps Xi was intended here).
    Y = scipy.signal.lfilter(B,A,X)
    Yi = scipy.signal.lfilter(B, A, Xi)
    w, h = signal.freqz(B,A)
    Y = np.int16(Y)
    # fs is assumed to be a module-level sample rate — TODO confirm
    sa.play_buffer(Y, 1, 2, fs)
    fig = plt.figure()
    a = fig.add_subplot(2, 1, 1)
    plt.plot(w/np.pi, abs(h))
    a.set_title('')
    a = fig.add_subplot(2, 1, 2)
    plt.stem(Yi)
    a.set_title('')
    plt.show()
def play(self, halfPitch=False, volume=1.0):
    """Play self.data as 16-bit mono audio.

    halfPitch plays at half the sample rate (one octave down, double length);
    volume scales the sample amplitudes before playback.
    """
    if halfPitch:
        rate = self.samplerate // 2
    else:
        rate = self.samplerate
    if volume == 1.0:
        samples = self.data
    else:
        samples = (self.data * volume).astype(numpy.int16)
    sa.play_buffer(samples, 1, 2, rate)
def Testy(num):
    """Pick a random 32-bit config seed, print it in binary, and attempt to
    play `num` generated bars, skipping any bar that fails to build or play.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``except Exception``.
    """
    seed = random.randint(0, 2**32 - 1)
    print(bin(seed))
    for i in range(num):
        try:
            sa.play_buffer(main.make_bar(i, aScale, 3, main.decode_config(seed)),
                           1, 2, 44100)
        except Exception:
            # best-effort: a bad bar is skipped, not fatal
            continue
def play(self):
    """Plays the output waveform (16-bit mono at 16 kHz)."""
    samples = self._get_int16at16K()
    sa.play_buffer(samples,
                   num_channels=1,
                   bytes_per_sample=2,
                   sample_rate=16_000)
def play_sound(data, fs):
    """Play a numpy sample buffer with simpleaudio, then sleep one second.

    =====================================
    :param data: numpy array of 16-bit samples, shape (n,) for mono or
        (n, channels) for multi-channel
    :param fs: Sample Rate

    Fixes: the docstring claimed Sounddevice but the code uses simpleaudio,
    and ``len(data.shape)`` reported 2 channels for ANY 2-D array — the
    channel count is the second dimension, not the rank.
    """
    sa.stop_all()
    channels = data.shape[1] if data.ndim == 2 else 1
    sa.play_buffer(data, channels, 2, fs)
    time.sleep(1)
def play_beat_at(dm, beat, at, t):
    """Busy-wait until `at` seconds have passed since `t`, then render `beat`
    through the filter chain, play it, and report the scheduling drift to `dm`.
    Errors are printed and swallowed; the function returns after one attempt."""
    while True:
        if time.time() - t <= at:
            continue
        try:
            rendered = filters.run(beat)
            drift = time.time() - t - at
            sa.play_buffer(rendered, 1, 2, 44100)
            dm.set_drift(drift)
        except Exception as e:
            print(e)
        return
def play_notes(A_low, A_80_low, A_high, A_80_high, sample_rate=44100):
    """Play the 4 tones in sequence (low, 80% low, 80% high, high), each as
    16-bit mono, blocking until each finishes."""
    for tone in (A_low, A_80_low, A_80_high, A_high):
        handle = sa.play_buffer(tone, 1, 2, sample_rate)
        handle.wait_done()
def _play_sound(self, x_pos, y_pos):
    """Map a canvas click to a tone: the y position picks one of the 88 piano
    keys, the x position sets the volume, then play the generated samples."""
    height = self.canvas.winfo_height()
    # y position -> piano key number (higher on screen = higher key)
    pkey = np.floor(88 * (height - (y_pos + 1)) / height + 1)
    frequency = 440 * 2**((pkey - 49)/12)  # 440Hz is key #49 on a piano
    # x position -> relative volume in [0, 1]
    volume = x_pos / self.canvas.winfo_width()
    # rounding the sine yields -1/0/1 levels; scale into 16-bit range
    samples = np.round(np.sin(frequency * TIME_VECT * 2 * np.pi))
    samples *= volume * 32767
    samples = samples.astype(np.int16)
    sa.stop_all()
    sa.play_buffer(samples, 1, 2, SAMPLE_RATE)
def perfect_fifth():
    """Play an A–E perfect-fifth interval (two notes back to back on both
    stereo channels), building and caching the 16-bit buffer on first use."""
    global cached_fifth
    if cached_fifth is None:
        buf = np.zeros((44100, 2))
        n = len(t)
        # identical signal on left and right channels
        for ch in (0, 1):
            buf[0:n, ch] += A_note
            buf[n:2 * n, ch] += E_note
        # scale the peak to the 16-bit limit before quantising
        buf *= 32767 / np.max(np.abs(buf))
        cached_fifth = buf.astype(np.int16)
    sa.play_buffer(cached_fifth, 2, 2, sample_rate)
def play(self, data):
    """ plays the result in app player """
    self.logger.debug("Playing .. ")
    channel = self.channels[data]
    if channel is None:
        # nothing loaded for this channel yet
        self.logger.debug("Fail to play")
        self.show_message("Warning", "Load A file First",
                          QtWidgets.QMessageBox.Ok,
                          QtWidgets.QMessageBox.Warning)
    else:
        sa.stop_all()
        sa.play_buffer(channel, 1, 2, self.rate)
        sleep(1)
def toggle_playback(self):
    """Toggle looped playback of the current sound.

    When not playing, renders the loop and keeps restarting it while
    self.playing stays True (this call blocks for that whole time). When
    already playing, stops all audio.

    Fix: the stop path never cleared self.playing, so the restart loop
    (running in the other call) saw the stopped buffer and immediately
    restarted playback — stopping was impossible. Clear the flag before
    stopping. Also dropped the redundant busy ``else: continue`` branch.
    """
    if self.playing:
        self.playing = False  # let the restart loop below exit
        sa.stop_all()
    else:
        loop = self.render_loop(self, self.current_sound)
        self.playing = True
        play_obj = sa.play_buffer(loop, self.nchannels,
                                  self.sampwidth, self.framerate)
        while self.playing:
            # restart the loop as soon as the previous pass ends
            if not play_obj.is_playing():
                play_obj = sa.play_buffer(loop, self.nchannels,
                                          self.sampwidth, self.framerate)
def playNote(x, y):
    """Play a three-note arpeggio whose root frequency follows x and whose
    per-note duration follows y (root, major third, fifth), quietly, blocking
    until playback finishes."""
    sample_rate = 44100 // 2
    # root frequency derived from x; third and fifth in equal temperament
    root = (x * 2) + 50
    third = root * 2**(4 / 12)
    fifth = root * 2**(7 / 12)
    # note duration in seconds from y (epsilon keeps it non-zero)
    length = (y / 390) + 0.0001
    steps = np.linspace(0, length, int(length * sample_rate), False)
    # one sine per note, concatenated back to back
    tones = np.hstack([np.sin(freq * steps * 2 * np.pi)
                       for freq in (root, third, fifth)])
    # scale to the 16-bit limit, then integer-divide by 100 for low volume
    tones *= 32767 / np.max(np.abs(tones))
    samples = tones.astype(np.int16) // 100
    handle = sa.play_buffer(samples, 1, 2, sample_rate)
    handle.wait_done()
def drag_slider(self):
    """Jump playback to the sample index selected on the audio slider,
    restarting the player from that offset."""
    self.audioPointer = self.audioSlider.value()
    if not self.audioPlayer:
        return
    if self.audioPlayer.is_playing():
        self.audioPlayer.stop()
    # restart from the new offset (2 channels, 2 bytes/sample as before)
    self.audioPlayer = sa.play_buffer(
        self.mono[self.audioPointer:], 2, 2, self.sample_rate)
def sound(freq, duration):
    """Play a sine tone of the given frequency (Hz) and duration (s) as
    24-bit mono audio, blocking until playback finishes."""
    sample_rate = 44100
    # one timestep per sample over the whole duration
    t = np.linspace(0, duration, int(duration * sample_rate), False)
    tone = np.sin(freq * t * 2 * np.pi)
    # scale to the 24-bit range; silence (freq == 0) stays all-zero,
    # skipping it avoids dividing by a zero peak
    if freq != 0:
        tone *= 8388607 / np.max(np.abs(tone))
    tone = tone.astype(np.int32)
    # repack 32-bit little-endian samples as 24-bit by dropping every
    # fourth byte (works for 2-channel audio too)
    audio = bytearray(b for i, b in enumerate(tone.tobytes()) if i % 4 != 3)
    play_obj = sa.play_buffer(audio, 1, 3, sample_rate)
    play_obj.wait_done()
def play(self, config):
    """Start playback of self.audio (16-bit mono at self.fs); when config is
    "wait", block until playback finishes, otherwise return immediately."""
    handle = sa.play_buffer(self.audio, 1, 2, self.fs)
    if config == "wait":
        handle.wait_done()
def playSound():
    """Play an A-major arpeggio (A4, C#5, E5), 0.25 s per note, as 16-bit
    mono at 44.1 kHz, blocking until playback finishes.

    Fix: ``np.linspace`` requires an integer sample count — ``T * sample_rate``
    is a float (11025.0) and raises TypeError on modern numpy; cast to int.
    """
    # calculate note frequencies
    A_freq = 440
    Csh_freq = A_freq * 2**(4 / 12)
    E_freq = A_freq * 2**(7 / 12)
    # get timesteps for each sample, T is note duration in seconds
    sample_rate = 44100
    T = 0.25
    t = np.linspace(0, T, int(T * sample_rate), False)
    # generate sine wave notes
    A_note = np.sin(A_freq * t * 2 * np.pi)
    Csh_note = np.sin(Csh_freq * t * 2 * np.pi)
    E_note = np.sin(E_freq * t * 2 * np.pi)
    # concatenate notes
    audio = np.hstack((A_note, Csh_note, E_note))
    # normalize to 16-bit range
    audio *= 32767 / np.max(np.abs(audio))
    # convert to 16-bit data
    audio = audio.astype(np.int16)
    # start playback
    play_obj = sa.play_buffer(audio, 1, 2, sample_rate)
    # wait for playback to finish before exiting
    play_obj.wait_done()
def play(self):
    """Useful for debugging"""
    handle = simpleaudio.play_buffer(self.data,
                                     num_channels=self.channels,
                                     bytes_per_sample=self.width,
                                     sample_rate=self.rate)
    try:
        handle.wait_done()
    except KeyboardInterrupt:
        # Ctrl-C stops playback instead of leaving it running
        handle.stop()
def playSound(arr, fs=44100, volume=100, runWhenEnd=None):
    """Scale `arr` to the 16-bit range at `volume` percent, play it as 16-bit
    mono at `fs`, block until done, then call `runWhenEnd` if given.

    Fix: the original ``arr *= ...`` mutated the caller's array in place
    (and raised TypeError on integer input arrays); scale into a copy.
    """
    scaled = arr * (32767 / max(abs(arr)) * (volume / 100))
    samples = scaled.astype(np.int16)
    play_obj = sa.play_buffer(samples, 1, 2, fs)
    play_obj.wait_done()
    if runWhenEnd:
        runWhenEnd()
def playInvertSound(waveFile):
    """Load an audio file via pydub, invert its phase, and start
    (non-blocking) playback with simpleaudio."""
    inverted = AudioSegment.from_mp3(waveFile).invert_phase()
    playback = simpleaudio.play_buffer(inverted.raw_data,
                                       num_channels=inverted.channels,
                                       bytes_per_sample=inverted.sample_width,
                                       sample_rate=inverted.frame_rate)
def speak(self, sequence):
    """Concatenate the per-word .wav resources named in `sequence` (found
    under self._res_path) and start playing the joined audio, keeping the
    play object on self._play_obj.

    The wave parameters (channels, sample width, frame rate) are taken from
    the first resource file that is read.

    :raises Exception: when a resource file is missing, is not a valid wave
        file, or playback fails.
    """
    pcm = bytes()
    need_params = True
    for word in sequence:
        stem = word.strip()
        if not stem:
            continue
        res_name = os.path.join(self._res_path, stem + '.wav')
        try:
            with wave.open(res_name, 'rb') as wav:
                if need_params:
                    need_params = False
                    nc = wav.getnchannels()
                    bps = wav.getsampwidth()
                    fr = wav.getframerate()
                pcm = pcm + wav.readframes(wav.getnframes())
        except FileNotFoundError as e:
            raise Exception("Can't open resource file.") from e
        except wave.Error as e:
            raise Exception("Resource file is not valid wave file.") from e
    try:
        self._play_obj = SA.play_buffer(pcm, nc, bps, fr)
    except Exception as e:
        raise Exception("can't play sound") from e
def stereo_beep(left, right):
    """Play a stereo beep with each channel attenuated by `right`/`left`,
    then pause briefly based on the smaller of the two values.

    NOTE(review): only the both-zero case is guarded — a single zero in
    `left` or `right` still divides by zero below; confirm callers never
    pass one-sided zeros.

    Fixes: np.fromstring/tostring are deprecated (frombuffer returns a
    read-only view, so copy before mutating); the ``med > 300`` branch was
    unreachable because ``med > 100`` was tested first.
    """
    if left == 0 and right == 0:
        print("divison 0")
    else:
        wave_read = wave.open(
            "/home/socialab/JetsonNano-CompVision/beep1_stereo.wav", 'rb')
        audio_data = wave_read.readframes(wave_read.getnframes())
        # uint16 kept from the original, though WAV PCM is normally signed
        # int16 — TODO confirm
        data = np.frombuffer(audio_data, dtype=np.uint16).copy()
        data[0::2] = data[0::2] / right  # beware division by zero (see note)
        data[1::2] = data[1::2] / left
        bytes_per_sample = wave_read.getsampwidth()
        sample_rate = wave_read.getframerate()
        med = min([left, right])
        play_obj = sa.play_buffer(data.tobytes(), 2, bytes_per_sample,
                                  sample_rate)
        play_obj.wait_done()
        # larger threshold first so the longer pause can actually fire
        if med > 300:
            time.sleep(0.15)
        elif med > 100:
            time.sleep(0.1)
def play(self):
    """Play a music.

    Opens <MUSICS_DIR>/<music name>.wav, plays it to completion, and handles
    Ctrl-C by stopping the playback.

    :raises KeyboardInterrupt: if user stops the music with his keyboard
    (by pressing Ctrl-C for example)

    Fixes: `music` was referenced in the KeyboardInterrupt handler even when
    the interrupt fired before play_buffer ran (NameError); the trailing bare
    ``except:`` also swallowed SystemExit — narrowed to ``except Exception``.
    """
    music = None  # defined before the try so the handler can always test it
    try:
        # Open music file
        wave_file = wave.open("%s/%s.wav" % (settings.MUSICS_DIR,
                                             self.music.name), 'rb')
        # Create variables
        audio_data = wave_file.readframes(wave_file.getnframes())
        n_channels = wave_file.getnchannels()
        bytes_per_sample = wave_file.getsampwidth()
        sample_rate = wave_file.getframerate()
        music = simpleaudio.play_buffer(audio_data, n_channels,
                                        bytes_per_sample, sample_rate)
        # Play music
        music.wait_done()
    except (KeyboardInterrupt):
        print("Musique arrêtée par l'utilisateur.")
        # Stop music when pressing "Ctrl-C"
        if music is not None:
            music.stop()
    except Exception:
        print("Impossible de lire la musique.")
def play_loaded_audio(self, event):
    """Tk button handler: play or stop one of the three loaded signals
    (unfiltered / filtered / human-hearing), toggling the button labels."""
    import numpy as np
    # the last component of the widget path identifies which button fired
    player_button = str(event.widget).split('.')[-1]
    if player_button == 'play_unfiltered':
        selected_audio_file = self.audio_file
        button_label = self.play_button_text
    elif player_button == 'play_filtered':
        selected_audio_file = self.filtered_audio_file
        button_label = self.filtered_play_button_text
    else:
        selected_audio_file = self.human_hearing
        button_label = self.hh_play_button_text
    # normalise the selected signal to full-scale 16-bit PCM
    data = (selected_audio_file * 32767 / max(abs(selected_audio_file)))
    data = data.astype(np.int16)
    if event.widget['text'] == 'Play':
        # reset every button label, then mark the pressed one as playing
        self.play_button_text.set('Play')
        self.filtered_play_button_text.set('Play')
        self.hh_play_button_text.set('Play')
        button_label.set('Stop')
        sa.stop_all()
        play_obj = sa.play_buffer(data, 1, self.bytes_per_sample, self.rate)
    else:
        # button showed 'Stop': stop playback and restore its label
        button_label.set('Play')
        sa.stop_all()
    return
def generateNotes():
    """ Provides 7 .wav files with the note from C to B """
    fs = 44100  # hertz
    seconds = 3  # Note duration of 3 seconds
    noteNames = ["C4", "D4", "E4", "F4", "G4", "A4", "B4"]
    for noteName in noteNames:
        myNote = music21.note.Note(noteName)
        noteFrequency = myNote.pitch.frequency
        # Generate array with seconds*sample_rate steps, ranging between 0 and seconds
        t = np.linspace(0, seconds, seconds * fs, False)
        # Generate a sine wave at the note's frequency
        sound = np.sin(noteFrequency * t * 2 * np.pi)
        # Ensure that highest value is in 16-bit range
        audio = sound * (2**15 - 1) / np.max(np.abs(sound))
        # Convert to 16-bit data
        audio = audio.astype(np.int16)
        # Start playback
        play_obj = sa.play_buffer(audio, 1, 2, fs)
        # Wait for playback to finish before exiting
        play_obj.wait_done()
        # Write sound to file
        sf.write('assets/patterns/' + noteName + '.wav', audio, fs)
def _play_with_simpleaudio(seg):
    """Play a pydub AudioSegment through simpleaudio and return the play object."""
    import simpleaudio
    print("in simpleaudio")
    playback_args = dict(num_channels=seg.channels,
                         bytes_per_sample=seg.sample_width,
                         sample_rate=seg.frame_rate)
    return simpleaudio.play_buffer(seg.raw_data, **playback_args)
def play_note(note, octave, duration=1, fs=44100):
    """Look up `note` at `octave` in the module-level `notes` table and play
    it as a sine tone for ceil(duration) seconds at fs Hz, blocking until done.

    :raises Exception: when the note/octave pair is not found.

    Fixes: the time span (ceil(duration) seconds) was paired with only
    ceil(duration * fs) samples, which detuned any fractional duration —
    round the duration up once and size both consistently. Also stopped
    shadowing the `note` parameter with the generated waveform.
    """
    if note == "Cb":
        octave -= 1  # Cb is enharmonic with B of the octave below
    try:
        frequency = floor(
            next(item for item in notes
                 if note in item["note"].split('/')
                 and item["octave"] == octave)['frequency'])
    except StopIteration:
        raise Exception(f'Note "{note}{octave}" was not found')
    # duration is rounded up to whole seconds, as in the original
    seconds = ceil(duration)
    t = np.linspace(0, seconds, seconds * fs, False)
    # sine wave at the looked-up frequency
    waveform = np.sin(frequency * t * 2 * np.pi)
    # ensure the highest value fits the 16-bit range
    audio = waveform * (2**15 - 1) / np.max(np.abs(waveform))
    audio = audio.astype(np.int16)
    play_obj = sa.play_buffer(audio, 1, 2, fs)
    # wait for playback to finish before exiting
    play_obj.wait_done()
def play(self):
    """Play a music.

    Opens <MUSICS_DIR>/<music name>.wav, plays it to completion, and handles
    Ctrl-C by stopping the playback.

    :raises KeyboardInterrupt: if user stops the music with his keyboard
    (by pressing Ctrl-C for example)

    Fixes: `music` was referenced in the KeyboardInterrupt handler even when
    the interrupt fired before play_buffer ran (NameError); the trailing bare
    ``except:`` also swallowed SystemExit — narrowed to ``except Exception``.
    """
    music = None  # defined before the try so the handler can always test it
    try:
        # Open music file
        wave_file = wave.open(
            "%s/%s.wav" % (settings.MUSICS_DIR, self.music.name), 'rb')
        # Create variables
        audio_data = wave_file.readframes(wave_file.getnframes())
        n_channels = wave_file.getnchannels()
        bytes_per_sample = wave_file.getsampwidth()
        sample_rate = wave_file.getframerate()
        music = simpleaudio.play_buffer(audio_data, n_channels,
                                        bytes_per_sample, sample_rate)
        # Play music
        music.wait_done()
    except (KeyboardInterrupt):
        print("Musique arrêtée par l'utilisateur.")
        # Stop music when pressing "Ctrl-C"
        if music is not None:
            music.stop()
    except Exception:
        print("Impossible de lire la musique.")
def _play_with_simpleaudio(seg):
    """Play a pydub AudioSegment through simpleaudio and return the play object."""
    import simpleaudio
    playback_args = dict(num_channels=seg.channels,
                         bytes_per_sample=seg.sample_width,
                         sample_rate=seg.frame_rate)
    return simpleaudio.play_buffer(seg.raw_data, **playback_args)
def speak(text):
    """Synthesize `text` with the Microsoft (Azure) text-to-speech REST API
    and play the result.

    Fetches a bearer token from the issueToken endpoint using the module-level
    `apiKey`, posts an SSML request asking for 16 kHz 16-bit mono PCM, then
    plays the returned bytes, blocking until playback finishes.
    """
    global apiKey
    params = ""
    headers = {"Ocp-Apim-Subscription-Key": apiKey}
    #AccessTokenUri = "https://westus.api.cognitive.microsoft.com/sts/v1.0/issueToken";
    AccessTokenHost = "api.cognitive.microsoft.com"
    path = "/sts/v1.0/issueToken"
    # Connect to server to get the Access Token
    print ("Connect to server to get the Access Token")
    conn = http.client.HTTPSConnection(AccessTokenHost)
    conn.request("POST", path, params, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    data = response.read()
    conn.close()
    accesstoken = data.decode("UTF-8")
    #print ("Access Token: " + accesstoken)
    # build the SSML request body
    body = ElementTree.Element('speak', version='1.0')
    body.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-us')
    voice = ElementTree.SubElement(body, 'voice')
    voice.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-US')
    voice.set('{http://www.w3.org/XML/1998/namespace}gender', 'Male')
    voice.set('name', 'Microsoft Server Speech Text to Speech Voice (en-US, Guy24KRUS)')
    voice.text = 'This is a demo to call microsoft text to speech service in Python.'
    default_output = 'riff-16khz-16bit-mono-pcm'
    headers = {"Content-type": "application/ssml+xml",
               "X-Microsoft-OutputFormat": default_output,
               "Authorization": "Bearer " + accesstoken,
               "X-Search-AppId": "07D3234E49CE426DAA29772419F436CA",
               "X-Search-ClientID": "1ECFAE91408841A480F00935DC390960",
               "User-Agent": "TTSForPython"}
    # the demo sentence above is replaced with the caller's text
    voice.text = text
    #Connect to server to synthesize the wave
    print ("\nConnect to server to synthesize the wave")
    conn = http.client.HTTPSConnection("westus.tts.speech.microsoft.com")
    conn.request("POST", "/cognitiveservices/v1", ElementTree.tostring(body), headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    data = response.read()
    conn.close()
    print("The synthesized wave length: %d" %(len(data)))
    print(type(data))
    # NOTE(review): `data` is a RIFF file (header included) yet is handed to
    # play_buffer as raw PCM, so the header bytes are played too — confirm.
    play_obj = sa.play_buffer(data, 1, 2, 16000)
    # busy-wait until playback finishes
    while play_obj.is_playing():
        pass
def play(self):
    """Render the current audio through the effect chain, normalise to 16-bit
    PCM if needed, and start (non-blocking) mono playback, keeping the play
    object on self._play_obj."""
    rendered = self.chain.render(self._audio)
    if rendered.dtype != np.int16:
        # scale the peak to the 16-bit limit, then quantise
        rendered *= 32767 / np.max(np.abs(rendered))
        rendered = rendered.astype(np.int16)
    # self._play_obj = self.getPlayObject(rendered)
    self._play_obj = sa.play_buffer(rendered, 1, 2, self._sample_rate)
def run(width=300,height=300):
    """Set up the Tk canvas, initial game state, background music and event
    bindings, then enter the main loop (blocks until the window closes)."""
    def redrawAllWrapper(canvas,data):
        # clear and repaint the whole canvas
        canvas.delete(ALL)
        redrawAll(canvas,data)
        canvas.update()
    def mousePressedWrapper(event,canvas,data):
        mousePressed(event,data)
    def mouseClickedWrapper(event,canvas,data):
        mouseClicked(event,data)
    def keyPressedWrapper(event,canvas,data):
        keyPressed(event,data)
        redrawAllWrapper(canvas,data)
    def timeFiredWrapper(canvas,data):
        # periodic tick: advance the game, repaint, reschedule itself
        timeFired(data)
        redrawAllWrapper(canvas,data)
        canvas.after(data["timeDelay"],timeFiredWrapper,canvas,data)
    #set up data and init
    data=dict()
    data['width']=width
    data['height']=height
    data['timeDelay']=30
    data["totalWidth"]=5000
    data["totalHeight"]=4000
    data["name"]="user"
    data["stage"]="menu"
    data["snake"]=body(data["totalWidth"]/2,data["totalHeight"]/2,20,"orange red",data["name"])
    # NOTE(review): "paly_obj" looks like a typo for "play_obj"; the handle is
    # unused here, and audio_data/num_channels/bytes_per_sample/sample_rate
    # are assumed to be module-level globals — confirm
    paly_obj = sa.play_buffer(audio_data, num_channels, bytes_per_sample, sample_rate)
    root=Tk()
    canvas=Canvas(root,width=data['width'],height=data['height'])
    canvas.pack()
    #set up events
    root.bind("<Motion>", lambda event: mousePressedWrapper(event, canvas, data))
    root.bind("<Button-1>", lambda event: mouseClickedWrapper(event,canvas,data))
    root.bind('<Key>',lambda event: keyPressedWrapper(event,canvas,data))
    timeFiredWrapper(canvas,data)
    #launch the app
    root.mainloop()
    print("bye!")
def getPlayObject(self, audio):
    """Start mono 16-bit playback of `audio` at self._sample_rate and return
    the simpleaudio play object."""
    playback = sa.play_buffer(audio, 1, 2, self._sample_rate)
    return playback
# game-board constants
defaultsnakelength=15
maxwidth=5000
maxheight=4000
##quote from##
##http://simpleaudio.readthedocs.io/en/latest/simpleaudio.html#examples##
bgmpath="bgm.wav"
eatpath="eat.wav"
# load the background music once at import time and start playing it;
# the raw frames and wave parameters stay global so timeFired can restart it
wave_read=wave.open(bgmpath,'rb')
audio_data = wave_read.readframes(wave_read.getnframes())
num_channels = wave_read.getnchannels()
bytes_per_sample = wave_read.getsampwidth()
sample_rate = wave_read.getframerate()
wave_obj = sa.WaveObject.from_wave_file(eatpath)
global play_obj
play_obj = sa.play_buffer(audio_data, num_channels, bytes_per_sample, sample_rate)
##end quote##


class circle(object):
    # base moving-circle entity (position, radius, colour, speed, heading)
    def __init__(self,x,y,r,color):
        self.x=x
        self.y=y
        self.r=r
        self.color=color
        # defaultspeed / acceleratespeed are assumed module-level globals — TODO confirm
        self.speed=defaultspeed
        self.acceleratespeed=acceleratespeed
        self.angle=0
    def follow(self,x,y,accelerate):#accelerate is a bool flag
        # NOTE(review): only the stationary-target case is visible in this
        # chunk; the rest of this method may continue outside this view
        prevangle=self.angle
        if(x==self.x and y == self.y):
            self.angle=prevangle
def timeFired(data):
    """One game tick: keep the background music looping, move the player and
    AI snakes, resolve collisions and food, respawn food, and rebuild the
    leader board. (Nesting reconstructed from a collapsed paste — verify.)"""
    global play_obj
    # restart the background music whenever it has finished
    if(not play_obj.is_playing()):
        play_obj = sa.play_buffer(audio_data, num_channels, bytes_per_sample, sample_rate)
    if(data["stage"]=="game"):
        # steer the player snake toward the mouse, in world coordinates
        (x,y)=data["mousePos"]
        (x,y)=(x+data["snake"].bodypart[0].x-data["width"]/2,y+data["snake"].bodypart[0].y-data["height"]/2)
        if(data["snake"].follow(x,y,data["accelerate"])=="die"):
            data["stage"]="gameover"
            # the dead snake's body turns into scattered food
            for i in range(data["snake"].num):
                (xi,yi)=(data["snake"].bodypart[i].x,data["snake"].bodypart[i].y)
                randx=random.randint(-10,10)
                randy=random.randint(-10,10)
                data["food"].append(food(xi+randx,yi+randy,10,data["snake"].bodypart[i].color))
        # acceleration from a click lasts 10 ticks
        data["mouseClickedTimer"]+=1
        if(data["mouseClickedTimer"]==10):
            data["accelerate"]=False
            data["mouseClickedTimer"]=0
    if(data["stage"]!="menu"):
        #AI snakes find path
        for l in data["aisnake"]:
            dis=random.randint(0,10)
            if(dis>5):
                # chase the player
                l.follow(data["snake"].bodypart[0].x,data["snake"].bodypart[0].y,False)
            else:
                # wander toward food; remove the AI snake if it dies
                if(l.automove(data["food"])=="die"):
                    data["aisnake"].remove(l)
        #if collide with AI snakes
        for l in data["aisnake"]:
            if(data["stage"]=="game"):
                if(data["snake"].check_collision(l)=="die"):
                    for i in range(data["snake"].num):
                        (xi,yi)=(data["snake"].bodypart[i].x,data["snake"].bodypart[i].y)
                        randx=random.randint(-10,10)
                        randy=random.randint(-10,10)
                        data["food"].append(food(xi+randx,yi+randy,10,data["snake"].bodypart[i].color))
                    data["stage"]="gameover"
        #if ate something
        # NOTE(review): removes from data["food"] while iterating it — confirm
        for foods in data["food"]:
            if(data["snake"].check_collision(foods)=="eat"):
                data["food"].remove(foods)
                data["foodnum"]-=1
        #if ai collide and die
        for l in data["aisnake"]:
            if(data["stage"]=="game"):
                if(l.check_collision(data["snake"])=="die"):
                    for i in range(l.num):
                        (xi,yi)=(l.bodypart[i].x,l.bodypart[i].y)
                        randx=random.randint(-10,10)
                        randy=random.randint(-10,10)
                        data["food"].append(food(xi+randx,yi+randy,3,l.bodypart[i].color))
                    data["aisnake"].remove(l)
                    #print("remove",l)
        # AI-vs-AI collisions
        for l in data["aisnake"]:
            for m in data["aisnake"]:
                if(l==m):
                    continue
                elif(l.check_collision(m)=="die"):
                    for i in range(l.num):
                        (xi,yi)=(l.bodypart[i].x,l.bodypart[i].y)
                        randx=random.randint(-10,10)
                        randy=random.randint(-10,10)
                        data["food"].append(food(xi+randx,yi+randy,10,l.bodypart[i].color))
                    data["aisnake"].remove(l)
                    #print("remove",l)
                    break
        #if ai ate something:
        for l in data["aisnake"]:
            for foods in data["food"]:
                if(l.check_collision(foods)=="eat"):
                    data["food"].remove(foods)
                    data["foodnum"]-=1
        # top the food back up when it runs low
        if(data["foodnum"]<480):
            randnum=random.randint(10,30)
            for i in range(randnum):
                randx=random.randint(1,data["width"])
                randy=random.randint(1,data["height"])
                randcolor=random.choice(data["food_color"])
                randr=random.randint(3,6)
                data["food"].append(food(randx,randy,randr,randcolor))
            data["foodnum"]+=randnum
        #calculate leader board
        d=dict()
        d[data["snake"].name]=int(data["snake"].energy)
        for i in data["aisnake"]:
            d[i.name]=i.energy
        d=sorted(d.items(),key=lambda d:d[1],reverse=True)
        data["leaderboard"]=d