def fire_laser(self):
    if not self.substract_energy(LASER_ENERGY):
        return
    laser = Laser(self.get_point_to(offset=LASER_OFFSET), self.orientation, self.remove_element)
    self.add_new_element(laser)
    sound.sound(SOUND_LASER).play()
def fire_missile(self):
    if not self.substract_energy(MISSILE_ENERGY):
        return
    missile = Missile(self.get_point_to(), self.orientation, self.acceleration)
    self.add_new_element(missile)
    sound.sound(SOUND_MISSILE).play()
def learning(unknowV):
    def all():
        for ww in unknowV:
            print ww['word'],
        print ''
        print ''
        print len(unknowV)
    all()
    while True:
        i = raw_input('>')
        if i == '':
            continue
        if i == ',quit' or i == ',q':
            break
        if i == ',l':
            all()
            continue
        if i[0] == ',':
            for w in unknowV:
                if w['word'] == i[1:]:
                    sound.sound(i[1:])
                    print '%s\t /%s/ %s' % (w['word'], w['phonetic'][1:-1], w['translation'])
        else:
            continue
        print ''
def repeat(word):
    while True:
        sound.sound(word)
        c = getch.getch()
        if '3' == c:
            continue
        break
    return c
def move(self):
    super(Snacker, self).move()
    for i in Piece.pieces:
        # eat any snack you come into contact with
        if i.touch(self):
            if type(i) == Snack:
                self.bite()
                self.score += i.points
                if i.points >= 0:
                    sound('chomp.wav')
                else:
                    sound('chomp2.wav')
def getTomorrowWeather():
    """
    10 p.m. report. Announces:
    - the overnight temperature
    - tomorrow's air pressure
    - tomorrow's weather
    - tomorrow's high and low temperatures
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    pattern_lv1 = re.compile(".*map_kiatu_icon_lv1.png.*")
    pattern_lv2 = re.compile(".*map_kiatu_icon_lv2.png.*")
    pattern_lv3 = re.compile(".*map_kiatu_icon_lv3.png.*")
    pattern_lv4 = re.compile(".*map_kiatu_icon_lv4.png.*")
    pattern_sunny = re.compile(".*tenki_100.png.*")
    pattern_cloudy = re.compile(".*tenki_200.png.*")
    pattern_rainy = re.compile(".*tenki_300.png.*")
    careful_lv1 = 0
    careful_lv2 = 0
    careful_lv3 = 0
    careful_lv4 = 0
    sunny, cloudy, rainy = 0, 0, 0
    url_test = "https://zutool.jp/"
    driver = webdriver.Chrome(executable_path="chromedriver.exe", options=chrome_options)
    driver.get(url_test)
    time.sleep(5)
    html = driver.page_source.encode('utf-8')
    page_data = BeautifulSoup(html, "html.parser")
    tomorrow_temp = page_data.find(class_="weatherpoint-data_item_tomorrow")
    hour_data = tomorrow_temp.find_all(class_="weatherpoint-data_item")
    # TODO: reconsider the threshold for the air-pressure warning.
    for data in hour_data[5:18]:
        container = str(data.find(class_="weatherpoint-data_item_level"))
        if pattern_lv3.match(container):
            careful_lv3 += 1
        elif pattern_lv4.match(container):
            careful_lv4 += 1
    if careful_lv4 >= 4:
        sound(filename="soundFile/strongWind.mp3", playback_time=10)
    elif careful_lv3 >= 4:
        sound(filename="soundFile/wind.mp3", playback_time=10)
    # Weather report
    for data in hour_data[9:22]:
        container = data.find(class_="weatherpoint-data_item_weather")
        if pattern_rainy.match(str(container)):
            sound(filename="soundFile/rain.mp3", playback_time=10)
            break
    sound()
def cluster_names(self):
    '''
    This function forms clusters according to the requirements. It converts the names
    to their soundex versions and creates a matrix of levenshtein distances, where each
    entry is the distance between two soundex names. This matrix is used as the input
    for an agglomerative clustering model, which assigns each name to a cluster.
    It populates self.clusters as a list of cluster assignments according to the output
    of the model.
    :return: does not return anything
    '''
    from sound import sound
    from hw1.linguistic_distance import linguistic_distance
    from sklearn.cluster import AgglomerativeClustering
    sound = sound()
    linguistic_distance = linguistic_distance()
    # Call get_soundex() from the sound class to get the soundex conversion for each name
    soundex = []
    for name in self.names:
        soundex.append(sound.get_soundex(name))
    # Call levenshtein() from the linguistic_distance class to get the matrix of distances
    distance = []
    for i in range(len(soundex)):
        row = []
        for j in range(len(soundex)):
            row.append(linguistic_distance.levenshtein(soundex[i], soundex[j]))
        distance.append(row)
    # Cluster the names using agglomerative clustering and populate self.clusters
    model = AgglomerativeClustering(n_clusters=self.clusters_to_form)
    model.fit(distance)
    self.clusters = model.labels_
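# Aside (not from the original homework code): sklearn's AgglomerativeClustering defaults
# to euclidean distance with ward linkage, so the call above treats each row of the
# Levenshtein matrix as a feature vector. If the intent were to cluster on the pairwise
# distances themselves, a precomputed-distance variant could look like this minimal sketch;
# the toy matrix and cluster count below are illustrative assumptions only.
import numpy as np
from sklearn.cluster import AgglomerativeClustering

toy_distances = np.array([[0, 1, 4],
                          [1, 0, 3],
                          [4, 3, 0]])  # symmetric pairwise Levenshtein distances
model = AgglomerativeClustering(n_clusters=2,
                                affinity='precomputed',  # named "metric" in newer scikit-learn
                                linkage='average')       # 'ward' only works with euclidean features
print(model.fit_predict(toy_distances))  # the two names at distance 1 end up in the same cluster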
def button_sound_clicked(self):
    """
    """
    _debug('GUICounts: button_sound_clicked')
    if self.is_playing_sounds == False:
        # Note that we are playing sounds
        self.button_sound.set_text('Stop sounds')
        self.button_sound.set_style('background-color:rgb(255, 155, 0);')
        self.is_playing_sounds = True
        # Create the sound object
        self.sound = sound.sound()
        # Determine the bounds, which will be useful for mapping the counts to a frequency
        self.cmax = 1e6      # Maximum count to consider
        self.cmin = 0        # Minimum count to consider
        self.crange = self.cmax - self.cmin  # Range of counts to consider
        self.fmax = 2000     # Maximum frequency for the maximum counts
        self.fmin = 150      # Minimum frequency for the minimum counts
        self.frange = self.fmax - self.fmin  # Range of frequencies to play
        # The attribute "self.is_playing_sounds" determines, in another loop,
        # whether the sound is emitted or not.
    else:
        # Stop the sound if it was on
        self.is_playing_sounds = False
        # Update the GUI
        self.button_sound.set_text('Sound with counts :3')
        self.button_sound.set_style('background-color: rgb(255, 155, 255);')
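# Aside (assumption, not part of the original handler): the cmin/cmax and fmin/fmax bounds
# above suggest a linear count-to-frequency mapping that is performed elsewhere, in the GUI's
# sound-playing loop. A minimal sketch of such a mapping, with a hypothetical helper name:
def counts_to_frequency(counts, cmin=0.0, cmax=1e6, fmin=150.0, fmax=2000.0):
    """Map a count value linearly onto [fmin, fmax]; illustrative helper only."""
    c = min(max(counts, cmin), cmax)  # clamp so the frequency stays inside the range
    return fmin + (c - cmin) / (cmax - cmin) * (fmax - fmin)

# Example: half of the maximum counts lands in the middle of the frequency range:
# counts_to_frequency(5e5) -> 1075.0 Hz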
def playback(self, index, start_t=0, stop_t=None, from_connect=False):
    '''play entire wav file from beginning to end for call at index'''
    # obtain cid
    cid = self.entry(index)[self._cid_index]
    # grab call log obj
    cl = self._logs_pack.call_logs[cid]
    if cl.wav:
        if cl.wav not in self.sig_set:
            self.sig_set.add(cl.wav)
        if from_connect:
            start_t = cl.audio_connect_time
        # TODO: need to introduce the signal obj to visig...
        # play the signal
        sound.sound(self.sig_set.vector(cl.wav), 8e3, start=start_t, stop=stop_t)
    else:
        print("WARNING : no wave files were found for index", index, "- cid", cid)
        print("no playback available...")
def mainmenu():
    MENU = pygame.image.load('../media/images/mainmenu.jpg')
    i = 0
    while True:
        process()
        SCREEN.blit(MENU, (0, 0))
        pygame.display.update()
        clicked = pygame.mouse.get_pressed()
        pos = pygame.mouse.get_pos()
        if clicked[0] == 1:
            if 330 < pos[0] < 625 and 160 < pos[1] < 220:
                pygame.time.delay(500)
                break
            elif 330 < pos[0] < 625 and 250 < pos[1] < 300:
                sound()
            elif 330 < pos[0] < 625 and 330 < pos[1] < 390:
                helps()
            elif 330 < pos[0] < 625 and 425 < pos[1] < 480:
                credit()
            elif 330 < pos[0] < 625 and 510 < pos[1] < 570:
                i = asktoquit()
                if i == 1:
                    pygame.quit()
                    exit()
        CLOCK.tick(FPS)
def __init__(self, spaceship, left=True):
    GameObject.__init__(self)
    self.ship = spaceship
    self.position = (0, 0)
    self.left = left
    self.blink_time = 0
    self.blink_visible = True
    self.salarm = sound.sound(SOUND_ALARM, play=False)
    self.in_alarm = False
    if left:
        self.s_position = (BAR_X_1 + BAR_TEXT_MARGIN_X, BAR_Y_S + BAR_TEXT_MARGIN_Y)
        self.e_position = (BAR_X_1 + BAR_TEXT_MARGIN_X, BAR_Y_E + BAR_TEXT_MARGIN_Y)
    else:
        self.s_position = (BAR_X_2 - BAR_TEXT_MARGIN_X / 2, BAR_Y_S + BAR_TEXT_MARGIN_Y)
        self.e_position = (BAR_X_2 - BAR_TEXT_MARGIN_X / 2, BAR_Y_E + BAR_TEXT_MARGIN_Y)
def wrongway(All, Path, number, B):
    endw = 0
    termination = 0
    for kkk in range(0, 12):
        if All[kkk] == 1:
            if Path[kkk] == 0:
                B[number] = 2
                break  # stop at the first wrong-way step
    if B[number] == 0:
        B[number] = 0
    if B[number] == 2:
        print("u r in the wrong way")
        file = filew
        end = sound(file, playing)
        if end == 0:
            B[number] = 0
            endw = 1
    return B[number], endw
def random_sound(name, unit):
    # Plays a sound; name is the name of the attribute that has the sound.
    global sounds
    if name not in unit.style.friendly_contents or 'loop' in unit.style.friendly_contents[name]:
        say(name + " not in " + unit.tts_name)
        return False
    data = unit.style.friendly_contents[name].split()
    if data[0] == 'repeat':
        data = data[2:]
    fle = mod + '/ui/sounds/' + random.choice(data) + '.ogg'
    testpath = pathlib.Path(fle)
    if not testpath.exists():
        say("Error! File not found. This particular sound could be in another sounds folder")
        return False
    sounds = [sound.sound(playing=True, filename=fle)]
    return None
def __init__(self, width=100, height=100):
    '''Create an audiosplode world'''
    self.width = width
    self.height = height
    self.cells = [[EmptyCell(x, y, self) for y in range(height)] for x in range(width)]
    # cell [0][0] has its top left at the (0,0) pixel
    # the centre of this cell is (0.5, 0.5)
    # this seems a little confusing, but I think it's overall less confusing than the alternative
    # Luke
    #self.mobs = [mobclass.mob([5,6])]
    self.mobs = []
    # how many mobs escaped
    self.escaped = 0
    self.money = 50
    self.sound = sound.sound()
    self.pathdebug = []
    # where the mobs are trying to get to
    self.sink = None
    # array of Spawn cells
    self.spawns = []
    # are there new towers, so pathfinding will have to be redone?
    self.newTowers = False
    # towers are kept separate from cells because updating all the cells was insanely slow
    self.towers = []
    self.shots = []
def way(Now, reg, creg, count):
    print("way start!!!")
    for ii in range(0, 12):
        if Now[ii] != 0:
            count = ii + 1
    if count >= creg:
        print("OK")
        creg = count
    if count < creg:
        for _ in range(7):
            print("not OK")
        print("u r in the wrong way77")
        file = filew
        end = sound(file, playing)
        creg = count
    return creg, count
def cluster_names(self):
    '''
    This function clusters names based on the levenshtein distance between their
    soundex representations. This is done by passing the matrix of distances between
    names into an agglomerative clustering model.
    Output: cluster labels
    '''
    from sklearn.cluster import AgglomerativeClustering
    from linguistic_distance import linguistic_distance
    from sound import sound
    sound = sound()
    names_sndx = []
    for n in self.names:
        names_sndx.append(sound.get_soundex(n))
    dists = linguistic_distance()
    distmat = []
    for n1 in names_sndx:
        row = []
        for n2 in names_sndx:
            row.append(dists.levenshtein(n1, n2))
        distmat.append(row)
    clust = AgglomerativeClustering(n_clusters=self.clusters_to_form)
    clust.fit(distmat)
    self.labeling = clust.labels_
    return clust.labels_
plt.title('Original Signal')

# ## Filter implementation:

# In[5]:

filtered = signal.lfilter(b, 1, s)
print("length of filtered sound in samples: ", len(filtered))
plt.plot(filtered)

# ## Play the filtered sound:

# In[6]:

import functions
snd.sound(filtered, 32000)

# ## Now Down-sampling with factor N:

# In[7]:

N = 16
filteredds = filtered[::N]
plt.plot(filteredds)

# ## Listen to it at 1/N'th sampling rate:

# In[8]:

snd.sound(filteredds, 2000)
plt.show()

xpref, mTbarkquant = psyacprefilter(x, fs, quality=100)
plt.plot(mTbarkquant)
plt.title('The Masking Thresholds')
plt.xlabel('The Bark Subbands')
plt.show()

xpref = np.round(xpref)  # mid-tread quantizer
#xpref = np.floor(xpref) + 0.5  # mid-rise quantizer
xrek = psyacpostfilter(xpref, fs, mTbarkquant)

print("Original Signal")
os.system('espeak -s 120 "Original Signal"')
sound.sound(x, fs)

print("Pre-filtered Signal")
plt.plot(xpref)
plt.xlabel('sample')
plt.ylabel('Value')
plt.title('The Psycho-Acoustically Prefiltered Signal')
plt.show()
os.system('espeak -s 120 "The amplified Pre-filtered Signal"')
sound.sound(xpref * 1000, fs)

print("Reconstructed Signal after Quantization according to the Masking threshold")
os.system('espeak -s 120 "Reconstructed Signal after the Postfilter"')
sound.sound(xrek, fs)
print("xrek.shape=", xrek.shape)
s4_ds = s4[0::8]
s5_ds = s5[0::8]
s6_ds = s6[0::8]
s7_ds = s7[0::8]
s8_ds = s8[0::8]

# playing filtered sound of subband 1
#print("Playing filtered sound of SB1")
#sound.sound(s1, 32000)

# playing filtered sound of SB1 after downsampling
#print("Playing filtered and downsampled sound of SB1")
#sound.sound(s1_ds, 4000)

# playing filtered sound of subband 4
print("Playing filtered sound of SB4")
sound.sound(s4, 32000)

# playing filtered sound of SB4 after downsampling
#print("Playing filtered and downsampled sound of SB4")
#sound.sound(s4_ds, 4000)

# frequency response
w, H1 = signal.freqz(h1)
w, H2 = signal.freqz(h2)
w, H3 = signal.freqz(h3)
w, H4 = signal.freqz(h4)
w, H5 = signal.freqz(h5)
w, H6 = signal.freqz(h6)
w, H7 = signal.freqz(h7)
w, H8 = signal.freqz(h8)

# plotting impulse response and frequency response
'''
This source file checks the "soundex" function on the "sound" class to ensure the
expected string is generated.
@author [email protected]
'''
if __name__ == '__main__':
    from sound import sound
    sound_dictionary = {
        'California': 'C416',
        'David': 'D130',
        'Google': 'G240',
        'Robert': 'R163',
        'Rupert': 'R163',
        'Tigger': 'T260'
    }
    sound = sound()
    for word in sound_dictionary:
        if sound.get_soundex(word) == sound_dictionary[word]:
            print 'Soundex works for', word
        else:
            print 'Soundex failed for', word, 'generated:', sound.get_soundex(word), 'but expected', sound_dictionary[word]
            exit(-1)
    print 'All soundex tests passed!'
# -*- coding: utf-8 -*-
import os
from gui import ask
import collections
import webbrowser
import config
import twython
import sound
import audio_player

global snd
snd = sound.sound()
player = audio_player.URLStream()
timelines = collections.OrderedDict()
apikey = "W48NhXLuPeP66yvcXXurhQPY6"
apisecret = "jST5JRY7KK8tjyxEm6QcpIWrHrMWeHXqyNPsK5w0ohYd9L7kHu"
#apikey = "gRcSncxR8Y2buqPYFd4U8qJKU"
#apisecret = "4gq2245Nust9dLCOTzBaJKvJQgrzwiqBYfKVDm7cpw1kb7WfUQ"


def auth():
    global soundpack
    try:
        soundpack = config.appconfig['general']['soundpack']
    except:
        soundpack = "default"
    global twitter
    twitter = twython.Twython(apikey, apisecret)
    global auth
    if config.appconfig['general']['key'] == "" or config.appconfig['general']['secret'] == "":
        auth = twitter.get_authentication_tokens()
def run(self):
    clock = pygame.time.Clock()
    pygame.font.init()
    font = pygame.font.SysFont("Tahoma", 28)
    font.set_bold(True)
    font_small = pygame.font.SysFont("Tahoma", 18)
    while True:
        clock.tick(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.KEYDOWN and event.key == pygame.K_l:
                changeLanguage()
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                if self.phase == 1:
                    self.core.change()
                    if self.core.combo is not None:
                        sound("win")
                    else:
                        sound("flip1")
                    self.phase = -1
                elif self.phase != 0 and self.core.balance < self.core.stake:
                    self.core.balance = 0
                    self.core.hand.give_all(self.core.deck)
                    self.phase = 0
                elif self.phase == 0:
                    self.core.balance = Core.START_BALANCE
                    self.core.set_initial_combo()
                    self.phase = -1
                elif self.phase == -1:
                    sound("roll")
                    self.core.roll()
                    self.phase = 1
            elif event.type == pygame.KEYDOWN and self.phase != 0:
                if event.key == pygame.K_1:
                    self.core.changeMultiplier(1)
                elif event.key == pygame.K_2:
                    self.core.changeMultiplier(2)
                elif event.key == pygame.K_3:
                    self.core.changeMultiplier(3)
                elif event.key == pygame.K_4:
                    self.core.changeMultiplier(4)
                elif event.key == pygame.K_5:
                    self.core.changeMultiplier(5)
            elif self.phase == 1 and self.core.hand.cards and event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                for i in range(len(self.card_rects)):
                    if self.card_rects[i].collidepoint(event.pos[0], event.pos[1]):
                        sound("flip2")
                        self.core.hand.cards[i].flip()
                        break
        self.screen.fill((0, 0, 0))
        self.screen.blit(self.background, (0, 0))
        if self.core.hand.is_empty():
            for i in range(5):
                self.screen.blit(self.slot_sprite, (i * 180 + 75, 320))
        else:
            i = 0
            for card in self.core.hand.cards:
                self.screen.blit(card.get_sprite() if card.flipped else self.card_back_sprite, (i * 180 + 75, 320))
                i += 1
        self.screen.blit(font.render(t("balance") + str(self.core.balance), -1, (255, 255, 255)), (650, 30))
        cmd = None
        if self.phase == 0:
            cmd = t("game.state.start")
        else:
            if self.phase == 1:
                cmd = t("game.state.change")
            elif self.phase == -1:
                cmd = t("game.state.roll")
        win_sum = 0
        if self.core.combo is not None:
            if self.core.combo == Combo.INITIAL:
                win_sum = self.core.combo.rate
            else:
                win_sum = self.core.combo.rate * self.core.stake * self.core.multiplier
        self.screen.blit(font_small.render(t("space") + cmd, -1, (255, 255, 255)), (650, 70))
        self.screen.blit(font_small.render(t("language.change"), -1, (255, 255, 255)), (650, 100))
        if self.phase != 0:
            self.screen.blit(font_small.render(t("multiplier.change"), -1, (255, 255, 255)), (650, 130))
        self.draw_table()
        pygame.display.update()
fb = np.sin(np.pi / (2 * N) * (np.arange(int(1.5 * N)) + 0.5))

print("Encoder part:")
# MDCT and quantization:
yq, y, mTbarkquant = MDCT_psayac_quant_enc(x, fs, fb, N, nfilts, quality=60)
print("Decoder part:")
xrek, mT, ydeq = MDCTsyn_dequant_dec(yq, mTbarkquant, fs, fb, N, nfilts)

print("Original Signal")
os.system('espeak -s 120 "Original Signal"')
sound.sound(x, fs)

print("Reconstructed Signal after Quantization according to the Masking threshold")
os.system('espeak -s 120 "Reconstructed Signal after Quantization according to the Masking threshold"')
sound.sound(xrek, fs)

#print("y[3:6,10]=", y[3:6,10])
#print("ydeq[3:6,10]=", ydeq[3:6,10])

import matplotlib.pyplot as plt
plt.plot(mTbarkquant)  # value range: 0...75
plt.title("The Quantization Indices of the Scalefactors")
plt.xlabel("The Bark Subbands")
plt.show()
reco_Rect = filt(up_Rect, fb_Rect, M)
reco_Hann = filt(up_Hann, fb_Hann, M)
reco_Sine = filt(up_Sine, fb_Sine, M)
reco_Kais = filt(up_Kais, fb_Kais, M)

# Reconstructed signal
reconstructed_Rect = reconstructed(reco_Rect, M)
reconstructed_Hann = reconstructed(reco_Hann, M)
reconstructed_Sine = reconstructed(reco_Sine, M)
reconstructed_Kais = reconstructed(reco_Kais, M)

#sound.sound(chan, 32000)
#sound.sound(reconstructed_Rect, 32000)
#sound.sound(reconstructed_Hann, 32000)
#sound.sound(reconstructed_Sine, 32000)
sound.sound(reconstructed_Kais, 32000)

# Plot the LP frequency response of all filters
plt.figure("LP FILTER Freq. Resp. of All Filters")
plt.xlabel("Number of filter-taps [n]")
plt.ylabel("Filter coefficients")

# LP freq. resp. of the rectangular-window filter
wr, hr = sig.freqz(fb_Rect[0])
plt.plot((wr / (2 * np.pi)) * fs, 20 * np.log10(np.abs(hr)), 'b-', label='LP Rect')

# LP freq. resp. of the sine-window filter
ws, hs = sig.freqz(fb_Sine[0])
plt.plot((ws / (2 * np.pi)) * fs, 20 * np.log10(np.abs(hs)), 'g-', label='LP Sine')

# LP freq. resp. of the Hanning-window filter
wh, hh = sig.freqz(fb_Hann[0])
plt.plot((wh / (2 * np.pi)) * fs, 20 * np.log10(np.abs(hh)), 'r-', label='LP Hanning')
def main():
    # set up the pin numbers; these should be the output pins on your Pi
    signal = trafficLight.TrafficLight([18, 15, 17])
    # set up the sound
    noise = sound.sound()

    # run the starting test sequence
    for i in range(1, 4):
        signal.change('red')
        time.sleep(0.5)
        signal.change('yellow')
        time.sleep(0.5)
        signal.change('green')
        time.sleep(0.5)
    signal.red.on()
    signal.yellow.on()
    signal.green.on()
    time.sleep(1)
    signal.red.off()
    signal.yellow.off()
    signal.green.off()
    noise.play()

    # import connection string and JQL
    conex_file = open('conx.txt')
    url = conex_file.readline()
    auth_string = conex_file.readline()
    jql = conex_file.readline()
    jira_handle = jira.Jira(url, jql, auth_string)
    old_total = 0
    try:
        while True:
            jira_handle.request()
            if jira_handle.total_tickets() >= 0 and jira_handle.total_tickets() < old_total:
                noise.play()
                old_total = jira_handle.total_tickets()
            elif jira_handle.total_tickets() > 0:
                old_total = jira_handle.total_tickets()
            if jira_handle.total_tickets() == -1:
                # Connection error
                signal.change('red')
                for i in range(0, 10):
                    signal.red.on()
                    time.sleep(0.5)
                    signal.red.off()
            elif jira_handle.total_tickets() == -2:
                # parse error
                signal.change('yellow')
                for i in range(0, 10):
                    signal.yellow.on()
                    time.sleep(0.5)
                    signal.yellow.off()
            elif jira_handle.total_tickets() < 35 and jira_handle.critical_tickets() == False:
                signal.change('green')
            elif jira_handle.total_tickets() >= 35 and jira_handle.total_tickets() < 50 and jira_handle.critical_tickets() == False:
                signal.change('yellow')
            else:
                signal.change('red')
            # so we don't hammer the API
            time.sleep(5)
    except KeyboardInterrupt:
        signal.cleanup()
        pass
import sound
#import thread
import time

test = sound.sound()
print 'done'

# quick chord test
test.play(0)
test.play(1)
test.play(2)
time.sleep(0.5)

# trying to break it by running too many threads
[test.play(2) for i in xrange(3)]
time.sleep(0.4)

# scale test
test.play(3)
time.sleep(3.5)
    return model


def keras_PQMF_syn(subbands, model):
    """PQMF synthesis filter bank implemented with Keras.
    argument:
    subbands: a 2D array containing the subbands, the last dim. is the subband index
    returns:
    xrek, 1D array of the reconstructed (audio) signal
    """
    # Make the dimensionality suitable for Keras:
    subbands = np.expand_dims(subbands, axis=0)
    subbands = np.expand_dims(subbands, axis=2)
    print("subbands.shape=", subbands.shape)
    xrek = model.predict(subbands)  # Compute the synthesis filter bank
    print("xrek.shape=", xrek.shape)
    # Extract the right dimension for the reconstructed audio signal:
    xrek = xrek[0, :, 0, 0]
    return xrek


if __name__ == '__main__':
    from sound import sound
    #N = 1024        # Number of filters, stride
    #filtlen = 8192  # Length of filter impulse response
    N = 64
    filtlen = 640
    model = generate_model_syn(N, filtlen)
    with open("pqmf_subbands.pickle", 'rb') as subfile:
        subbands = pickle.load(subfile)
    xrek = keras_PQMF_syn(subbands, model)
    os.system('espeak -ven -s 120 ' + '"The output of the synthesis PQMF"')
    sound(2**15 * xrek, 16000)
# # Python Example: Sampling
# Make a sine wave which at 44100 Hz sampling rate has a frequency of 400 Hz at 1 second duration.
# Hence we need 44100 samples, and 400 periods of our sinusoid in this second.
#
# `- Gerard Schuler`

# ## Import the relevant modules:

# In[1]:

import numpy as np
s = np.sin(2 * np.pi * 400 * np.arange(0, 1, 1. / 44100))
import sound
sound.sound(s * 2**15, 44100)

# ## Plot the first 1000 samples:

# In[2]:

get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
plt.plot(s[:1000])

# ## Plot the first 100 samples:

# In[3]:

plt.plot(s[:100])
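# Quick sanity check of the arithmetic stated in the text above (not part of the original
# notebook; the variable names below are illustrative only): at fs = 44100 Hz and f = 400 Hz,
# one second gives 44100 samples and 400 sine periods, i.e. 44100 / 400 = 110.25 samples per period.
fs_demo, f_demo = 44100, 400
print("samples per second:", fs_demo)           # 44100
print("periods per second:", f_demo)            # 400
print("samples per period:", fs_demo / f_demo)  # 110.25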
def __init__(self, window, window_title):
    self.window = window
    self.window.title(window_title)
    self.window.geometry("500x450")
    try:
        self.entspeichern()
    except Exception as e:
        print('Kann nicht entspeichern:' + str(e))
        pass
    self.callback = self.enable_btn
    window.iconbitmap('assets/bone.ico')
    self.pathlabel = tk.Label(window)
    self.pathlabel.grid(row=0, column=0)
    self.flag_game = tk.BooleanVar()
    self.eichungwechsel = True

    browsebutton = tk.Button(window, text="Browse", bd='5', command=self.browsefunc)
    browsebutton.grid(row=1, padx=40, column=0)

    tk.Label(window, text='Maximalkraft [N]:\n (nur FREIE ANALYSE)').grid(row=0, column=2, sticky='w')
    tk.Label(window, text='Vorspannungskraft [N]:\n (nur FREIE ANALYSE)').grid(row=1, column=2, sticky='w')
    tk.Label(window, text='Vorspannungslänge [ms]: ').grid(row=2, column=2, sticky='w')

    plateauH_entry = tk.Entry(window, width=6)
    plateauH_entry.grid(row=1, column=3, sticky='w')
    plateauH_entry.insert(0, '80')
    peak_entry = tk.Entry(window, width=6)
    peak_entry.grid(row=0, column=3, sticky='w')
    peak_entry.insert(0, '360')
    plateauL_entry = tk.Entry(window, width=6)
    plateauL_entry.grid(row=2, column=3, sticky='w')
    plateauL_entry.insert(0, '1000')

    self.analyse_btn = tk.Button(window, text="ANALYSE!", bd='5',
                                 command=lambda: filecomp(self.filename, self.peak, (self.peak / 4), float(plateauL_entry.get())),
                                 state='disabled')
    self.analyse_btn.grid(row=0, column=1, sticky='w', pady=10)

    self.ohne_ref_btn = tk.Button(window, text='FREIE ANALYSE!', bd='5',
                                  command=lambda: filecomp(self.filename, peak_entry.get(), plateauH_entry.get()),
                                  state='disabled')
    self.ohne_ref_btn.grid(row=1, column=1, sticky='w')

    self.reader_btn = tk.Button(window, text="AUFNAHME!", bd='5',
                                command=lambda: (self.disable_btn(), self.nullung(daten), open_reader_from_main(daten, self.callback)))
    self.reader_btn.grid(row=3, column=1, sticky='w', pady=5)

    self.realtimeplot_btn = tk.Button(window, text="REALTIMEPLOT!", bd='5',
                                      command=lambda: (self.disable_btn(), self.nullung(daten), open_realtimeplot3_from_main(daten, self.callback)))
    self.realtimeplot_btn.grid(row=4, column=1, sticky='w', pady=5)

    game_rdR = tk.Radiobutton(window, text='Rechts!', var=self.flag_game, value=True)
    game_rdR.grid(row=5, column=2, sticky='w')
    game_rdL = tk.Radiobutton(window, text='Links!', var=self.flag_game, value=False)
    game_rdL.grid(row=5, column=3, sticky='w')

    self.game_btn = tk.Button(window, text="SPIEL!", bd='5',
                              command=lambda: (self.disable_btn(), self.nullung(daten), flappy(self.flag_game.get(), daten, self.callback)))
    self.game_btn.grid(row=5, column=1, sticky='w', pady=5)

    fortschritt_btn = tk.Button(window, text="FORTSCHRITT!", bd='5', command=fortschritt)
    fortschritt_btn.grid(row=6, column=1, sticky='w', pady=5)

    self.sound_btn = tk.Button(window, text='SOUND!', bd='5',
                               command=lambda: (self.disable_btn(), self.nullung(daten), sound(daten, self.callback)))
    self.sound_btn.grid(row=7, column=1, sticky='w', pady=5)

    self.eichungR_lable = tk.Label(window, text='Rechts:')
    self.eichungR_lable.grid(row=8, column=0)
    self.eichungL_lable = tk.Label(window, text='Links:')
    self.eichungL_lable.grid(row=10, column=0)
    self.eichungR_entry = tk.Entry(window, width=15)
    self.eichungR_entry.grid(row=9, column=0)
    self.eichungL_entry = tk.Entry(window, width=15)
    self.eichungL_entry.grid(row=11, column=0)

    self.eichung_btn = tk.Button(window, text='Eichung 1kg rechts', bd='5', command=lambda: self.eichung())
    self.eichung_btn.grid(row=10, column=1, sticky='w')

    self.speichern_btn = tk.Button(window, text='SAVE!', bd='5', command=self.speichern)
    self.speichern_btn.grid(row=10, column=2, sticky='w', padx=10)

    center(self.window)
    self.window.mainloop()
                  dest='hrescale', help='Height rescale factor')
(options, args) = parser.parse_args()

debug_log = options.debug_log

if not options.bb_file:
    print('No BIOS/BASIC ROM selected (e.g. msxbiosbasic.rom)')
    sys.exit(1)

# bb == bios/basic
bb = rom(options.bb_file, debug, 0x0000)
put_page(0, 0, 0, bb)
put_page(0, 0, 1, bb)

snd = sound(debug)

if options.scc_rom:
    for o in options.scc_rom:
        parts = o.split(':')
        scc_obj = scc(parts[2], snd, debug)
        scc_slot = int(parts[0])
        scc_subslot = int(parts[1])
        put_page(scc_slot, scc_subslot, 1, scc_obj)
        put_page(scc_slot, scc_subslot, 2, scc_obj)

if options.disk_rom:
    for o in options.disk_rom:
        parts = o.split(':')
        disk_slot = int(parts[0])
        disk_subslot = int(parts[1])
def unit_manager(u):
    # Main unit manager that accepts input from the user.
    # Keys without the shift key allow you to view and listen to properties or sounds.
    # Holding the shift key with a key that says a property will allow you to change most properties.
    # 1-7: properties
    # Q-P: sounds
    pygame.display.set_caption(u.tts_name)
    c, stc, fc = u.contents, u.style.contents, u.friendly_contents
    while win.wait(20):
        if win.pressing(k_ctrl) and win.key_pressed(k_n):
            # This allows creating a new unit, but this code should probably be moved to the unit list menu.
            temp = data()
            internal_name = win.get_input("Enter the new unit's name")
            if len(internal_name) == 0 or internal_name == None:
                say("Canceled")
                continue
            external_name = win.get_input(
                "Enter the unit's name that will be shown to the player. Just press enter to use the internal name")
            if len(external_name) == 0:
                external_name = internal_name
            temp.name = internal_name
            temp.tts_name = external_name
            largest_tts = list(tts.contents.keys())
            largest_tts = [int(i) for i in largest_tts]
            largest_tts = sorted(largest_tts)[-1]
            largest_tts += 1
            tts.contents[str(largest_tts)] = external_name
            m = menu.menu(sorted([str(i.tts_name) for i in units]), win)
            res = m.run(prompt='Select a unit that can be used to take initial properties from or just press escape')
            temp.style = data()
            if res != -1:
                tempunit = rules.children[rules.children.index(res)]
                temp.style.contents['title'] = str(largest_tts)
                temp.style.name = internal_name
                temp.style.contents['is_a'] = tempunit.name
                temp.contents['is_a'] = tempunit.name
                temp = set_up_inherit(temp, rules)
                temp.style = set_up_inherit(temp.style, style)
            rules.children.append(temp)
            style.children.append(temp.style)
            units.append(temp)
            say("Done, made " + temp.tts_name + " as " + temp.name)
            unit_manager(temp)  # View the new unit.
            return
        if win.pressing(k_shift):
            if win.key_pressed(k_1):
                set_val('hp_max', u)
            if win.key_pressed(k_2):
                set_val('damage', u)
            if win.key_pressed(k_3):
                set_val('armor', u)
            if win.key_pressed(k_4):
                set_val('speed', u)
            if win.key_pressed(k_5):
                set_val('range', u)
            if win.key_pressed(k_6):
                set_val('cooldown', u)
            if win.key_pressed(k_7):
                set_val('can_use', u, False)
            if win.key_pressed(k_w):
                set_sound("launch_attack", u)
            if win.key_pressed(k_e):
                set_sound("attack_hit", u)
            if win.key_pressed(k_r):
                set_sound("move", u)
            if win.key_pressed(k_y):
                set_sound("death", u)
        if win.key_pressed(k_q) and 'noise' in stc and 'repeat' not in stc['noise']:
            # Different from the other sounds because noise can be either loop or repeat
            sounds = [sound.sound(playing=True, filename=mod + '/ui/sounds/' + stc['noise'][5:].split()[0] + '.ogg')]
        if win.key_pressed(k_w):
            random_sound("launch_attack", u)
        if win.key_pressed(k_e):
            random_sound("attack_hit", u)
        if win.key_pressed(k_r):
            random_sound("move", u)
        if win.key_pressed(k_t):
            random_sound('noise', u)
        if win.key_pressed(k_y):
            random_sound("death", u)
        if win.key_pressed(K_F1):
            say(u.name)  # Speaks the internal, not the tts, name
        if win.key_pressed(k_1):
            get_val('hp_max', u)
        if win.key_pressed(k_2):
            get_val('damage', u)
        if win.key_pressed(k_3):
            get_val('armor', u)
        if win.key_pressed(k_4):
            get_val('speed', u)
        if win.key_pressed(k_5):
            get_val('range', u)
        if win.key_pressed(k_6):
            get_val('cooldown', u)
        if win.key_pressed(k_7) and 'can_use' in c:
            tolk.say(c['can_use'].replace(' ', ', ').replace('a_', 'ability_'))
        if win.key_pressed(k_escape):
            return
import map
import application
from random import randint
import sound
from sound_positioning import position_sound_1d
import player
from pyglet.window import key
from speak import speak
import pyglet

m = map.map()
me = player.player()
en = player.player()
en.x = -15
gun = sound.sound()
step = sound.sound()
death = sound.sound()
amb = sound.sound()
pain = sound.sound()
enemyhit = sound.sound()
enemydeath = sound.sound()
jump = sound.sound()
amb.load("sounds/amb.wav")
enemy = sound.sound()
jump.load("sounds/jump.wav")
enemy.load("sounds/enemy.wav")
death.load("sounds/playerdeath.ogg")
enemydeath.load("sounds/enemydeath.wav")
enemyhit.load("sounds/enemyhit.wav")
gun.load("sounds/gun.wav")


def positions():
    position_sound_1d(enemy.handle, me.x, en.x, 1, 0.5)
import numpy as np
import matplotlib.pyplot as plt

fs = 44100
f = 400.0
s = np.sin(2 * np.pi * f * np.arange(0, 1, 1.0 / fs))

from sound import sound
sound((2**15) * s, fs)

# Now plot the first 1000 samples
plt.plot(s[0:1000])
plt.show()

# Next plot the first 100 samples
plt.plot(s[0:100])
plt.show()

# We generate the unit impulse train
unit = np.zeros(44100)
unit[0::8] = 1
plt.plot(unit[0:100])
plt.xlabel('n')
plt.ylabel('unit(n)')
plt.show()

# Listen to it, with scaling to the value range for 16 bit/sample
sound(unit * 2.0**15, 44100)

# The multiplication with the unit impulse train
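# Hedged sketch of the step the truncated comment above appears to lead into (an assumption,
# not code from the original file): multiplying the sine by the unit impulse train keeps every
# 8th sample and zeroes the rest, which models sampling of the 400 Hz tone.
sampled = s * unit  # element-wise product: non-zero only at every 8th sample
plt.plot(sampled[0:100])
plt.xlabel('n')
plt.ylabel('s(n) * unit(n)')
plt.show()
sound(sampled * 2.0**15, 44100)  # listen to the sampled signal at the original rate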
# MDCT analysis and synthesis filter bank, one after the other, for time measurement
# Gerald Schuller, November 2017

from keras_MDCTanalysis_stereo import *
from keras_MDCTsynthesis_stereo import *
from sound import sound
import time

N = 1024        # Number of filters, stride
filtlen = 2048  # Length of filter impulse response

modelana = generate_model_ana_stereo(N, filtlen)  # Compile a neural-net analysis filter bank
modelsyn = generate_model_syn_stereo(N, filtlen)  # Compile a neural-net synthesis filter bank

fs, X = wav.read('teststereo.wav')
print("fs=", fs)
X = X * 1.0 / 2**15

startime = time.time()
# Analysis filter bank:
Y = keras_MDCT_ana_stereo(X, modelana)
# Synthesis filter bank:
xrek = keras_MDCT_syn_stereo(Y, modelsyn)
endtime = time.time()
print("Duration analysis-synthesis: ", endtime - startime)

os.system('espeak -ven -s 120 ' + '"The output of the synthesis MDCT"')
sound(2**15 * xrek, fs)
    # 25% of the time go for a 5'ish second bid after a last-second bid
    if lateBid and bidAt is None and last and last < lateBidLast and random.random() < lateBidChance:
        bidAt = lateBid

    # 80% of the time be risky
    if bidAt is None and random.random() < snipeChance:
        bidAt = snipeBid

    # Otherwise look normal'ish
    if bidAt is None:
        bidAt = defaultBid

    return calcBid(bidAt)


sounds = sound.sound()
site = smokoo.smokoo(interface=interface)
browser = runner.runner(remoteShellPort, 'smokoo.co.za')
browser.run('window.alert=alert=function(){};')


def log(item, m):
    logfile = file(str(item) + '.log', 'a')
    print >> logfile, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), m
    logfile.close()


clicks = {}
lastBidder = {}
bidders = {}
myBids = {}
bidTime = {}
def __init__(self, master, config):
    # display options
    row = 2
    PADX = 8
    PADY = 3
    self.totalMsg = 0
    self.messages = []
    self.hashlist = []
    self.cargoCount = "0"
    self.colors = []
    self.config = config
    self.run = True
    self.connected = False
    self.soundplayer = sound()
    self.checkBoxes = []
    self.entryStatList = ["ProspectedAsteroid", "StartJump", "SupercruiseExit"]
    try:
        self.journal = journal()
    except ValueError as e:
        print(e)
        quit()

    # setup ui
    backgroundColor = self.config.config['ui_colors']['backgroundColor']
    textColor = self.config.config['ui_colors']['textColor']
    boxColor = self.config.config['ui_colors']['boxColor']
    boxTextColor = self.config.config['ui_colors']['boxTextColor']

    self.w = master
    self.w.title("EliteProspecting")
    self.w.protocol("WM_DELETE_WINDOW", self.closing)
    self.w.bind("<Return>", self.saveSettings)
    self.w.resizable(False, False)
    self.w.configure(background=backgroundColor)

    self.tLtd = tk.IntVar(value=self.config.config['mining']['track_ltd'])
    self.tPainite = tk.IntVar(value=self.config.config['mining']['track_painite'])
    self.sound = tk.IntVar(value=self.config.config['ui']['sound'])
    self.trans = tk.IntVar(value=self.config.config['ui']['transparency'])
    self.collect = tk.IntVar(value=self.config.config['server']['collect'])
    self.onlineD = tk.IntVar(value=self.config.config['ui']['online'])
    self.overlay = tk.IntVar(value=self.config.config['ui']['show_overlay'])
    self.cargo = tk.IntVar(value=self.config.config['mining']['track_cargo'])

    self.ipLabel = tk.Label(self.w, text="Server IP", background=backgroundColor, foreground=textColor)
    self.ipAddr = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.ipLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.ipAddr.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.portLabel = tk.Label(self.w, text="Server Port", background=backgroundColor, foreground=textColor)
    self.port = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.portLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.port.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.roomLabel = tk.Label(self.w, text="Server Room", background=backgroundColor, foreground=textColor)
    self.room = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.roomLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.room.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.myColorLabel = tk.Label(self.w, text="My Color", background=backgroundColor, foreground=textColor)
    self.myColor = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.myColorLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.myColor.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.otherColorLabel = tk.Label(self.w, text="Other's Color", background=backgroundColor, foreground=textColor)
    self.otherColor = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.otherColorLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.otherColor.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.fontLabel = tk.Label(self.w, text="Font size", background=backgroundColor, foreground=textColor)
    self.font = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.fontLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.font.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.lineLabel = tk.Label(self.w, text="Lines number", background=backgroundColor, foreground=textColor)
    self.line = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.lineLabel.grid(row=row, padx=PADX, sticky=tk.W)
    self.line.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    row += 1

    self.ltdCB = tk.Checkbutton(self.w, text='Track LDT greater than', variable=self.tLtd, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.ltdCB.grid(row=row, padx=PADX, pady=PADY, sticky=tk.W)
    self.ltdThreshold = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.ltdThreshold.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    self.checkBoxes.append((self.ltdCB, self.tLtd))
    row += 1

    self.painiteCB = tk.Checkbutton(self.w, text='Track painite greater than', variable=self.tPainite, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.painiteCB.grid(row=row, padx=PADX, pady=PADY, sticky=tk.W)
    self.painiteThreshold = tk.Entry(self.w, background=boxColor, foreground=boxTextColor)
    self.painiteThreshold.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    self.checkBoxes.append((self.painiteCB, self.tPainite))
    row += 1

    self.cargoB = tk.Checkbutton(self.w, text='Track my cargo', variable=self.cargo, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.cargoB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.cargoB, self.cargo))
    row += 1

    self.soundCB = tk.Checkbutton(self.w, text='Play a sound when threshold is met', variable=self.sound, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.soundCB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.soundCB, self.sound))
    row += 1

    self.overlayB = tk.Checkbutton(self.w, text='Show overlay', variable=self.overlay, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.overlayB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.overlayB, self.overlay))
    row += 1

    self.transB = tk.Checkbutton(self.w, text='Make overlay transparent', variable=self.trans, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.transB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.transB, self.trans))
    row += 1

    self.collectB = tk.Checkbutton(self.w, text='Allow server to store prospecting event for statistical purpose (anonymous)', variable=self.collect, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.collectB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.collectB, self.collect))
    row += 1

    self.onlineDB = tk.Checkbutton(self.w, text='Start EliteProspecting online', variable=self.onlineD, background=backgroundColor, foreground=textColor, command=self.onCheck)
    self.onlineDB.grid(row=row, column=0, padx=PADX, pady=PADY, sticky=tk.W)
    self.checkBoxes.append((self.onlineDB, self.onlineD))
    row += 1

    self.settings = tk.Button(self.w, text="Save settings", command=self.saveSettings, background=backgroundColor, foreground=textColor)
    self.settings.grid(row=row, padx=PADX, pady=PADY, sticky=tk.W)
    row += 1

    self.onlineB = tk.Button(self.w, text="Go online", command=self.connect, background=backgroundColor, foreground=textColor)
    self.onlineLabel = tk.Label(self.w, text="Offline", background=backgroundColor)
    self.onlineB.grid(row=row, padx=PADX, pady=PADY, sticky=tk.W)
    self.onlineLabel.grid(row=row, column=1, padx=PADX, pady=PADY, sticky=tk.EW)
    self.onlineLabel.config(foreground="Red")

    self.loadConf()
    self.loadSetup()
    self.setupUi()
    self.onCheck()

    self.processThread = threading.Thread(target=self.processEvents)
    self.processThread.start()
    self.networkThread = threading.Thread(target=self.receiveMsg)
    self.networkThread.start()

    # go online at startup?
    if self.config.config['ui']['online'] == "1":
        self.connect()
plt.title('Spreading Function')
plt.show()

spreadingfuncmatrix = spreadingfunctionmat(spreadingfunctionBarkdB, alpha, nfilts)
plt.imshow(spreadingfuncmatrix)
plt.title('Matrix spreadingfuncmatrix as Image')
plt.xlabel('Bark Domain Subbands')
plt.ylabel('Bark Domain Subbands')
plt.show()

#-Testing-----------------------------------------
# A test magnitude spectrum:
# White noise:
x = np.random.randn(32000) * 1000
sound.sound(x, fs)

mX = np.abs(np.fft.fft(x[0:2048], norm='ortho'))[0:1025]
mXbark = mapping2bark(mX, W, nfft)
# Compute the masking threshold in the Bark domain:
mTbark = maskingThresholdBark(mXbark, spreadingfuncmatrix, alpha, fs, nfilts)
# Masking threshold in the original frequency domain:
mT = mappingfrombark(mTbark, W_inv, nfft)

plt.plot(20 * np.log10(mX + 1e-3))
plt.plot(20 * np.log10(mT + 1e-3))
plt.title('Masking Threshold for White Noise')
plt.legend(('Magnitude Spectrum White Noise', 'Masking Threshold'))
plt.xlabel('FFT subband')
plt.ylabel("Magnitude ('dB')")
plt.show()
import sound
import sys

w = sys.argv[1].decode(sys.stdin.encoding).encode('gbk')
sound.sound(w)