def main():
    """A user input interface for terminal.

    Lists the bundled audio files, gathers parameters either from
    ``sys.argv`` (filepath, hertz, languageCode, chooseMethod) or
    interactively, then dispatches to the selected transcription method.
    """
    # Import all audio files from audio folder.
    # NOTE(review): the '*.' + '*' pattern only matches names containing a dot;
    # confirm every audio file in the folder has an extension.
    files = glob.glob(
        os.path.join(os.path.dirname(__file__), 'audio/', '*.' + '*'))
    # show all audio files
    for i, x in enumerate(files):
        print(str(i + 1) + ') ' + x.title())
    # When the user provides system arguments at script startup, use those instead.
    if len(sys.argv) > 1:
        filepath = sys.argv[1]
        hertz = int(sys.argv[2])
        languageCode = sys.argv[3]
        chooseMethod = int(sys.argv[4])
    else:
        filepath = files[int(input("Press the number of the audio file \n")) - 1]
        hertz = int(input('Enter hertz E.G. 16000 \n'))
        languageCode = input('Enter language code E.G. nl-NL or en-GB \n')
        chooseMethod = int(
            input(
                '1) Google Storage audio \n2) Audio file Sliced \n3) Audio file Async \n'
            ))
    if chooseMethod == 1:
        # NOTE(review): this branch ignores the gathered filepath/hertz/
        # languageCode and transcribes a hard-coded file — confirm intentional.
        AudioTranscribe.fromGoogleStorage(Audio(filename='woordentest',
                                                fileFormat='mp3',
                                                languageCode='nl-NL'),
                                          enable_word_time=True)
    elif chooseMethod == 2:
        AudioTranscribe.AudioTranscribe.fromAudioFile(
            Audio(filepath, hertz, languageCode))
    elif chooseMethod == 3:
        AudioTranscribe.AudioTranscribe.transcribeFromSlicedAudio(
            configAudio=Audio(filepath, hertz, languageCode),
            configSlicing=ConfigSlicing(0, 60000, 60000, 500, -40))
    else:
        # BUG FIX: the old message only mentioned [1] or [2] although three
        # methods exist; exit(0) now signals a normal, user-requested quit
        # instead of the old exit(1) failure code.
        print('Something went wrong, please choose [1], [2] or [3], or for exit [q]')
        if input() == 'q':
            exit(0)
        else:
            main()
def get_text_response_via_speech_recognition(self):
    """Record audio and convert it to text, retrying until a string is produced.

    Returns:
        The stripped transcription text. Retries indefinitely on failure.
    """
    while True:
        response = SpeechToText(Audio().record(),
                                language=self.language).convert_to_text()
        # BUG FIX: the original condition was
        # `isinstance(response, str) and not None` — `not None` is always
        # True, so that clause was dead. Also replaced the unbounded
        # recursive retry with a loop to avoid RecursionError on
        # persistent failure.
        if isinstance(response, str):
            return response.strip()
        print("Something went wrong, retrying.")
def __init__(self):
    """Boot the appliance controller: logging, hardware interfaces, services.

    NOTE(review): instantiation order likely matters — most subsystems
    receive `self` and may read attributes set earlier; confirm before
    reordering.
    """
    # Version string reported in the boot log below.
    self.__version__ = '3.1.3'
    logger_format = '%(asctime)s %(message)s'
    # Log to ./logfile.log, truncating it on every start (filemode='w')...
    logging.basicConfig(format=logger_format,
                        level=logging.INFO,
                        datefmt="%H:%M:%S",
                        filename='./logfile.log',
                        filemode='w')
    # ...and mirror records to the local syslog daemon socket.
    logging.getLogger().addHandler(SysLogHandler(facility=SysLogHandler.LOG_DAEMON,
                                                 address='/dev/log'))
    # Presumably flipped by sig_handler to request shutdown — confirm.
    self._SIGKILL = False
    self.logging = logging
    # Configuration store backed by a JSON file.
    self.config = DB('./config.json')
    self.network = Network(self)
    self.display = DisplayContent()
    self.detector = Detector()
    self.ultrasound = Ultrasound(self)
    self.temp = Temp()
    # WebSocket server listening on all interfaces, port 8069.
    self.server = SimpleWebSocketServer('', 8069, WebSocketServer, self)
    self.cook = Cook(self)
    self.energy = Energy(self)
    self.history = History(self)
    self.audio = Audio(self)
    self.users = Users(self)
    self.automations = Automations(self)
    # Trap TERM/INT so shutdown runs through sig_handler.
    signal.signal(signal.SIGTERM, self.sig_handler)
    signal.signal(signal.SIGINT, self.sig_handler)
    self.log("Boot: v" + self.__version__)
def __init__(self, device):
    """Set up menu state for a device: audio player, renderer, and cursor."""
    # Link back to the owning device.
    self.parent = device
    # Playback and rendering backends.
    self.player = Audio()
    self.renderer = MusicRender()
    # Menu state: no tracks loaded yet, empty item map, cursor at the top.
    self.track_list = None
    self.menu_items = {}
    self.index = 0
def get_new_audio(self):
    """Wrap the filter's result in a new Audio at the source's sampling rate."""
    # Guard: a filter must have been applied before its result can be fetched.
    if self.result is None:
        print('ERROR: Apply filter in audio before getting it\'s result.')
        exit(1)
    return Audio(self.audio.get_sampling_rate(), self.result)
def __init__(self):
    """Wire voice queries to robot actions, set up GPIO, then consume results.

    NOTE(review): this constructor never returns — it ends in an infinite
    loop dispatching recognised phrases; confirm that is intentional.
    """
    # Map of recognised phrases -> handler methods ("ouch" aliases inflate).
    self.queries = {
        "start": self.start,
        "stop": self.stop,
        "hello": self.hello,
        "inflate": self.inflate,
        "ouch": self.inflate,
        "how are you": self.how_are_you,
        "deflate": self.deflate,
    }
    # BCM pin numbering; pins 10 and 11 are outputs, driven low initially.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(11, GPIO.OUT)
    GPIO.setup(10, GPIO.OUT)
    GPIO.output(11, GPIO.LOW)
    GPIO.output(10, GPIO.LOW)
    # The recogniser thread pushes matched phrases onto this queue.
    result_queue = queue.Queue()
    self.recogniser = Audio(result_queue, list(self.queries.keys()))
    self.recogniser.start()
    pygame.init()
    #self.play_sound_duration('care.mp3',2)
    #sleep(1)
    #self.play_sound_duration('hello_baymax.mp3',2)
    #sleep(1)
    # Block forever: wait for the next recognised phrase and dispatch it.
    while 1:
        result = result_queue.get(True)
        self.queries[result]()
def Automode_C(js, sc, dl):
    """Run one scripted show sequence, then rewind the motor and shut down.

    js: path to a JSON file holding a list of step dicts with optional
        "star", "daylight", "audio", "motor" keys and a mandatory "interval".
    sc: serial/socket-like object with a senddata(str) method.
    dl: daylight controller with dawn()/dusk() methods.

    NOTE(review): relies on module globals `stepping`, `continuing` and
    `allstarsPin`, shared with StepperService threads — this reconstruction
    of the flattened source's block structure should be confirmed against
    the original file.
    """
    print("AutoMode_C")
    with StepperMotor(0.02) as smFront:
        switchCount = 0
        motorCount = 0
        global stepping
        global continuing
        with open(js) as f:
            sequence = json.load(f)
        # Background service that drains `stepping` by driving the motor.
        StepperService(smFront).start()
        for i in sequence:
            if i.get("star") != None:
                # Forward each star pin number; small delay between sends.
                for pin in i["star"]:
                    sc.senddata(str(pin))
                    time.sleep(0.08)
            if i.get("daylight") != None:
                if i.get("daylight"):
                    dl.dawn()
                else:
                    dl.dusk()
            if i.get("audio") != None:
                name = i.get("audio")
                Audio(name).start()
            if i.get("motor") != None:
                # Queue motor steps; motorCount tracks the net travel so the
                # rear pass below can rewind it.
                stepping += i["motor"]
                motorCount += i["motor"]
            if i["interval"] == "wait":
                # Busy-wait until the motor service has consumed all steps.
                while True:
                    if stepping == 0:
                        break
                    time.sleep(0.05)
            elif i["interval"] == "end":
                continuing = False
            else:
                # Numeric interval, shortened by time already spent on sends.
                time.sleep(i["interval"] - 0.09 * switchCount)
            # NOTE(review): nothing in this view ever increments switchCount,
            # so the correction above is always zero — confirm.
            switchCount = 0
        time.sleep(1)
    with StepperMotor(0.04) as smRear:
        StepperService(smRear).start()
        # Rewind: queue the opposite of the net travel from the first pass.
        stepping += -motorCount
        sc.senddata("star")
        print("Turn off the all StarPins")
        # Negative pin numbers presumably mean "turn off" — confirm protocol.
        for pin in allstarsPin:
            sc.senddata(str(-pin))
            time.sleep(0.06)
        sc.senddata("exit")
        while True:
            if stepping == 0:
                continuing = False
                break
            time.sleep(0.03)
    sc.senddata("exit")
    print("Automode_C end\n")
def __init__(self, device):
    """Build the track menu for a device, loading tracks from GestorMusica."""
    self.menu_items = {}
    # BUG FIX(minor): removed the redundant `self.ListaDeTracks = []`
    # that was immediately overwritten by get_tracks() below.
    GM = GestorMusica()
    self.ListaDeTracks = GM.get_tracks()
    # Audio playback backend and link to the owning device.
    self.Player = Audio()
    self.parent = device
def __init__(self, song):
    """Set up audio, detection, the pygame window, and the game view.

    song: identifier handed to LoadSong to resolve the track to play.
    """
    self.audio = Audio()
    # Presumably pre-configures the mixer before pygame.init() — confirm;
    # the call order (preOpen -> pygame.init -> open) looks deliberate.
    self.audio.preOpen()
    self.detect = Detect()
    pygame.init()
    self.audio.open()
    self.song = LoadSong(song).song
    # WIDTH/HEIGHT are module-level constants defined elsewhere in the file.
    pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption('DanceDanceCV')
    screen = pygame.display.get_surface()
    self.view = View(screen, self.detect, self.song)
def process_audio(self, input_data, output_folder):
    """Run the audio pipeline (check, clean, split) and return its output.

    input_data: path/handle consumed by read_json to build the Audio object.
    output_folder: destination directory handed to Audio.
    """
    audio = Audio(data=read_json(input_data), output_folder=output_folder)

    # Integrity check, timed.
    print("BEGIN", end="")
    t0 = time.time()
    audio.check_up()
    print(" OK - {}".format(time.time() - t0))

    # Preprocessing step.
    print("\tPREPROCESADO")
    audio.clean_audio()

    # Segmentation, timed; flush so the in-progress label shows immediately.
    print("\tAUDIO LIMPIO ", end="")
    sys.stdout.flush()
    t0 = time.time()
    audio.split_audio()
    print(" OK - {}".format(time.time() - t0))
    print("\tAUDIO SPLITTED", end="")

    return audio.process_output()
def __init__(self, song, speed):
    """Set up input, audio, the pygame window, and the game scene.

    song: track name, or None to fall back to the bundled "gangnam" track.
    speed: scroll speed, or None for the default of 2.
    """
    self.input = Input()
    self.resource = Resource()
    self.audio = Audio()
    # Pre-open before pygame.init() so the mixer is configured first.
    self.audio.pre_open()
    pygame.init()
    self.audio.open()
    # IDIOM FIX: `!= None` replaced with `is not None` (PEP 8 identity
    # comparison); the duplicated if/else branches were collapsed.
    self.song = loadSong(self.resource,
                         song if song is not None else "gangnam")
    self.clock = pygame.time.Clock()
    pygame.display.set_mode(
        (Constants.SCREEN_WIDTH, Constants.SCREEN_HEIGHT))
    pygame.display.set_caption("DanceCV")
    screen = pygame.display.get_surface()
    self.scene = Scene(self.resource, self.song, screen, self.input,
                       speed if speed is not None else 2)
def __init__(self, config=None):
    """
    Constructor. Boots the whole game engine: config, audio, video,
    OpenGL viewport, input, resources, mods, and theme.

    @param config: L{Config} instance for settings
    """
    if not config:
        config = Config.load()
    self.config = config
    fps = self.config.get("video", "fps")
    tickrate = self.config.get("engine", "tickrate")
    Engine.__init__(self, fps=fps, tickrate=tickrate)
    pygame.init()
    self.title = _("Frets on Fire")
    self.restartRequested = False
    self.handlingException = False
    self.video = Video(self.title)
    self.audio = Audio()
    Log.debug("Initializing audio.")
    frequency = self.config.get("audio", "frequency")
    bits = self.config.get("audio", "bits")
    stereo = self.config.get("audio", "stereo")
    bufferSize = self.config.get("audio", "buffersize")
    self.audio.pre_open(frequency=frequency, bits=bits, stereo=stereo,
                        bufferSize=bufferSize)
    # NOTE(review): pygame.init() is called a second time here — possibly
    # needed after pre_open reconfigures the mixer; confirm.
    pygame.init()
    self.audio.open(frequency=frequency, bits=bits, stereo=stereo,
                    bufferSize=bufferSize)
    Log.debug("Initializing video.")
    # Resolution is stored as "WIDTHxHEIGHT".
    width, height = [
        int(s) for s in self.config.get("video", "resolution").split("x")
    ]
    fullscreen = self.config.get("video", "fullscreen")
    multisamples = self.config.get("video", "multisamples")
    self.video.setMode((width, height),
                       fullscreen=fullscreen,
                       multisamples=multisamples)
    # Enable the high priority timer if configured
    if self.config.get("engine", "highpriority"):
        Log.debug("Enabling high priority timer.")
        self.timer.highPriority = True
    # Derive the drawing geometry from the actual GL viewport.
    viewport = glGetIntegerv(GL_VIEWPORT)
    h = viewport[3] - viewport[1]
    w = viewport[2] - viewport[0]
    geometry = (0, 0, w, h)
    self.svg = SvgContext(geometry)
    self.svg.setRenderingQuality(self.config.get("opengl", "svgquality"))
    glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]),
               int(viewport[3]))
    self.input = Input()
    self.view = View(self, geometry)
    self.resizeScreen(w, h)
    self.resource = Resource(Version.dataPath())
    self.server = None
    self.sessions = []
    self.mainloop = self.loading
    # Load game modifications
    Mod.init(self)
    theme = Config.load(self.resource.fileName("theme.ini"))
    Theme.open(theme)
    # Make sure we are using the new upload URL
    if self.config.get("game", "uploadurl").startswith("http://kempele.fi"):
        self.config.set("game", "uploadurl",
                        "http://fretsonfire.sourceforge.net/play")
    self.addTask(self.audio, synchronized=False)
    self.addTask(self.input, synchronized=False)
    self.addTask(self.view)
    self.addTask(self.resource, synchronized=False)
    self.data = Data(self.resource, self.svg)
    self.input.addKeyListener(FullScreenSwitcher(self), priority=True)
    self.input.addSystemEventListener(SystemEventHandler(self))
    self.debugLayer = None
    self.startupLayer = None
    self.loadingScreenShown = False
    Log.debug("Ready.")
print('Please enter gain for each range (in dB).') try: sub_low = Gain(float(input('16Hz - 60Hz gain: '))) low = Gain(float(input('60Hz - 250Hz gain: '))) mid_low = Gain(float(input('250Hz - 2kHz gain: '))) mid_high = Gain(float(input('2kHz - 4kHz gain: '))) high = Gain(float(input('4kHz - 6kHz gain: '))) bright = Gain(float(input('6kHz - 16kHz gain: '))) except AttributeError: print('ERROR: Gain must be between 6.02dB and -40dB.') exit(1) except KeyboardInterrupt: print('\nBye!') exit(0) blind_intro_path = find_Blind_intro() my_audio = Audio(blind_intro_path) filter1 = Filter(16, 60, sub_low.get_gain(), my_audio) filter2 = Filter(60, 250, low.get_gain(), my_audio) filter3 = Filter(250, 2000, mid_low.get_gain(), my_audio) filter4 = Filter(2000, 4000, mid_high.get_gain(), my_audio) filter5 = Filter(4000, 6000, high.get_gain(), my_audio) filter6 = Filter(6000, 16000, bright.get_gain(), my_audio) bank_of_filters = Filter_Bank( [filter1, filter2, filter3, filter4, filter5, filter6]) new_audio = bank_of_filters.apply_and_sum(my_audio) new_audio = Audio(wav_path=None, sampling_rate=my_audio.get_sampling_rate(), data=new_audio) new_audio.save_audio('new_Blind_intro.wav') try: dump_plots = input(
def __init__(self):
    """Build the game HUD, world, character, controls, camera, lights, audio."""
    # Key state map polled by the movement task: 1 = pressed.
    self.keyMap = {
        "left": 0,
        "right": 0,
        "forward": 0,
        "cam-left": 0,
        "cam-right": 0
    }
    base.win.setClearColor(Vec4(0, 0, 0, 1))
    self.speed = 0
    self.font_digital = loader.loadFont('font/SFDigitalReadout-Heavy.ttf')
    # Speedometer
    self.speed_img = OnscreenImage(image="models/speedometer.png",
                                   scale=.5,
                                   pos=(1.1, 0, -.95))
    self.speed_img.setTransparency(TransparencyAttrib.MAlpha)
    OnscreenText(text="km\n/h",
                 style=1,
                 fg=(1, 1, 1, 1),
                 font=self.font_digital,
                 scale=.07,
                 pos=(1.25, -.92))
    # Display Speed
    self.display_speed = OnscreenText(text=str(self.speed),
                                      style=1,
                                      fg=(1, 1, 1, 1),
                                      pos=(1.3, -0.95),
                                      align=TextNode.ARight,
                                      scale=.07,
                                      font=self.font_digital)
    # Health Bar — H: health, EH: extra health, A: armour.
    self.bars = {'H': 100, 'EH': 0, 'A': 0}
    # bk_text = "This is my Demo"
    # self.textObject = OnscreenText(text = bk_text, pos = (0.55,-0.05),scale = 0.07,fg=(1,0.5,0.5,1),align=TextNode.ACenter,mayChange=1)
    self.Health_bar = DirectWaitBar(text="",
                                    value=100,
                                    pos=(0.280, 0, 0.475),
                                    barColor=(1, 0, 0, 1),
                                    frameSize=(0, .705, .3, .35))
    self.EHealth_bar = DirectWaitBar(text="",
                                     value=0,
                                     pos=(1, 0, 0.475),
                                     barColor=(0, 1, 0, 1),
                                     frameSize=(0, .23, .3, .35),
                                     range=50)
    self.Armour_bar = DirectWaitBar(text="",
                                    value=0,
                                    pos=(.43, 0, .593),
                                    barColor=(159, 0, 255, 1),
                                    frameSize=(0, .8, .3, .35))
    # self.bar = DirectWaitBar(text = "hi",
    # value = 0,
    # range = 500,
    # pos = ( 0,0,0),
    # barColor = (0.97,0,0,1),
    # frameSize = (-0.3,0.3,0.5,0.8),
    # text_mayChange = 1,
    # text_shadow =(0,0,0,0.8),
    # text_fg = (0.9,0.9,0.9,1),
    # text_scale = 0.025,
    # text_pos = (0,0.01,0))

    # Closures over self/bars so key bindings below can mutate the HUD.
    def getHealthStatus():
        return self.bars

    def displayBars():
        # Push current bar values into the HUD widgets.
        health = getHealthStatus()
        self.Health_bar['value'] = health['H']
        self.EHealth_bar['value'] = health['EH']
        self.Armour_bar['value'] = health['A']

    def armourPickup():
        self.bars['A'] += 25
        displayBars()

    def healthPickup():
        self.bars['EH'] += 25
        displayBars()

    def decHealth():
        self.bars['H'] -= 10
        displayBars()

    # Post the instructions
    self.frame = OnscreenImage(image="models/gframe.png",
                               pos=(0, 0, 0),
                               scale=(1.25, 1, 1))
    self.frame.setTransparency(TransparencyAttrib.MAlpha)
    # self.title = addTitle("Panda3D Tutorial: Roaming Ralph (Walking on the Moon)")
    self.inst1 = addInstructions(0.95, "[ESC]: Quit")
    self.inst2 = addInstructions(0.90, "[Left Arrow]: Rotate Ralph Left")
    self.inst3 = addInstructions(0.85, "[Right Arrow]: Rotate Ralph Right")
    self.inst4 = addInstructions(0.80, "[Up Arrow]: Run Ralph Forward")
    self.inst6 = addInstructions(0.70, "[A]: Rotate Camera Left")
    self.inst7 = addInstructions(0.65, "[S]: Rotate Camera Right")
    # Set up the environment
    #
    self.environ = loader.loadModel("models/square")
    self.environ.reparentTo(render)
    self.environ.setPos(0, 0, 0)
    self.environ.setScale(100, 100, 1)
    self.moon_tex = loader.loadTexture("models/moon_1k_tex.jpg")
    self.environ.setTexture(self.moon_tex, 1)
    # Create the main character, Ralph
    self.ralph = Actor("models/ralph", {
        "run": "models/ralph-run",
        "walk": "models/ralph-walk"
    })
    self.ralph.reparentTo(render)
    self.ralph.setScale(.2)
    self.ralph.setPos(0, 0, 0)
    # Create a floater object. We use the "floater" as a temporary
    # variable in a variety of calculations.
    self.floater = NodePath(PandaNode("floater"))
    self.floater.reparentTo(render)
    # Accept the control keys for movement and rotation
    self.accept("escape", sys.exit)
    self.accept("arrow_left", self.setKey, ["left", 1])
    self.accept("arrow_right", self.setKey, ["right", 1])
    self.accept("arrow_up", self.setKey, ["forward", 1])
    self.accept("a", self.setKey, ["cam-left", 1])
    self.accept("s", self.setKey, ["cam-right", 1])
    self.accept("arrow_left-up", self.setKey, ["left", 0])
    self.accept("arrow_right-up", self.setKey, ["right", 0])
    self.accept("arrow_up-up", self.setKey, ["forward", 0])
    self.accept("a-up", self.setKey, ["cam-left", 0])
    self.accept("s-up", self.setKey, ["cam-right", 0])
    # Debug/test keys mutate the HUD bars via the closures above.
    self.accept("h", decHealth)
    self.accept("j", healthPickup)
    self.accept("k", armourPickup)
    taskMgr.add(self.move, "moveTask")
    taskMgr.doMethodLater(.1, self.show_speed, 'updateSpeed')
    # Game state variables
    self.isMoving = False
    # Set up the camera
    base.disableMouse()
    base.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)
    # Create some lighting
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.3, .3, .3, 1))
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(-5, -5, -5))
    directionalLight.setColor(Vec4(1, 1, 1, 1))
    directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))
    render.setLight(render.attachNewNode(ambientLight))
    render.setLight(render.attachNewNode(directionalLight))
    # Project audio manager, keyed to the Ralph actor.
    self.audioManager = Audio(self)
    self.audioManager.startAudioManager()
    self.audioManager.initialiseSound(self.ralph)
row = create_example( path[dataset], hyper_params , n*(i-1) + j, ref_speech, pri_speech, sec_speech) if row is not None: batch.append(row) print(i) return batch def save_batch(dataset,data): df_path = os.path.join(path[dataset],'data_frame.csv') df = pd.read_csv(df_path) df_batch = pd.DataFrame(data = data, columns = columns) df = df.append(df_batch) df.to_csv(df_path,index=False) columns=['key','ref_speech','pri_speech','sec_speech','input_spec_path','output_spec_path','input_phase_path','output_phase_path','dvector_path'] hyper_params = HyperParams() audio = Audio(hyper_params) dataset_path = os.path.join('drive','My Drive','LibriSpeech Dataset') path = {} path['dev'] = os.path.join(dataset_path,'LibriSpeech Dev Dataset') path['test'] = os.path.join(dataset_path,'LibriSpeech Test Dataset') path['train'] = os.path.join(dataset_path ,'LibriSpeech Train Dataset') with open(os.path.join(path['dev'],'dev_speeches.data'),'rb') as f: dev_speeches = pickle.load(f) with open(os.path.join(path['test'],'test_speeches.data'),'rb') as f: test_speeches = pickle.load(f) with open(os.path.join(path['train'],'train_speeches.data'),'rb') as f: train_speeches = pickle.load(f) embedder_path = os.path.join(dataset_path,"embedder.pt") embedder_pt = torch.load(embedder_path,map_location=torch.device('cpu'))
def sound_input2(E2):
    """Record from the microphone, transcribe it, and append the text to E2.

    E2: a Tk text-like widget supporting insert(END, text).
    """
    recorder = Audio()
    recorder.record_voice()
    recognised = recorder.shibie_voice()
    # Nothing recognised -> leave the widget untouched.
    if recognised is None:
        return
    E2.insert(END, recognised)
#!/usr/bin/env python3
"""Decrypt a message hidden in a WAV file using an RSA private key."""
import argparse
from Audio import Audio
import rsa
from Cryptographer import Cryptographer

# Command-line interface: -i input WAV, -enc text encoding.
parser = argparse.ArgumentParser(description='Encryptor parser')
parser.add_argument('-i', type=str, default=r'audio.wav')
parser.add_argument('-enc', type=str, default="utf-8")
args = parser.parse_args()

# The private key file holds the five RSA components separated by spaces.
with open("private_key.txt", "r") as pr:
    private_str = pr.read().split(" ")
private = rsa.PrivateKey(*(int(component) for component in private_str[:5]))

# Extract and decode the hidden message.
audio = Audio(filename=args.i)
decoder = Cryptographer(audio=audio, coding=args.enc)
res = decoder.decode(private)

# '*' stood in for newlines during encoding; restore them for display.
print(res.replace("*", "\n"))
elif chooseMethod == 2: AudioTranscribe.AudioTranscribe.fromAudioFile( Audio(filepath, hertz, languageCode)) elif chooseMethod == 3: AudioTranscribe.AudioTranscribe.transcribeFromSlicedAudio( configAudio=Audio(filepath, hertz, languageCode), configSlicing=ConfigSlicing(0, 60000, 60000, 500, -40)) else: print('Something went wrong, please choose [1] or [2] or for exit [q]') if input() == 'q': exit(1) else: main() pass # main() filename = 'nl-0024.wav' # AudioTranscribe.fromGoogleStorage(Audio(filename=filename, fileFormat='wav', languageCode='nl-NL'), enable_word_time=True) AudioTranscribe.GoogleSpeechToWords( Audio(filename=filename, fileFormat='wav', languageCode='nl-NL')) # AudioTranscribe.fromAudioFile(Audio('aphasiapatientW.wav', 16000, 'en-GB')) # AudioTranscribe.transcribeFromSlicedAudio(Audio('aphasiapatientW.wav', 16000, 'en-GB'))
"""Beo's famous dance""" from time import sleep from Movements import Movements from Eyes import Eyes from Audio import Audio from SensorTouch import SensorTouch SENSORTOUCH = SensorTouch() AUDIO = Audio() EYES = Eyes() MOVEMENTS = Movements() def update_touch(dancing): """Checks to see if the sensor is being touched""" SENSORTOUCH.update_status() status = SENSORTOUCH.get_status() if status == 1: if dancing: return False else: return True else: return False DANCING = False while True: DANCING = update_touch(DANCING) if DANCING:
def __init__(self, cuff_pin, bladder_pin, filename):
    """Set up the state machine, camera stream, and loop delay.

    cuff_pin / bladder_pin: GPIO pin numbers wrapped in Pin objects for the FSM.
    filename: audio file handed to the FSM's Audio player.
    """
    print('Setup')
    self.fsm = FSM(Pin(cuff_pin), Pin(bladder_pin), Audio(filename))
    # Threaded Pi camera stream, already started.
    self.Camera = PiVideoStream().start()
    # Presumably passed to time.sleep (seconds), so 0.01 is 10 ms —
    # the original comment said "ms"; confirm the intended unit.
    self._sleep_time = 0.01
def __init__(self, config=None):
    """Boot the FoFiX game engine: logging, config, audio, video, OpenGL,
    input, resources, mods, animated-stage config, and theme loading.

    @param config: L{Config} instance for settings (loaded if omitted)
    """
    Log.debug("GameEngine class init (GameEngine.py)...")
    self.mainMenu = None  #placeholder for main menu object - to prevent reinstantiation
    self.currentScene = None
    self.versionString = version  #stump: other version stuff moved to allow full version string to be retrieved without instantiating GameEngine
    self.uploadVersion = "%s-4.0" % Version.PROGRAM_NAME  #akedrou - the version passed to the upload site.
    self.dataPath = Version.dataPath()
    # Environment dump for bug reports.
    Log.debug(self.versionString + " starting up...")
    Log.debug("Python version: " + sys.version.split(' ')[0])
    Log.debug("Pygame version: " + str(pygame.version.ver))
    Log.debug("PyOpenGL version: " + OpenGL.__version__)
    Log.debug("Numpy version: " + np.__version__)
    Log.debug("PIL version: " + Image.VERSION)
    Log.debug("sys.argv: " + repr(sys.argv))
    Log.debug("os.name: " + os.name)
    Log.debug("sys.platform: " + sys.platform)
    if os.name == 'nt':
        import win32api
        Log.debug("win32api.GetVersionEx(1): " +
                  repr(win32api.GetVersionEx(1)))
    elif os.name == 'posix':
        Log.debug("os.uname(): " + repr(os.uname()))
    # NOTE(review): this triple-quoted block is a no-op string statement,
    # not the method docstring (it is not the first statement).
    """
    Constructor.

    @param config:  L{Config} instance for settings
    """
    self.tutorialFolder = "tutorials"
    if not config:
        config = Config.load()
    self.config = config
    fps = self.config.get("video", "fps")
    self.tasks = []
    self.frameTasks = []
    self.fps = fps
    self.currentTask = None
    self.paused = []
    self.running = True
    self.clock = pygame.time.Clock()
    self.title = self.versionString
    self.restartRequested = False
    # evilynux - Check if theme icon exists first, then fallback on FoFiX icon.
    themename = self.config.get("coffee", "themename")
    themeicon = os.path.join(Version.dataPath(), "themes", themename,
                             "icon.png")
    fofixicon = os.path.join(Version.dataPath(), "fofix_icon.png")
    icon = None
    if os.path.exists(themeicon):
        icon = themeicon
    elif os.path.exists(fofixicon):
        icon = fofixicon
    self.video = Video(self.title, icon)
    if self.config.get("video", "disable_screensaver"):
        self.video.disableScreensaver()
    self.audio = Audio()
    self.frames = 0
    self.fpsEstimate = 0
    self.priority = self.config.get("engine", "highpriority")
    self.show_fps = self.config.get("video", "show_fps")
    self.advSettings = self.config.get("game", "adv_settings")
    self.restartRequired = False
    self.quicksetRestart = False
    self.quicksetPerf = self.config.get("quickset", "performance")
    self.scrollRate = self.config.get("game", "scroll_rate")
    self.scrollDelay = self.config.get("game", "scroll_delay")
    Log.debug("Initializing audio.")
    frequency = self.config.get("audio", "frequency")
    bits = self.config.get("audio", "bits")
    stereo = self.config.get("audio", "stereo")
    bufferSize = self.config.get("audio", "buffersize")
    self.audio.open(frequency=frequency, bits=bits, stereo=stereo,
                    bufferSize=bufferSize)
    # Command-line play state, filled in elsewhere.
    self.cmdPlay = 0
    self.cmdMode = None
    self.cmdDiff = None
    self.cmdPart = None
    self.gameStarted = False
    self.world = None
    self.audioSpeedFactor = 1.0
    Log.debug("Initializing video.")
    #myfingershurt: ensuring windowed mode starts up in center of the screen instead of cascading positions:
    os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
    # Resolution is stored as "WIDTHxHEIGHT".
    width, height = [
        int(s) for s in self.config.get("video", "resolution").split("x")
    ]
    fullscreen = self.config.get("video", "fullscreen")
    multisamples = self.config.get("video", "multisamples")
    self.video.setMode((width, height),
                       fullscreen=fullscreen,
                       multisamples=multisamples)
    Log.debug("OpenGL version: " + glGetString(GL_VERSION))
    Log.debug("OpenGL vendor: " + glGetString(GL_VENDOR))
    Log.debug("OpenGL renderer: " + glGetString(GL_RENDERER))
    Log.debug("OpenGL extensions: " +
              ' '.join(sorted(glGetString(GL_EXTENSIONS).split())))
    # Video fell back to its default mode: persist safe settings.
    if self.video.default:
        self.config.set("video", "fullscreen", False)
        self.config.set("video", "resolution", "800x600")
    if self.config.get("video", "shader_use"):
        shaders.set(os.path.join(Version.dataPath(), "shaders"))
    # Enable the high priority timer if configured
    if self.priority:
        Log.debug("Enabling high priority timer.")
        self.fps = 0  # High priority
    # evilynux - This was generating an error on the first pass (at least under
    # GNU/Linux) as the Viewport was not set yet.
    try:
        viewport = glGetIntegerv(GL_VIEWPORT)
    except:
        viewport = [0, 0, width, height]
    h = viewport[3] - viewport[1]
    w = viewport[2] - viewport[0]
    geometry = (0, 0, w, h)
    self.svg = SvgContext(geometry)
    glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]),
               int(viewport[3]))
    self.startupMessages = self.video.error
    self.input = Input()
    self.view = View(self, geometry)
    self.resizeScreen(w, h)
    self.resource = Resource(Version.dataPath())
    self.mainloop = self.loading
    self.menuMusic = False
    self.setlistMsg = None
    # Load game modifications
    Mod.init(self)
    self.addTask(self.input, synchronized=False)
    self.addTask(self.view, synchronized=False)
    self.addTask(self.resource, synchronized=False)
    self.data = Data(self.resource, self.svg)
    ##MFH: Animated stage folder selection option
    #<themename>\Stages still contains the backgrounds for when stage rotation is off, and practice.png
    #subfolders under Stages\ will each be treated as a separate animated stage set
    self.stageFolders = []
    currentTheme = themename
    stagespath = os.path.join(Version.dataPath(), "themes", currentTheme,
                              "backgrounds")
    themepath = os.path.join(Version.dataPath(), "themes", currentTheme)
    if os.path.exists(stagespath):
        self.stageFolders = []
        allFolders = os.listdir(
            stagespath
        )  #this also includes all the stage files - so check to see if there is at least one .png file inside each folder to be sure it's an animated stage folder
        for name in allFolders:
            aniStageFolderListing = []
            thisIsAnAnimatedStageFolder = False
            try:
                aniStageFolderListing = os.listdir(
                    os.path.join(stagespath, name))
            except Exception:
                # Not a directory (or unreadable) -> not a stage folder.
                thisIsAnAnimatedStageFolder = False
            for aniFile in aniStageFolderListing:
                if os.path.splitext(
                        aniFile
                )[1] == ".png" or os.path.splitext(
                        aniFile
                )[1] == ".jpg" or os.path.splitext(aniFile)[1] == ".jpeg":
                    #we've found at least one .png file here, chances are this is a valid animated stage folder
                    thisIsAnAnimatedStageFolder = True
            if thisIsAnAnimatedStageFolder:
                self.stageFolders.append(name)
        i = len(self.stageFolders)
        if i > 0:  #only set default to first animated subfolder if one exists - otherwise use Normal!
            defaultAniStage = str(self.stageFolders[0])
        else:
            defaultAniStage = "Normal"
        Log.debug("Default animated stage for " + currentTheme +
                  " theme = " + defaultAniStage)
        aniStageOptions = dict([(str(self.stageFolders[n]),
                                 self.stageFolders[n])
                                for n in range(0, i)])
        aniStageOptions.update({"Normal": _("Slideshow")})
        if i > 1:  #only add Random setting if more than one animated stage exists
            aniStageOptions.update({"Random": _("Random")})
        Config.define("game",
                      "animated_stage_folder",
                      str,
                      defaultAniStage,
                      text=_("Animated Stage"),
                      options=aniStageOptions)
        #MFH: here, need to track and check a new ini entry for last theme - so when theme changes we can re-default animated stage to first found
        lastTheme = self.config.get("game", "last_theme")
        if lastTheme == "" or lastTheme != currentTheme:  #MFH - no last theme, and theme just changed:
            self.config.set("game", "animated_stage_folder",
                            defaultAniStage)  #force defaultAniStage
            self.config.set("game", "last_theme", currentTheme)
        selectedAnimatedStage = self.config.get("game",
                                                "animated_stage_folder")
        if selectedAnimatedStage != "Normal" and selectedAnimatedStage != "Random":
            if not os.path.exists(
                    os.path.join(stagespath, selectedAnimatedStage)):
                Log.warn("Selected animated stage folder " +
                         selectedAnimatedStage +
                         " does not exist, forcing Normal.")
                self.config.set(
                    "game", "animated_stage_folder", "Normal"
                )  #MFH: force "Standard" currently selected animated stage folder is invalid
    else:
        Config.define("game",
                      "animated_stage_folder",
                      str,
                      "None",
                      text=_("Animated Stage"),
                      options=["None", _("None")])
        Log.warn(
            "No stages\ folder found, forcing None setting for Animated Stage."
        )
        self.config.set(
            "game", "animated_stage_folder",
            "None")  #MFH: force "None" when Stages folder can't be found
    # Prefer the theme's CustomTheme module when present, else the default Theme.
    try:
        fp, pathname, description = imp.find_module(
            "CustomTheme", [themepath])
        theme = imp.load_module("CustomTheme", fp, pathname, description)
        self.theme = theme.CustomTheme(themepath, themename)
    except ImportError:
        self.theme = Theme(themepath, themename)
    self.addTask(self.theme)
    self.input.addKeyListener(FullScreenSwitcher(self), priority=True)
    self.input.addSystemEventListener(SystemEventHandler(self))
    self.debugLayer = None
    self.startupLayer = None
    self.loadingScreenShown = False
    self.graphicMenuShown = False
    Log.debug("Ready.")
"""Smoke-test script: build one of each media type and print them all."""
from Médiathèque import Mediatheque
from Auteur import Auteur
from Editeur import Editeur
from Article import Article
from Livre import Livre
from Vidéo import Vidéo
from Audio import Audio
from Multimédia import Multimedia

# One library, one author, and one publisher shared by every work below.
lam = Mediatheque(1, "Le Bouscat")
Massi = Auteur(1, "Ouldrabah")
MassiEdition = Editeur(1, "MassiEdition")

# One instance of each media type.
vid1 = Vidéo(2, "La Bourboule", True, Massi, MassiEdition)
aud1 = Audio(3, "La Bourboule", True, Massi, MassiEdition)
mult1 = Multimedia(4, "La Bourboule", True, Massi, MassiEdition)
livre1 = Livre(5, "La Bourboule", True, Massi, MassiEdition, "03/03/2013",
               3366)
art1 = Article(6, "Se futon de la gueule du monde? ", True, Massi,
               MassiEdition, "Gorafi", 3366)

# Print everything in creation order.
for item in (lam, Massi, MassiEdition, vid1, aud1, mult1, livre1, art1):
    print(item)
"""Encrypt a message into a WAV file using RSA and a BBS generator."""
import argparse
from Audio import Audio
import rsa
from Cryptographer import Cryptographer
from BBS import BBS

# Command-line interface: input/output WAVs and the message (inline or file).
parser = argparse.ArgumentParser(description='Encryptor parser')
parser.add_argument('-i', type=str, default=r'source.wav')
parser.add_argument('-o', type=str, default=r'audio.wav')
parser.add_argument('-fmsg', type=str, default=None)
parser.add_argument('-msg', type=str, default="Hello world!")
parser.add_argument('-enc', type=str, default="utf-8")
args = parser.parse_args()

# Prefer a message file when given; '*' stands in for newlines while encoded.
if args.fmsg is not None:
    with open(args.fmsg, "r") as read_text:
        msg = read_text.read().replace("\n", "*")
else:
    msg = args.msg

# BUG FIX(minor): this reads the PUBLIC key, so the components variable is
# now named public_str (it was misleadingly called private_str).
with open("public_key.txt", "r") as pr:
    public_str = pr.read().split(" ")
public = rsa.PublicKey(int(public_str[0]), int(public_str[1]))

# BUG FIX(minor): removed the unused duplicate
# `audio_test = Audio(filename=args.i)` — it was never referenced.
audio = Audio(filename=args.i)
encrypter = Cryptographer(audio=audio, algorithm=BBS(), coding=args.enc)
encrypter.encrypt(msg, filename=args.o, publicKey=public)

# Re-open the written file as a cheap sanity check that it is readable.
audio2 = Audio(filename=args.o)
print("Successfully encrypted")
# Build the assistant's spoken briefing as one SSML document and play it.
# NOTE(review): this fragment relies on `name`, `user`, and `playsound`
# being defined/imported earlier in the file (not visible here) — confirm.
greeting = Greeting(name)
greeting_message = greeting.message + "\n"
# SSML pauses inserted between briefing sections.
brief_pause = "<break time=\"300ms\"/>"
pause = "<break time=\"1000ms\"/>"
# print(greeting_message)
# Message from todoClass
todo = Todo()
todo_message = todo.message + "\n"
# print(todo_message)
# Message from weather class
weather = Weather()
weather_message = weather.message + "\n"
# print(weather_message)
# Message from News class
news = News()
news_message = news.message + "\n"
# Message from Quote Class
quote = Quote()
quote_message = quote.message
close_message = user.name + brief_pause + "is there anything I can help you with?"
# Concatenate every section into a single <speak> document.
message = "<speak>" + greeting_message + brief_pause + todo_message + pause + weather_message + pause + news_message + brief_pause + quote_message + brief_pause + close_message + "</speak>"
# Synthesize the SSML to an audio file and play it.
audio = Audio(message)
playsound(audio.audio)
"""Map detected movement to a sine tone until the capture window is closed."""
from Movement import MovementClass
from Audio import Audio
import time

movement = MovementClass()
audio = Audio()

while True:
    points = movement.getMovement()
    # Horizontal mouse position drives the pitch; +300 presumably keeps the
    # tone in an audible range — confirm against Audio.playSine.
    audio.playSine(movement.mouse[0] + 300)
    # BUG FIX(minor): removed the no-op `for point in points: pass` loop and
    # the empty `if len(points) > 0: pass` branch — both contained only
    # `pass` and commented-out debug prints.
    if movement.getIsClosed() == 0:
        break
"""Beat-detection scratchpad: load a track and pull per-band flux/beat data.

NOTE(review): matplotlib and several other imports are unused in the visible
portion — this fragment appears truncated before any plotting happens.
"""
import matplotlib.pyplot as plt
import scipy.signal as sig
import collections
import wave
import time
import pyaudio
import scipy.io.wavfile as sciwave
import numpy as np
from Audio import Audio

# f = 'Music/MF DOOM feat RZA - Books of War.wav'
f = 'Music/Blank Banshee - Teen Pregnancy.wav'
audio = Audio(f)
audio.getBeats()
# Column i of each result array corresponds to frequency band i.
data0 = audio.fluxBins[:, 0]
hist0 = audio.fluxAvgs[:, 0]
#delta0 = audio.deltaFlux[:,i]
beats0 = audio.beats[:, 0]
data1 = audio.fluxBins[:, 1]
hist1 = audio.fluxAvgs[:, 1]
beats1 = audio.beats[:, 1]
data2 = audio.fluxBins[:, 2]
hist2 = audio.fluxAvgs[:, 2]
beats2 = audio.beats[:, 2]
data3 = audio.fluxBins[:, 3]
hist3 = audio.fluxAvgs[:, 3]
def testOpen(self):
    """The audio device should open successfully and then close cleanly."""
    device = Audio()
    opened = device.open()
    assert opened
    device.close()
"""Interactively cut a clip out of an audio file and export it."""
from Audio import Audio
from services.audio_service import export

if __name__ == "__main__":
    # Ask for the source file and the clip boundaries in mm:ss form.
    audio_name = input('Type your audio file name: ')
    start_minutes, start_seconds = input(
        'Type your desired start_time: ').split(':')
    end_minutes, end_seconds = input('Type your desired end_time: ').split(':')
    # Build the clip description and hand it to the export service.
    clip = Audio(audio_name, start_minutes, start_seconds, end_minutes,
                 end_seconds)
    export(clip)