def stopRecording(self): time.sleep(.25) self.record_end = len(self.frames)-1 # save index of where we stopped recording frames_to_save = list(self.frames[self.record_start:self.record_end]) normalized_frames_to_save = Audio_Utils.getNormalizedAudioFrames(frames_to_save, Audio_Utils.DEFAULT_DBFS) normalized_frames_to_save = Audio_Utils.getFramesWithoutStartingSilence(normalized_frames_to_save) print "recorded", '%.2f' % Audio_Utils.framesToSeconds(len(frames_to_save)), "seconds of audio" self.record_start = None self.record_end = None self.saved_frames = normalized_frames_to_save
def _listen(self): ''' Listens in an endless loop, updating frames as it listens. Spawn a thread to call this method or it will block you permanently. :return: None ''' stream = StreamBuilder().getInputStream(StreamBuilder.STEREO_MIX_INDEX) while True: read_result = stream.read(1024) # every 60 seconds, reset the size of our frame array UNLESS we are currently recording something (record_start gets set to a number if we are) if len(self.frames) > Audio_Utils.secondsToFrames(60) and self.record_start is None: print "removing all but last 10 seconds of frames. Frame size went from " + str(len(self.frames)) \ + " to " + str(len(self.frames[-Audio_Utils.secondsToFrames(10):])) self.frames = self.frames[-Audio_Utils.secondsToFrames(10):] self.frames.append(read_result)
def _writeFrameToStreams(self, frame): self.stream_in_use = True if self.time_stretch_enabled: self._handleTimeStretchWriteFrameToStreams(frame) elif self.reverse_mode: self.current_sharedStream.playFrame( Audio_Utils.getReversedFrame(frame)) else: self.current_sharedStream.playFrame(frame) self.stream_in_use = False
def _handleTimeStretchWriteFrameToStreams(self, frame): atomic_audio_units = Audio_Utils.unpackFrameIntoAtomicAudioUnits(frame) atomic_audio_units = [ x for i, x in enumerate(atomic_audio_units) if i % (1 - self.pitch_modifier) * 10 != 0 ] print "atomic audio units size is " + str( len(atomic_audio_units)) + " and modulo is " + str( (1 - self.pitch_modifier) * 10) self.time_stretched_audio_unit_buffer.extend(atomic_audio_units) # if buffer exactly full after adding stretched frame contents, play it and empty it if len(self.time_stretched_audio_unit_buffer ) == self.AUDIO_UNITS_PER_FRAME: self.current_sharedStream.playFrame( Audio_Utils.buildFrameFromAtomicAudioUnits( self.time_stretched_audio_unit_buffer)) print str.format("exactly filled buffer {}", len(self.time_stretched_audio_unit_buffer)) self.time_stretched_audio_unit_buffer = [] # if buffer not yet full after adding stretched frame contents, do nothing elif len(self.time_stretched_audio_unit_buffer ) < self.AUDIO_UNITS_PER_FRAME: print str.format("buffer not yet full. only size {}", len(self.time_stretched_audio_unit_buffer)) # if buffer overly-full after adding stretched frame contents, play the first non-overfilled part and save the overfilled part as the new buffer elif len(self.time_stretched_audio_unit_buffer ) > self.AUDIO_UNITS_PER_FRAME: first_audio_units = self.time_stretched_audio_unit_buffer[ 0:self.AUDIO_UNITS_PER_FRAME] remaining_audio_units = self.time_stretched_audio_unit_buffer[ self.AUDIO_UNITS_PER_FRAME:] self.current_sharedStream.playFrame( Audio_Utils.buildFrameFromAtomicAudioUnits(first_audio_units)) print str.format("filled buffer {} and remaining part {}", len(first_audio_units), len(remaining_audio_units)) self.time_stretched_audio_unit_buffer = remaining_audio_units
def listen():
    """Endless capture loop for the module-level recording state.

    Opens a stereo 44.1kHz input stream on the "Stereo Mix" device and
    appends each 1024-sample chunk to the global `frames` buffer,
    periodically trimming it.  Blocks forever -- run in a thread.
    """
    global frames, record_start
    audio_stream = pyaudio.PyAudio().open(
        format=pyaudio.paInt16,
        channels=2,
        rate=44100,
        input=True,
        frames_per_buffer=1024,
        input_device_index=Audio_Utils.getIndexOfStereoMix())
    while True:
        chunk = audio_stream.read(1024)
        # Every ~60 seconds shrink the buffer to its last 10 seconds,
        # UNLESS we are currently recording (record_start is set to a
        # number while recording).
        if record_start is None and len(frames) > secondsToFrames(60):
            frames = frames[-secondsToFrames(10):]
        frames.append(chunk)
def _getFramesFromRecordingName(self, name): if name not in self.recording_names_to_frames: path = os.path.join(self.recordings_folder, name) self.recording_names_to_frames[name] = Audio_Utils.getFramesFromFile(path) return self.recording_names_to_frames[name]
def getLengthOfSoundInSeconds(self):
    """Return the duration of this sound's frame buffer, in seconds."""
    frame_count = len(self.frames)
    return Audio_Utils.framesToSeconds(frame_count)
def reloadFramesFromFile(self):
    """Re-read this sound's frames from disk and clean them up.

    Loads the wav at self.path_to_sound, strips leading silence, then
    normalizes the volume to Audio_Utils.DEFAULT_DBFS.
    NOTE(review): the ordering here (strip, then normalize) is the
    reverse of stopRecording's (normalize, then strip) -- confirm which
    is intended.
    """
    loaded = Audio_Utils.getFramesFromFile(self.path_to_sound)
    trimmed = Audio_Utils.getFramesWithoutStartingSilence(loaded)
    self.frames = Audio_Utils.getNormalizedAudioFrames(trimmed, Audio_Utils.DEFAULT_DBFS)
def moveMarkedFrameIndex(self, move_amount):
    """Shift the marked frame index by move_amount seconds.

    :param move_amount: seconds to shift (negative shifts backwards)
    The result is clamped at 0 so the marker can never go out of bounds.
    """
    delta = Audio_Utils.secondsToFrames(move_amount)
    shifted = self.marked_frame_index + delta
    self.marked_frame_index = shifted if shifted > 0 else 0
def play(self):
    """Play this sound's frames through a shared output stream.

    Acquires a shared stream if none is held, then loops over the frame
    buffer writing one frame at a time, honoring jump/undo flags,
    slow-motion / speed-up ramps, pitch shifting, and reverse playback.
    Releases the shared stream when playback ends.

    :raises ValueError: if self.frames is None (file likely deleted)
    """
    if self.frames is None:
        raise ValueError(
            "Error: cannot play sound self.frames == None. The sound file was most likely deleted. sound = "
            + self.path_to_sound)
    if self.current_sharedStream is None:
        self.current_sharedStream, self.shared_steam_index = SharedStreamManager.getUnusedStreamAndIndex()
    # NOTE(review): unreachable -- self.frames is None was already raised
    # on above; presumably meant as a lazy reload path. Confirm intent.
    if self.frames is None and os.path.exists(self.path_to_sound):
        self.reloadFramesFromFile()
    self.is_playing = True
    self.continue_playing = True
    if self.reset_frame_index_on_play:
        self.frame_index = 0
    else:
        self.frame_index = self.marked_frame_index
        self.reset_frame_index_on_play = True  # auto switch back after one play
    while self.frame_index < len(self.frames) and self.continue_playing:
        # Handle pending jump requests set by other threads/key handlers.
        if self.jump_to_marked_frame_index:
            self.frame_index = self.marked_frame_index
            self.jump_to_marked_frame_index = False
        elif self.jump_to_secondary_frame_index:
            self.frame_index = self.secondary_marked_frame_index
            self.jump_to_secondary_frame_index = False
        elif self.undo_marked_frame_jump:
            # Resume from where we were before the jump, preserving the
            # progress made since the jump landed.
            self.frame_index = self.index_before_jump + (
                self.frame_index - self.marked_frame_index)
            self.undo_marked_frame_jump = False
        # min() guards against an index pushed past the end by a jump.
        current_frame = self.frames[min(self.frame_index, len(self.frames) - 1)]
        if self.oscillation_frames_remaining > 0:
            # Advance the oscillation state machine one step per frame.
            next(self.oscillation_generator)
        # do slow motion stuff here: ramp pitch down (slow) or up (speed)
        # a little each frame until the ramp's frame budget runs out.
        if self.slow_motion_started:
            if self.slow_motion_frames_left > 0:
                self.slow_motion_frames_left -= 1
                #if frame_index % self.slow_motion_frame_skip_rate == 0:
                self.pitch_modifier -= self.slow_motion_slow_rate
            else:
                self.slow_motion_started = False
        elif self.speed_up_started:
            if self.speed_up_frames_left > 0:
                self.speed_up_frames_left -= 1
                #if frame_index % self.slow_motion_frame_skip_rate == 0:
                self.pitch_modifier += self.slow_motion_slow_rate
            else:
                self.speed_up_started = False
        # round the pitch modifier to 0 if its close enough; exact zero
        # comparison below must work despite float drift from the ramps.
        if -0.0001 < float(self.pitch_modifier) < 0.0001:
            self.pitch_modifier = 0
        if self.pitch_modifier != 0:
            current_frame = Audio_Utils.getPitchShiftedFrame(
                current_frame, self.pitch_modifier)
        # if self.frame_index % 100 == 0:
        #     print "current pitch is ", self.pitch_modifier
        #     print "oscillation shift is", self.oscillate_shift, "frames between oscillation shifts", self.frames_between_oscillate_shifts, "half_oscillation_cycles_remaining", self.half_oscillation_cycles_remaining
        self._writeFrameToStreams(current_frame)
        if self.reverse_mode:
            self.frame_index = max(0, self.frame_index - 1)  # prevent out of bounds
            if self.frame_index == 0:
                # Reached the start playing backwards: stop and leave the
                # index where it is for the next (forward) play.
                self.reverse_mode = False
                self.stop()
                self.reset_frame_index_on_play = False
        else:
            self.frame_index += 1
    self.is_playing = False
    # NOTE(review): attribute is spelled "shared_steam_index" (sic)
    # consistently in this method -- renaming would need a wider sweep.
    SharedStreamManager.releaseStreamAtIndex(self.shared_steam_index)
    self.current_sharedStream = None
def OnKeyboardEvent(event):
    """Global hotkey dispatcher for two-key chords.

    Presumably a pyHook-style keyboard hook callback -- returning True
    passes the event through to other handlers (TODO confirm).  Chords
    handled (first key + second key):
      q/w/e/r + digit     : swap in the extended version of that letter's wav
      lmenu   + q/w/e/r   : save last 1s to <key>.wav and 0.5..4.5s cuts
      q/w/e/r/x + oem_3   : back up that letter's wav to Favorites
      q/w/e/r/x + arrows  : trim the start/end of that letter's wav
      lmenu   + pause     : exit the program
      lmenu   + x         : toggle recording; on stop, write x.wav
    """
    global frames, cached_frames, extended_cache, keys_down, record_start, record_end
    updateKeysDown(event)
    if len(keys_down) == 2:
        if keys_down[0] in "qwer" and str(keys_down[1]) in "123456789":
            # Digit N selects the N/2-second extended cut of the letter's wav.
            letter = keys_down[0]
            number = str(float(keys_down[1]) / 2)
            Audio_Utils.swapAudioFileOutForExtendedVersion(letter + ".wav", number)
        elif keys_down[0] == "lmenu" and keys_down[1] in "qwer":
            # Pause briefly so the chord's own key sound settles first.
            time.sleep(.25)
            print "SAVED " + keys_down[1]
            cached_frames = frames[-secondsToFrames(1):]
            extended_cache = frames[-secondsToFrames(5):]
            Audio_Utils.writeFramesToFile(cached_frames, keys_down[1] + ".wav")
            # Also write 0.5s .. 4.5s tail cuts for later swapping.
            for i in range(1, 10):
                half_i = float(i) / 2
                Audio_Utils.writeFramesToFile(
                    extended_cache[-secondsToFrames(half_i):],
                    "Extended_Audio" + "/" + keys_down[1] + "-" + str(half_i) + ".wav")
        elif keys_down[0] in "qwerx" and keys_down[1] == "oem_3":  # oem_3 is tilde
            Audio_Utils.copyFileToBackupFolder(keys_down[0] + ".wav", "Favorites")
        elif keys_down[0] in "qwerx":  # directional keys
            # Arrows trim the letter's wav in place: left/right cut 250ms
            # off the end/start, up/down cut 50ms off the start/end.
            letter_file = keys_down[0] + ".wav"
            if keys_down[1] == "left":
                Audio_Utils.trimEnd(letter_file, letter_file, 250)
            elif keys_down[1] == "right":
                Audio_Utils.trimStart(letter_file, letter_file, 250)
            elif keys_down[1] == "up":
                Audio_Utils.trimStart(letter_file, letter_file, 50)
            elif keys_down[1] == "down":
                Audio_Utils.trimEnd(letter_file, letter_file, 50)
        elif keys_down[0] == "lmenu" and keys_down[1] == "pause":
            sys.exit()
        elif keys_down[0] == "lmenu" and keys_down[1] == "x":
            if record_start is None:  # if we aren't already recording
                record_start = len(frames) - 1  # save index of current frame
            else:  # if we are already recording
                time.sleep(.25)
                record_end = len(frames) - 1  # save index of where we stopped recording
                Audio_Utils.writeFramesToFile(frames[record_start:record_end], "x.wav")
                record_start = None
                record_end = None
    return True