async def load(self):
    if self.is_loaded:
        return
    try:
        if not self.filename or os.path.getsize(self.filename) == 0:
            raise FileNotFoundError
        f = sf.SoundFile(self.filename)
    except FileNotFoundError:
        self.percent_loaded = None
    else:
        self._audio = AudioClip.zeros(f.frames, f.channels, f.samplerate)
        self._audio.writeable = True
        read_so_far = 0
        blocksize = 30 * f.samplerate  # load in 30-second chunks
        for block in f.blocks(blocksize=blocksize):
            self._audio.overwrite(AudioClip(block, f.samplerate), read_so_far)
            read_so_far += blocksize
            self.percent_loaded = 100 * read_so_far / f.frames
            await asyncio.sleep(0)
        f.close()
        self._audio.writeable = False
        self.percent_loaded = None
        self.notify_modified()
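# A minimal sketch (not taken from the project) of how a caller might drive this
# coroutine: run the chunked load as a background task and poll percent_loaded,
# which is a number while loading and None once loading finishes or fails.
# `track` is assumed to be an object exposing load() and percent_loaded as above.
async def load_with_progress(track):
    task = asyncio.create_task(track.load())
    while not task.done():
        if track.percent_loaded is not None:
            print(f"Loading: {track.percent_loaded:.0f}%")
        await asyncio.sleep(0.5)
    await task  # re-raise any exception from the load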
def test_stereo_audio_clip_concatenate():
    clip1 = AudioClip(np.zeros((100, 2), np.float32), 48000)
    clip2 = AudioClip(np.zeros((300, 2), np.float32), 48000)
    result = AudioClip.concatenate((clip1, clip2))
    assert result.frame_rate == 48000
    assert len(result) == 400
    assert result.channels == 2
async def main():
    interface = create_io_interface("jack")
    await interface.init("amio-tests")
    interface.set_transport_rolling(True)
    frame_rate = interface.get_frame_rate()
    for i in range(30):
        print(f"Playspec {i + 1} out of 30...")
        clips = [AudioClip.zeros(100000, 2, frame_rate) for _ in range(30)]
        playspec = [
            PlayspecEntry(
                clip=clip,
                frame_a=0,
                frame_b=100000,
                play_at_frame=0,
                repeat_interval=0,
                gain_l=1.0,
                gain_r=1.0,
            )
            for clip in clips
        ]
        interface.schedule_playspec_change(playspec, 0, 0, None)
        await asyncio.sleep(0.3)
    interface.set_transport_rolling(False)
    await interface.close()
def create_audio_clip(self):
    frame_rate = self._session.frame_rate
    beat_length_seconds = 60 / self._session.bpm
    bar_length_seconds = beat_length_seconds * self._session.time_signature
    self._repeat_interval = int(bar_length_seconds * frame_rate)
    self._audio_clip = AudioClip.zeros(self._repeat_interval, 1, frame_rate)
    self._audio_clip.overwrite(metronome_bar_clip, 0)
    for i in range(1, self._session.time_signature):
        self._audio_clip.overwrite(
            metronome_beat_clip, int(i * beat_length_seconds * frame_rate)
        )
def __init__(
    self,
    session: "manokee.session.Session",
    frame_rate: float,
    element: ET.Element = None,
    name: str = None,
):
    self._session = session
    self.percent_loaded = None
    self.frame_rate = frame_rate
    if element is not None:
        # Restore track state from a saved session element.
        assert name is None
        self._name = element.attrib["name"]
        self._is_rec = element.attrib["rec"] != "0"
        self._is_mute = element.attrib["mute"] != "0"
        self._is_solo = element.attrib["solo"] != "0"
        self._rec_source = element.attrib["rec-source"]
        self._source = element.attrib.get("source", "internal")
        self._fader = Fader(
            float(element.attrib["vol"]), float(element.attrib["pan"])
        )
        self._beats_in_audacity_beat = int(
            element.attrib.get("beats-in-audacity-beat", "1")
        )
        self._audacity_project = (
            audacity_project.parse(element.attrib.get("audacity-project"))
            if self.is_audacity_project
            else None
        )
        self.wall_time_recorder = WallTimeRecorder(
            [
                WallTimeEntry(
                    parse_timedelta(wall_time.attrib["session-time"]),
                    datetime.fromisoformat(wall_time.attrib["start-time"]),
                    parse_timedelta(wall_time.attrib["duration"]),
                )
                for wall_time in element.findall("wall-time")
            ]
        )
    else:
        # Create a fresh track with default settings.
        self._name = name if name is not None else "track"
        self._is_rec = False
        self._is_mute = False
        self._is_solo = False
        self._rec_source = "L"
        self._source = "internal"
        self._fader = Fader()
        self._beats_in_audacity_beat = 1
        self._audacity_project = None
        self.wall_time_recorder = WallTimeRecorder()
    self.requires_audio_save = False
    if self.is_audacity_project:
        self._audio = self.audacity_project.as_audio_clip()
        self._audio.writeable = False
    else:
        self._audio = AudioClip.zeros(1, 1, self.frame_rate)
        self.percent_loaded = 0
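# For reference, a hypothetical element that would satisfy the attribute reads in
# the element branch above. The attribute names come from the attrib lookups; the
# tag name and values are illustrative, and "source", "beats-in-audacity-beat",
# "audacity-project" and <wall-time> children are optional.
import xml.etree.ElementTree as ET

element = ET.fromstring(
    '<track name="vocals" rec="1" mute="0" solo="0" rec-source="L" vol="0.0" pan="0.0"/>'
)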
def test_sine_2():
    # TODO Enhance this test by adding checks for sine properties
    clip = AudioClip.sine(440, 1.0, 48000, 2, 48000)
    assert clip.frame_rate == 48000
    assert clip.channels == 2
    assert len(clip) == 48000
def test_resize_upwards():
    clip = AudioClip(np.zeros((100, 2), np.float32), 48000)
    clip.resize(150)
    assert clip.channels == 2
    assert len(clip) == 150
def test_mono_audio_clip_basic_properties_1():
    clip = AudioClip(np.zeros((100, 1), np.float32), 48000)
    assert clip.frame_rate == 48000
    assert len(clip) == 100
    assert clip.channels == 1
from amio import AudioClip
import manokee.session
import numpy as np

metronome_bar_clip = AudioClip.from_soundfile("res/metbar.flac")
metronome_beat_clip = AudioClip.from_soundfile("res/metbeat.flac")


class Metronome:
    def __init__(self, session: "manokee.session.Session"):
        self._session = session
        self._needs_clip_recreation = True
        self._audio_clip = None
        self._repeat_interval = 0

    @property
    def needs_clip_recreation(self) -> bool:
        return self._needs_clip_recreation

    @needs_clip_recreation.setter
    def needs_clip_recreation(self, value: bool):
        if not value:
            raise ValueError("Not allowed to set it externally to False")
        self._needs_clip_recreation = value

    @property
    def audio_clip(self) -> AudioClip:
        if self._needs_clip_recreation:
            self.create_audio_clip()
        return self._audio_clip
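# A minimal usage sketch (assumed, not from the project): the owner flags the
# metronome for recreation when tempo or meter changes, and the next read of
# audio_clip rebuilds the one-bar clip via create_audio_clip(). FakeSession is a
# hypothetical stand-in exposing the attributes that create_audio_clip reads; the
# res/*.flac metronome samples must be loadable for this to run.
class FakeSession:
    frame_rate = 48000
    bpm = 120
    time_signature = 4

metronome = Metronome(FakeSession())
clip = metronome.audio_clip             # built lazily on first access
metronome.needs_clip_recreation = True  # e.g. after the session's BPM changed
clip = metronome.audio_clip             # rebuilt with the new settings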
def as_clip(self) -> AudioClip:
    return AudioClip.concatenate(self._chunks)
def as_audio_clip(self):
    return AudioClip.concatenate(self.get_audio_clips())
def get_audio_clips(self):
    return (
        AudioClip.from_au_file(
            self.project.get_blockfile_path(block.get_filename())
        )
        for block in self.get_wave_blocks()
    )
def as_audio_clip(self):
    tracks = self.get_wave_tracks()
    left = next(tracks).as_audio_clip()
    right = next(tracks).as_audio_clip()
    return AudioClip.stereo_clip_from_mono_clips(left, right)