def function_save(context: Context, music: Music, outputs: Union[str, Sequencer, List[SequencerLike]]) -> Any:
    """Render `music` into one or more sequencers and return them.

    Args:
        context: Evaluation context; a fork at cursor 0 drives expansion.
        music: The music whose expanded events are registered.
        outputs: A single sequencer (or a string naming one), or a list of
            sequencer-like values.

    Returns:
        The lone sequencer when `outputs` was a single value, otherwise the
        list of sequencers that were created.
    """
    # When only one sequencer was given, return just that sequencer
    # instead of a one-element list.
    single: bool = False

    # Fix: use isinstance for the string check instead of `type(...) is str`.
    if isinstance(outputs, (str, Sequencer)):
        sequencers: List[Sequencer] = [function_make_sequencer(context, outputs)]
        single = True
    else:
        sequencers = [function_make_sequencer(context, seq) for seq in outputs]

    for seq in sequencers:
        seq.realtime = False
        seq.start()

    try:
        for ev in music.expand(context.fork(cursor=0)):
            for seq in sequencers:
                seq.register_event(ev)
    finally:
        # Fix: close sequencers even when expansion raises, so no output
        # resource is leaked on error.
        for seq in sequencers:
            seq.close()

    return sequencers[0] if single else sequencers
def function_to_mkl(context: Context, music: Music, ast: bool = False, nameless_voices: bool = True, declare_voices: bool = True, base_voice: Voice = None) -> Union[str, Node]:
    """Convert `music` to Musikla notation (or an AST when `ast` is True).

    The events are pushed through a voice-identifier stage followed by the
    notation builder; the final emitted value is captured into a box and
    returned.
    """
    captured: Box[str] = Box("")

    builder = MusiklaNotationBuilderTransformer(
        only_final=True,
        ast=ast,
        context=context,
        nameless_voices=nameless_voices,
        declare_voices=declare_voices,
        base_voice=base_voice,
    )
    sink, emitter = Transformer.pipeline2(VoiceIdentifierTransformer(), builder)

    # Store whatever the pipeline finally emits.
    emitter.subscribe(lambda rendered: captured.set(rendered))

    for event in music.expand(context.fork(cursor=0)):
        sink.add_input(event)
    sink.end_input()

    return captured.value
def function_play(context: Context, expr):
    """Evaluate `expr` and return its result if it is Music, else empty Music."""
    result = expr.eval(context)
    if not isinstance(result, Music):
        return Music()
    return result
def __eval__(self, context):
    """Resolve this name from the symbol table; Music values are re-expanded."""
    resolved = context.symbols.lookup(self.name)
    if not isinstance(resolved, Music):
        return resolved
    # Wrap the expansion so the caller gets a fresh, lazily-built Music value.
    return Music(resolved.expand(context))
def function_chord(context: Context, *notes):
    """Build a single chord from the given notes.

    Music arguments contribute their first note; the chord inherits duration,
    voice, velocity, value, tie and staff from the first note given.
    """
    plain = [m.first_note() if isinstance(m, Music) else m for m in notes]
    lead = plain[0]

    chord = ChordEvent(
        timestamp=context.cursor,
        pitches=[int(n) for n in plain],
        name=None,
        duration=lead.duration,
        voice=lead.voice,
        velocity=lead.velocity,
        value=lead.value,
        tied=lead.tied,
        staff=lead.staff,
    )
    return Music([chord])
def function_sample(context: Context, file: str, duration: float = None, len=None):
    """Emit a sound event for `file` at the current cursor and advance it.

    Note: the `len` parameter shadows the builtin, but its name is part of
    the public interface and is therefore kept.
    """
    sound = SoundEvent(
        file,
        timestamp=context.cursor,
        voice=context.voice,
        duration=duration,
        value=len,
        velocity=context.voice.velocity,
    )
    # Move the cursor past the sample so subsequent events follow it.
    context.cursor += sound.duration
    return Music([sound])
def __eval__(self, context: Context, assignment: bool = False):
    """Evaluate the wrapped expression inside a forked context.

    Non-Music results join the fork back immediately; Music results defer
    the join until their events have been consumed (see get_events).
    """
    if self.expression is None:
        return None

    branch = context.fork()
    result = self.expression.eval(branch)

    if not isinstance(result, Music):
        context.join(branch)
        return result

    return Music(self.get_events(context, branch, result))
def function_readmidi(context: Context, file: str = None, port: Union[str, List[str], bool] = None, voices: List[Voice] = None, cutoff_sequence=None, ignore_message_types: List[str] = None):
    """Read MIDI from a file or from a (possibly live) input port.

    Args:
        context: Evaluation context providing the default voice and library.
        file: Path to a MIDI file; when given, `port` is ignored.
        port: True for the configured default input, a port name, or a list
            of port names merged into one multi-port.
        voices: Optional per-track voices (file mode only).
        cutoff_sequence: Optional melody that terminates live recording.
        ignore_message_types: MIDI message types to skip.

    Returns:
        A Music value containing the (note-composed) events that were read.
    """
    if file is not None:
        mid = mido.MidiFile(file)
        return read_midi_file(context, mid, voices, cutoff_sequence, ignore_message_types)

    # Live mode: resolve `port` into an opened mido input.
    # Fix: identity check for the True sentinel instead of `port == True`.
    if port is True:
        default_port = context.library(MidiLibrary).get_midi_default_input()
        if default_port is None:
            port = mido.open_input()
        else:
            port = mido.open_input(default_port)
    # Fix: isinstance instead of `type(port) == str` / `type(port) == list`.
    elif isinstance(port, str):
        port = mido.open_input(port)
    elif isinstance(port, list):
        port = MultiPort([mido.open_input(p) for p in port])

    events = midi_stream_to_music(context, port, context.voice, ignore_message_types)
    events = ComposeNotesTransformer.iter(events)

    # Fix: `is not None` instead of `!= None`.
    if cutoff_sequence is not None:
        events = TerminationMelodyTransformer.iter(
            events, list(cutoff_sequence.expand(context.fork(cursor=0))))

    # Mirror the event stream into a notation printer while it is consumed.
    events = TeeTransformer.iter(
        events,
        Transformer.pipeline(
            VoiceIdentifierTransformer(),
            MusiklaNotationBuilderTransformer(),
            Transformer.subscriber(lambda n: print(n + '\n\n'))))

    return Music(list(events))
def __eval__(self, context):
    """Evaluate this node's value expressions in order.

    Walks self.values(context); an early return signalled through the stack
    frame short-circuits with the returned value, a Music value switches to
    lazy event expansion, and otherwise the last produced value is returned.
    """
    value = None
    # NOTE(review): 'stack_frame' is looked up in the 'stack' container —
    # presumably pushed by an enclosing call frame; confirm against callers.
    stack_frame: Optional[StackFrame] = context.symbols.lookup(
        'stack_frame', container='stack')
    iterator = iter(self.values(context))
    while True:
        try:
            value = next(iterator)
            # A return executed while producing this value short-circuits
            # the whole evaluation with the frame's returned value.
            if self._check_stack_frame(stack_frame, False):
                return stack_frame.returned_value
            # A Music value hands the *remaining* iterator over to a lazy
            # generator so the rest of the values expand on demand.
            if isinstance(value, Music):
                return Music(
                    self._get_events(context, stack_frame, iterator, value))
        except StopIteration:
            break
    # No early return and no Music: yield the last evaluated value (or None).
    return value
def read_midi_file(context: Context, file: mido.MidiFile = None, voices: List[Voice] = None, cutoff_sequence=None, ignore_message_types: List[str] = None):
    """Convert every track of an open MIDI file into parallel Music.

    Args:
        context: Evaluation context providing the fallback voice.
        file: The already-opened mido MIDI file.
        voices: Optional per-track voices; a None entry skips that track.
        cutoff_sequence: Unused here; kept for signature compatibility.
        ignore_message_types: MIDI message types to skip.

    Returns:
        A Music value playing all converted tracks in parallel.
    """
    tracks = []
    for i, track in enumerate(file.tracks):
        # Fix: snake_case local name and `is None` instead of `== None`.
        if voices is None:
            track_voice = context.voice
        else:
            track_voice = voices[i]
        # An explicit None voice means "skip this track".
        if track_voice is None:
            continue
        events = midi_track_to_music(context, file, track, track_voice, ignore_message_types)
        events = ComposeNotesTransformer.iter(events)
        tracks.append(events)
    return Music.parallel(tracks)
def function_slice(notes: Music, start: int, end: int): return notes.filter(lambda n: n.timestamp >= start and n.timestamp <= end)
def function_cc(context: Context, control: int, value: int):
    """Emit a single MIDI control-change event at the current cursor."""
    return Music([
        ControlChangeEvent(context.cursor, context.voice, control, value)
    ])
def __eval__(self, context):
    """Wrap this node's event stream in a Music value."""
    events = self.get_events(context)
    return Music(events)
def get_events(self, context: Context, forked: Context, value: Music): try: for event in value.expand(context): yield event finally: context.join(forked)