Ejemplo n.º 1
0
    def to_midi(self):
        """
        Constructs a L{MIDI EventStream<midi.EventStream>} from the 
        data in this stream.
        This can then be output to a file to be played.
        
        Note that TPCNotes will be output as normal MIDI notes. We 
        can't do anything of the clever tuning stuff that we can do 
        with tonal space coordinates, since we'd need to do a further 
        step of analysis to work out the fully specified TS point from 
        the pitch class.
        
        """
        tempo = 120

        from midi import EventStream, NoteOffEvent, NoteOnEvent, SetTempoEvent
        mid = EventStream()
        mid.add_track()
        # Set the tempo first at the beginning
        temp = SetTempoEvent()
        temp.tempo = tempo
        temp.tick = 0
        mid.add_event(temp)
        # Work out how many ticks there are in a millisecond
        ticks_per_ms = float(mid.resolution) * tempo / 60000
        # Create midi events for every event in our stream
        for ev in self.events:
            if isinstance(ev, TPCNoteEvent):
                # Create note-on and note-off events
                note = ev.note

                noteon = NoteOnEvent()
                noteon.pitch = note
                noteon.tick = int(ev.start * ticks_per_ms)
                noteon.velocity = 100
                mid.add_event(noteon)

                noteoff = NoteOffEvent()
                noteoff.pitch = note
                noteoff.tick = int(ev.end * ticks_per_ms)
                noteoff.velocity = 100
                mid.add_event(noteoff)
            elif isinstance(ev, (ChordEvent, BeatEvent)):
                # These events don't affect the midi data
                continue
            else:
                raise TypeError, "event type %s not recognised by "\
                    "MIDI converter." % type(ev).__name__
        return mid
Ejemplo n.º 2
0
 def to_midi(self):
     """
     Constructs a L{MIDI EventStream<midi.EventStream>} from the 
     data in this stream.
     This can then be output to a file to be played.
     
     Note that TPCNotes will be output as normal MIDI notes. We 
     can't do anything of the clever tuning stuff that we can do 
     with tonal space coordinates, since we'd need to do a further 
     step of analysis to work out the fully specified TS point from 
     the pitch class.
     
     """
     # Fixed playback tempo, in beats per minute
     tempo = 120
     
     from midi import EventStream, NoteOffEvent, NoteOnEvent, SetTempoEvent
     mid = EventStream()
     mid.add_track()
     # Set the tempo first at the beginning
     temp = SetTempoEvent()
     temp.tempo = tempo
     temp.tick = 0
     mid.add_event(temp)
     # Work out how many ticks there are in a millisecond
     # (event start/end times are presumably in milliseconds -- TODO confirm)
     ticks_per_ms = float(mid.resolution) * tempo / 60000
     # Create midi events for every event in our stream
     for ev in self.events:
         if isinstance(ev, TPCNoteEvent):
             # Create note-on and note-off events
             note = ev.note
             
             noteon = NoteOnEvent()
             noteon.pitch = note
             noteon.tick = int(ev.start * ticks_per_ms)
             # Fixed, arbitrary velocity for every note
             noteon.velocity = 100
             mid.add_event(noteon)
             
             noteoff = NoteOffEvent()
             noteoff.pitch = note
             noteoff.tick = int(ev.end * ticks_per_ms)
             noteoff.velocity = 100
             mid.add_event(noteoff)
         elif isinstance(ev, (ChordEvent,BeatEvent)):
             # These events don't affect the midi data
             continue
         else:
             # Unknown event type: fail loudly rather than skip silently
             raise TypeError, "event type %s not recognised by "\
                 "MIDI converter." % type(ev).__name__
     return mid
Ejemplo n.º 3
0
 def from_data(data, directives):
     """
     Builds a L{ChordInputFile} from the lines of an input file.
     
     Each line is either an assignment (C{key = value}), a C{rest}, a 
     C{chord} line, or a C{tones}/C{t} line of tonal space coordinates. 
     Chords are rendered both into a L{ToneMatrix} and into a parallel 
     midi stream.
     
     @type data: list of strings
     @param data: the input file lines to interpret, in score order
     @type directives: dict
     @param directives: file-level directives; only 'tempo' is read here
     @return: a L{ChordInputFile} wrapping the tone matrix and the 
         midi rendering
     
     """
     # Build the tone matrix straight up
     # Mutable playback state, updated as assignments are encountered 
     #  in the score. 'time' is the current position in the units taken 
     #  by _qn_to_seconds (presumably quarter notes -- TODO confirm)
     state = {
         'equal_temperament' : False,
         'double_root' : False,
         'envelope' : None,
         'origin' : 440,
         'time' : 0.0,
     }
     tone_matrix = ToneMatrix()
     
     #### Prepare a midi stream
     mid = EventStream()
     mid.add_track()
     # Add a tempo event
     tempo = SetTempoEvent()
     tempo.tempo = directives['tempo']
     tempo.tick = 0
     mid.add_event(tempo)
     
     # Each line represents a single chord or some kind of directive
     for line in data:
         # NOTE(review): raises IndexError on a blank line -- presumably 
         #  the caller strips those out; confirm
         first_word = line.split()[0].lower()
         if "=" in line:
             # This is an assignment
             key, __, value = line.partition("=")
             key = key.strip()
             value = value.strip()
             # Check it's valid
             if key == "equal_temperament":
                 if value not in ['off','on']:
                     raise HarmonicalInputFileReadError, \
                         "equal_temperament must be 'off' or 'on', "\
                         "not '%s'" % value
                 value = (value == 'on')
             elif key == "origin":
                 try:
                     value = int(value)
                 except ValueError:
                     # Try interpreting as a coordinate
                     try:
                         coord = _read_coord(value)
                     except HarmonicalInputFileReadError:
                         raise HarmonicalInputFileReadError, "origin "\
                             "value must be an integer or a coordinate."
                     value = _get_pitch(coord)
             elif key == "double_root":
                 if value not in ['off','on']:
                     raise HarmonicalInputFileReadError, \
                         "double_root must be 'off' or 'on', "\
                         "not '%s'" % value
                 value = (value == 'on')
             elif key == "envelope":
                 if value not in ENVELOPES:
                     raise HarmonicalInputFileReadError, "unknown "\
                         "envelope '%s'. Must be one of: %s" % \
                         (value, ", ".join(ENVELOPES.keys()))
                 # Instantiate the named envelope class
                 value = ENVELOPES[value]()
             elif key == "program":
                 # Midi program change
                 try:
                     value = int(value)
                     if value > 127 or value < 0:
                         raise ValueError
                 except ValueError:
                     raise HarmonicalInputFileReadError, "invalid program "\
                         "change: %s. Should be an integer 0-127" % value
                 # Program changes take effect at the current score time
                 pchange = ProgramChangeEvent()
                 pchange.value = value
                 pchange.tick = int(state['time'] * mid.resolution)
                 mid.add_event(pchange)
             else:
                 raise HarmonicalInputFileReadError, "invalid "\
                     "assignment key: '%s'" % key
             # Make this assignment when we get to it in the score
             state[key] = value
         elif first_word == "rest":
             tokens = line.split()
             duration = _get_duration(tokens)
             
             # Just move the time counter on without doing anything
             state['time'] += duration
         elif first_word == "chord":
             # NOTE(review): lstrip strips a *character set*, not a 
             #  prefix; it works here because a space follows the 
             #  keyword, but it is fragile -- confirm intended
             tokens = line.lstrip("chord").split()
             duration = _get_duration(tokens)
             root = _get_root(tokens)
             volume = _get_volume(tokens)
             sec_duration = _qn_to_seconds(duration)
             
             # Must be just a chord type left
             if len(tokens) > 1:
                 raise HarmonicalInputFileReadError, "chord must "\
                     "include just a chord type"
             if len(tokens) == 0:
                 ctype = ''
             else:
                 ctype = tokens[0]
             
             # Add the chord to the tone matrix
             tone_matrix.add_tone(
                     _qn_to_seconds(state['time']), 
                     SineChordEvent(
                         _get_pitch(root), 
                         ctype,
                         duration=sec_duration, 
                         amplitude=volume,
                         equal_temperament=state['equal_temperament'],
                         double_root=state['double_root'],
                         envelope=state['envelope']
                     )
                 )
                 
             # Add the same chord to the midi file
             tick_time = int(mid.resolution * state['time'])
             tick_duration = int(duration * mid.resolution)
             # TODO: currently this will always treat C as the origin 
             #  even if you change it with directives
             events = events_for_chord(root, 
                                       ctype,
                                       tick_time,
                                       tick_duration, 
                                       velocity = int(volume*127),
                                       equal_temperament=state['equal_temperament'],
                                       double_root=state['double_root'])
             for ev in events:
                 mid.add_event(ev)
             
             # Move the timer on ready for the next chord
             state['time'] += duration
         elif first_word in ["tones", "t"]:
             # Must be a chord made up of coordinates
             # NOTE(review): same lstrip-as-prefix caveat as "chord" above
             tokens = line.lstrip("tones").split()
             duration = _get_duration(tokens)
             root = _get_root(tokens)
             volume = _get_volume(tokens)
             sec_duration = _qn_to_seconds(duration)
                 
             # The rest should be the list of coordinates
             coordinates = [_read_coord(token) for token in tokens]
             
             # Add the chord to the tone matrix
             tone_matrix.add_tone(
                     _qn_to_seconds(state['time']), 
                     SineClusterEvent(
                         _get_pitch(root), 
                         coordinates,
                         duration=sec_duration, 
                         amplitude=volume,
                         equal_temperament=state['equal_temperament'],
                         double_root=state['double_root'],
                         envelope=state['envelope']
                     )
                 )
                 
             
             # Add the same chord to the midi file
             tick_time = int(mid.resolution * state['time'])
             tick_duration = int(duration * mid.resolution)
             for note in coordinates:
                 # TODO: currently this will always treat C as the origin 
                 #  even if you change it with directives
                 events = tonal_space_note_events(note, 
                                                  tick_time,
                                                  tick_duration, 
                                                  velocity = int(volume*127))
                 if state['equal_temperament']:
                     # Omit tuning event (the first one)
                     events = events[1:]
                 for ev in events:
                     mid.add_event(ev)
             
             # Move the timer on ready for the next chord
             state['time'] += duration
         else:
             raise HarmonicalInputFileReadError, "could not make sense "\
                 "of the line: %s" % line
     return ChordInputFile(tone_matrix, midi_file=mid)
Ejemplo n.º 4
0
    def generate(self, overlay=None, offset=0):
        """
        Generates a midi stream rendering this object's chord labels.
        
        @param overlay: an existing midi event stream to add the chords 
            to on a new channel (organ sound, reduced volume); if None, 
            a fresh piano stream is created
        @param offset: tick time at which the first chord starts
        @return: the midi event stream with the chord events added
        
        """
        octaves = 1

        if overlay is None:
            # Fresh stream: plain piano on channel 0, full volume
            stream = EventStream()
            stream.resolution = self.resolution
            instrument = 0
            channel = 0
            volume = 127
        else:
            # Overlaying: organ sound on the next unused channel, quieter
            stream = overlay
            instrument = 23
            channel = max(ev.channel for ev in stream.trackpool) + 1
            volume = 50

        stream.add_track()
        # Select the instrument at the start of the track
        program = ProgramChangeEvent()
        program.value = instrument
        program.tick = 0
        program.channel = channel
        stream.add_event(program)

        # Length of each chord in midi ticks
        chord_ticks = int(self.resolution * self.chord_length)
        # Chords are evenly spaced, starting at the offset
        chord_times = [offset + i * chord_ticks for i in range(len(self.labels))]

        note_offs_due = []
        for label, start in zip(self.labels, chord_times):
            root = label.root
            # Triad pitches wrapped into the octave(s), transposed up to 72+
            pitches = [(root + interval) % (octaves * 12) + 72
                       for interval in self.chord_vocab[label.label]]
            # Plus the root two octaves below that
            pitches.append(root + 48)

            # End the previous chord just before this one begins
            for noff in note_offs_due:
                noff.tick = start - 1
                stream.add_event(noff)
            note_offs_due = []

            if self.text_events:
                # A lyrics event carries the chord label as text
                lyric = LyricsEvent()
                lyric.data = "%s\n" % label
                lyric.tick = start
                stream.add_event(lyric)

            for pitch in pitches:
                non = NoteOnEvent()
                non.tick = start
                non.pitch = pitch
                non.channel = channel
                non.velocity = volume
                stream.add_event(non)

                # The matching note-off waits until the next chord's start
                noff = NoteOffEvent()
                noff.pitch = pitch
                noff.channel = channel
                noff.velocity = volume
                note_offs_due.append(noff)

        # Close off whatever is still sounding after the last chord
        for noff in note_offs_due:
            noff.tick = start + chord_ticks
            stream.add_event(noff)
        return stream
Ejemplo n.º 5
0
    def render(self):
        """
        Creates MIDI data from the path and chord types.
        
        @rtype: midi.EventStream
        @return: an event stream containing all the midi events
        
        """
        stream = EventStream()
        stream.add_track()

        # Tempo goes at the very start of the track
        tempo_event = SetTempoEvent()
        tempo_event.tempo = self.tempo
        stream.add_event(tempo_event)

        # Followed by the instrument selection
        program = ProgramChangeEvent()
        program.value = self.instrument
        stream.add_event(program)

        beat_length = stream.resolution

        # Tabulate the time at which each root along the path takes effect
        root_times = []
        now = Fraction(0)
        for path_root, path_length in self.path:
            root_times.append((path_root, now))
            now += path_length

        def _root_at_time(when):
            # The root in effect at time `when`: the last change at or 
            #  before that time
            current = root_times[0][0]
            for next_root, starts in root_times[1:]:
                if starts > when:
                    return current
                current = next_root
            # Beyond the last change, the final root applies
            return current

        with_bass = self.bass_root is not None
        bass_events = []

        # Lay down the chords themselves
        now = Fraction(0)
        for chord_type, chord_len in self.chord_types:
            # Each chord is cut 10 ticks short, presumably so repeated 
            #  chords articulate
            tick_length = chord_len * beat_length - 10
            tick_time = now * beat_length
            chord_events = events_for_chord(
                _root_at_time(now),
                chord_type,
                int(tick_time),
                int(tick_length),
                equal_temperament=self.equal_temperament,
                root_octave=self.root_octave,
                double_root=(self.double_root or with_bass))
            if with_bass:
                # The doubled root (last event group) supplies the bass line
                bass_events.extend([copy.copy(ev) for ev in chord_events[-1]])
            if with_bass and not self.double_root:
                # The doubled root was only wanted for the bass: drop it 
                #  from the main track by flattening the remaining groups
                chord_events = sum(chord_events[:-1], [])
            for ev in chord_events:
                stream.add_event(ev)
            now += chord_len

        if with_bass:
            bass_channel = 1
            # The bass line lives on its own track, played by picked bass
            stream.add_track()
            program = ProgramChangeEvent()
            program.value = 33
            program.channel = bass_channel
            stream.add_event(program)
            for ev in bass_events:
                ev.channel = bass_channel
                stream.add_event(ev)
        return stream
Ejemplo n.º 6
0
    def from_stream(stream,
                    time_unit=4,
                    tick_offset=0,
                    name=None,
                    only_notes=True,
                    truncate=None,
                    gold=None,
                    sequence_index=None):
        """
        Creates a L{SegmentedMidiInput} from a midi event stream.
        
        @param stream: the midi event stream to segment
        @param time_unit: length of each segment, as a multiple of the 
            stream's resolution (presumably in beats -- see tick_unit)
        @param tick_offset: tick time at which the first segment starts
        @type only_notes: bool
        @param only_notes: if True, only includes note-on/note-off events in 
            the segments. If False, the stream will be sliced so that each 
            segment repeats things like program change events at the beginning.
            Including only notes, however, makes the preprocessing very much 
            faster
        @param truncate: if given, keep only the first C{truncate} segments
        
        """
        # Divide the stream up into slices of the right size
        # Number of ticks in each slice
        tick_unit = int(stream.resolution * time_unit)
        if len(stream.trackpool) == 0:
            end_time = 0
        else:
            end_time = max(stream.trackpool).tick

        if only_notes:
            from midi import EventStream, NoteOnEvent, NoteOffEvent, EndOfTrackEvent
            # Only include notes in the stream
            # This is much simpler and faster than the alternative
            events = [ev for ev in list(sorted(stream.trackpool)) if \
                        type(ev) in [NoteOnEvent, NoteOffEvent]]
            events = iter(events)
            try:
                current_event = events.next()
                # Get up to the start point in the stream
                while current_event.tick < tick_offset:
                    current_event = events.next()
            except StopIteration:
                # Got to the end of the stream before we even started
                inputs = []
            else:
                inputs = []
                for chunk_start in range(tick_offset, end_time, tick_unit):
                    chunk_end = chunk_start + tick_unit
                    slc = EventStream()
                    slc.add_track()
                    slc.format = stream.format
                    slc.resolution = stream.resolution
                    slc.segment_start = chunk_start

                    # Add all the note events in this time period
                    try:
                        while current_event.tick < chunk_end:
                            slc.add_event(current_event)
                            current_event = events.next()
                        # Add the end of track event
                        eot = EndOfTrackEvent()
                        eot.tick = chunk_end
                        slc.add_event(eot)
                    except StopIteration:
                        # Reached the end of the stream
                        inputs.append(slc)
                        break

                    inputs.append(slc)
        else:
            # Use slices to do all the necessary repetition of ongoing events
            from midi.slice import EventStreamSlice
            start_times = range(tick_offset, end_time, tick_unit)
            # First slice starts at the offset value
            slices = [
                EventStreamSlice(stream, chunk_start, chunk_start + tick_unit)
                for chunk_start in start_times
            ]
            inputs = [slc.to_event_stream(repeat_playing=False, cancel_playing=False) \
                                for slc in slices]
            # Associate the start time with each segment
            for slc, start_time in zip(inputs, start_times):
                slc.segment_start = start_time

        # Remove empty segments from the start and end.
        # A segment with fewer than 2 events contains nothing but its 
        #  end-of-track event.
        # The index bounds guard against an input whose segments are all 
        #  empty (or an empty list of segments): previously these scans 
        #  ran off the ends of the list and raised IndexError; now such 
        #  input yields an empty segment list.
        current = 0
        while current < len(inputs) and len(inputs[current].trackpool) < 2:
            current += 1
        inputs = inputs[current:]
        # And the end
        current = len(inputs) - 1
        while current >= 0 and len(inputs[current].trackpool) < 2:
            current -= 1
        inputs = inputs[:current + 1]

        if truncate is not None:
            inputs = inputs[:truncate]

        return SegmentedMidiInput(inputs,
                                  time_unit=time_unit,
                                  tick_offset=tick_offset,
                                  name=name,
                                  stream=stream,
                                  gold=gold,
                                  sequence_index=sequence_index)
    def from_stream(
        stream, time_unit=4, tick_offset=0, name=None, only_notes=True, truncate=None, gold=None, sequence_index=None
    ):
        """
        Creates a L{SegmentedMidiInput} from a midi event stream.
        
        @type only_notes: bool
        @param only_notes: if True, only includes note-on/note-off events in 
            the segments. If False, the stream will be sliced so that each 
            segment repeats things like program change events at the beginning.
            Including only notes, however, makes the preprocessing very much 
            faster
        
        """
        # Divide the stream up into slices of the right size
        # Number of ticks in each slice
        tick_unit = int(stream.resolution * time_unit)
        if len(stream.trackpool) == 0:
            end_time = 0
        else:
            # Tick of the latest event in the whole stream
            end_time = max(stream.trackpool).tick

        if only_notes:
            from midi import EventStream, NoteOnEvent, NoteOffEvent, EndOfTrackEvent

            # Only include notes in the stream
            # This is much simpler and faster than the alternative
            events = [ev for ev in list(sorted(stream.trackpool)) if type(ev) in [NoteOnEvent, NoteOffEvent]]
            events = iter(events)
            try:
                # NOTE(review): iterator.next() is Python 2 only
                current_event = events.next()
                # Get up to the start point in the stream
                while current_event.tick < tick_offset:
                    current_event = events.next()
            except StopIteration:
                # Got to the end of the stream before we even started
                inputs = []
            else:
                inputs = []
                for chunk_start in range(tick_offset, end_time, tick_unit):
                    chunk_end = chunk_start + tick_unit
                    slc = EventStream()
                    slc.add_track()
                    slc.format = stream.format
                    slc.resolution = stream.resolution
                    slc.segment_start = chunk_start

                    # Add all the note events in this time period
                    try:
                        while current_event.tick < chunk_end:
                            slc.add_event(current_event)
                            current_event = events.next()
                        # Add the end of track event
                        eot = EndOfTrackEvent()
                        eot.tick = chunk_end
                        slc.add_event(eot)
                    except StopIteration:
                        # Reached the end of the stream
                        inputs.append(slc)
                        break

                    inputs.append(slc)
        else:
            # Use slices to do all the necessary repetition of ongoing events
            from midi.slice import EventStreamSlice

            start_times = range(tick_offset, end_time, tick_unit)
            # First slice starts at the offset value
            slices = [EventStreamSlice(stream, chunk_start, chunk_start + tick_unit) for chunk_start in start_times]
            inputs = [slc.to_event_stream(repeat_playing=False, cancel_playing=False) for slc in slices]
            # Associate the start time with each segment
            for slc, start_time in zip(inputs, start_times):
                slc.segment_start = start_time

        # Remove empty segments from the start and end
        current = 0
        # There's always one event - the end of track
        # NOTE(review): if the segment list is empty, or every segment 
        #  holds only its end-of-track event, these scans run off the 
        #  ends of the list and raise IndexError -- confirm callers 
        #  always supply at least one note
        while len(inputs[current].trackpool) < 2:
            current += 1
        inputs = inputs[current:]
        # And the end
        current = len(inputs) - 1
        while len(inputs[current].trackpool) < 2:
            current -= 1
        inputs = inputs[: current + 1]

        if truncate is not None:
            inputs = inputs[:truncate]

        return SegmentedMidiInput(
            inputs,
            time_unit=time_unit,
            tick_offset=tick_offset,
            name=name,
            stream=stream,
            gold=gold,
            sequence_index=sequence_index,
        )
Ejemplo n.º 8
0
 def generate(self, overlay=None, offset=0):
     """
     Generates a midi stream of this object's chord sequence.
     
     @param overlay: existing midi event stream to overlay the chords 
         onto (organ sound, next unused channel, reduced volume); a 
         new piano stream is created if this is None
     @param offset: tick time of the first chord
     @return: the midi event stream with the chord events added
     
     """
     octaves = 1
     
     if overlay is None:
         # Start a fresh stream: piano on channel 0, full volume
         stream = EventStream()
         stream.resolution = self.resolution
         instrument, channel, volume = 0, 0, 127
     else:
         # Overlay on the next channel after the last one in use: 
         #  organ sound, quieter
         stream = overlay
         instrument = 23
         channel = max(ev.channel for ev in stream.trackpool) + 1
         volume = 50
     
     stream.add_track()
     # Program change to select the instrument on this channel
     pchange = ProgramChangeEvent()
     pchange.value = instrument
     pchange.tick = 0
     pchange.channel = channel
     stream.add_event(pchange)
     
     # Chord length in midi ticks
     ticks_per_chord = int(self.resolution * self.chord_length)
     
     held_offs = []
     start = offset
     for index, label in enumerate(self.labels):
         # Chords are evenly spaced from the offset
         start = index * ticks_per_chord + offset
         root = label.root
         # Triad pitches wrapped into the octave(s), transposed to 72+
         pitches = [(root + step) % (octaves * 12) + 72
                    for step in self.chord_vocab[label.label]]
         # The root again, two octaves below that
         pitches.append(root + 48)
         
         # Finish the previous chord one tick before this one starts
         for off in held_offs:
             off.tick = start - 1
             stream.add_event(off)
         held_offs = []
         
         if self.text_events:
             # Write the chord label into the track as a lyrics event
             lyric = LyricsEvent()
             lyric.data = "%s\n" % label
             lyric.tick = start
             stream.add_event(lyric)
         
         for pitch in pitches:
             on = NoteOnEvent()
             on.tick = start
             on.pitch = pitch
             on.channel = channel
             on.velocity = volume
             stream.add_event(on)
             
             # Defer the note-off until we know when the next chord starts
             off = NoteOffEvent()
             off.pitch = pitch
             off.channel = channel
             off.velocity = volume
             held_offs.append(off)
     
     # Notes of the final chord end a full chord-length after it starts
     for off in held_offs:
         off.tick = start + ticks_per_chord
         stream.add_event(off)
     return stream
Ejemplo n.º 9
0
 def render(self):
     """
     Creates MIDI data from the path and chord types.
     
     @rtype: midi.EventStream
     @return: an event stream containing all the midi events
     
     """
     stream = EventStream()
     stream.add_track()
     
     # Tempo goes at the very start of the track
     set_tempo = SetTempoEvent()
     set_tempo.tempo = self.tempo
     stream.add_event(set_tempo)
     
     # Followed by the instrument selection
     select_instr = ProgramChangeEvent()
     select_instr.value = self.instrument
     stream.add_event(select_instr)
     
     beat_length = stream.resolution
     
     # Tabulate the time at which each root along the path takes effect
     root_times = []
     elapsed = Fraction(0)
     for path_root, path_length in self.path:
         root_times.append((path_root, elapsed))
         elapsed += path_length
     
     def _root_at_time(when):
         # The root in effect at time `when`: the last change starting 
         #  at or before that time
         in_effect = root_times[0][0]
         for next_root, starts in root_times[1:]:
             if starts > when:
                 return in_effect
             in_effect = next_root
         # Beyond the last change, the final root applies
         return in_effect
     
     use_bass = self.bass_root is not None
     bass_events = []
     
     # Emit the events for each chord in turn
     elapsed = Fraction(0)
     for chord_type, chord_len in self.chord_types:
         # Cut each chord 10 ticks short, presumably to articulate repeats
         tick_length = chord_len * beat_length - 10
         tick_time = elapsed * beat_length
         chord_events = events_for_chord(_root_at_time(elapsed), chord_type,
                             int(tick_time), int(tick_length),
                             equal_temperament=self.equal_temperament,
                             root_octave=self.root_octave,
                             double_root=(self.double_root or use_bass))
         if use_bass:
             # Copy the doubled root's events for the separate bass track
             bass_events.extend([copy.copy(ev) for ev in chord_events[-1]])
         if use_bass and not self.double_root:
             # The doubled root was only wanted for the bass: flatten the 
             #  remaining groups, dropping it from the main track
             chord_events = sum(chord_events[:-1], [])
         for ev in chord_events:
             stream.add_event(ev)
         elapsed += chord_len
     
     if use_bass:
         bass_channel = 1
         # The bass line lives on its own track, played by picked bass (33)
         stream.add_track()
         select_instr = ProgramChangeEvent()
         select_instr.value = 33
         select_instr.channel = bass_channel
         stream.add_event(select_instr)
         for ev in bass_events:
             ev.channel = bass_channel
             stream.add_event(ev)
     return stream
Ejemplo n.º 10
0
    def generate(self, overlay=None, offset=0):
        """
        Generates a midi stream from this object's chord labels.
        
        @param overlay: an existing midi event stream to add the chords 
            to on a new channel (organ sound, reduced volume); if None, 
            a new piano stream is created
        @param offset: tick time added to every chord start time
        @return: the midi event stream with the chord events added
        
        """
        octaves = 1

        if overlay is not None:
            stream = overlay
            # Use organ sound
            instrument = 23
            # Find the last channel used in the file we're overlaying
            channel = max(ev.channel for ev in stream.trackpool) + 1
            volume = 30
        else:
            stream = EventStream()
            stream.resolution = self.resolution
            # Just use piano
            instrument = 0
            channel = 0
            volume = 127
        stream.add_track()
        pc = ProgramChangeEvent()
        pc.value = instrument
        pc.tick = 0
        pc.channel = channel
        stream.add_event(pc)
        # Length of each chord in midi ticks
        chord_length = int(self.resolution * self.chord_length)

        if self.times is None:
            # No explicit timings given: space the chords evenly
            times = [
                i * chord_length + offset for i in range(len(self.labels))
            ]
        else:
            times = [t + offset for t in self.times]

        # Plain attribute access: getattr with a constant name was a 
        #  pointless indirection
        formatter = self.formatter

        pending_note_offs = []
        for (tonic, mode, chord), time in zip(self.labels, times):
            # Resolve the scale-relative chord to an absolute root and 
            #  triad type
            scale_chord_root = constants.CHORD_NOTES[mode][chord][0]
            chord_root = (tonic + scale_chord_root) % 12
            triad_type = constants.SCALE_TRIADS[mode][chord]
            # Work out the notes for this chord
            triad_notes = [(chord_root + note) % (octaves * 12) + 72
                           for note in constants.TRIAD_NOTES[triad_type]]
            # Add the root in the octave two below
            triad_notes.append(chord_root + 48)

            # Add note offs for notes already on, ending just before 
            #  this chord starts
            for noff in pending_note_offs:
                noff.tick = time - 1
                stream.add_event(noff)
            pending_note_offs = []

            if self.text_events:
                # Add a text event to represent the chord label
                tevent = LyricsEvent()
                label = formatter((tonic, mode, chord))
                tevent.data = "%s\n" % label
                tevent.tick = time
                stream.add_event(tevent)

            # Add a note-on and off event for each note
            for note in triad_notes:
                non = NoteOnEvent()
                non.tick = time
                non.pitch = note
                non.channel = channel
                non.velocity = volume
                stream.add_event(non)

                # Hold the note until the next chord is played
                noff = NoteOffEvent()
                noff.pitch = note
                noff.channel = channel
                noff.velocity = volume
                pending_note_offs.append(noff)

        # Add the last remaining note offs, a full chord length after 
        #  the final chord's start
        for noff in pending_note_offs:
            noff.tick = time + chord_length
            stream.add_event(noff)
        return stream
Ejemplo n.º 11
0
 def generate(self, overlay=None, offset=0):
     """
     Generates a midi stream.
     
     If C{overlay} is given, the chords are written onto a new track of 
     that existing stream, using a quiet organ sound on the first 
     channel not already in use. Otherwise a new L{EventStream} is 
     created, using a piano sound on channel 0 at full volume.
     
     @param overlay: optional existing event stream to add a track to
     @param offset: tick offset added to every chord's start time
     @return: the event stream containing the generated events
     """
     # Number of octaves the triad pitches are folded into (see below)
     octaves = 1
     
     if overlay is not None:
         stream = overlay
         # Use organ sound
         instrument = 23
         # Find the last channel used in the file we're overlaying
         channel = max(ev.channel for ev in stream.trackpool) + 1
         volume = 30
     else:
         stream = EventStream()
         stream.resolution = self.resolution
         # Just use piano
         instrument = 0
         channel = 0
         volume = 127
     stream.add_track()
     # Select the instrument on our channel before any notes sound
     pc = ProgramChangeEvent()
     pc.value = instrument
     pc.tick = 0
     pc.channel = channel
     stream.add_event(pc)
     # Length of each chord in midi ticks
     chord_length = int(self.resolution * self.chord_length)
     
     if self.times is None:
         # No explicit times given: space the chords out evenly
         times = [i*chord_length + offset for i in range(len(self.labels))]
     else:
         times = [t+offset for t in self.times]
     
     formatter = getattr(self, 'formatter')
     
     # Note-offs for the chord currently sounding; their ticks are 
     #  assigned when the next chord begins (or after the final one)
     pending_note_offs = []
     for (tonic,mode,chord),time in zip(self.labels, times):
         # Root of this chord as a pitch class (0-11)
         scale_chord_root = constants.CHORD_NOTES[mode][chord][0]
         chord_root = (tonic+scale_chord_root) % 12
         triad_type = constants.SCALE_TRIADS[mode][chord]
         # Work out the notes for this chord, folded into the octave at 72
         triad_notes = [(chord_root + note) % (octaves*12) + 72 for note in constants.TRIAD_NOTES[triad_type]]
         # Add the root in the octave two below
         triad_notes.append(chord_root + 48)
         
         # Add note offs for notes already on, one tick before this chord
         for noff in pending_note_offs:
             noff.tick = time-1
             stream.add_event(noff)
         pending_note_offs = []
         
         if self.text_events:
             # Add a text event to represent the chord label
             tevent = LyricsEvent()
             label = formatter((tonic,mode,chord))
             tevent.data = "%s\n" % label
             tevent.tick = time
             stream.add_event(tevent)
         
         # Add a note-on and off event for each note
         for note in triad_notes:
             non = NoteOnEvent()
             non.tick = time
             non.pitch = note
             non.channel = channel
             non.velocity = volume
             stream.add_event(non)
             
             # Hold the note until the next chord is played
             noff = NoteOffEvent()
             noff.pitch = note
             noff.channel = channel
             noff.velocity = volume
             pending_note_offs.append(noff)
     
     # Add the last remaining note offs, one chord length after the 
     #  final chord started
     for noff in pending_note_offs:
         noff.tick = time+chord_length
         stream.add_event(noff)
     return stream
Ejemplo n.º 12
0
def simplify(stream,
             remove_drums=False,
             remove_pc=False,
             remove_all_text=False,
             one_track=False,
             remove_tempo=False,
             remove_control=False,
             one_channel=False,
             remove_misc_control=False,
             real_note_offs=False,
             remove_duplicates=False):
    """
    Filters a midi L{midi.EventStream} to simplify it. This is useful 
    as a preprocessing step before taking midi input to an algorithm, 
    for example, to make it clearer what the algorithm is using.
    
    Use kwargs to determine what filters will be applied. Without any 
    kwargs, the stream will just be left as it was.
    
    Returns a filtered copy of the stream, with the events shifted so 
    that the first note starts at the same tick as it did in the input.
    
    @type remove_drums: bool
    @param remove_drums: filter out all channel 10 events
    @type remove_pc: bool
    @param remove_pc: filter out all program change events
    @type remove_all_text: bool
    @param remove_all_text: filter out any text events. This includes 
        copyright, text, track name, lyrics.
    @type one_track: bool
    @param one_track: reduce everything to just one track
    @type remove_tempo: bool
    @param remove_tempo: filter out all tempo events
    @type remove_control: bool
    @param remove_control: filter out all control change events
    @type one_channel: bool
    @param one_channel: use only one channel: set the channel of 
        every event to 0
    @type remove_misc_control: bool
    @param remove_misc_control: filters a miscellany of device 
        control events: aftertouch, channel aftertouch, pitch wheel, 
        sysex, port
    @type real_note_offs: bool
    @param real_note_offs: replace 0-velocity note-ons with actual 
        note-offs. Some midi files use one, some the other
    @type remove_duplicates: bool
    @param remove_duplicates: after filtering, remove any duplicate 
        notes (see L{remove_duplicate_notes})
    
    """
    from midi import EventStream, TextEvent, ProgramChangeEvent, \
        CopyrightEvent, TrackNameEvent, NoteOnEvent, NoteOffEvent, \
        SetTempoEvent, ControlChangeEvent, AfterTouchEvent, \
        ChannelAfterTouchEvent, PitchWheelEvent, SysExEvent, \
        LyricsEvent, PortEvent, CuePointEvent, MarkerEvent, EndOfTrackEvent
    import copy

    # Empty stream to which we'll add the events we don't filter
    new_stream = EventStream()
    new_stream.resolution = stream.resolution
    new_stream.format = stream.format

    # Work out when the first note starts in the input stream
    input_start = first_note_tick(stream)

    # Filter track by track
    for track in stream:
        track_events = []
        for ev in sorted(track):
            # Don't add EOTs - they get added automatically
            if type(ev) == EndOfTrackEvent:
                continue
            # Copy so the filters never modify the input stream's events
            ev = copy.deepcopy(ev)
            # Each filter may modify the event or continue to filter it altogether

            if remove_drums:
                # Filter out any channel 10 events, which is typically
                #  reserved for drums
                if ev.channel == 9 and \
                        type(ev) in (NoteOnEvent, NoteOffEvent):
                    continue
            if remove_pc:
                # Filter out any program change events
                if type(ev) == ProgramChangeEvent:
                    continue
            if remove_all_text:
                # Filter out any types of text event
                if type(ev) in (TextEvent, CopyrightEvent, TrackNameEvent,
                                LyricsEvent, CuePointEvent, MarkerEvent):
                    continue
            if remove_tempo:
                # Filter out any tempo events
                if type(ev) == SetTempoEvent:
                    continue
            if remove_control:
                # Filter out any control change events
                if type(ev) == ControlChangeEvent:
                    continue
            if remove_misc_control:
                # Filter out various types of control events
                if type(ev) in (AfterTouchEvent, ChannelAfterTouchEvent,
                                PitchWheelEvent, SysExEvent, PortEvent):
                    continue
            if real_note_offs:
                # Replace 0-velocity note-ons with note-offs
                if type(ev) == NoteOnEvent and ev.velocity == 0:
                    new_ev = NoteOffEvent()
                    new_ev.pitch = ev.pitch
                    new_ev.channel = ev.channel
                    new_ev.tick = ev.tick
                    ev = new_ev
            if one_channel:
                ev.channel = 0

            track_events.append(ev)

        # If there are events left in the track, add them all as a new track
        # NOTE(review): a track with exactly one surviving event is 
        #  dropped by this > 1 test - confirm that is intended
        if len(track_events) > 1:
            if not one_track or len(new_stream.tracklist) == 0:
                new_stream.add_track()
            for ev in track_events:
                new_stream.add_event(ev)
            track_events = []

    # NOTE(review): this sorts the *input* stream's tracks in place 
    #  (mutating the argument) - presumably to restore tick order
    for track in stream:
        track.sort()

    # Work out when the first note happens now
    result_start = first_note_tick(new_stream)
    # Move all events after and including this sooner so the music
    #  starts at the same point it did before
    shift = result_start - input_start
    before_start = max(input_start - 1, 0)
    if shift > 0:
        for ev in new_stream.trackpool:
            if ev.tick >= result_start:
                ev.tick -= shift
            elif ev.tick < result_start and ev.tick >= input_start:
                # This event happened in a region that no longer contains notes
                # Move it back to before what's now the first note
                ev.tick = before_start

    new_stream.trackpool.sort()

    if remove_duplicates:
        # Get rid of now duplicate events
        remove_duplicate_notes(new_stream, replay=True)

    return new_stream
Ejemplo n.º 13
0
def simplify(stream, remove_drums=False, remove_pc=False, 
        remove_all_text=False, one_track=False, remove_tempo=False,
        remove_control=False, one_channel=False, 
        remove_misc_control=False, real_note_offs=False, remove_duplicates=False):
    """
    Filters a midi L{midi.EventStream} to simplify it. This is useful 
    as a preprocessing step before taking midi input to an algorithm, 
    for example, to make it clearer what the algorithm is using.
    
    Use kwargs to determine what filters will be applied. Without any 
    kwargs, the stream will just be left as it was.
    
    Returns a filtered copy of the stream, with the events shifted so 
    that the first note starts at the same tick as it did in the input.
    
    @type remove_drums: bool
    @param remove_drums: filter out all channel 10 events
    @type remove_pc: bool
    @param remove_pc: filter out all program change events
    @type remove_all_text: bool
    @param remove_all_text: filter out any text events. This includes 
        copyright, text, track name, lyrics.
    @type one_track: bool
    @param one_track: reduce everything to just one track
    @type remove_tempo: bool
    @param remove_tempo: filter out all tempo events
    @type remove_control: bool
    @param remove_control: filter out all control change events
    @type one_channel: bool
    @param one_channel: use only one channel: set the channel of 
        every event to 0
    @type remove_misc_control: bool
    @param remove_misc_control: filters a miscellany of device 
        control events: aftertouch, channel aftertouch, pitch wheel, 
        sysex, port
    @type real_note_offs: bool
    @param real_note_offs: replace 0-velocity note-ons with actual 
        note-offs. Some midi files use one, some the other
    @type remove_duplicates: bool
    @param remove_duplicates: after filtering, remove any duplicate 
        notes (see L{remove_duplicate_notes})
    
    """
    from midi import EventStream, TextEvent, ProgramChangeEvent, \
        CopyrightEvent, TrackNameEvent, NoteOnEvent, NoteOffEvent, \
        SetTempoEvent, ControlChangeEvent, AfterTouchEvent, \
        ChannelAfterTouchEvent, PitchWheelEvent, SysExEvent, \
        LyricsEvent, PortEvent, CuePointEvent, MarkerEvent, EndOfTrackEvent
    import copy
    
    # Empty stream to which we'll add the events we don't filter
    new_stream = EventStream()
    new_stream.resolution = stream.resolution
    new_stream.format = stream.format
    
    # Work out when the first note starts in the input stream
    input_start = first_note_tick(stream)
    
    # Filter track by track
    for track in stream:
        track_events = []
        for ev in sorted(track):
            # Don't add EOTs - they get added automatically
            if type(ev) == EndOfTrackEvent:
                continue
            # Copy so the filters never modify the input stream's events
            ev = copy.deepcopy(ev)
            # Each filter may modify the event or continue to filter it altogether
            
            if remove_drums:
                # Filter out any channel 10 events, which is typically 
                #  reserved for drums
                if ev.channel == 9 and \
                        type(ev) in (NoteOnEvent, NoteOffEvent):
                    continue
            if remove_pc:
                # Filter out any program change events
                if type(ev) == ProgramChangeEvent:
                    continue
            if remove_all_text:
                # Filter out any types of text event
                if type(ev) in (TextEvent, CopyrightEvent, TrackNameEvent,
                        LyricsEvent, CuePointEvent, MarkerEvent):
                    continue
            if remove_tempo:
                # Filter out any tempo events
                if type(ev) == SetTempoEvent:
                    continue
            if remove_control:
                # Filter out any control change events
                if type(ev) == ControlChangeEvent:
                    continue
            if remove_misc_control:
                # Filter out various types of control events
                if type(ev) in (AfterTouchEvent, ChannelAfterTouchEvent, 
                        PitchWheelEvent, SysExEvent, PortEvent):
                    continue
            if real_note_offs:
                # Replace 0-velocity note-ons with note-offs
                if type(ev) == NoteOnEvent and ev.velocity == 0:
                    new_ev = NoteOffEvent()
                    new_ev.pitch = ev.pitch
                    new_ev.channel = ev.channel
                    new_ev.tick = ev.tick
                    ev = new_ev
            if one_channel:
                ev.channel = 0
            
            track_events.append(ev)
        
        # If there are events left in the track, add them all as a new track
        # NOTE(review): a track with exactly one surviving event is 
        #  dropped by this > 1 test - confirm that is intended
        if len(track_events) > 1:
            if not one_track or len(new_stream.tracklist) == 0:
                new_stream.add_track()
            for ev in track_events:
                new_stream.add_event(ev)
            track_events = []
    
    # NOTE(review): this sorts the *input* stream's tracks in place 
    #  (mutating the argument) - presumably to restore tick order
    for track in stream:
        track.sort()
    
    # Work out when the first note happens now
    result_start = first_note_tick(new_stream)
    # Move all events after and including this sooner so the music 
    #  starts at the same point it did before
    shift = result_start - input_start
    before_start = max(input_start-1, 0)
    if shift > 0:
        for ev in new_stream.trackpool:
            if ev.tick >= result_start:
                ev.tick -= shift
            elif ev.tick < result_start and ev.tick >= input_start:
                # This event happened in a region that no longer contains notes
                # Move it back to before what's now the first note
                ev.tick = before_start
    
    new_stream.trackpool.sort()
    
    if remove_duplicates:
        # Get rid of now duplicate events
        remove_duplicate_notes(new_stream, replay=True)
    
    return new_stream