Esempio n. 1
0
    def test_book_example(self):
        """Build a small score with three instrument voices and pin two lines to the first violin voice."""
        score = Score()

        # Three instrument voices: violin (doubled), trumpet, clarinet.
        catalogue = InstrumentCatalog.instance()
        score.add_instrument_voice(
            InstrumentVoice(catalogue.get_instrument("violin"), 2))
        score.add_instrument_voice(
            InstrumentVoice(catalogue.get_instrument("trumpet")))
        score.add_instrument_voice(
            InstrumentVoice(catalogue.get_instrument("clarinet")))

        # 3/4 time at 60 BPM, i.e. one beat per second.
        score.tempo_sequence.add(TempoEvent(Tempo(60), Position(0)))
        score.time_signature_sequence.add(
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4)), Position(0)))

        # Pin two melodic lines onto voice 0 of the violin.
        first_violin = score.get_instrument_voice("violin")[0].voice(0)
        eighths = [Note(DiatonicPitch(4, letter), Duration(1, 8))
                   for letter in 'afdecd']
        first_violin.pin(Line(eighths))
        quarters = [Note(DiatonicPitch(4, letter), Duration(1, 4))
                    for letter in 'cdc']
        first_violin.pin(Line(quarters))
Esempio n. 2
0
    def test_book_plf(self):
        """Exercise PiecewiseLinearPitchFunction evaluation at its breakpoints and between them."""
        breakpoints = [(0, 'A:4'), (Fraction(1, 2), 'C:5'),
                       (Position(3, 4), 'e:4'), (1, 'C:5')]
        f = PiecewiseLinearPitchFunction(breakpoints)

        # Nearest-pitch evaluation lands exactly on the breakpoint pitches.
        for t, pitch_str in ((0, 'A:4'), (0.5, 'C:5'), (0.75, 'E:4'),
                             (1, 'C:5')):
            assert pitch_str == str(f.eval_as_nearest_pitch(t))

        # Recall f is also an instance of PiecewiseLinearFunction, so eval should work.
        assert DiatonicPitch.parse('A:4').chromatic_distance == f.eval(0)
        assert DiatonicPitch.parse('C:5').chromatic_distance == f.eval(0.5)
        assert DiatonicPitch.parse('E:4').chromatic_distance == f.eval(
            Fraction(3, 4))
        assert DiatonicPitch.parse('C:5').chromatic_distance == f.eval(
            Position(1))

        # 0.25 has two tones that are considered close between A:4 and C:5
        print('[{0}]'.format(','.join(str(p) for p in f.eval_as_pitch(0.25))))

        # 0.625 between C:5 and E:4 comes to G#:4
        print('[{0}]'.format(','.join(
            str(p) for p in f.eval_as_pitch(Fraction(5, 8)))))

        for pitch_str in ('A:4', 'C:5', 'E:4', 'C:5'):
            print(DiatonicPitch.parse(pitch_str).chromatic_distance)
Esempio n. 3
0
    def test_adding_notes(self):
        """Verify that appending a note to a pinned line is reflected in the owning voice."""
        score = Score()

        # One instrument voice: violin, doubled.
        catalogue = InstrumentCatalog.instance()
        score.add_instrument_voice(
            InstrumentVoice(catalogue.get_instrument("violin"), 2))

        # 3/4 time at 60 BPM, i.e. one beat per second.
        score.tempo_sequence.add(TempoEvent(Tempo(60), Position(0)))
        score.time_signature_sequence.add(
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4)), Position(0)))

        violin_voice = score.get_instrument_voice("violin")[0]

        melody = Line([Note(DiatonicPitch(4, letter), Duration(1, 8))
                       for letter in 'afd'])
        violin_voice.voice(0).pin(melody)

        for note in violin_voice.get_all_notes():
            print(note)

        # Appending to the line should show up when querying the voice again.
        melody.append(Note(DiatonicPitch(4, 'g'), Duration(1, 8)))
        for note in violin_voice.get_all_notes():
            print(note)
Esempio n. 4
0
    def get_time_in_ms(self):
        """
        Compute the actual (wall-clock) time at the end of the score, in ms.

        Expands any TempoFunctionEvent into a series of discrete TempoEvents
        (a step-function approximation sampled every TEMPO_EVENT_DURATION_MS),
        then uses TimeConversion to map the score's end position to actual time.

        :return: actual time in milliseconds at Position(self.score.duration).
        """
        event_list = self.score.tempo_sequence.sequence_list
        score_len = self.score.length()

        fine_tempo_sequence = TempoEventSequence()

        for event in event_list:
            if isinstance(event, TempoEvent):
                # Fixed tempo: carry the event over unchanged.
                fine_tempo_sequence.add(TempoEvent(event.object, event.time))
            elif isinstance(event, TempoFunctionEvent):
                t1 = event.time
                # Use the event's own beat duration when given, else fall back
                # to the converter default.  (The original test was inverted
                # and always discarded the event's setting; with beat_duration
                # unset it would also have crashed on `.duration` of None.)
                beat_duration = event.beat_duration if event.beat_duration is not None else \
                    ScoreToVstMidiConverter.DEFAULT_BEAT_DURATION
                next_event = self.score.tempo_sequence.successor(event)
                t2 = next_event.time if next_event is not None else Position(
                    score_len.duration)
                # Sample the tempo function every TEMPO_EVENT_DURATION_MS.
                while t1 < t2:
                    tempo = int(
                        event.tempo(
                            t1, next_event.time if next_event is not None else
                            Position(score_len)))
                    # Whole-note time covered by one sampling interval at this tempo.
                    delta_wnt = (tempo * ScoreToVstMidiConverter.TEMPO_EVENT_DURATION_MS * beat_duration.duration) / \
                                (60.0 * 1000.0)

                    fine_tempo_sequence.add(
                        TempoEvent(Tempo(tempo, beat_duration), t1))

                    t1 += delta_wnt

        conversion = TimeConversion(fine_tempo_sequence,
                                    self.score.time_signature_sequence,
                                    Position(score_len))
        actual_time = conversion.position_to_actual_time(
            Position(self.score.duration))
        return actual_time
Esempio n. 5
0
    def test_score_book_example(self):
        """Convert a one-voice score (and a bare line) to midi files."""
        score = Score()

        catalogue = InstrumentCatalog.instance()
        score.add_instrument_voice(
            InstrumentVoice(catalogue.get_instrument("violin")))

        # 4/4 at 60 BPM.
        score.tempo_sequence.add(TempoEvent(Tempo(60), Position(0)))
        score.time_signature_sequence.add(
            TimeSignatureEvent(TimeSignature(4, Duration(1, 4)), Position(0)))

        violin_voice = score.get_instrument_voice("violin")[0]
        # ((octave, letter), duration-denominator) pairs for the melody.
        spec = [((4, 'A'), 4), ((5, 'C'), 8), ((5, 'B'), 8), ((5, 'D'), 4),
                ((5, 'E'), 8), ((5, 'D'), 8), ((4, 'G'), 4), ((4, 'C'), 4)]
        line = Line([Note(DiatonicPitch(octave, letter), Duration(1, denom))
                     for (octave, letter), denom in spec])
        violin_voice.voice(0).pin(line)

        smc = ScoreToMidiConverter(score)
        smc.create('book_example_midi_file.mid', True)

        ScoreToMidiConverter.convert_line(line,
                                          'line_example_midi_file.mid',
                                          Tempo(90, Duration(1, 8)),
                                          instrument_name='violin')
def basic_search_example():
    """Demonstrate MelodicSearch over a parsed line/hct, with and without chordal note matching."""
    print('----- test simple hct setup -----')

    lge = LineGrammarExecutor()

    pattern = '{<C-Major: I> qC:4 D iE F}'
    target = '{qC:4 D iE F <E-Major: v> qF# hG# <Ab-Minor: ii> qAb:3 Cb:4 iDb F <D-Major: I> qD E F#}'
    target_line, target_hct = lge.parse(target)

    search = MelodicSearch.create(pattern)

    # Plain search: two hits, at the opening and at the Ab-minor fragment.
    hits = search.search(target_line, target_hct)
    assert hits is not None
    assert len(hits) == 2
    assert hits[0] == Position(0)
    assert hits[1] == Position(3, 2)

    # Requiring chordal note matches eliminates the second hit.
    hits = search.search(target_line, target_hct, GlobalSearchOptions(note_match_chordal=True))
    assert hits is not None
    assert len(hits) == 1
    assert hits[0] == Position(0)

    # Alter the Ab-minor fragment so that it also matches chordally.
    target = '{qC:4 D iE F <E-Major: v> qF# hG# <Ab-Minor: ii> qBb:3 Cb:4 iDb Eb <D-Major: I> qD E F#}'
    target_line, target_hct = lge.parse(target)

    hits = search.search(target_line, target_hct, GlobalSearchOptions(note_match_chordal=True))
    assert hits is not None
    assert len(hits) == 2
    assert hits[0] == Position(0)
    assert hits[1] == Position(3, 2)
Esempio n. 7
0
    def test_add_notes_to_line(self):
        """Pin notes at explicit offsets, then batch-pin two more and query by interval."""
        catalog = InstrumentCatalog.instance()
        violin = catalog.get_instrument("violin")

        # Four eighth notes pinned at successive eighth-note offsets from 1/4.
        placements = [('a', Offset(1, 4)), ('b', Offset(3, 8)),
                      ('c', Offset(1, 2)), ('d', Offset(5, 8))]
        line = Line()
        for letter, offset in placements:
            line.pin(Note(DiatonicPitch(4, letter), Duration(1, 8)), offset)

        voice = Voice(violin)
        voice.pin(line, Offset(0))

        # Two more notes batch-pinned starting at 3/4.
        extras = [Note(DiatonicPitch(5, letter), Duration(1, 8))
                  for letter in 'ab']
        line.pin(extras, Offset(3, 4))

        found = voice.get_notes_starting_in_interval(
            Interval(Position(5, 8), Position(2, 1)))
        assert len(found) == 3
        for pitch_str in ('D:4', 'A:5', 'B:5'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_str))
    def test_generic_sin(self):
        """Sample a sinusoidal pitch function over [0, 2] and spot-check one point."""
        f = GenericUnivariatePitchFunction(
            TestGenericUnivariatePitchFunction.sinasoidal, Position(0),
            Position(2))

        steps = 32

        # Dump chromatic distance, nearest pitch and pitch choices at each step.
        for i in range(steps + 1):
            t = Fraction(i, steps)
            text = '[{0}] chrom={1} near_p={2}   choices={3}'.format(
                i, f.eval_as_chromatic_distance(t),
                f.eval_as_nearest_pitch(t),
                TestGenericUnivariatePitchFunction.print_choices(
                    f.eval_as_pitch(t)))
            print(text)

        # At 5/32 the function yields exactly two pitch choices.
        t = Fraction(5, steps)
        f.eval_as_chromatic_distance(t)
        f.eval_as_nearest_pitch(t)
        choices = f.eval_as_pitch(t)
        assert len(choices) == 2
        assert str(choices[0]) == 'D#:5'
        assert str(choices[1]) == 'E:5'
Esempio n. 9
0
def simple_reshape_no_pf():
    """Reshape a line against a sinusoidal pitch function and print the best-fitting results."""
    print('----- test_simple_reshape_no_pf -----')

    line_str = '{<C-Major: I> iE:4 E E E E E E E <:IV> qE ie e <:V> qe ie e <:VI>  qE E iE E E E}'

    score = create_score(line_str, 'violin', (3, 4, 'sww'))

    all_notes = score.line.get_all_notes()

    pitch_function = GenericUnivariatePitchFunction(sinasoidal, Position(0),
                                                    Position(3))
    time_range = Range(0, 3)

    # The first note should have one of 3 values, C:4, E:4, G:4
    constraints = {
        ChordalPitchConstraint(all_notes[0]),
        ChordalPitchConstraint(all_notes[9]),
        PitchRangeConstraint([all_notes[0]], PitchRange.create('C:4', 'G:4')),
    }

    melodic_form = MelodicForm([Motif(score.line, constraints, 'A')])
    reshaper = TReshape(score, pitch_function, time_range, melodic_form, False)

    results = reshaper.apply()

    mc_filter = MinCurveFitFilter(pitch_function, results)
    print('{0} filtered results'.format(len(mc_filter.scored_results)))

    # Show up to the five best-scoring results.
    for index, (scored, fit_score) in enumerate(mc_filter.scored_results[:5]):
        print('[{0}] {1} ({2})'.format(index, str_line(scored.line),
                                       fit_score))
    def convert_line(line,
                     tempo=Tempo(60, Duration(1, 4)),
                     time_signature=TimeSignature(4, Duration(1, 4)),
                     channel_assignments=None,
                     fps=42100):
        """
        Static method to convert a Line to a midi file.

        Args:
          :param line: Class Line object
          :param tempo: Tempo for playback, default is 60 BPM tempo beat = quarter note
          :param time_signature: TimeSignature on playback, default is 4 quarter notes
          :param channel_assignments:
          :param fps: frames per second setting

        """
        # Wrap the line into a one-voice piano score, then convert that score.
        score = Score()
        score.tempo_sequence.add(TempoEvent(tempo, Position(0)))
        score.time_signature_sequence.add(
            TimeSignatureEvent(time_signature, Position(0)))

        piano = InstrumentCatalog.instance().get_instrument("piano")
        piano_instrument_voice = InstrumentVoice(piano, 1)
        piano_instrument_voice.voice(0).pin(line, Offset(0))

        score.add_instrument_voice(piano_instrument_voice)
        return ScoreToVstMidiConverter.convert_score(score,
                                                     channel_assignments, fps)
Esempio n. 11
0
 def _build_default_time_sig_tempo():
     """Return (ts_seq, tempo_seq): a 3/4 'sww' time signature and a 60 BPM quarter-note tempo, both at position 0."""
     tempo_seq = TempoEventSequence()
     tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))

     ts_seq = EventSequence()
     ts_seq.add(
         TimeSignatureEvent(TimeSignature(3, Duration(1, 4), 'sww'),
                            Position(0)))

     return ts_seq, tempo_seq
Esempio n. 12
0
    def test_overlap_vs_start(self):
        """Contrast interval-overlap queries with interval-start queries on a voice."""
        catalog = InstrumentCatalog.instance()
        violin = catalog.get_instrument("violin")

        # Four eighth notes pinned at successive eighth-note offsets from 1/4.
        placements = [('a', Offset(1, 4)), ('b', Offset(3, 8)),
                      ('c', Offset(1, 2)), ('d', Offset(5, 8))]
        line = Line()
        for letter, offset in placements:
            line.pin(Note(DiatonicPitch(4, letter), Duration(1, 8)), offset)

        voice = Voice(violin)
        voice.pin(line, Offset(0))

        interval = Interval(Position(5, 16), Position(5, 8))

        # Overlap query: the A at 1/4 is still sounding at 5/16.
        found = voice.get_notes_by_interval(interval)
        assert len(found) == 3
        for pitch_str in ('A:4', 'B:4', 'C:4'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_str))

        # Start query: only notes that begin inside the interval qualify.
        found = voice.get_notes_starting_in_interval(interval)
        assert len(found) == 2
        for pitch_str in ('B:4', 'C:4'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_str))

        # Add a second line of two half notes, pinned to the voice at 1/4.
        high_a = Note(DiatonicPitch(5, 'a'), Duration(1, 2))
        high_b = Note(DiatonicPitch(5, 'b'), Duration(1, 2))
        second_line = Line()
        second_line.pin([high_a, high_b], Offset(0))

        voice.pin(second_line, Offset(1, 4))

        # The overlap query now also picks up A:5 from the second line.
        found = voice.get_notes_by_interval(interval)
        assert len(found) == 4
        for pitch_str in ('A:4', 'B:4', 'C:4', 'A:5'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_str))

        # Restrict the overlap query to the second line only.
        found = voice.get_notes_by_interval(interval, second_line)
        assert len(found) == 1
        assert TestVoice.has_pitch(found, DiatonicPitch.parse('A:5'))

        # The start query result is unchanged by the new line.
        found = voice.get_notes_starting_in_interval(interval)
        assert len(found) == 2
        for pitch_str in ('B:4', 'C:4'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_str))

        # Start query restricted to the second line over a different interval.
        found = voice.get_notes_starting_in_interval(
            Interval(Position(1, 8), Position(3, 8)), second_line)
        assert len(found) == 1
        assert TestVoice.has_pitch(found, DiatonicPitch.parse('A:5'))
    def _fill_meta_track(self, meta_track):
        """
        Populate the midi meta track with tempo messages from the score's
        tempo sequence.

        A fixed TempoEvent becomes a single midi tempo change.  A
        TempoFunctionEvent is sampled every TEMPO_EVENT_DURATION_MS into a
        series of small tempo changes (a step-function approximation).

        :param meta_track: midi track (list-like) receiving MetaMessage objects.
        """
        event_list = self.score.tempo_sequence.sequence_list
        score_len = self.score.length()

        #  Loop over list, for every change in tempo , the tempo should be reset.
        #  Note, that there may be tempo or ts changes that last for 0 duration - we skip those.
        last_fps_time = 0
        for tempo_event in event_list:
            if tempo_event.time >= score_len:
                break
            if isinstance(tempo_event, TempoEvent):
                current_fps_time = self._wnt_to_fps(tempo_event.time)

                # If there is a ts and tempo event, effect a midi tempo change
                beat_ratio = Fraction(
                    1, 4) / tempo_event.object.beat_duration.duration

                # tempo_value = (60/BPM) * (ts_beat / tempo_beat)
                tempo_value = int(
                    (60.0 / tempo_event.object.tempo) * beat_ratio * 1000000)

                frames = int(current_fps_time - last_fps_time)
                msg = MetaMessage(MetaMessage.TEMPO_MESSAGE, tempo_value,
                                  frames)
                meta_track.append(msg)
                last_fps_time = current_fps_time
            elif isinstance(tempo_event, TempoFunctionEvent):
                #  Run over event range making a small step function effectively, and setting the tempo
                #  every TEMPO_EVENT_DURATION_MS.
                t1 = tempo_event.time
                # Use the event's beat duration when provided, else the default.
                # (The original test was inverted and always took the default;
                # with beat_duration unset it would have crashed on `.duration`
                # of None below.)
                beat_duration = tempo_event.beat_duration if tempo_event.beat_duration is not None else \
                    ScoreToVstMidiConverter.DEFAULT_BEAT_DURATION
                next_event = self.score.tempo_sequence.successor(tempo_event)
                t2 = next_event.time if next_event is not None else Position(
                    score_len.duration)
                while t1 < t2:
                    tempo = int(
                        tempo_event.tempo(
                            t1, next_event.time if next_event is not None else
                            Position(score_len)))

                    # Whole-note time covered by one sampling interval at this tempo.
                    delta_wnt = (tempo * ScoreToVstMidiConverter.TEMPO_EVENT_DURATION_MS * beat_duration.duration) / \
                                (60.0 * 1000.0)

                    current_fps_time = self._wnt_to_fps(t1)
                    frames = int(current_fps_time - last_fps_time)

                    # If there is a ts and tempo event, effect a midi tempo change
                    beat_ratio = Fraction(1, 4) / beat_duration.duration

                    # tempo_value = (60/BPM) * (ts_beat / tempo_beat)
                    tempo_value = int((60.0 / tempo) * beat_ratio * 1000000)
                    msg = MetaMessage(MetaMessage.TEMPO_MESSAGE, tempo_value,
                                      frames)
                    meta_track.append(msg)

                    t1 += delta_wnt
                    last_fps_time = current_fps_time
Esempio n. 14
0
 def beat_position(self, position):
     """
     Get the beat position corresponding to given position.
     :param position: Position
     :return: BeatPosition
     """
     # Delegate to TimeConversion, bounded by this object's total duration.
     converter = TimeConversion(self.tempo_sequence,
                                self.time_signature_sequence,
                                Position(self.duration.duration))
     return converter.position_to_bp(Position(position.position))
Esempio n. 15
0
    def test_tempo_change(self):
        """Render a score with successive tempo changes (120, 60, 30 BPM) to midi and read it back."""
        catalog = InstrumentCatalog.instance()

        score = Score()

        # 3/4 time; the tempo halves at whole-note positions 1 and 2.
        score.time_signature_sequence.add(
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4)), Position(0)))
        for bpm, pos in ((120, 0), (60, 1), (30, 2)):
            score.tempo_sequence.add(
                TempoEvent(Tempo(bpm, Duration(1, 4)), Position(pos)))

        violin = catalog.get_instrument("violin")
        violin_instrument_voice = InstrumentVoice(violin, 1)
        violin_voice_0 = violin_instrument_voice.voice(0)
        assert violin_voice_0
        score.add_instrument_voice(violin_instrument_voice)

        violin_voice_0.dynamics_sequence.add(
            DynamicsEvent(Dynamics(Dynamics.F), Position(0)))

        # Twelve quarter notes: three repetitions of A:4 B:4 C:5 D:5.
        spec = [(4, 'a'), (4, 'b'), (5, 'c'), (5, 'd')] * 3
        all_notes = [Note(DiatonicPitch(octave, letter), Duration(1, 4))
                     for octave, letter in spec]

        vline_0 = Line(all_notes)
        violin_voice_0.pin(vline_0)

        # A second line re-using the first four note objects.
        vline_1 = Line(all_notes[:4])
        violin_voice_0.pin(vline_1)

        smc = ScoreToMidiConverter(score)
        smc.create('score_tempo_change_file.mid')

        TestScoreToMidiConverter.read_midi_file('score_tempo_change_file.mid')
Esempio n. 16
0
 def __init__(self, tonality, chord, duration, position=Position(0)):
     """
     Constructor.
     :param tonality: Tonality
     :param chord: Chord
     :param duration: Duration
     :param position: Position (defaults to the start of the piece)
     """
     # Duration and position are copied so callers' objects are not shared.
     self._duration = Duration(duration.duration)
     self._position = Position(position.position)
     self._tonality = tonality
     self._chord = chord
    def test_book_example(self):
        """Print the nearest pitch of a sine-shaped pitch function at quarter steps over [0, 2]."""
        # Interpreter anchors 0 -> C:4; each step of 1/12 maps to the next chromatic pitch.
        interpreter = ChromaticRangeInterpreter(DiatonicPitch.parse('C:4'), 0,
                                                Fraction(1, 12))

        # local_sin maps 0->0, .25->1, .5->0, .75->-1, 1->0 and so on.
        f = GenericUnivariatePitchFunction(local_sin, Position(0), Position(2),
                                           False, interpreter)

        for step in range(9):
            t = step * 0.25
            print('[{0}] {1}'.format(t, str(f.eval_as_nearest_pitch(t))))
Esempio n. 18
0
    def test_basic_plf(self):
        """Check eval, chromatic distance, frequency and pitch evaluation of a piecewise linear pitch function."""
        breakpoints = [(0, 'A:0'), (Fraction(1, 2), 'C:5'),
                       (Position(3, 4), 'G:4'), (1, 'A:5')]
        f = PiecewiseLinearPitchFunction(breakpoints)

        # eval and eval_as_chromatic_distance agree with the breakpoint pitches.
        cases = [('A:0', 0), ('C:5', 0.5), ('G:4', Fraction(3, 4)),
                 ('A:5', Position(1))]
        for pitch_str, t in cases:
            expected = DiatonicPitch.parse(pitch_str).chromatic_distance
            assert expected == f.eval(t)
            assert expected == f.eval_as_chromatic_distance(t)

        print(f.eval_as_frequency(0))
        assert ChromaticScale.A0 == f.eval_as_frequency(0)

        # Frequency at the C:5 breakpoint matches the chromatic scale's value.
        c5_location = ChromaticScale.index_to_location(
            DiatonicPitch.parse('C:5').chromatic_distance)
        print(c5_location)
        print(ChromaticScale.get_frequency(c5_location))
        print(f.eval_as_frequency(0.5))
        assert math.isclose(ChromaticScale.get_frequency(c5_location),
                            f.eval_as_frequency(0.5))

        print(f.eval(0.25))  # 34.5
        print(f.eval_as_nearest_pitch(0.25))
        # Interpolated frequency at 0.25 follows the equal-tempered formula.
        assert math.isclose(
            ChromaticScale.A0 * math.pow(ChromaticScale.SEMITONE_RATIO,
                                         f.eval(0.25) - 9),
            f.eval_as_frequency(0.25))

        assert 'A#:2' == str(f.eval_as_nearest_pitch(0.25))

        pitches = f.eval_as_pitch(0.25)
        assert 'A#:2' == str(pitches[0])
        assert 'B:2' == str(pitches[1])
Esempio n. 19
0
def create_score(line_expression, instrument, ts):
    """
    Build a LiteScore from a line-grammar expression.

    :param line_expression: line grammar string to parse.
    :param instrument: instrument name to look up in the catalog.
    :param ts: (beats, beat-denominator, beat-pattern) time signature triple.
    :return: LiteScore
    """
    lge = LineGrammarExecutor()
    source_instance_line, source_instance_hct = lge.parse(line_expression)

    # Fixed 60 BPM quarter-note tempo; the time signature comes from ts.
    tempo_seq = TempoEventSequence()
    tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
    ts_seq = EventSequence()
    ts_seq.add(TimeSignatureEvent(TimeSignature(ts[0], Duration(1, ts[1]), ts[2]), Position(0)))

    instrument_obj = InstrumentCatalog.instance().get_instrument(instrument)

    return LiteScore(source_instance_line, source_instance_hct, instrument_obj, tempo_seq, ts_seq)
    def build_simple_constraint(v_note, f, modality_type, key_str, chord_str):
        """Return (constraint, lower_policy_context) that fits v_note's pitch to function f."""
        lower_policy_context = TestFitPitchToFunctionConstraint.policy_creator(
            modality_type, DiatonicTone(key_str), chord_str, 'C:2', 'C:8')

        # Default timing context: 60 BPM quarter note, 3/4 'sww' signature.
        tempo_seq = TempoEventSequence()
        tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
        ts_seq = EventSequence()
        ts_seq.add(
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4), 'sww'),
                               Position(0)))

        constraint = FitPitchToFunctionConstraint(v_note, f, tempo_seq, ts_seq)
        return constraint, lower_policy_context
Esempio n. 21
0
    def create_score_artifacts(modality, key_tone, chords, ts):
        """Return (hc_track, tempo_seq, ts_seq) for the given modality, key, chords and ts triple."""
        diatonic_tonality = Tonality.create(
            modality, DiatonicToneCache.get_tone(key_tone))

        hc_track = TestPitchFitFunction.create_track(chords, diatonic_tonality)

        # 60 BPM quarter-note tempo; time signature built from the ts triple.
        tempo_seq = TempoEventSequence()
        tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
        ts_seq = EventSequence()
        ts_seq.add(
            TimeSignatureEvent(TimeSignature(ts[0], Duration(1, ts[1]), ts[2]),
                               Position(0)))

        return hc_track, tempo_seq, ts_seq
    def test_compute_with_minor_key(self):
        """Fit 33 sixteenth notes to a sinusoid under C natural minor / tV and check the pitches."""
        print('-- test_compute_with_minor_key ---')
        line = Line()

        f = GenericUnivariatePitchFunction(
            TestFitPitchToFunctionConstraint.sinasoidal, Position(0),
            Position(2))
        # 33 sixteenth notes pinned at successive sixteenth-note offsets.
        v_notes = [
            Note(DiatonicPitch.parse('A:4'), Duration(1, 16))
            for _ in range(0, 33)
        ]
        for i, v_note in enumerate(v_notes):
            line.pin(v_note, Offset(i, 16))

        # One constraint per note; keep the policy context from the first.
        constraints = []
        lower_policy_context = None
        for i, v_note in enumerate(v_notes):
            c, ctx = TestFitPitchToFunctionConstraint.build_simple_constraint(
                v_note, f, ModalityType.NaturalMinor, 'C', 'tV')
            if i == 0:
                lower_policy_context = ctx
            constraints.append(c)

        # The first note must resolve to exactly one pitch: C:4.
        p_map = PMap()
        p_map[v_notes[0]] = ContextualNote(lower_policy_context)

        results = constraints[0].values(p_map, v_notes[0])
        assert results is not None
        assert len(results) == 1
        print(next(iter(results)).diatonic_pitch)
        assert 'C:4' == str(next(iter(results)).diatonic_pitch)

        # Resolve each note independently against a fresh p_map.
        result_pitches = []
        for i in range(0, 33):
            p_map = PMap()
            p_map[v_notes[i]] = ContextualNote(lower_policy_context)
            results = constraints[i].values(p_map, v_notes[i])
            result_pitches.append(next(iter(results)).diatonic_pitch)

        assert len(result_pitches) == 33
        for i, pitch in enumerate(result_pitches):
            print('[{0}] {1}'.format(i, str(pitch)))

        # Spot-check the first nine pitches against the expected contour.
        checks = [
            'C:4', 'G:4', 'D:5', 'F:5', 'G:5', 'F:5', 'D:5', 'G:4', 'C:4'
        ]
        for expected, actual in zip(checks, result_pitches):
            assert expected == str(actual)
Esempio n. 23
0
    def test_time_conversion_simple(self):
        """
        Round-trip a position through actual time at 60 BPM in 3/4 time.

        Three quarter-note beats at one beat per second should take 3000 ms,
        and converting 3000 ms back should recover the position 3/4.
        """
        tempo_line = EventSequence([TempoEvent(Tempo(60), Position(0))])
        ts_line = EventSequence([
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4)), Position(0))
        ])
        conversion = TimeConversion(tempo_line, ts_line, Position(1, 1))
        actual_time = conversion.position_to_actual_time(Position(3, 4))
        print(actual_time)
        self.assertTrue(actual_time == 3000,
                        'actual time = {0} should be 3000'.format(actual_time))

        position = conversion.actual_time_to_position(3000)
        print(position)
        # Original used assertTrue(position, Position(3, 4)), which treats the
        # expected value as the failure *message* and passes for any truthy
        # position; assertEqual performs the intended comparison.
        self.assertEqual(position, Position(3, 4))
Esempio n. 24
0
    def test_multi_track(self):
        """Build a two-instrument score (violin plus offset piano), convert to midi, read it back."""
        catalog = InstrumentCatalog.instance()

        score = Score()

        # 3/4 at 60 BPM.
        score.time_signature_sequence.add(
            TimeSignatureEvent(TimeSignature(3, Duration(1, 4)), Position(0)))
        score.tempo_sequence.add(
            TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))

        violin = catalog.get_instrument("violin")
        piano = catalog.get_instrument("piano")

        # Quarter-note melodies: violin ascending in octave 4, piano descending in octave 5.
        violin_notes = [Note(DiatonicPitch(4, letter), Duration(1, 4))
                        for letter in 'abcd']
        piano_notes = [Note(DiatonicPitch(5, letter), Duration(1, 4))
                       for letter in 'gfed']

        violin_instrument_voice = InstrumentVoice(violin, 1)
        violin_voice = violin_instrument_voice.voice(0)
        assert violin_voice
        violin_voice.pin(Line(violin_notes))
        score.add_instrument_voice(violin_instrument_voice)

        piano_instrument_voice = InstrumentVoice(piano, 1)
        piano_voice = piano_instrument_voice.voice(0)
        assert piano_voice
        # The piano enters an eighth note later.
        piano_voice.pin(Line(piano_notes), Offset(1, 8))
        score.add_instrument_voice(piano_instrument_voice)

        # Violin forte, piano piano.
        violin_voice.dynamics_sequence.add(
            DynamicsEvent(Dynamics(Dynamics.F), Position(0)))
        piano_voice.dynamics_sequence.add(
            DynamicsEvent(Dynamics(Dynamics.P), Position(0)))

        smc = ScoreToMidiConverter(score)
        smc.create('score_multi_trackoutput_file.mid')

        TestScoreToMidiConverter.read_midi_file(
            'score_multi_trackoutput_file.mid')
    def test_hct_rebuild_perfect_overlap(self):
        """Apply a diatonic reflection over [1/2, 3/2] and verify the rebuilt hct and notes."""
        print('----- test_hct_rebuild_perfect_overlap -----')

        line_str = '{<C-Major: I> hA:5 <:IV> B  qC G <:VI> hD}'
        lge = LineGrammarExecutor()
        target_line, target_hct = lge.parse(line_str)
        TestTFlip.print_hct(target_hct)

        cue = DiatonicPitch(5, 'd')

        tflip = TDiatonicReflection(target_line, target_hct, cue)

        temporal_extent = Interval(Fraction(1, 2), Fraction(3, 2))
        score_line, score_hct = tflip.apply(temporal_extent, cue)
        TestTFlip.print_notes(score_line)
        TestTFlip.print_hct(score_hct)

        # Expected per-hc: position, duration, chord type, scale degree, tones, inversion.
        expected_hcs = [
            (Position(0), Duration(1, 2),
             TertianChordType(TertianChordType.Maj), 1, {'C', 'E', 'G'}, 1),
            (Position(1, 2), Duration(1),
             TertianChordType(TertianChordType.Min), 3, {'G', 'B', 'E'}, 3),
            (Position(3, 2), Duration(1, 2),
             TertianChordType(TertianChordType.Min), 6, {'A', 'C', 'E'}, 1),
        ]
        hc_list = score_hct.hc_list()
        for hc, (pos, dur, chord_type, degree, tones, inversion) in \
                zip(hc_list, expected_hcs):
            assert hc.position == pos
            assert hc.duration == dur
            assert hc.chord.chord_type == chord_type
            assert hc.chord.chord_template.scale_degree == degree
            assert {t[0].diatonic_symbol for t in hc.chord.tones} == tones
            assert hc.chord.chord_template.inversion == inversion

        notes = score_line.get_all_notes()
        assert str(notes[1].diatonic_pitch) == "F:4"
        assert str(notes[2].diatonic_pitch) == "E:5"
        assert str(notes[3].diatonic_pitch) == "A:4"
    def test_book_tempo_sequence(self):
        """Walk a tempo event sequence from its first event to the last via successor()."""
        print('----- test_book_tempo_sequence -----')

        seq = TempoEventSequence()
        for tempo_type, pos in ((TempoType.Grave, 0), (TempoType.Moderato, 10),
                                (TempoType.Vivace, 25), (TempoType.Largo, 50)):
            seq.add(TempoEvent(Tempo(tempo_type), Position(pos)))

        # Traverse the sequence in temporal order.
        event = seq.first
        while event is not None:
            print(event)
            event = seq.successor(event)

        print('----- End test_book_tempo_sequence -----')
Esempio n. 27
0
def create_score(grammar_str, instrument, ts):
    """Parse a line-grammar string into a LiteScore for the named instrument.

    :param grammar_str: line-grammar text describing the melody and harmony.
    :param instrument: instrument name to look up in the InstrumentCatalog.
    :param ts: time-signature triple (beats, beat-denominator, beat pattern).
    :return: LiteScore built from the parsed line and harmonic context track.
    """
    line, hct = LineGrammarExecutor().parse(grammar_str)

    # Fixed tempo: 60 quarter-note beats per minute from the start.
    tempo_seq = TempoEventSequence()
    tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))

    # Single time signature at position 0, derived from the ts triple.
    ts_seq = EventSequence()
    ts_seq.add(
        TimeSignatureEvent(TimeSignature(ts[0], Duration(1, ts[1]), ts[2]),
                           Position(0)))

    instr = InstrumentCatalog.instance().get_instrument(instrument)

    return LiteScore(line, hct, instr, tempo_seq, ts_seq)
# Esempio n. 28 (scrape-artifact separator)
    def test_two_voices(self):
        """Pin two lines into one voice and query notes by position interval."""
        print('test two voices')
        catalog = InstrumentCatalog.instance()
        violin = catalog.get_instrument("violin")

        # Line 1: three eighth notes squeezed into a 2-eighth tuplet,
        # followed by a nested beam of two eighth notes.
        tuplet_notes = [Note(DiatonicPitch(4, ltr), Duration(1, 8)) for ltr in 'abc']
        triplet = Tuplet(Duration(1, 8), 2, tuplet_notes)

        inner_beam = Beam([Note(DiatonicPitch(4, 'd'), Duration(1, 8)),
                           Note(DiatonicPitch(4, 'e'), Duration(1, 8))])
        outer_beam = Beam(inner_beam)
        line1 = Line([triplet, outer_beam])
        print(line1)

        # Line 2: three eighths (B:5 dotted once) plus a sixteenth pinned
        # at offset 1/2.
        dotted_b = Note(DiatonicPitch(5, 'b'), Duration(1, 8), 1)
        line2 = Line([Note(DiatonicPitch(5, 'a'), Duration(1, 8)),
                      dotted_b,
                      Note(DiatonicPitch(5, 'c'), Duration(1, 8))])
        line2.pin(Note(DiatonicPitch(5, 'd'), Duration(1, 16)), Offset(1, 2))
        print(line2)

        voice = Voice(violin)
        voice.pin(line1, Offset(1, 4))
        voice.pin(line2, Offset(0, 1))

        print(voice)

        # Window [1/2, 1): the beam's D:4/E:4 and line2's pinned D:5.
        found = voice.get_notes_by_interval(Interval(Position(1, 2), Position(1)))
        assert len(found) == 3
        for pitch_text in ('D:4', 'E:4', 'D:5'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_text))

        # Window [1/4, 7/16): the tuplet pitches plus line2's B:5 and C:5.
        found = voice.get_notes_by_interval(Interval(Position(1, 4), Position(7, 16)))
        assert len(found) == 5
        for pitch_text in ('A:4', 'B:4', 'C:4', 'B:5', 'C:5'):
            assert TestVoice.has_pitch(found, DiatonicPitch.parse(pitch_text))
# Esempio n. 29 (scrape-artifact separator)
 def get_notes_by_bp_interval(self, interval):
     """Return notes intersecting a beat-position interval.

     Converts the interval's beat-position bounds to whole-note-time
     positions using TimeConversion, then delegates to
     get_notes_by_wnt_interval.
     """
     tc = TimeConversion(self.tempo_sequence,
                         self.time_signature_sequence,
                         Position(self.duration.duration))
     lower = tc.bp_to_position(interval.lower)
     upper = tc.bp_to_position(interval.upper)
     return self.get_notes_by_wnt_interval(Interval(lower, upper))
# Esempio n. 30 (scrape-artifact separator)
def structural_match():
    """Exercise MelodicSearch with structural matching disabled vs. enabled."""
    lge = LineGrammarExecutor()

    # Non-structural search: the pattern's beamed group [iC:4 G F A] should
    # still match the target's un-beamed run of the same notes.
    pattern = '{<C-Major: I> [iC:4 G F A] <:V> hB:4}'
    target = '{<F-Minor: v> qF:4 C:5 <C-Major: I> iC:4 G F A <:V> hB:4 }'
    target_line, target_hct = lge.parse(target)

    search = MelodicSearch.create(pattern)

    answers = search.search(target_line, target_hct, GlobalSearchOptions(structural_match=False))
    assert answers is not None
    assert 1 == len(answers)
    # Match begins after the opening quarter notes, at position 1/2.
    assert Position(1, 2) == answers[0]

    # With structural_match=True the same pattern must NOT match, since the
    # target lacks the pattern's beam structure.
    answers = search.search(target_line, target_hct, GlobalSearchOptions(structural_match=True))
    assert answers is not None
    assert 0 == len(answers)

    # A target that mirrors the pattern's structure (beam + tuplet) should
    # match even under strict structural matching.
    pattern = '{<C-Major: I> [iC:4 G F A] <:V> qB:4 (I, 2)[E:5 G C]}'
    target = '{<F-Minor: v> qF:4 C:5 <C-Major: I> [iC:4 G F A] <:V> qB:4 (I, 2)[E:5 G C]}'
    target_line, target_hct = lge.parse(target)
    search = MelodicSearch.create(pattern)
    answers = search.search(target_line, target_hct, GlobalSearchOptions(structural_match=True))

    assert answers is not None
    assert 1 == len(answers)