Example #1
def voiceProgression(key, chordProgression):
    """Voices a chord progression in a specified key using DP.

    Follows eighteenth-century voice-leading procedures, as guided by the
    `chordCost` and `progressionCost` cost functions.
    Returns a list of four-pitch chords, corresponding to successive Roman
    numerals in the chord progression.
    """
    key = Key(key)
    if isinstance(chordProgression, str):
        chordProgression = list(filter(None, chordProgression.split()))

    dp = [{} for _ in chordProgression]
    for i, numeral in enumerate(chordProgression):
        chord = RomanNumeral(numeral, key)
        voicings = voiceChord(key, chord)
        if i == 0:
            for v in voicings:
                dp[0][v.pitches] = (chordCost(key, v), None)
        else:
            for v in voicings:
                best = (float("inf"), None)
                for pv_pitches, (pcost, _) in dp[i - 1].items():
                    pv = Chord(pv_pitches)
                    ccost = pcost + progressionCost(key, pv, v)
                    if ccost < best[0]:
                        best = (ccost, pv_pitches)
                dp[i][v.pitches] = (best[0] + chordCost(key, v), best[1])

    cur, (totalCost, _) = min(dp[-1].items(), key=lambda p: p[1][0])
    ret = []
    for i in reversed(range(len(chordProgression))):
        ret.append(Chord(cur, lyric=chordProgression[i]))
        cur = dp[i][cur][1]
    return list(reversed(ret)), totalCost
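The routine above leans on helpers that are not shown here (voiceChord, chordCost, progressionCost). As a rough point of reference only, a minimal sketch using stock music21 shows how a Roman numeral in a key expands into the pitch material such a voicing routine starts from:

# Minimal stock-music21 sketch (not part of the example above): expand Roman
# numerals in C major into pitches, the raw material a voicing routine works on.
from music21.key import Key
from music21.roman import RomanNumeral

key = Key('C')
for numeral in ['I', 'IV', 'V', 'I']:
    rn = RomanNumeral(numeral, key)
    print(numeral, [p.nameWithOctave for p in rn.pitches])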
Example #2
def separate_chords(chunks):
    root = []
    harmonics = []
    nbHarmonics = 0
    for chunk in chunks:
        nbNotes = len(chunk.notes)
        # Add harmonic voice lists as needed
        while nbNotes > nbHarmonics + 1:
            harmonics.append([])
            nbHarmonics += 1
        # Determines the root of the chord
        chord = Chord(list(n for n,i in chunk.notes))
        rootIndex = list(chord.pitches).index(chord.root())
        # Distributes the notes in the lists
        harmonicIndex = 0
        for i in range(nbNotes):
            note = (
                chunk.start,
                chunk.end,
                chunk.notes[i][0], # pitch
                chunk.notes[i][1]  # index
            )
            if i == rootIndex:
                root.append(note)
            else:
                harmonics[harmonicIndex].append(note)
                harmonicIndex += 1
    return root, harmonics
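The root lookup in the loop above relies on Chord.root() returning a pitch that matches one of the chord's own pitches. A minimal stock-music21 sketch of just that step:

# Minimal sketch (stock music21, not part of the example above): Chord.root()
# returns a Pitch that compares equal to one of chord.pitches, so its index
# inside the chord can be recovered with list.index().
from music21.chord import Chord

chord = Chord('E4 G4 C5')                  # first inversion of C major
root = chord.root()
print(root.nameWithOctave)                 # the detected root
print(list(chord.pitches).index(root))     # its position within chord.pitches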
Example #3
    def generate(self, seq_len, a_par=0):
        pattern = self.model_inp[self.start]
        prediction_output = []
        for note_index in range(seq_len):
            prediction_input = pattern.reshape(1, seq_len, 2,
                                               len(self.sorted_notes))
            prediction_input = prediction_input / float(len(self.sorted_notes))
            predictions = self.model.predict(prediction_input, verbose=0)[0]
            for prediction in predictions:
                index = np.argmax(prediction[0])
                duration_i = np.argmax(prediction[1])

                for name, value in self.sorted_notes.items():
                    if value == index:
                        result = name
                        break
                    else:
                        result = None

                for name, value in self.sorted_durations.items():
                    if value == duration_i:
                        duration = name
                        break
                    else:
                        duration = None

                prediction_output.append((result, Duration(duration)))
                result = np.zeros_like(prediction)
                result[0][index] = 1
                result[1][duration_i] = 1
                pattern = np.concatenate([pattern, [result]])
            pattern = pattern[-seq_len:]

        offset = 0
        output_notes = []
        for pattern, duration in prediction_output:
            if pattern.isdigit() or ('.' in pattern):
                notes_in_chord = pattern.split('.')
                notes = []
                for current_note in notes_in_chord:
                    new_note = Note(int(current_note))
                    new_note.duration = duration
                    new_note.storedInstrument = instrument.PanFlute()
                    notes.append(new_note)
                new_chord = Chord(notes)
                new_chord.offset = offset
                output_notes.append(new_chord)
            else:
                new_note = Note(pattern)
                new_note.offset = offset
                new_note.storedInstrument = instrument.Flute()
                output_notes.append(new_note)
            offset += 0.6

        midi_stream = stream.Stream(output_notes)
        midi_stream.write('midi',
                          fp=f'my_music/{self.model.name}_{self.start}.mid')
Example #4
def move_chord(chord: Chord, pitch: Pitch) -> Chord:
    while abs(chord_mean_distance(chord, pitch)) > 12.0:
        dist = chord_mean_distance(chord, pitch)
        if dist < 0:
            chord = chord.transpose(12)
        else:
            chord = chord.transpose(-12)

    return chord
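chord_mean_distance is not shown in this example. A hypothetical definition that is consistent with how move_chord uses it (the sign decides the direction, the magnitude is compared against an octave) would be the signed mean MIDI distance from the chord's pitches to the target pitch:

# Hypothetical helper, assumed for illustration only: signed mean MIDI distance
# (negative when the chord sits below the target pitch).
from music21.chord import Chord
from music21.pitch import Pitch

def chord_mean_distance(chord: Chord, pitch: Pitch) -> float:
    return sum(p.midi - pitch.midi for p in chord.pitches) / len(chord.pitches)

# Well below -12, so move_chord would transpose this chord up by octaves.
print(chord_mean_distance(Chord('C3 E3 G3'), Pitch('C5')))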
Example #5
    def generate_closed_chord(self,
                              root_note: PitchInit,
                              anchor_note: PitchInit = "C4",
                              max_notes: int = 5,
                              bass_note: Optional[PitchInit] = None,
                              include_root: bool = True):

        if isinstance(root_note, str):
            root_note = Pitch(root_note)

        if isinstance(anchor_note, str):
            anchor_note = Pitch(anchor_note)

        if isinstance(bass_note, str):
            bass_note = Pitch(bass_note)

        if max_notes < 2:
            raise ValueError("Not really a chord with only one or no notes.")

        chord_heap = []

        third = root_note.transpose(self.third_quality.interval)
        fifth = root_note.transpose(self.fifth_quality.interval)
        harmonies = [root_note.transpose(harm.interval) for harm in self.harmonies]

        heappush(chord_heap, (9, third))
        for harmony in harmonies:
            heappush(chord_heap, (7, harmony))

        heappush(chord_heap, (1, fifth))
        if include_root:
            heappush(chord_heap, (3, root_note))

        if self.upper_quality is not None:
            heappush(chord_heap, (9, (root_note.transpose(self.upper_quality.interval))))

        while len(chord_heap) > max_notes:
            heappop(chord_heap)

        chord_base = Chord(note for _, note in chord_heap)

        chord_base.sortDiatonicAscending(inPlace=True)

        chord_base = move_chord(chord_base, anchor_note)
        chord_base = chord_base.closedPosition()
        inversions = all_inversions(chord_base)
        inversions = [move_chord(c, anchor_note) for c in inversions]

        best_option = min(inversions, key=lambda x: chord_mad(x, anchor_note))

        if bass_note is not None:
            bass_note = move_pitch(bass_note, anchor_note.transpose(-12))
            best_option.add(bass_note, runSort=True)

        return best_option
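The trimming loop above works because heapq always pops the smallest tuple first, so the lowest-priority tones (the fifth, then the root) are discarded before thirds and colour tones. A plain-Python sketch of just that pruning step:

# Plain-Python sketch of the heap pruning used above (illustration only).
from heapq import heappop, heappush

heap = []
for priority, name in [(9, 'third'), (7, 'seventh'), (3, 'root'), (1, 'fifth')]:
    heappush(heap, (priority, name))

while len(heap) > 3:                  # keep at most three notes
    print('dropped:', heappop(heap))  # lowest priority first: (1, 'fifth')
print('kept:', sorted(heap))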
Example #6
def test_hasChordSymbolFigure():
    """Test hasChordSymbolFigure orbichord.symbol module method."""

    # Basic asserts
    assert hasChordSymbolFigure(Chord('C E G')) == True
    assert hasChordSymbolFigure(Chord('C C G')) == True

    # Checking chord symbols
    chord = ChordSymbol(root='C', bass='G#', kind='augmented')
    assert hasChordSymbolFigure(chord) == True

    chord = ChordSymbol(root='C', bass='G#', kind='augmented')
    chord.add('C#')
    assert hasChordSymbolFigure(chord) == True
Example #7
def add_piano_closing(roman, duration, piano, show_symbols=False):
   '''Generate a closing riff and add it to the keyboard part'''
   symbol = ChordSymbol(chordSymbolFigureFromChord(roman))
   if show_symbols:
      print(symbol)
   piano.append(symbol)    # Leadsheet chord symbol

   filled = 0
   length_weight = 2    # Longer notes later in the bar
   root = roman.root()  # Root pitch of the chord (NOT a note object)
   while filled < duration:
      # TODO DRY with other piano func
      chord = Chord(deepcopy(roman.pitches))

      # invert chord randomly, root inversion twice as likely as others
      max_inv=len(chord.pitches)
      chord.inversion(random.randint(0,max_inv)%max_inv)

      # Add an extra root note 1 octave lower
      root = deepcopy(chord.root())
      root.octave -= 1
      chord.add(root)
      # TODO: same procedure as the main riff func above; we should make it fancier

      # Rhythm similar to bass method below
      length = min(random.randint(1,length_weight),duration-filled) # cap at time left
      chord.quarterLength = length/2.0

      piano.append(chord)
      filled += length
      length_weight += length # Longer notes later in the bar
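For reference, the two music21 operations the closing riff leans on, deriving a leadsheet symbol from a chord and re-voicing it by inversion, in isolation. A minimal sketch (the exact figure string is music21's choice):

# Minimal stock-music21 sketch (not part of the example above).
from copy import deepcopy
from music21.chord import Chord
from music21.harmony import ChordSymbol, chordSymbolFigureFromChord

triad = Chord('C4 E4 G4')
figure = chordSymbolFigureFromChord(triad)   # leadsheet figure for the triad
symbol = ChordSymbol(figure)                 # what gets appended to the part

voicing = Chord(deepcopy(triad.pitches))     # deepcopy so the triad is untouched
voicing.inversion(1)                         # first inversion: E in the bass
print(symbol.figure, [p.nameWithOctave for p in voicing.pitches])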
Example #8
def split_voice_lines(indexed_notes):
    """
    Split a potentially polyphonic line into multiple monophonic lines using
    voice leading.

    indexed_notes   List of tuples of the form (position, GeneralNote)
    
    returns         List of lists of tuples of the form (position, Note)
    """
    max_number_of_voices = max(
        get_number_of_voices(note) for idx, note in indexed_notes)
    peak = [
        i for i, (idx, note) in enumerate(indexed_notes)
        if get_number_of_voices(note) == max_number_of_voices
    ][0]

    head = indexed_notes[:peak + 1][::-1]
    climb = []
    if len(head) > 1:
        lead = head[0][1]
        for i, n in head[1:]:
            split_result = split_voices(lead, n)
            climb.append((i, split_result))
            lead = Chord(split_result)

    tail = indexed_notes[peak:]
    fall = []
    if len(tail) > 1:
        lead = tail[0][1]
        for i, n in tail[1:]:
            split_result = split_voices(lead, n)
            fall.append((i, split_result))
            lead = Chord(split_result)

    results = []

    for i in range(max_number_of_voices):
        results.append([])

    source_idx_for_peak, peak_note = indexed_notes[peak]
    for i, result in enumerate(results):
        for j, voices in climb:
            result.append((j, voices[i]))
        for j, voices in fall:
            result.append((j, voices[i]))
        result.append((source_idx_for_peak,
                       Note(peak_note.pitches[i],
                            quarterLength=peak_note.duration.quarterLength)))

    return results
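get_number_of_voices and split_voices are not shown in this example. One plausible shape for the former, assumed purely for illustration, counts a Chord's pitches and treats a plain Note as a single voice:

# Hypothetical helper, assumed for illustration only.
from music21.chord import Chord
from music21.note import GeneralNote, Note

def get_number_of_voices(note: GeneralNote) -> int:
    return max(1, len(note.pitches))   # Note -> 1, Chord('C4 E4 G4') -> 3

print(get_number_of_voices(Note('C4')), get_number_of_voices(Chord('C4 E4 G4')))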
Example #9
def test_EfficientVoiceLeading():
    """Test EfficientVoiceLeading module class."""
    D_minor_scale = MinorScale('D')
    D_minor_scale = Chord(D_minor_scale.getPitches('D4', 'C5'))
    E_major_scale = MajorScale('E')
    E_major_scale = Chord(E_major_scale.getPitches('E4', 'D#5'))
    scale = ChromaticScale('C')
    voice_leading = EfficientVoiceLeading(
        scale=scale, metric=lambda delta: la.norm(delta, inf))
    vl, dist = voice_leading(
        D_minor_scale,
        E_major_scale,
    )
    assert vl == [1, 1, 0, 1, 1, 0, 1]
    assert dist == 1.0
Example #10
def notate_score(musician_names, instrument_names, music):
    score = Score()

    for musician_name, instrument_name in zip(musician_names,
                                              instrument_names):
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        score.insert(0, StaffGroup(parts))

        notes = music[musician_name]

        for pitches in notes:
            if not pitches or pitches == 'stop':
                note = Rest()
            elif len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([4.0])
            note.duration = duration

            part.append(note)

    score.show('musicxml', '/Applications/Sibelius 7.5.app')
Example #11
def run(chords, melody, series):
    all_notes = []
    # Construct music21 notes, grouped into measures
    for measure in filter(lambda x: isinstance(x, music21.stream.Measure), melody.elements):
        measure_notes = []
        for note in filter(lambda x: isinstance(x, music21.note.Note), measure.elements):
            measure_notes.append(note)
        all_notes.append(measure_notes)

    init_probs = INIT_PROBS if series == 'major' else INIT_PROBS_MIN

    # Build transition matrix where probability of self-transition is low
    all_chords = ALL_CHORDS if series == 'major' else ALL_CHORDS_MIN
    trans_probs = []
    for i in range(len(all_chords)):
        trans_prob = []
        for j in range(len(all_chords)):
            if i != j:
                trans_prob.append(3)
            else:
                trans_prob.append(1)

        s = sum(trans_prob)
        trans_probs.append(list(map(lambda x: x / s, trans_prob)))

    prob, chord_seq = viterbi(all_chords, all_notes, init_probs, trans_probs, goodness)

    for chord in chord_seq:
        chords.append(Chord(chord.notes, duration=WHOLE_NOTE))
        print(chord.name)
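The transition matrix above gives every off-diagonal entry weight 3 and the diagonal weight 1 before normalising each row, so a self-transition is three times less likely than a move to any other chord. A plain-Python sketch of that construction:

# Plain-Python sketch of the transition-matrix construction (illustration only).
n = 4  # stand-in for len(all_chords)
trans_probs = []
for i in range(n):
    row = [1 if i == j else 3 for j in range(n)]
    s = sum(row)
    trans_probs.append([x / s for x in row])

print(trans_probs[0])  # [0.1, 0.3, 0.3, 0.3]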
Example #12
def decode_score(encoding, num_measures, ts, image=False):
    score = Stream()
    score.timeSignature = TimeSignature(ts)
    steps_per_measure = len(encoding) / num_measures
    measure_ind = 0
    while measure_ind < num_measures:
        start_beat = int(measure_ind * steps_per_measure)
        end_beat = int((measure_ind + 1) * steps_per_measure)
        measure = Measure()
        for beat_ind in range(start_beat, end_beat):
            if image:
                played_pitches = np.nonzero(encoding[beat_ind])[0]
            else:
                played_pitches = np.nonzero(encoding[beat_ind])
            if len(played_pitches) == 0:
                measure.append(Rest(quarterLength=4.0 / GRANULARITY))
            else:
                played_notes = [
                    midi_to_note(int(pitch + MIN_PITCH))
                    for pitch in played_pitches
                ]
                chord = Chord(played_notes, quarterLength=4.0 / GRANULARITY)
                measure.append(chord)
        score.append(measure)
        measure_ind += 1
    return score
Example #13
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    # midi -> (note instance from previous chord); used to determine the tie
    # type (start, continue, stop)
    prev_chord = {}
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0:  # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]:  # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = {note.pitch.midi: note for note in notes}
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
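The tie bookkeeping above boils down to giving the earlier note Tie('start') (or Tie('continue')) and the repeated note Tie('stop'). A minimal stock-music21 sketch of that pairing:

# Minimal stock-music21 sketch (not part of the example above).
from music21.note import Note
from music21.stream import Stream
from music21.tie import Tie

s = Stream()
first, second = Note('C4'), Note('C4')
first.tie = Tie('start')    # the earlier note opens the tie
second.tie = Tie('stop')    # the repeated note closes it
s.append([first, second])
print([n.tie.type for n in s.notes])  # ['start', 'stop']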
Example #14
def notate_note(note):
    if note['pitch'] == 'rest':
        n = Rest()
    else:
        if isinstance(note['pitch'], list):
            pitches = []
            for pitch_number in note['pitch']:
                p = Pitch(pitch_number)
                # Force all flats
                if p.accidental is not None and p.accidental.name == 'sharp':
                    p = p.getEnharmonic()
                pitches.append(p)
            n = Chord(notes=pitches)

        else:
            p = Pitch(note['pitch'])
            # Force all flats
            if p.accidental is not None and p.accidental.name == 'sharp':
                p = p.getEnharmonic()
            n = Note(p)

    d = Duration()
    if note['duration'] == 0:
        d.quarterLength = .125
        d = d.getGraceDuration()
    else:
        # music21 docs say `fill` is for testing. I can't remember why I chose
        # to use it originally. It works. But not for tuplets. Maybe this blog
        # post contains a better solution:
        # http://music21-mit.blogspot.com/2015/09/durations-and-durationtuples.html
        d.fill(note['durations'])
    n.duration = d
    return n
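Since the comment above flags Duration.fill() as questionable, here is a minimal sketch of the more conventional ways to set a duration in music21 (not a drop-in replacement for the tuplet case the comment mentions):

# Minimal stock-music21 sketch (not part of the example above).
from music21.duration import Duration
from music21.note import Note

n = Note('C4')
n.duration = Duration(4.0)               # four quarter lengths
print(n.duration.type)                   # 'whole'
n.quarterLength = 1.5                    # shortcut property on the note
print(n.duration.type, n.duration.dots)  # 'quarter' 1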
Example #15
def all_inversions(chord: Chord) -> List[Chord]:
    pitches: deque[Pitch] = deque(chord.pitches)
    out = []
    for i in range(len(chord)):
        last = pitches.pop()
        last = last.transpose(-12)
        pitches.appendleft(last)
        out.append(Chord(pitches))

    return out
Example #16
def _voiceTriadUnordered(noteNames):
    assert len(noteNames) == 3
    for tenor, alto, soprano in itertools.permutations(noteNames, 3):
        for sopranoNote in voiceNote(soprano, SOPRANO_RANGE):
            altoMin = max((ALTO_RANGE[0], sopranoNote.transpose("-P8")))
            altoMax = min((ALTO_RANGE[1], sopranoNote))
            for altoNote in voiceNote(alto, (altoMin, altoMax)):
                tenorMin = max((TENOR_RANGE[0], altoNote.transpose("-P8")))
                tenorMax = min((TENOR_RANGE[1], altoNote))
                for tenorNote in voiceNote(tenor, (tenorMin, tenorMax)):
                    yield Chord([tenorNote, altoNote, sopranoNote])
Example #17
def run(chords, melody, series):
    candidates = ALL_CHORDS if series == 'major' else ALL_CHORDS_MIN
    # Insert a chord for each measure
    for measure in filter(lambda x: isinstance(x, music21.stream.Measure),
                          melody.elements):
        measure_notes = []
        for note in filter(lambda x: isinstance(x, music21.note.Note),
                           measure.elements):
            measure_notes.append(note)
        chords.append(
            Chord(chord_search(measure_notes, candidates).notes,
                  duration=WHOLE_NOTE))
Example #18
def write_notation_cell(music, path, event_index):
    score = Score()

    metadata = Metadata()
    metadata.title = ''
    metadata.composer = ''
    score.insert(0, metadata)

    layout = ScoreLayout()
    layout.scalingMillimeters = 1.25
    layout.scalingTenths = 40
    score.insert(0, layout)

    for musician in music:
        instrument_name = musician['instrument']
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        if instrument.instrumentName == 'Violoncello':
            instrument.partName = 'Cello'
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        # score.insert(0, StaffGroup(parts))

        for event in musician['music']:
            pitches = event['pitches']
            dur = event['duration']
            # if not pitches or pitches == 'stop':
            #     note = Rest()
            if len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([dur])
            note.duration = duration

            part.append(note)

    file_path = os.path.join(path, str(event_index).zfill(2))
    musicxml_file_path = file_path + '.xml'
    png_output_file_path = file_path + '.png'

    score.write('musicxml', musicxml_file_path)

    write_png_with_musescore(musicxml_file_path, png_output_file_path, dpi=600)
Example #19
def generate_notes_in_batch(note_params_df,
                            output_dir,
                            audio_format='flac',
                            sample_rate=44100):
    """
    Generates a batch of single note samples from the given table of parameters.

    `note_params_df` - a Pandas Dataframe with columns:
    `midi_number, midi_instrument, volume, duration, tempo`. Their meaning is the same as in generate_single_note.
    `output_dir` - output directory for the MIDI files

    Each sample goes to a single MIDI file named by the numeric index. Also,
    each synthesized audio sample goes to a separate audio file.
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    stream = Stream()

    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']),
                         duration=Duration(duration))
                ]), row['volume']))
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(
        output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    fs.midi_to_audio(midi_file, audio_file_stereo)

    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)

    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
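chord_with_volume and make_instrument are not shown in this example. One plausible shape for chord_with_volume, assumed purely for illustration, maps a 0.0-1.0 volume onto a fixed MIDI velocity the same way Example #22 sets velocities:

# Hypothetical helper, assumed for illustration only.
from music21.chord import Chord
from music21.note import Note
from music21.volume import Volume

def chord_with_volume(chord: Chord, volume: float) -> Chord:
    chord.volume = Volume(velocity=int(volume * 127), velocityIsRelative=False)
    return chord

c = chord_with_volume(Chord([Note(midi=60)]), 0.5)
print(c.volume.velocity)   # 63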
Example #20
def test_chordSymbolFigure():
    """Test chordSymbolFigure orbichord.symbol module method."""

    # Normal chords
    chord = Chord('C E G')
    assert chordSymbolFigure(chord) == 'C'
    assert chordSymbolFigure(chord, inversion=0) == 'C'
    assert chordSymbolFigure(chord, inversion=1) == 'C/E'
    assert chordSymbolFigure(chord, inversion=2) == 'C/G'

    chord = Chord('B D# F#')
    assert chordSymbolFigure(chord) == 'B'
    assert chordSymbolFigure(chord, inversion=0) == 'B'
    assert chordSymbolFigure(chord, inversion=1) == 'B/D#'
    assert chordSymbolFigure(chord, inversion=2) == 'B/F#'

    # Good twin inversion
    chord = Chord('G C E')
    assert chordSymbolFigure(chord) == 'C/G'
    assert chordSymbolFigure(chord, inversion=0) == 'C'
    chord = Chord('F# B D#')
    assert chordSymbolFigure(chord) == 'B/F#'
    assert chordSymbolFigure(chord, inversion=0) == 'B'

    # Evil twin inversion
    chord = Chord('G E C')
    assert chordSymbolFigure(chord) == 'C/G'
    assert chordSymbolFigure(chord, inversion=0) == 'C'
    chord = Chord('F# D# B')
    assert chordSymbolFigure(chord) == 'B/F#'
    assert chordSymbolFigure(chord, inversion=0) == 'B'
Example #21
def test_chordPitchClasses():
    """Test chordPitchClasses orbichord.identity module method."""
    assert chordPitchClasses(Chord('C E G')) == '<047>'
    assert chordPitchClasses(Chord('E G C')) == '<470>'
    assert chordPitchClasses(Chord('G C E')) == '<704>'
    assert chordPitchClasses(Chord('B D# G')) == '<B37>'
    assert chordPitchClasses(Chord('D# G B')) == '<37B>'
    assert chordPitchClasses(Chord('G B D#')) == '<7B3>'
Example #22
def add_piano_riff(roman, duration, piano, show_symbols=False):
   '''Given a Roman chord, duration in eighths/quavers and a keyboard
      part, generate a riff and add it to the keyboard part'''

   # Add a chord symbol at the start
   symbol = ChordSymbol(chordSymbolFigureFromChord(roman))
   if show_symbols:
      print(symbol)
   piano.append(symbol)

   # Add the actual notes
   filled = 0
   while filled < duration:
      # NOTE: higher chance to rest if on beat = more syncopated rhythm to piano
      if random.randint(0, 1 + filled%2 + filled%4):
         # XXX: Must deepcopy, do not change original or it will break bassline
         chord = Chord(deepcopy(roman.pitches))


         # invert chord randomly, root inversion twice as likely as others
         max_inv=len(chord.pitches)
         chord.inversion(random.randint(0,max_inv)%max_inv)

         # TODO try randomly omitting some chord notes

         # Randomly hold notes for longer if we have longer before
         # the next chord change
         max_length = min(duration-filled, 4)      # Cap at 1/2 bar
         length = random.randint(1,max_length)
         chord.quarterLength = length/2.0      # length is in eighths

         # Add an extra root note 1 octave lower
         root = deepcopy(chord.root())
         root.octave -= 1
         chord.add(root)

         # Add the chord at soft volume and update duration
         chord.volume = Volume(velocity=16,velocityIsRelative=False)
         piano.append(chord)
         filled += length
      else:
         piano.append(Rest(quarterLength=0.5))
         filled += 1
Example #23
def show_sequence(chord_sequence):
    stream = Stream()

    chord_names = [chord.standard_name for chord in chord_sequence]

    print(chord_names)
    chord_sequence = [chord_sequence[0],
                      *chord_sequence]  # to solve a music21 problem

    for extended_chord in chord_sequence:
        chord = Chord(notes=extended_chord.components, type='whole')
        stream.append(chord)

    stream.show()
    stream.show('midi')
Example #24
def make_music21_note(
    pitch_number=None,
    duration=1.0,
    staccato=False,
    tenuto=False,
    accent=False,
    falloff=False,
    plop=False,
    scoop=False,
    doit=False,
    breath_mark=False,
):
    if pitch_number is None or pitch_number == 'rest':
        n = Rest()
    elif isinstance(pitch_number, list):
        pitches = [Pitch(p) for p in pitch_number]
        for p in pitches:
            if p.accidental is not None and p.accidental.name == 'natural':
                p.accidental = None
        n = Chord(pitches)
    else:
        p = Pitch(pitch_number)
        if p.accidental is not None and p.accidental.name == 'natural':
            p.accidental = None
        n = Note(p)

    d = Duration()
    d.quarterLength = duration
    n.duration = d

    if staccato:
        n.articulations.append(Staccato())
    if tenuto:
        n.articulations.append(Tenuto())
    if accent:
        n.articulations.append(Accent())
    if falloff:
        n.articulations.append(Falloff())
    if plop:
        n.articulations.append(Plop())
    if scoop:
        n.articulations.append(Scoop())
    if doit:
        n.articulations.append(Doit())
    if breath_mark:
        n.articulations.append(BreathMark())

    return n
Example #25
def generate_single_note(midi_number,
                         midi_instrument=0,
                         volume=1.0,
                         duration=1.0,
                         tempo=120):
    """
    Generates a stream containing a single note with given parameters.
    midi_number - MIDI note number, 0 to 127
    midi_instrument - MIDI instrument number, 0 to 127
    duration - floating point number (in quarter note lengths)
    volume - 0.0 to 1.0
    tempo - number of quarter notes per minute (e.g. 120)

    Note that there's a quarter note rest at the beginning and at the end.
    """
    return Stream([
        MetronomeMark(number=tempo),
        make_instrument(int(midi_instrument)),
        chord_with_volume(
            Chord([Note(midi=int(midi_number), duration=Duration(duration))]),
            volume)
    ])
Example #26
def run(all_chords, melody, series, chords_output):
    all_notes = []
    # Construct music21 notes, grouped into measures
    for measure in filter(lambda x: isinstance(x, music21.stream.Measure),
                          melody.elements):
        measure_notes = []
        for note in filter(lambda x: isinstance(x, music21.note.Note),
                           measure.elements):
            measure_notes.append(note)
        all_notes.append(measure_notes)

    init_probs = []
    for idx, item in enumerate(all_chords):
        if (idx == 1):
            init_probs.append(1)
        else:
            init_probs.append(0)

    trans_probs = []
    for i in range(len(all_chords)):
        trans_prob = []
        for j in range(len(all_chords)):
            if i != j:
                trans_prob.append(3)
            else:
                trans_prob.append(1)

        s = sum(trans_prob)
        trans_probs.append(list(map(lambda x: x / s, trans_prob)))

    prob, chord_seq = viterbi(all_chords, all_notes, init_probs, trans_probs,
                              goodness)

    for chord in chord_seq:
        chords_output.append(Chord(chord.notes, duration=WHOLE_NOTE))
        print(chord.name)
Example #27
    def __init__(self,
                 spotify_figure: str = None,
                 figure: str = None,
                 bass: str = None,
                 root: str = None,
                 kind: str = None,
                 chord: Chord = None):
        if spotify_figure == 'NC':
            self.bass = None
            self.root = None
            self.chord = Chord()
            self.structure = 'NC'
        elif chord:
            assert root and figure
            self.bass = Pitch(bass[1:]) if bass else None
            self.root = Pitch(root)
            self.chord = chord
            self.structure = spotify_figure if spotify_figure else figure
        elif figure:
            assert root
            chord_symbol = ChordSymbol(figure=figure)
            self.chord = Chord(chord_symbol.pitches)
            self.bass = Pitch(bass[1:]) if bass else None
            if self.bass:
                self.bass.octave = None
            self.root = Pitch(root)
            self.root.octave = None
            self.structure = figure
        else:
            assert root and kind
            chord_symbol = ChordSymbol(bass=bass, root=root, kind=kind)
            self.chord = Chord(chord_symbol.pitches)
            self.bass = Pitch(bass) if bass else None
            if self.bass:
                self.bass.octave = None
            self.root = Pitch(root)
            self.root.octave = None
            self.structure = chord_symbol.figure
Example #28
def getChordFromPitches(pitches):
    """Cached method. Calls music21.chord.Chord()."""
    cachedGetChordFromPitches.append(pitches)
    return Chord(pitches)
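The docstring above calls this a cached method, but the cache itself is not shown. A sketch of how such a cache is commonly built with functools.lru_cache, assuming pitches is hashable (e.g. a tuple of pitch names):

# Sketch of a possible caching setup, assumed for illustration only.
from functools import lru_cache
from music21.chord import Chord

@lru_cache(maxsize=None)
def getChordFromPitches(pitches):
    """Build a Chord once per distinct (hashable) pitch tuple."""
    return Chord(pitches)

a = getChordFromPitches(('C4', 'E4', 'G4'))
b = getChordFromPitches(('C4', 'E4', 'G4'))
print(a is b)   # True: the second call is served from the cache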
Example #29
def append_chord(part, notes, duration, velocity):
    c = Chord(notes, duration=Duration(duration))
    c.volume = Volume(velocity=velocity)
    part.append(c)
Example #30
from music21.chord import Chord
from music21.scale import MajorScale
from numpy import inf
from numpy import linalg as la
from orbichord.chordinate import EfficientVoiceLeading

CMaj = Chord('C E G')
GMaj = Chord('G B D')

max_norm_vl = EfficientVoiceLeading(scale=MajorScale('C'),
                                    metric=lambda delta: la.norm(delta, inf))
taxicab_norm_vl = EfficientVoiceLeading(scale=MajorScale('C'),
                                        metric=lambda delta: la.norm(delta, 1))
euclidean_norm_vl = EfficientVoiceLeading(
    scale=MajorScale('C'), metric=lambda delta: la.norm(delta, 2))

vl, distance = max_norm_vl(CMaj, GMaj)
print('CMaj-GMaj maximum norm efficient voice leading:', vl)
print('CMaj-GMaj maximum norm distance:', distance)

vl, distance = taxicab_norm_vl(CMaj, GMaj)
print('CMaj-GMaj taxicab norm efficient voice leading:', vl)
print('CMaj-GMaj taxicab norm distance:', distance)

vl, distance = euclidean_norm_vl(CMaj, GMaj)
print('CMaj-GMaj euclidean norm efficient voice leading:', vl)
print('CMaj-GMaj euclidean norm distance:', distance)
Example #31
from music21.chord import Chord
from music21.scale import MajorScale, ChromaticScale
from orbichord.chordinate import interscalarMatrix, Permutation

scale = MajorScale('C')

# Chords are good twins

chordA = Chord('C E G')
chordB = Chord('A C E')

# Interscalar assuming any permutation
matrix = interscalarMatrix(chordA, chordB, scale)
print(matrix)
# Interscalar assuming cyclic permutation
matrix = interscalarMatrix(chordA,
                           chordB,
                           scale,
                           permutation=Permutation.CYCLIC)
print(matrix)
# Interscalar assuming no permutation
matrix = interscalarMatrix(chordA, chordB, scale, permutation=Permutation.NONE)
print(matrix)

# The first and second chords are good and bad twins, respectively.

chordA = Chord('C E G')
chordB = Chord('A E C')

# Interscalar assuming any permutation
matrix = interscalarMatrix(chordA, chordB, scale)