# Example No. 1
def main():
    """Generate a short random composition and render it to audio.

    Picks a random scale (Major/Minor/Whole Tone/Chromatic on a random
    root), emits 50 random eighth/quarter notes, runs the scale up and
    down a few times, then audifies the song to the WAV file given on
    the command line.

    Returns:
        int: 0 on success (suitable as a process exit code).
    """
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    song = Stream()

    roots = 'ABCDEFG'
    scales = [scale.MajorScale, scale.MinorScale,
              scale.WholeToneScale, scale.ChromaticScale]

    print('Choosing a random scale from Major, Minor, Whole Tone, Chromatic.')
    rscale = random.choice(scales)(Pitch(random.choice(roots)))
    print('Using: %s' % rscale.name)

    print('Generating a score...')
    random_note_count = 50
    random_note_speeds = [0.5, 1]
    # BUG FIX: the old message hard-coded "100" although only 50 notes are
    # generated; derive the message from the actual count instead.
    print('%s Random 1/8th and 1/4th notes in rapid succession...'
          % random_note_count)
    for _ in range(random_note_count):  # index itself is unused
        note = Note(random.choice(rscale.pitches))
        note.duration.quarterLength = random.choice(random_note_speeds)
        song.append(note)

    scale_practice_count = 4
    print('Do the scale up and down a few times... maybe %s' %
          scale_practice_count)
    # Build an up-then-down pitch sequence; rev[1:-1] drops the repeated
    # top note and the repeated root so cycling through it is seamless.
    rev = rscale.pitches[:]
    rev.reverse()
    updown_scale = rscale.pitches[:]
    updown_scale.extend(rev[1:-1])
    print('updown scale: %s' % updown_scale)
    for count, pitch in enumerate(cycle(updown_scale)):
        print(' note %s, %s' % (count, pitch))
        song.append(Note(pitch))
        if count >= scale_practice_count * len(updown_scale):
            break

    print('Composition finished:')
    song.show('txt')

    if args.best:
        # BUG FIX: the original printed a literal "{}" — the target
        # filename was never formatted in.
        print('Audifying the song to file "{}"...'.format(args.filename))
        wave = audify_to_file(song, args.tempo, args.filename, verbose=True)
    else:
        wave = audify_basic(song, args.tempo, verbose=True)
        print('Writing Song to file "{}"...'.format(args.filename))
        with wav_file_context(args.filename) as fout:
            fout.write_frames(wave.frames)

    return 0
# Example No. 2
def show_sequence(chord_sequence):
    """Render a chord sequence as notation and play it back as MIDI.

    Args:
        chord_sequence: sequence of chord objects exposing
            ``standard_name`` and ``components``.
    """
    out_stream = Stream()

    print([entry.standard_name for entry in chord_sequence])

    # Duplicate the first chord up front to work around a music21 problem.
    padded_sequence = [chord_sequence[0], *chord_sequence]

    for extended_chord in padded_sequence:
        out_stream.append(Chord(notes=extended_chord.components, type='whole'))

    out_stream.show()
    out_stream.show('midi')
# Example No. 3
# Pick the melody to harmonize; only two built-in tunes are supported, and
# an unknown choice is a usage error (exit code 1).
if args.melody == 'little_happiness':
    melody = converter.parse(A_LITTLE_HAPPINESS)
elif args.melody == 'jj_lin':
    melody = converter.parse(JJ_LIN_MELODY)
else:
    print('Unrecognized melody: should be jj_lin or little_happiness')
    sys.exit(1)

# Only major/minor chord series are supported downstream.
if args.series not in ('major', 'minor'):
    print('Unrecognized series: should be major or minor')
    sys.exit(1)

# Set playback tempo to 95 BPM at the start of the melody.
melody.insert(0, MetronomeMark(number=95))

# Pick the harmonization algorithm: a basic chord search or an HMM decoded
# with Viterbi.  NOTE(review): both presumably fill in `chords`, which is
# defined outside this chunk — confirm where it comes from.
if args.algorithm == 'basic':
    chord_search.run(chords, melody, args.series)
elif args.algorithm == 'hmm':
    viterbi.run(chords, melody, args.series)
else:
    print('Unrecognized algorithm: should be basic or hmm')
    sys.exit(1)

# Combine two parts: melody and chords both start at offset 0.
song = Stream()
song.insert(0, melody)
song.insert(0, chords)

# song.show('midi')  # alternative: audition as MIDI instead of notation
song.show()
# Example No. 4
class Transcriptor:
    """Transcribe an audio file into a music21 stream.

    Pipeline (run eagerly at construction): load audio -> CQT ->
    threshold -> onset detection -> tempo estimation -> per-segment
    pitch estimation and note generation.
    """

    def __init__(self, path):
        # Analysis parameters.
        self.path = path
        self.nfft = 2048  # window size in samples
        self.overlap = 0.5  # fractional window overlap
        self.hop_length = int(self.nfft * (1 - self.overlap))
        self.n_bins = 72  # CQT bins (6 octaves at 12 bins/octave, see estimate_pitch)
        self.mag_exp = 4  # magnitude exponent applied before dB conversion
        self.pre_post_max = 6  # onset peak-picking window, in frames
        self.threshold = -71  # dB floor; quieter CQT bins are muted

        # Derived analysis products; order matters (each step feeds the next).
        self.audio_sample, self.sr = self.load()
        self.cqt = self.compute_cqt()
        self.thresh_cqt = self.compute_thresholded_cqt(self.cqt)

        # [onset_times, onset_boundary_frames, onset_envelope]
        self.onsets = self.compute_onset(self.thresh_cqt)

        self.tempo, self.beats, self.mm = self.estimate_tempo()

        # One [sine_wave, midi_info, note_info] triple per inter-onset segment.
        self.music_info = np.array([
            self.estimate_pitch_and_notes(i)
            for i in range(len(self.onsets[1]) - 1)
        ])
        # Column 2 holds the music21 note/rest objects for transcript().
        self.note_info = list(self.music_info[:, 2])

        self.stream = Stream()

    def load(self):
        """Load the audio at ``self.path`` as mono at its native sample rate."""
        samples, rate = librosa.load(self.path, sr=None, mono=True)
        print("x Shape =", samples.shape)
        print("Sample rate =", rate)
        print("Audio Length in seconds = {} [s]".format(samples.shape[0] / rate))
        return samples, rate

    def compute_cqt(self):
        """Return the constant-Q transform of the audio in dB.

        The magnitude is raised to ``self.mag_exp`` before conversion to
        sharpen peaks relative to the noise floor.
        """
        spectrum = librosa.cqt(self.audio_sample, sr=self.sr,
                               hop_length=self.hop_length, fmin=None,
                               n_bins=self.n_bins, res_type='fft')
        sharpened = librosa.magphase(spectrum)[0] ** self.mag_exp
        return librosa.amplitude_to_db(sharpened, ref=np.max)

    def compute_thresholded_cqt(self, cqt):
        new_cqt = np.copy(cqt)
        new_cqt[new_cqt < self.threshold] = -120
        return new_cqt

    def compute_onset_env(self, cqt):
        """Return the mean-aggregated onset-strength envelope of ``cqt``."""
        return librosa.onset.onset_strength(
            S=cqt, sr=self.sr, aggregate=np.mean, hop_length=self.hop_length)

    def compute_onset(self, cqt):
        """Detect note onsets in ``cqt``.

        Returns:
            list: ``[onset_times, onset_boundaries, onset_env]`` where
            ``onset_boundaries`` are onset frames padded with 0 and the
            final frame index, and ``onset_times`` are those boundaries
            converted to seconds.
        """
        envelope = self.compute_onset_env(cqt)
        detected_frames = librosa.onset.onset_detect(
            onset_envelope=envelope, sr=self.sr, units='frames',
            hop_length=self.hop_length, pre_max=self.pre_post_max,
            post_max=self.pre_post_max, backtrack=False)

        # Pad with the first and last frame so consecutive boundary pairs
        # tile the whole signal.
        boundaries = np.concatenate([[0], detected_frames, [cqt.shape[1]]])
        times = librosa.frames_to_time(boundaries, sr=self.sr,
                                       hop_length=self.hop_length)

        return [times, boundaries, envelope]

    def display_cqt_tuning(self):
        """Plot the thresholded CQT with detected onsets overlaid (blocking)."""
        plt.figure()
        librosa.display.specshow(self.thresh_cqt, sr=self.sr, hop_length=self.hop_length,
                                 x_axis='time', y_axis='cqt_note', cmap='coolwarm')
        # Focus the view on the B2-B5 range.
        plt.ylim([librosa.note_to_hz('B2'), librosa.note_to_hz('B5')])
        # Mark each detected onset time with a vertical line.
        plt.vlines(self.onsets[0], 0, self.sr / 2, color='k', alpha=0.8)
        plt.title("CQT")
        plt.colorbar()
        plt.show()

    def estimate_tempo(self):
        """Estimate the global tempo from the onset-strength envelope.

        Returns:
            tuple: ``(tempo, beats, mm)`` — tempo in BPM rounded to the
            nearest even integer, beat positions in frames, and a
            quarter-note MetronomeMark at that tempo.
        """
        tempo, beats = librosa.beat.beat_track(y=None, sr=self.sr,
                                               onset_envelope=self.onsets[2],
                                               hop_length=self.hop_length,
                                               start_bpm=120.0,
                                               tightness=100.0,
                                               trim=True,
                                               units='frames')
        # Round to the nearest even BPM.
        tempo = int(2 * round(tempo / 2))
        mm = MetronomeMark(referent='quarter', number=tempo)
        return tempo, beats, mm

    def generate_note(self, f0_info, n_duration, round_to_sixteenth=True):
        """Build the audio, MIDI, and score representations of one event.

        Args:
            f0_info: ``[frequency_or_None, amplitude]`` as returned by
                estimate_pitch; a ``None`` frequency means a rest.
            n_duration: event length in CQT frames.
            round_to_sixteenth: quantize the MIDI duration to 1/16 steps.

        Returns:
            list: ``[sine_wave, [midi_note, midi_duration, midi_velocity],
            note_info]`` — note_info is a music21 Rest for silence, or a
            one-element list holding a music21 Note (NOTE(review): the
            mixed Rest-vs-list shape is inherited by callers).
        """
        f0 = f0_info[0]
        # Map amplitude into [0, 1] for the synthesized sine's gain.
        a = remap(f0_info[1], self.cqt.min(), self.cqt.max(), 0, 1)
        duration = librosa.frames_to_time(n_duration, sr=self.sr, hop_length=self.hop_length)
        note_duration = 0.02 * np.around(duration / 0.02)  # Round to 2 decimal places for music21 compatibility
        midi_duration = second_to_quarter(duration, self.tempo)
        # Map amplitude into the 80..120 MIDI velocity range.
        midi_velocity = int(round(remap(f0_info[1], self.cqt.min(), self.cqt.max(), 80, 120)))
        if round_to_sixteenth:
            midi_duration = round(midi_duration * 16) / 16
        try:
            if f0 is None:
                # No pitch above threshold: emit a rest of matching length.
                midi_note = None
                note_info = Rest(type=self.mm.secondsToDuration(note_duration).type)
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note), type=self.mm.secondsToDuration(note_duration).type)
                note.volume.velocity = midi_velocity
                note_info = [note]
        except DurationException:
            # secondsToDuration rejected this length; fall back to fixed
            # short values (32nd rest / eighth note).
            if f0 is None:
                midi_note = None
                note_info = Rest(type='32nd')
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note),
                            type='eighth')
                note.volume.velocity = midi_velocity
                note_info = [note]

        midi_info = [midi_note, midi_duration, midi_velocity]
        # Synthesize a sine at f0 over the event's span in samples; rests
        # produce silence because f0 was set to 0 above.
        n = np.arange(librosa.frames_to_samples(n_duration, hop_length=self.hop_length))
        sine_wave = a * np.sin(2 * np.pi * f0 * n / float(self.sr))
        return [sine_wave, midi_info, note_info]

    def estimate_pitch(self, segment, threshold):
        """Estimate the dominant pitch of one averaged CQT segment.

        Args:
            segment: per-bin CQT magnitudes (dB) for one inter-onset span.
            threshold: dB level below which the segment counts as a rest.

        Returns:
            list: ``[frequency_or_None, peak_amplitude]``; frequency is
            ``None`` when the whole segment stays below ``threshold``.
        """
        freqs = librosa.cqt_frequencies(n_bins=self.n_bins,
                                        fmin=librosa.note_to_hz('C1'),
                                        bins_per_octave=12)
        peak_amplitude = np.mean(np.amax(segment, axis=0))
        if segment.max() < threshold:
            return [None, peak_amplitude]
        dominant_bin = int(np.mean(np.argmax(segment, axis=0)))
        return [freqs[dominant_bin], peak_amplitude]
    def estimate_pitch_and_notes(self, i):
        """Estimate the pitch of the i-th inter-onset segment and build its note."""
        start = self.onsets[1][i]
        stop = self.onsets[1][i + 1]
        mean_spectrum = np.mean(self.cqt[:, start:stop], axis=1)
        pitch_info = self.estimate_pitch(mean_spectrum, threshold=self.threshold)
        return self.generate_note(pitch_info, stop - start)

    def transcript(self):
        """Assemble ``self.stream``: tempo mark, instrument, metadata, notes, key."""
        self.stream.append(self.mm)
        # NOTE(review): despite the original local name "electric_guitar",
        # this is a grand-piano patch (GM program 1) on channel 0.
        keyboard = instrument.fromString('grand piano')
        keyboard.midiChannel = 0
        keyboard.midiProgram = 1
        self.stream.append(keyboard)
        self.stream.insert(0, metadata.Metadata())
        self.stream.metadata.title = self.path.split('/')[-1]
        for event in self.note_info:
            self.stream.append(event)
        detected_key = self.stream.analyze('key')
        print(detected_key.name)
        # Insert the detected key at the start of the stream.
        self.stream.insert(0, detected_key)

    def show_stream(self):
        """Open the transcribed stream in music21's configured viewer."""
        self.stream.show()

    def convert_stream_to_midi(self):
        """Write the transcribed stream to MIDI.

        Saves a fixed local copy ('midi_scale.mid'), then asks the user
        for a destination via a save dialog and writes there as well.
        Does nothing further if the dialog is cancelled.
        """
        midi_file = midi.translate.streamToMidiFile(self.stream)
        midi_file.open('midi_scale.mid', 'wb')
        midi_file.write()
        midi_file.close()

        # BUG FIX: the original passed a single malformed tuple as
        # `filetypes` (tkinter expects a sequence of (label, pattern)
        # pairs) and used asksaveasfile, which opens the file in text
        # mode, leaks the handle, and returns None on cancel (crashing
        # on `.name`).  Ask only for the file *name* and guard cancel.
        target = filedialog.asksaveasfilename(
            initialdir="/", title="Save Midi File",
            filetypes=(("midi files", "*.mid *.midi"),))
        if not target:
            return  # user cancelled the dialog

        midi_file = midi.translate.streamToMidiFile(self.stream)
        midi_file.open(target, 'wb')
        midi_file.write()
        midi_file.close()