def generate_note(self, f0_info, n_duration, round_to_sixteenth=True):
    """Render one detected pitch as a sine wave plus MIDI/music21 data.

    :param f0_info: (f0, magnitude) pair; ``f0`` may be None for a rest.
    :param n_duration: note length in analysis frames.
    :param round_to_sixteenth: quantize the MIDI duration to 1/16 quarters.
    :return: ``[sine_wave, [midi_note, midi_duration, midi_velocity], note_info]``
             where ``note_info`` is a Rest (for silence) or a one-element
             list holding a Note.
    """
    f0 = f0_info[0]
    # Amplitude mapped from the CQT magnitude range into [0, 1].
    a = remap(f0_info[1], self.cqt.min(), self.cqt.max(), 0, 1)
    duration = librosa.frames_to_time(n_duration, sr=self.sr, hop_length=self.hop_length)
    note_duration = 0.02 * np.around(duration / 0.02)  # Round to 2 decimal places for music21 compatibility
    midi_duration = second_to_quarter(duration, self.tempo)
    # Velocity mapped from CQT magnitude into the 80-120 range.
    midi_velocity = int(round(remap(f0_info[1], self.cqt.min(), self.cqt.max(), 80, 120)))
    if round_to_sixteenth:
        midi_duration = round(midi_duration * 16) / 16
    try:
        if f0 is None:
            # No pitch detected: notate a rest; f0 = 0 silences the sine below.
            midi_note = None
            note_info = Rest(type=self.mm.secondsToDuration(note_duration).type)
            f0 = 0
        else:
            midi_note = round(librosa.hz_to_midi(f0))
            note = Note(librosa.midi_to_note(midi_note), type=self.mm.secondsToDuration(note_duration).type)
            note.volume.velocity = midi_velocity
            note_info = [note]
    except DurationException:
        # secondsToDuration could not express note_duration; fall back to
        # fixed note types ('32nd' rest / 'eighth' note).
        if f0 is None:
            midi_note = None
            note_info = Rest(type='32nd')
            f0 = 0
        else:
            midi_note = round(librosa.hz_to_midi(f0))
            note = Note(librosa.midi_to_note(midi_note), type='eighth')
            note.volume.velocity = midi_velocity
            note_info = [note]
    midi_info = [midi_note, midi_duration, midi_velocity]
    # Synthesize a plain sine at f0, scaled by the mapped amplitude.
    n = np.arange(librosa.frames_to_samples(n_duration, hop_length=self.hop_length))
    sine_wave = a * np.sin(2 * np.pi * f0 * n / float(self.sr))
    return [sine_wave, midi_info, note_info]
def test_2():
    """Two-part score: a sixteenth note followed by a sixteenth rest in each part."""
    top, bot = _setup_parts()
    for part, pitch in ((top, 'G4'), (bot, 'G3')):
        part.append(Note(pitch, quarterLength=0.25))
        part.append(Rest(quarterLength=0.25))
    return Score([top, bot])
def note_rhythm_zip(melody, note_sequence, rhythm_sequence, time_signature, interval=0.25):
    """Combine a pitch sequence with a Note/Hold/Rest rhythm track.

    Pitches are first fitted to the melody's contour, expanded into
    ``interval``-sized slices, then merged according to ``rhythm_sequence``
    ('Note' starts a new note, 'Hold' extends it, 'Rest' starts/extends a
    rest).  Events that cross a barline are split at the barline.

    :param time_signature: bar length in quarter notes.
    :return: list of Note/Rest objects with barline-aware durations.
    """
    melody_sequence = mimic_melody(note_sequence, melody)
    # Expand every event into interval-sized slices so rhythm indexing lines up.
    melody_sequence = [
        Note(elem.nameWithOctave, quarterLength=interval)
        if type(elem) is Note else Rest(quarterLength=interval)
        for elem in melody_sequence
        for i in range(0, int(elem.quarterLength / interval))
    ]
    new_melody_sequence = []
    elem = None
    bar_length = 0.0

    # Handle notes in the melody due to bars and time signature
    def add_to_melody_sequence(new_melody_sequence, elem, bar_length):
        # Flush `elem` into the output, splitting it at the barline when the
        # running bar_length would overflow the time signature.
        if type(elem) not in [Note, Rest]:
            pass
        elif bar_length + elem.quarterLength >= time_signature:
            extra = bar_length + elem.quarterLength - time_signature
            elem.quarterLength = time_signature - bar_length
            if elem.quarterLength > 0.0:
                new_melody_sequence += [elem]
            bar_length = extra
            # The possible extra note
            elem = Note(elem.nameWithOctave) if type(elem) is Note else Rest()
            elem.quarterLength = extra
            if elem.quarterLength > 0.0:
                new_melody_sequence += [elem]
        else:
            new_melody_sequence += [elem]
            bar_length += elem.quarterLength
        return (new_melody_sequence, elem, bar_length)

    for index, rhythm in enumerate(rhythm_sequence):
        if rhythm == 'Hold' and type(elem) is Note:
            # Extend the note currently being built.
            elem.quarterLength += interval
        elif rhythm == 'Note':
            # Flush the previous event, then start a new note from the melody.
            new_melody_sequence, elem, bar_length = add_to_melody_sequence(
                new_melody_sequence, elem, bar_length)
            elem = melody_sequence[index]
            elem.quarterLength = interval
        elif rhythm == 'Rest' and type(elem) is Rest:
            # Extend the rest currently being built.
            elem.quarterLength += interval
        elif rhythm == 'Rest' or rhythm == 'Hold':
            # 'Rest' after a note, or 'Hold' with nothing to hold: start a rest.
            new_melody_sequence, elem, bar_length = add_to_melody_sequence(
                new_melody_sequence, elem, bar_length)
            elem = Rest()
            elem.quarterLength = interval
    # Flush the final pending event.
    new_melody_sequence, elem, bar_length = add_to_melody_sequence(
        new_melody_sequence, elem, bar_length)
    return new_melody_sequence
def test_18():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    doesn't stop processing when it doesn't find an element of the expected
    types at an offset. You should ask it to look for Rest objects only.
    """
    top, bot = _setup_parts()
    for event in (Note('G4', quarterLength=0.5), Rest(quarterLength=0.5)):
        top.append(event)
    bot.append(TimeSignature('4/4'))
    for event in (Note('G3', quarterLength=0.5), Rest(quarterLength=0.5)):
        bot.append(event)
    return Score([top, bot])
def test_15():
    """Two-part score with a rest in each part and an extra rest in the bottom."""
    top, bot = _setup_parts()
    # (pitch, quarterLength); pitch None means a rest.  Offsets: 0, 0.5,
    # 0.75, 1.0, 1.5 in the top part.
    for pitch, length in (('G4', 0.5), ('G4', 0.25), (None, 0.25),
                          ('G4', 0.5), ('G4', 0.5)):
        top.append(Note(pitch, quarterLength=length) if pitch
                   else Rest(quarterLength=length))
    for pitch, length in (('G3', 0.5), ('G3', 0.25), (None, 0.25),
                          (None, 0.5), ('G3', 0.5)):
        bot.append(Note(pitch, quarterLength=length) if pitch
                   else Rest(quarterLength=length))
    return Score([top, bot])
def notate_note(note):
    """Turn a note-spec dict into a music21 Rest, Note, or Chord.

    ``note['pitch']`` is 'rest', a pitch number, or a list of pitch numbers
    (chord).  A zero ``note['duration']`` becomes a grace note; otherwise the
    duration is filled from the pre-split ``note['durations']`` components.
    """
    def _flat_pitch(number):
        # Spell every accidental as a flat.
        p = Pitch(number)
        if p.accidental.name == 'sharp':
            p = p.getEnharmonic()
        return p

    spec = note['pitch']
    if spec == 'rest':
        n = Rest()
    elif isinstance(spec, list):
        n = Chord(notes=[_flat_pitch(num) for num in spec])
    else:
        n = Note(_flat_pitch(spec))

    d = Duration()
    if note['duration'] == 0:
        d.quarterLength = .125
        d = d.getGraceDuration()
    else:
        # music21 docs say `fill` is for testing. I can't remember why I chose
        # to use it originally. It works. But not for tuplets. Maybe this blog
        # post contains a better solution:
        # http://music21-mit.blogspot.com/2015/09/durations-and-durationtuples.html
        d.fill(note['durations'])
    n.duration = d
    return n
def _matrix_to_part(self, submatrix):
    '''
    Takes a submatrix of size (T, D) and turns it into a
    music21.stream.Part object, where T is the number of time slices and
    D is the note-vector dimension (column 0: pitch, column 1: articulation
    flag).  An articulated tick starts a new Note/Rest; unarticulated ticks
    extend the current one by ``self.unit_length``.
    '''
    part = Part()
    pitches = submatrix[:, 0]
    articulations = submatrix[:, 1]
    current_note = None
    # NOTE(review): an unarticulated first tick would hit the `else` branch
    # with current_note still None — assumes tick 0 is always articulated.
    for current_tick in range(len(submatrix)):
        if articulations[current_tick]:  # if articulate
            # append the old note
            if current_note is not None:  # for the first note
                part.append(current_note)
            # create a new note
            if pitches[current_tick] < self.rest:
                current_note = Note()
                # assign pitch, inverse of self._midi_to_input()
                current_note.pitch.midi = pitches[current_tick]
            else:
                current_note = Rest()
            # resets the duration to the smallest amount
            current_note.duration.quarterLength = self.unit_length
        else:
            current_note.duration.quarterLength += self.unit_length
    # Bug fix: the final Note/Rest was only flushed when a *following*
    # articulation arrived, so the last event was silently dropped.
    if current_note is not None:
        part.append(current_note)
    return part
def generate_sine_midi_note(f0_info, sr, n_duration):
    """Render one detected pitch as a sine wave plus a music21 Note/Rest.

    :param f0_info: (f0, magnitude) pair; ``f0`` may be None for a rest.
    :param sr: sample rate used for sine synthesis.
    :param n_duration: note length in analysis frames.
    :return: ``[sine_wave, note_info]`` where ``note_info`` is a Rest or a
             one-element list holding a Note, or None when music21 cannot
             express the duration.
    """
    f0 = f0_info[0]
    A = remap(f0_info[1], CdB.min(), CdB.max(), 0, 1)
    # NOTE(review): duration is computed from the module-level `fs` and
    # `hop_length`, not the `sr` parameter — presumably fs == sr; confirm.
    duration = librosa.frames_to_time(n_duration, sr=fs, hop_length=hop_length)
    # Generate music21 note
    note_duration = 0.02 * np.around(
        duration / 2 / 0.02)  # Round to 2 decimal places for music21 compatibility
    midi_velocity = int(round(remap(f0_info[1], CdB.min(), CdB.max(), 0, 127)))
    # Bug fix: compare against None with `is`, not `==` (PEP 8; `==` can be
    # hijacked by operator overloading).
    if f0 is None:
        try:
            note_info = Rest(type=mm.secondsToDuration(note_duration).type)
        except DurationException:
            note_info = None
        f0 = 0
    else:
        midi_note = round(librosa.hz_to_midi(f0))
        try:
            note = Note(midi_note, type=mm.secondsToDuration(note_duration).type)
            note.volume.velocity = midi_velocity
            note_info = [note]
        except DurationException:
            note_info = None
    if note_info is None:
        return None
    # Generate Sinewave
    n = np.arange(librosa.frames_to_samples(n_duration, hop_length=hop_length))
    sine_wave = A * np.sin(2 * np.pi * f0 * n / float(sr))
    return [sine_wave, note_info]
def decode_score(encoding, num_measures, ts, image=False):
    """Decode a piano-roll-style encoding into a music21 Stream of Measures.

    :param encoding: per-timestep pitch activations.
    :param num_measures: number of measures to produce.
    :param ts: time signature string, e.g. '4/4'.
    :param image: True when rows are dense arrays (use np.nonzero(...)[0]).
    """
    score = Stream()
    score.timeSignature = TimeSignature(ts)
    steps_per_measure = len(encoding) / num_measures
    measure_ind = 0
    while measure_ind < num_measures:
        start_beat = int(measure_ind * steps_per_measure)
        end_beat = int((measure_ind + 1) * steps_per_measure)
        measure = Measure()
        for beat_ind in range(start_beat, end_beat):
            if image:
                played_pitches = np.nonzero(encoding[beat_ind])[0]
            else:
                # NOTE(review): np.nonzero returns a *tuple* of index arrays,
                # so len(played_pitches) is the number of dimensions here, not
                # the number of active pitches — looks like this branch also
                # wants [0]; confirm against the non-image encoding's shape.
                played_pitches = np.nonzero(encoding[beat_ind])
            if len(played_pitches) == 0:
                # Silent step: emit a rest of one grid slot.
                measure.append(Rest(quarterLength=4.0 / GRANULARITY))
            else:
                played_notes = [
                    midi_to_note(int(pitch + MIN_PITCH))
                    for pitch in played_pitches
                ]
                chord = Chord(played_notes, quarterLength=4.0 / GRANULARITY)
                measure.append(chord)
        score.append(measure)
        measure_ind += 1
    return score
def notate_score(musician_names, instrument_names, music):
    """Notate each musician's pitch lists as whole-note events and open the
    result in Sibelius.

    ``music`` maps musician name -> list of pitch lists; an empty list or
    'stop' becomes a rest, one pitch a note, several a chord (all offset
    by MIDI 60).
    """
    score = Score()
    for musician_name, instrument_name in zip(musician_names, instrument_names):
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        part = Part()
        part.insert(0, instrument)
        score.insert(0, part)
        score.insert(0, StaffGroup([part]))

        for pitches in music[musician_name]:
            if not pitches or pitches == 'stop':
                event = Rest()
            elif len(pitches) == 1:
                event = Note(Pitch(pitches[0] + 60))
            else:
                event = Chord(notes=[Pitch(p + 60) for p in pitches])
            whole = Duration()
            whole.fill([4.0])
            event.duration = whole
            part.append(event)

    score.show('musicxml', '/Applications/Sibelius 7.5.app')
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    # One frame of score time; NOTE(review): this single Duration instance is
    # shared by every rest and chord appended below — music21 may be fine with
    # that, but confirm elements don't mutate their duration in place.
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = dict(
    )  # midi->(note instance from previous chord), used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0:  # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                # note_tuple: (midi number, tied-from-previous-frame flag).
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]:  # current note is tied
                    note.tie = Tie('stop')
                    # Upgrade the previous frame's note from plain/stop to
                    # start/continue so the tie chain is well-formed.
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = {note.pitch.midi: note for note in notes}
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
def test_14():
    """Eight 64th notes plus a half note on top against a mixed bottom line."""
    top, bot = _setup_parts()
    # Top: eight 64th-note G4s (offsets 0 .. 0.4375), then a half note at 0.5.
    for _ in range(8):
        top.append(Note('G4', quarterLength=0.0625))
    top.append(Note('G4', quarterLength=0.5))
    # Bottom: (pitch, quarterLength); pitch None means a rest.
    # Offsets: 0, 0.125, 0.25, 0.375, 0.4375, 0.5.
    for pitch, length in (('G3', 0.125), (None, 0.125), ('A3', 0.125),
                          (None, 0.0625), (None, 0.0625), ('G3', 0.5)):
        bot.append(Note(pitch, quarterLength=length) if pitch
                   else Rest(quarterLength=length))
    return Score([top, bot])
def test_19():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    finds Rest objects when they happen at the same time as Note objects,
    when only Rest objects are requested to be found.
    """
    top, bot = _setup_parts()
    for event in (Note('G4', quarterLength=0.5), Note('G5', quarterLength=0.5)):
        top.append(event)
    for event in (Note('G3', quarterLength=0.5), Rest(quarterLength=0.5)):
        bot.append(event)
    return Score([top, bot])
def generate_notes_in_batch(note_params_df, output_dir, audio_format='flac',
                            sample_rate=44100):
    """
    Generates a batch of single note samples from the given table of
    parameters.

    `note_params_df` - a Pandas Dataframe with columns: `midi_number,
    midi_instrument, volume, duration, tempo`. Their meaning is the same as
    in generate_single_note.

    `output_dir` - output directory for the MIDI files

    Each sample goes to a single MIDI file named by the numeric index. Also
    each synthesized audio sample goes to a
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    # One long stream: tempo + instrument + chord + trailing rest per row,
    # later sliced back into per-note files via the audio index.
    stream = Stream()

    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']), duration=Duration(duration))
                ]), row['volume']))
        # Rest twice the note length separates consecutive samples.
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(
        output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    # Synthesize, downmix to mono, then split into per-note files.
    fs.midi_to_audio(midi_file, audio_file_stereo)

    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)

    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
def test_17():
    """Two parts with mismatched subdivisions; bottom includes one rest."""
    top, bot = _setup_parts()
    # (pitch, quarterLength); pitch None means a rest.
    # Top offsets: 0, 0.5, 1.25, 2.0.
    for pitch, length in (('G4', 0.5), ('A4', 0.75), ('F4', 0.75), ('E4', 0.5)):
        top.append(Note(pitch, quarterLength=length))
    # Bottom offsets: 0, 0.5, 0.75, 1.125, 1.375, 2.0.
    for pitch, length in (('G3', 0.5), ('A3', 0.25), ('F3', 0.375),
                          (None, 0.25), ('G3', 0.625), ('G3', 0.5)):
        bot.append(Note(pitch, quarterLength=length) if pitch
                   else Rest(quarterLength=length))
    return Score([top, bot])
def make_music21_note(
    pitch_number=None,
    duration=1.0,
    staccato=False,
    tenuto=False,
    accent=False,
    falloff=False,
    plop=False,
    scoop=False,
    doit=False,
    breath_mark=False,
):
    """Build a music21 Rest, Note, or Chord with optional articulations.

    :param pitch_number: None or 'rest' for a rest, a pitch for a note, or a
        list of pitches for a chord.  Explicit naturals are stripped.
    :param duration: length in quarter notes.
    :param staccato..breath_mark: flags; each True appends the matching
        music21 articulation object.
    """
    # Bug fix: use `is None` / `==` instead of `== None` / string `is`.
    # `x is 'natural'` compares identity, which is not guaranteed for equal
    # strings (and raises SyntaxWarning on Python 3.8+), so naturals were
    # only stripped by accident of interning.
    if pitch_number is None or pitch_number == 'rest':
        n = Rest()
    elif isinstance(pitch_number, list):
        pitches = [Pitch(p) for p in pitch_number]
        for p in pitches:
            if p.accidental.name == 'natural':
                p.accidental = None
        n = Chord(pitches)
    else:
        p = Pitch(pitch_number)
        if p.accidental.name == 'natural':
            p.accidental = None
        n = Note(p)
    d = Duration()
    d.quarterLength = duration
    n.duration = d
    if staccato:
        n.articulations.append(Staccato())
    if tenuto:
        n.articulations.append(Tenuto())
    if accent:
        n.articulations.append(Accent())
    if falloff:
        n.articulations.append(Falloff())
    if plop:
        n.articulations.append(Plop())
    if scoop:
        n.articulations.append(Scoop())
    if doit:
        n.articulations.append(Doit())
    if breath_mark:
        n.articulations.append(BreathMark())
    return n
def add_to_melody_sequence(new_melody_sequence, elem, bar_length):
    """Flush ``elem`` into the output sequence, splitting at the barline.

    Non-Note/Rest elements are ignored.  When the event would overflow the
    bar (closure variable ``time_signature``), it is truncated at the
    barline and the remainder is re-emitted as a fresh event that starts
    the next bar.  Returns the updated ``(sequence, elem, bar_length)``.
    """
    if type(elem) is not Note and type(elem) is not Rest:
        return (new_melody_sequence, elem, bar_length)

    overflow = bar_length + elem.quarterLength - time_signature
    if overflow >= 0.0:
        # Truncate the event at the barline.
        elem.quarterLength = time_signature - bar_length
        if elem.quarterLength > 0.0:
            new_melody_sequence.append(elem)
        bar_length = overflow
        # Carry the remainder over as a new event in the next bar.
        carry = Note(elem.nameWithOctave) if type(elem) is Note else Rest()
        carry.quarterLength = overflow
        elem = carry
        if elem.quarterLength > 0.0:
            new_melody_sequence.append(elem)
    else:
        new_melody_sequence.append(elem)
        bar_length += elem.quarterLength
    return (new_melody_sequence, elem, bar_length)
def add_piano_riff(roman, duration, piano, show_symbols=False): '''Given a Roman chord, duration in eighths/quavers and a keyboard part, generate a riff and add it to the keyboard part''' # Add a chord symbol at the start symbol = ChordSymbol(chordSymbolFigureFromChord(roman)) if show_symbols: print symbol piano.append(symbol) # Add the actual notes filled = 0 while filled < duration: # NOTE: higher chance to rest if on beat = more syncopated rhythm to piano if random.randint(0, 1 + filled%2 + filled%4): # XXX: Must deepcopy, do not change original or it will break bassline chord = Chord(deepcopy(roman.pitches)) # invert chord randomly, root inversion twice as likely as others max_inv=len(chord.pitches) chord.inversion(random.randint(0,max_inv)%max_inv) # TODO try randomly ommitting some chord notes # Randomly hold notes for longer if we have longer before # the next chord change max_length = min(duration-filled, 4) # Cap at 1/2 bar length = random.randint(1,max_length) chord.quarterLength = length/2.0 # length is in eighths # Add an extra root note 1 octave lower root = deepcopy(chord.root()) root.octave -= 1 chord.add(root) # Add the chord at soft volume and update duration chord.volume = Volume(velocity=16,velocityIsRelative=False) piano.append(chord) filled += length else: piano.append(Rest(quarterLength=0.5)) filled += 1
def flatten_equalised_parts(song, interval=0.25):
    """Extract per-part, per-bar Note/Rest lists with equalised grid lengths.

    :param song: music21 score with Part/Measure structure.
    :param interval: grid resolution in quarter notes.
    :return: nested list ``parts[part][bar] -> [Note/Rest, ...]``.
    """
    # Only look at notes, and rests (not e.g. chords, time signature, key signature, page layouts)
    parts = [[[elem for elem in bar if type(elem) in [Note, Rest]]
              for bar in part.getElementsByClass('Measure')]
             for part in song.getElementsByClass('Part')]
    # Fix melodies with errors
    for index, _ in enumerate(parts[0]):
        # Get the current bars
        bars = [equalise_interval(part[index], interval) for part in parts]
        bar_length = max([len(bar) for bar in bars])
        # Fix bars which for some reason don't fill the whole bar
        # Add rests instead
        # NOTE(review): pads with a single Rest whose length covers the whole
        # deficit (Rest's first positional argument — confirm it sets the
        # duration in this music21 version), rather than bar_length - len(bar)
        # grid-sized rests.
        bars = [
            bar + [Rest(interval * (bar_length - len(bar)))] for bar in bars
        ]
        for i, part in enumerate(parts):
            part[index] = bars[i]
    return parts
def get_notes_from_sequence(self, note_sequence):
    """
    Converts an array of MIDI represented notes to an array of music21.note
    objects.

    :param note_sequence: MIDI represented note sequence.
    :return: array of Note and Rest objects.
    """
    result = []
    # count_note_events yields (midi_number, step_count) pairs; 128 is the
    # rest sentinel.
    for midi_number, step_count in self.count_note_events(note_sequence):
        length = step_count * self.T_STEP_LENGTH
        if midi_number == 128:
            result.append(Rest(quarterLength=length))
        else:
            result.append(Note(midi_number, quarterLength=length))
    return result
def mimic_melody(note_sequence, melody):
    """Shape ``note_sequence`` pitches onto the rhythm of ``melody``.

    Notes in the melody consume the next pitch from ``note_sequence``; rests
    are copied, then replaced by a neighbouring pitch (previous note first,
    otherwise the following one) so the line is fully pitched.
    """
    pitch_pool = collections.deque(note_sequence)
    shaped = []
    for elem in melody:
        if type(elem) is Note:
            shaped.append(Note(pitch_pool.popleft(),
                               quarterLength=elem.quarterLength))
        elif type(elem) is Rest:
            shaped.append(Rest(quarterLength=elem.quarterLength))
    for idx, elem in enumerate(shaped):
        if type(elem) is not Rest:
            continue
        if idx > 0 and type(shaped[idx - 1]) is Note:
            shaped[idx] = Note(shaped[idx - 1].nameWithOctave,
                               quarterLength=elem.quarterLength)
        elif idx < len(shaped) - 1 and type(shaped[idx + 1]) is Note:
            shaped[idx] = Note(shaped[idx + 1].nameWithOctave,
                               quarterLength=elem.quarterLength)
    return shaped
# A aMeasureA0 = stream.Measure() aMeasureA0.append(Chord([Kick(duration=duration.Duration(1.0))])) aMeasureA0.append(Snare(duration=duration.Duration(0.5))) aMeasureA0.append(Snare(duration=duration.Duration(0.5))) aMeasureA0.append(Snare(duration=duration.Duration(0.5))) aMeasureA0.append(Snare(duration=duration.Duration(0.5))) aPart0.repeatAppend(aMeasureA0, 3) aMeasureA3 = stream.Measure() aMeasureA3.append(Chord([Kick(duration=duration.Duration(1.0))])) aNote0 = Snare(duration=duration.Duration(1.0)) aNote0.articulations = [articulations.Accent()] aMeasureA3.append(aNote0) #aMeasureA3.append(Snare(duration=duration.Duration(1.0), articulations=[articulations.StrongAccent()])) aMeasureA3.append(Rest(duration=duration.Duration(0.5))) aMeasureA3.append(Snare(duration=duration.Duration(0.5))) aPart0.append(aMeasureA3) # B aMeasureB0 = stream.Measure() aMeasureB0.append(Chord([Kick(duration=duration.Duration(1.0))])) aNote0 = Snare(duration=duration.Duration(1.0)) aNote0.articulations = [articulations.Accent()] aMeasureB0.append(aNote0) aMeasureB0.append(Snare(duration=duration.Duration(0.5))) aMeasureB0.append(Snare(duration=duration.Duration(0.5))) aPart0.append(aMeasureB0) aMeasureB1 = stream.Measure() aMeasureB1.append(Chord([Kick(duration=duration.Duration(1.0))]))
def __init__(self, ranges=False):
    """Build the full piece (or, with ``ranges=True``, just a range chart).

    Creates the score/instruments/parts, generates random "songs" until the
    target duration is reached, then notates every bar of every part.
    """
    score = self.score = Score()
    self.instruments = self.i = Instruments()
    self.parts = Parts(self.i)

    # Make Metadata
    timestamp = datetime.datetime.utcnow()
    metadata = Metadata()
    metadata.title = 'Early Montreal'
    metadata.composer = 'Jonathan Marmor'
    metadata.date = timestamp.strftime('%Y/%m/%d')
    score.insert(0, metadata)

    [score.insert(0, part) for part in self.parts.l]
    score.insert(0, StaffGroup(self.parts.l))

    if ranges:
        # Don't make a piece, just show the instrument ranges
        for inst, part in zip(self.instruments.l, self.parts.l):
            measure = Measure()
            measure.timeSignature = TimeSignature('4/4')
            low = Note(inst.lowest_note)
            measure.append(low)
            high = Note(inst.highest_note)
            measure.append(high)
            part.append(measure)
        return

    # 18 to 21 minutes
    piece_duration_minutes = scale(random.random(), 0, 1, 18, 21)

    # Make the "songs"
    songs = []
    total_minutes = 0
    n = 1
    while total_minutes < piece_duration_minutes:
        print 'Song {}'.format(n)
        n += 1
        song = Song(self)
        songs.append(song)
        total_minutes += song.duration_minutes

    # Make notation
    previous_duration = None
    for song in songs:
        for bar in song.bars:
            for part in bar.parts:
                measure = Measure()
                if bar.tempo:
                    # Tempo change: mark it and start with a double barline.
                    measure.insert(
                        0, MetronomeMark(number=bar.tempo, referent=Duration(1)))
                    measure.leftBarline = 'double'
                if bar.duration != previous_duration:
                    ts = TimeSignature('{}/4'.format(bar.duration))
                    measure.timeSignature = ts

                # Fix Durations: split every duration at beat boundaries and
                # re-join whole-quarter runs so music21 can notate them.
                durations = [note['duration'] for note in part['notes']]
                components_list = split_at_beats(durations)
                components_list = [
                    join_quarters(note_components)
                    for note_components in components_list
                ]
                for note, components in zip(part['notes'], components_list):
                    note['durations'] = components

                for note in part['notes']:
                    if note['pitch'] == 'rest':
                        n = Rest()
                    # NOTE(review): this is a sibling `if`, not `elif` — a
                    # 'rest' pitch falls through to the else branch below and
                    # reaches Pitch('rest'); compare notate_note(), which uses
                    # if/else here.  Confirm intended structure.
                    if isinstance(note['pitch'], list):
                        pitches = []
                        for pitch_number in note['pitch']:
                            p = Pitch(pitch_number)
                            # Force all flats
                            if p.accidental.name == 'sharp':
                                p = p.getEnharmonic()
                            pitches.append(p)
                        n = Chord(notes=pitches)
                        # TODO add slurs
                        # TODO add glissandos
                        # TODO add -50 cent marks
                    else:
                        p = Pitch(note['pitch'])
                        # Force all flats
                        if p.accidental.name == 'sharp':
                            p = p.getEnharmonic()
                        n = Note(p)
                        # TODO add slurs
                        # TODO add glissandos
                        # TODO add -50 cent marks
                    d = Duration()
                    if note['duration'] == 0:
                        # Zero-length spec becomes a grace note.
                        d.quarterLength = .5
                        d = d.getGraceDuration()
                    else:
                        d.fill(note['durations'])
                    n.duration = d
                    measure.append(n)
                self.parts.d[part['instrument_name']].append(measure)
            previous_duration = bar.duration
def add_rests(proportion, population):
    # Replace random members' notes with rests until the rest proportion is in
    # range.  NOTE(review): never touches the first or last member, and loops
    # forever if the target proportion is unreachable or len(population) < 3
    # (randint(1, len-2) would then be an invalid/degenerate range) — confirm
    # callers guarantee a large enough population.
    while not is_proportion_in_range(proportion, population):
        random_index = random.randint(1, len(population) - 2)
        population[random_index].music_note = Rest()
def melody_and_chords_streams(self) -> Tuple[Stream, Stream]:
    """
    The chord stream contains realized chords and chord symbols and rests
    for NC
    :return: (melody stream, chords stream); the chords stream is a deep
        copy of the melody with notes/rests removed and chord symbols
        re-timed to beat boundaries, carrying the last symbol across
        symbol-less gaps.
    """
    melody = Stream()
    chord_dict = defaultdict(list)  # measure index -> ChordSymbols found there
    measure_duration = None
    for measure_idx, measure in enumerate(
            self.ls.recurse().getElementsByClass(Measure)):
        # All measures must share one duration (anacrusis excepted below).
        if measure_duration is None:
            measure_duration = measure.duration.quarterLength
        else:
            if measure_duration != measure.duration.quarterLength:
                raise WrongBarDurationError()
        mel_measure = measure.cloneEmpty()
        if measure_idx == 0:
            # Pad a pickup measure with a leading rest to full length.
            anacrusis = measure.barDuration.quarterLength - measure.duration.quarterLength
            if anacrusis:
                mel_measure.append(Rest(duration=Duration(anacrusis)))
        for elt in measure:
            if elt.isClassOrSubclass((ChordSymbol, )):
                chord_dict[measure_idx].append(elt)
            else:
                mel_measure.append(deepcopy(elt))
        melody.append(mel_measure)
    chords = deepcopy(melody)
    # Swap the first clef for a bass clef in the chord stream.
    clef = None
    for _clef in chords.recurse().getElementsByClass(Clef):
        clef = _clef
        break
    if clef:
        clef.activeSite.insert(0, BassClef())
        clef.activeSite.remove(clef)
    last_chord_symbol = None
    for measure_idx, measure in enumerate(
            chords.getElementsByClass(Measure)):
        original_measure_duration = measure.duration.quarterLength
        measure.removeByClass([Rest, Note])
        if chord_dict[measure_idx]:
            # Symbol onsets floored to beats; sentinel marks the measure end.
            beats = [floor(ch.beat) for ch in chord_dict[measure_idx]] \
                + [1 + original_measure_duration]
            durations = [(beats[i + 1] - beats[i])
                         for i in range(len(beats) - 1)]
            if beats[0] > 1:
                # Gap before the first symbol: fill with a rest (no prior
                # symbol yet) or extend the previous symbol.
                if last_chord_symbol is None:
                    measure.insert(0, Rest(duration=Duration(beats[0] - 1)))
                else:
                    _cs = deepcopy(last_chord_symbol)
                    _cs.duration = Duration(beats[0] - 1)
                    measure.insert(0, _cs)
            for chord_symbol_idx, chord_symbol in enumerate(
                    chord_dict[measure_idx]):
                chord_symbol.duration = Duration(
                    durations[chord_symbol_idx])
                measure.insert(beats[chord_symbol_idx] - 1, chord_symbol)
                last_chord_symbol = chord_symbol
        else:
            # No symbols in this measure: rest or carry the last symbol over.
            if last_chord_symbol is None:
                measure.insert(
                    0, Rest(duration=Duration(original_measure_duration)))
            else:
                _cs = deepcopy(last_chord_symbol)
                _cs.duration = Duration(original_measure_duration)
                measure.insert(0, _cs)
    return melody, chords
def tensors_to_stream(outputs, config, metadata=None):
    """Decode per-voice network output tensors into a music21 Score.

    :param outputs: dict of tensors keyed by part name ('soprano', 'alto',
        'tenor', 'bass') plus an 'extra' tensor carrying time signature,
        beat position, fermata, and key-signature flags per step.
    :param config: provides ``time_grid``, the quarterLength of one step.
    :param metadata: optional source metadata for the score title/composer.
    :return: music21 Score with ties stripped into sustained notes.
    """
    cur_measure_number = 0
    parts = {}
    for part_name in outputs.keys():
        if part_name == 'extra':
            continue
        part = Part(id=part_name)
        parts[part_name] = part
    last_time_signature = None
    cur_time_signature = '4/4'
    for step in range(outputs['soprano'].shape[0]):
        extra = outputs['extra'][step]
        # Time signature flags are one-hot per step; keep the last seen value.
        if extra[indices_extra['has_time_signature_3/4']].item() == 1:
            cur_time_signature = '3/4'
        elif extra[indices_extra['has_time_signature_4/4']].item() == 1:
            cur_time_signature = '4/4'
        elif extra[indices_extra['has_time_signature_3/2']].item() == 1:
            cur_time_signature = '3/2'
        cur_time_pos = extra[indices_extra['time_pos']].item()
        has_fermata = extra[indices_extra['has_fermata']].item() == 1
        # Beat position 1.0 marks a new measure (plus the very first step).
        if cur_time_pos == 1.0 or cur_measure_number == 0:
            for part_name, part in parts.items():
                part.append(Measure(number=cur_measure_number))
                if cur_measure_number == 0:
                    # First measure: clef, key signature, and tempo.
                    if part_name in ['soprano', 'alto']:
                        part[-1].append(clef.TrebleClef())
                    else:
                        part[-1].append(clef.BassClef())
                    key = int(
                        torch.argmax(
                            outputs['extra'][0, indices_extra['has_sharps_0']:
                                             indices_extra['has_sharps_11'] + 1],
                            dim=0).item())
                    if key >= 6:
                        key -= 12
                    part[-1].append(KeySignature(key))
                    part[-1].append(MetronomeMark(number=90))
            cur_measure_number += 1
        if last_time_signature is None or cur_time_signature != last_time_signature:
            for part in parts.values():
                part[-1].append(TimeSignature(cur_time_signature))
            last_time_signature = cur_time_signature
        for part_name, part in parts.items():
            # Most probable class: continuation, rest, or a concrete pitch.
            idx = torch.argmax(outputs[part_name][step]).item()
            if idx == indices_parts['is_continued']:
                try:
                    # Repeat the previous event and chain ties onto it.
                    last_element = part[-1].flat.notesAndRests[-1]
                    cur_element = deepcopy(last_element)
                    if last_element.tie is not None and last_element.tie.type == 'stop':
                        last_element.tie = Tie('continue')
                    else:
                        last_element.tie = Tie('start')
                    cur_element.tie = Tie('stop')
                except IndexError:
                    logging.debug(
                        'Warning: "is_continued" on first beat. Replaced by rest.'
                    )
                    cur_element = Rest(quarterLength=config.time_grid)
                part[-1].append(cur_element)
            elif idx == indices_parts['is_rest']:
                part[-1].append(Rest(quarterLength=config.time_grid))
            else:
                pitch = Pitch()
                part[-1].append(Note(pitch, quarterLength=config.time_grid))
                # Set pitch value AFTER appending to measure in order to avoid unnecessary accidentals
                pitch.midi = idx + min_pitches[part_name] - len(indices_parts)
        if has_fermata:
            for part in parts.values():
                fermata = Fermata()
                fermata.type = 'upright'
                part[-1][-1].expressions.append(fermata)
    score = Score()
    if metadata is not None:
        score.append(Metadata())
        score.metadata.title = f"{metadata.title} ({metadata.number})"
        score.metadata.composer = f"Melody: {metadata.composer}\nArrangement: BachNet ({datetime.now().year})"
    for part in parts.values():
        part[-1].rightBarline = 'light-heavy'
    score.append(parts['soprano'])
    if 'alto' in parts:
        score.append(parts['alto'])
    score.append(parts['tenor'])
    score.append(parts['bass'])
    # Merge tied notes into single sustained notes.
    score.stripTies(inPlace=True, retainContainers=True)
    return score
def get_midi_stream_1():
    """Assemble a fixed demo Stream from seven four-event phrases.

    Each phrase is deep-copied on every use so repeated phrases stay
    independent music21 objects.
    """
    phrase1 = [Rest(), Rest(), Note('E-'), Rest()]
    phrase2 = [Rest(), Rest(), Note('A-'), Rest()]
    phrase3 = [Note('B-'), Rest(), Note('E-'), Rest()]
    phrase4 = [Note('B-'), Rest(), Note('A-'), Rest()]
    phrase5 = [Note('B-'), Rest(), Rest(), Rest()]
    phrase6 = [Note('G'), Rest(), Note('C'), Rest()]
    phrase7 = [Note('D'), Rest(), Note('E-'), Rest()]
    order = (phrase1, phrase2, phrase3, phrase2, phrase3, phrase2, phrase4,
             phrase5, phrase1, phrase6, phrase7, phrase6, phrase7)
    stream_instance = Stream()
    for phrase in order:
        stream_instance.append(deepcopy(phrase))
    return stream_instance
def populate_measures(song):
    """Regroup a song's flat Note/Rest sequence into Measure objects.

    Finds the part that actually contains notes, slices it into bars of the
    song's time-signature length (padding a short final bar with a rest),
    and rebuilds ``song.elements`` from the resulting measures.
    """
    # Initial values
    time_signature_length = util.time_signature(song)
    seen_length = 0
    bars = []
    current_bar = []

    # Helper functions
    # Appends an item to a bar
    def append_bar(current_bar, seen_length, item):
        current_bar += [item]
        seen_length += item.duration.quarterLength
        return (current_bar, seen_length)

    # Checks to see if the item finishes the bar
    def check_bar(bars, current_bar, seen_length):
        if seen_length >= time_signature_length:
            bars += [current_bar]
            current_bar = []
            seen_length = 0
        return (bars, current_bar, seen_length)

    # Finds the notes
    def find_bars(part):
        # Depth-first search for the first container that holds a Note.
        if isinstance(part, Iterable):
            for item in part:
                if type(item) is Note:
                    return part
                else:
                    res = find_bars(item)
                    if res is not None:
                        return res
        return None

    # Find the part which has the notes
    part = find_bars(song)
    # Search through each item in the bar
    for item in part:
        if type(item) is Note:
            # Note
            (current_bar, seen_length) = append_bar(current_bar, seen_length,
                                                    item)
            (bars, current_bar, seen_length) = check_bar(bars, current_bar,
                                                         seen_length)
        elif type(item) is Rest:
            # Rest
            (current_bar, seen_length) = append_bar(current_bar, seen_length,
                                                    item)
            (bars, current_bar, seen_length) = check_bar(bars, current_bar,
                                                         seen_length)
    # LilyPond might forget a rest at the end
    if time_signature_length - seen_length > 0:
        (current_bar, seen_length) = append_bar(
            current_bar, seen_length,
            Rest(quarterLength=time_signature_length - seen_length))
        (bars, current_bar, seen_length) = check_bar(bars, current_bar,
                                                     seen_length)
    # Populate song.elements, which is where the items should have been
    song.elements = []
    for bar in bars:
        measure = Measure()
        for n in bar:
            measure.append(copy.deepcopy(n))
        song.append(measure)
    return song
def append_rest(part, duration):
    """Append a rest of ``duration`` quarter notes to ``part``."""
    rest = Rest(quarterLength=duration)
    part.append(rest)
def parse_elem(elem, i):
    """Map an element to its grid token: a fresh interval-length Rest for a
    Rest, the element itself on its first slice (i == 0), otherwise a Hold."""
    if type(elem) is Rest:
        return Rest(quarterLength=interval)
    if i == 0:
        return elem
    return Hold()