def testVerify():
    """Parse two tiny-notation voices, wrap them in parts, and run the
    verbose counterpoint verifier against them."""
    upper_voice = Stream(converter.parse("tinyNotation: d1 a g f e d f e d'"))
    lower_voice = Stream(converter.parse("tinyNotation: d'1 c' b- a g f a c'# d'"))
    combined = Stream()
    combined.append(stream.Part(upper_voice))
    combined.append(stream.Part(lower_voice))
    # combined.show()
    verifyCounterpointVerbose(upper_voice, lower_voice)
def getChordSequence(self):
    """Collect a flat Stream holding a copy of every ChordSymbol found in
    every part, each inserted at its offset within its own part."""
    sequence = Stream()
    for part in self.getParts():  # type: Part
        for symbol in part.recurse().getElementsByClass(ChordSymbol):  # type: ChordSymbol
            sequence.insert(symbol.getOffsetInHierarchy(part), copy(symbol))
    return sequence
def decode_score(encoding, num_measures, ts, image=False):
    """Decode a piano-roll style encoding into a music21 score.

    :param encoding: 2-D array-like; one row per time step, one column per
        pitch class offset from MIN_PITCH. Non-zero entries are sounding
        pitches. (Assumed shape — TODO confirm against the encoder.)
    :param num_measures: number of measures the encoding spans.
    :param ts: time-signature string, e.g. ``'4/4'``.
    :param image: kept for interface compatibility; both code paths now
        decode a row identically (see bug-fix note below).
    :return: a Stream of Measures containing Chords and Rests.
    """
    score = Stream()
    score.timeSignature = TimeSignature(ts)
    steps_per_measure = len(encoding) / num_measures
    for measure_ind in range(num_measures):
        start_beat = int(measure_ind * steps_per_measure)
        end_beat = int((measure_ind + 1) * steps_per_measure)
        measure = Measure()
        for beat_ind in range(start_beat, end_beat):
            # BUG FIX: np.nonzero returns a *tuple* of index arrays. The
            # original non-image branch omitted the [0], so len(played_pitches)
            # was always 1 (the tuple's length): rests were never appended and
            # `pitch` iterated over whole arrays instead of scalar indices.
            played_pitches = np.nonzero(encoding[beat_ind])[0]
            if len(played_pitches) == 0:
                measure.append(Rest(quarterLength=4.0 / GRANULARITY))
            else:
                played_notes = [
                    midi_to_note(int(pitch + MIN_PITCH))
                    for pitch in played_pitches
                ]
                chord = Chord(played_notes, quarterLength=4.0 / GRANULARITY)
                measure.append(chord)
        score.append(measure)
    return score
def __init__(self, path):
    """Run the full audio-to-notes analysis pipeline for the file at *path*.

    Loads the audio, computes a thresholded CQT, detects onsets, estimates
    the tempo, then derives per-onset pitch/note information. The steps are
    order-dependent: each attribute feeds the next computation.
    """
    self.path = path
    # Spectral-analysis parameters.
    self.nfft = 2048
    self.overlap = 0.5
    # Hop is the non-overlapping portion of each frame.
    self.hop_length = int(self.nfft * (1 - self.overlap))
    self.n_bins = 72          # number of CQT bins
    self.mag_exp = 4          # magnitude exponent — presumably sharpens peaks; confirm
    self.pre_post_max = 6
    self.threshold = -71      # NOTE(review): looks like a dB floor for the CQT — confirm
    # Pipeline: load -> CQT -> threshold -> onsets -> tempo -> per-onset pitch.
    self.audio_sample, self.sr = self.load()
    self.cqt = self.compute_cqt()
    self.thresh_cqt = self.compute_thresholded_cqt(self.cqt)
    self.onsets = self.compute_onset(self.thresh_cqt)
    self.tempo, self.beats, self.mm = self.estimate_tempo()
    # One entry per inter-onset interval (len(onsets[1]) - 1 of them).
    self.music_info = np.array([
        self.estimate_pitch_and_notes(i)
        for i in range(len(self.onsets[1]) - 1)
    ])
    # Column 2 of music_info holds the note objects — TODO confirm column layout.
    self.note_info = list(self.music_info[:, 2])
    self.stream = Stream()
def value_list_to_midi(value_list):
    """Build a Stream whose parts are generated from the settings carried
    in *value_list* (length and number of parts)."""
    length = value_list.get(LENGTH)
    part_count = value_list.get(NUM_OF_PARTS)
    generated_parts = [get_part(length, value_list) for _ in range(part_count)]
    return Stream(generated_parts)
def main():
    """Generate a random practice song, preview it as text, audify it, and
    write the result to a WAV file.

    :return: process exit code (0 on success).
    """
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    song = Stream()

    # Pick a random scale type rooted on a random natural note.
    roots = 'ABCDEFG'
    scales = [scale.MajorScale, scale.MinorScale,
              scale.WholeToneScale, scale.ChromaticScale]
    print('Choosing a random scale from Major, Minor, Whole Tone, Chromatic.')
    rscale = random.choice(scales)(Pitch(random.choice(roots)))
    print('Using: %s' % rscale.name)

    print('Generating a score...')
    random_note_count = 50
    random_note_speeds = [0.5, 1]
    # BUG FIX: message previously hard-coded "100" while only
    # random_note_count (50) notes are actually generated.
    print('%s Random 1/8th and 1/4th notes in rapid succession...'
          % random_note_count)
    for _ in range(random_note_count):
        note = Note(random.choice(rscale.pitches))
        note.duration.quarterLength = random.choice(random_note_speeds)
        song.append(note)

    scale_practice_count = 4
    print('Do the scale up and down a few times... maybe %s'
          % scale_practice_count)
    # Up-then-down scale: ascend, then descend skipping the repeated
    # top and bottom notes.
    rev = rscale.pitches[:]
    rev.reverse()
    updown_scale = rscale.pitches[:]
    updown_scale.extend(rev[1:-1])
    print('updown scale: %s' % updown_scale)
    for count, pitch in enumerate(cycle(updown_scale)):
        print(' note %s, %s' % (count, pitch))
        song.append(Note(pitch))
        if count >= scale_practice_count * len(updown_scale):
            break

    print('Composition finished:')
    song.show('txt')

    if args.best:
        # BUG FIX: original print had a bare "{}" placeholder with no
        # .format() call, so the filename never appeared in the message.
        print('Audifying the song to file "{}"...'.format(args.filename))
        wave = audify_to_file(song, args.tempo, args.filename, verbose=True)
    else:
        wave = audify_basic(song, args.tempo, verbose=True)

    print('Writing Song to file "{}"...'.format(args.filename))
    with wav_file_context(args.filename) as fout:
        fout.write_frames(wave.frames)

    return 0
def test():
    """Build two two-note staves, combine them into a voice section, and
    render the resulting LilyPond file (PNG + MIDI playback)."""
    from music21.stream import Stream

    e_half = music21.note.Note()
    e_half.name = "E"
    e_half.duration.type = "half"

    d_half = music21.note.Note()
    d_half.name = "D"
    d_half.duration.type = "half"

    cis5_half = music21.note.Note()
    cis5_half.name = "C#"
    cis5_half.octave = 5
    cis5_half.duration.type = "half"

    d5_half = d_half.clone()
    d5_half.octave = 5

    lower_stream = Stream()
    upper_stream = Stream()
    lower_stream.append([e_half, d_half])
    upper_stream.append([cis5_half, d5_half])

    staff1 = LilyStaff()
    staff1.appendElement(lower_stream)
    staff2 = LilyStaff()
    staff2.appendElement(upper_stream)

    vs1 = LilyVoiceSection(staff2, staff1)
    vs1.prependTimeSignature("2/2")
    isStaff2 = vs1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in Voice Section should be staff2"

    s1 = LilyScore(vs1, LilyLayout(), LilyMidi())
    lf1 = LilyFile(s1)
    isStaff2 = lf1.firstContents("staff")
    assert isStaff2 is staff2, "first staff in File should be staff2"

    print(lf1)
    if lf1:
        lf1.showPNGandPlayMIDI()
        print(lf1.midiFilename)
def create_note_stream(self, notes_sequence):
    """
    Creates a music21.stream.Stream object to which notes are added.

    :param notes_sequence: sequence of notes to add in a stream.
    :return: a Stream of Note objects.
    """
    result = Stream()
    for parsed_note in self.get_notes_from_sequence(notes_sequence):
        result.append(parsed_note)
    return result
def compose_repository_song(repo_data):
    """Compose a song Stream from git-repository data, mapping each commit's
    phrase to note clips in a randomly rooted major scale."""
    vprint('Composing a song using the data from your Git Repository...')
    tune = Stream()
    scale = MajorScale('%s4' % random.choice('ABCDEFG'))
    print('Using Scale: %s' % scale)
    clips, phrases = phrasify(repo_data, scale)
    # Flatten commit -> clip -> note into the stream in order.
    all_notes = (note
                 for sha in repo_data
                 for clip in phrases[hash(sha)]
                 for note in clips[clip])
    for note in all_notes:
        tune.append(note)
    return tune
def _realizeM21Sequence(self, chords):
    """Lay the template chords out sequentially in a Stream: every pitch of
    a chord starts at the chord's offset and lasts the chord's duration."""
    realized = Stream()
    cursor = 0
    for template_chord in chords:
        span = template_chord.getDuration()
        for pitch in template_chord.getPitchSet():
            voiced = Note(pitch)
            voiced.duration.quarterLength = span
            realized.insert(cursor, voiced)
        cursor += span
    return realized
def _realizeM21Sequence(self, notes):
    """Lay the backbone notes out sequentially in a Stream, one quarter-note
    duration per note."""
    realized = Stream()
    cursor = 0
    for backbone_note in notes:
        voiced = m21Note.Note(backbone_note.getPitch())
        voiced.duration.quarterLength = 1
        realized.insert(cursor, voiced)
        cursor += 1
    return realized
def show_sequence(chord_sequence):
    """Print the chord names of *chord_sequence*, then display the score and
    play it as MIDI via music21."""
    out = Stream()
    print([chord.standard_name for chord in chord_sequence])
    # Duplicate the first chord to solve a music21 problem.
    padded_sequence = [chord_sequence[0], *chord_sequence]
    for extended_chord in padded_sequence:
        out.append(Chord(notes=extended_chord.components, type='whole'))
    out.show()
    out.show('midi')
def get_midi_stream_1():
    """Assemble a fixed melody from seven four-element motifs appended
    (deep-copied) in a hard-coded order."""
    motifs = {
        1: [Rest(), Rest(), Note('E-'), Rest()],
        2: [Rest(), Rest(), Note('A-'), Rest()],
        3: [Note('B-'), Rest(), Note('E-'), Rest()],
        4: [Note('B-'), Rest(), Note('A-'), Rest()],
        5: [Note('B-'), Rest(), Rest(), Rest()],
        6: [Note('G'), Rest(), Note('C'), Rest()],
        7: [Note('D'), Rest(), Note('E-'), Rest()],
    }
    playback_order = [1, 2, 3, 2, 3, 2, 4, 5, 1, 6, 7, 6, 7]
    stream_instance = Stream()
    for motif_id in playback_order:
        # Deep-copy so each repetition gets independent note objects.
        stream_instance.append(deepcopy(motifs[motif_id]))
    return stream_instance
def melody_and_chords_streams(self) -> Tuple[Stream, Stream]:
    """
    The chord stream contains realized chords and chord symbols and rests for NC

    Splits the lead sheet into a melody stream (everything but ChordSymbols)
    and a chords stream (ChordSymbols stretched over their beats, with rests
    or the previous chord carried into uncovered spans).

    :raises WrongBarDurationError: if any measure's duration differs from
        the first measure's.
    :return: (melody, chords) as two parallel Streams of Measures.
    """
    melody = Stream()
    # measure index -> list of ChordSymbols found in that measure
    chord_dict = defaultdict(list)
    measure_duration = None
    for measure_idx, measure in enumerate(
            self.ls.recurse().getElementsByClass(Measure)):
        # All measures must share the duration of the first one.
        if measure_duration is None:
            measure_duration = measure.duration.quarterLength
        else:
            if measure_duration != measure.duration.quarterLength:
                raise WrongBarDurationError()
        mel_measure = measure.cloneEmpty()
        if measure_idx == 0:
            # Pad a pickup (anacrusis) measure with a leading rest so the
            # melody measure fills its notated bar duration.
            anacrusis = measure.barDuration.quarterLength - measure.duration.quarterLength
            if anacrusis:
                mel_measure.append(Rest(duration=Duration(anacrusis)))
        for elt in measure:
            # ChordSymbols are set aside for the chord stream; everything
            # else is copied into the melody.
            if elt.isClassOrSubclass((ChordSymbol, )):
                chord_dict[measure_idx].append(elt)
            else:
                mel_measure.append(deepcopy(elt))
        melody.append(mel_measure)
    # The chord stream starts as a structural copy of the melody...
    chords = deepcopy(melody)
    # ...with its first clef swapped for a bass clef.
    clef = None
    for _clef in chords.recurse().getElementsByClass(Clef):
        clef = _clef
        break
    if clef:
        clef.activeSite.insert(0, BassClef())
        clef.activeSite.remove(clef)
    last_chord_symbol = None
    for measure_idx, measure in enumerate(
            chords.getElementsByClass(Measure)):
        original_measure_duration = measure.duration.quarterLength
        # Strip melody content; only chord material remains.
        measure.removeByClass([Rest, Note])
        if chord_dict[measure_idx]:
            # Beat positions of each chord plus a sentinel one past the bar
            # end; consecutive differences give each chord's duration.
            beats = [floor(ch.beat) for ch in chord_dict[measure_idx]] \
                + [1 + original_measure_duration]
            durations = [(beats[i + 1] - beats[i])
                         for i in range(len(beats) - 1)]
            if beats[0] > 1:
                # Gap before the first chord: fill with a rest, or carry the
                # previous measure's last chord symbol over.
                if last_chord_symbol is None:
                    measure.insert(0, Rest(duration=Duration(beats[0] - 1)))
                else:
                    _cs = deepcopy(last_chord_symbol)
                    _cs.duration = Duration(beats[0] - 1)
                    measure.insert(0, _cs)
            for chord_symbol_idx, chord_symbol in enumerate(
                    chord_dict[measure_idx]):
                chord_symbol.duration = Duration(
                    durations[chord_symbol_idx])
                # Offsets are zero-based while beats are one-based.
                measure.insert(beats[chord_symbol_idx] - 1, chord_symbol)
                last_chord_symbol = chord_symbol
        else:
            # Measure with no chord symbols ("NC"): a full-measure rest, or
            # the last seen chord symbol held for the whole measure.
            if last_chord_symbol is None:
                measure.insert(
                    0, Rest(duration=Duration(original_measure_duration)))
            else:
                _cs = deepcopy(last_chord_symbol)
                _cs.duration = Duration(original_measure_duration)
                measure.insert(0, _cs)
    return melody, chords
def testGen():
    """Parse a tiny-notation melody, run the melody generator on it, and
    print each generated element."""
    source = Stream(converter.parse("tinyNotation: d1 a g f e d f e d'"))
    generated = generateMelody(source)
    for slist in generated:
        print(slist)