def main():
    # Round-trip test: encode a sample note sequence into events and back,
    # then verify that every note field survives unchanged.
    sample = [sample_note_sequence2]
    v_sample = vectorize(sample_note_sequence2)
    encoder = SequenceEncoder(n_time_shift_events=125, n_velocity_events=32)
    assert encoder.n_events == 413
    encoded = encoder.encode_sequences([v_sample])
    decoded = encoder.decode_sequences(encoded)
    original_seq = sorted(sample[0], key=lambda x: x.start)
    decoded_seq = sorted(decoded[0], key=lambda x: x.start)
    ok = True
    for o, d in zip(original_seq, decoded_seq):
        try:
            assert o.start == d.start
            assert o.end == d.end
            assert o.pitch == d.pitch
            assert o.velocity == d.velocity
        except AssertionError:
            ok = False
            print("Encoding/Decoding error detected!")
            print("Original note:")
            print(o)
            print("Decoded encoded note:")
            print(d)
            print('************')
    if ok:
        print("Successful encoding and decoding of sequence!")
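# --- Hedged usage sketch ------------------------------------------------
# A minimal, hand-built round trip, assuming Note carries (pitch, velocity,
# start, end) and that vectorize/SequenceEncoder behave as used in main()
# above. Illustrative only; quantization may shift values slightly.
def _round_trip_sketch():
    toy_notes = [
        Note(pitch=60, velocity=80, start=0.0, end=0.5),  # middle C
        Note(pitch=64, velocity=80, start=0.5, end=1.0),
    ]
    enc = SequenceEncoder(n_time_shift_events=125, n_velocity_events=32)
    decoded = enc.decode_sequences(enc.encode_sequences([vectorize(toy_notes)]))
    print(decoded[0])  # expect notes close to toy_notes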
def run(self):
    midis = self.parse_files(chdir=True)  # midis -> list of PrettyMIDI objects
    total_time = sum([m.get_end_time() for m in midis])
    print("\n{} midis read, or {:.1f} minutes of music"\
            .format(len(midis), total_time/60))
    print("Done reading MIDIs")
    note_sequences = self.get_note_sequences(midis)
    del midis
    # vectorize note sequences
    note_sequences = [vectorize(ns) for ns in note_sequences]
    print("{} note sequences extracted\n".format(len(note_sequences)))
    self.note_sequences = self.partition(note_sequences)
    for mode, sequences in self.note_sequences.items():
        print(f"Processing {mode} data...")
        print(f"{len(sequences):,} note sequences")
        if mode == "training":
            # time-stretch augmentation is applied to training data only
            sequences = self.stretch_note_sequences(sequences)
            print(f"{len(sequences):,} stretched note sequences")
        samples = self.split_sequences(sequences)
        self.quantize(samples)
        print(f"{len(samples):,} quantized, split samples")
        self.split_samples[mode] = samples
        self.encoded_sequences[mode] = self.encoder.encode_sequences(samples)
        print(f"Encoded {mode} sequences!\n")
def run(self): """ Main pipeline call...parse midis, split into test and validation sets, augment, quantize, sample, and encode as event sequences. """ midis = self.parse_files(chdir=True) total_time = sum([m.get_end_time() for m in midis]) print("\n{} midis read, or {:.1f} minutes of music"\ .format(len(midis), total_time/60)) note_sequences = self.get_note_sequences(midis) del midis #vectorize note sequences note_sequences = [vectorize(ns) for ns in note_sequences] print("{} note sequences extracted\n".format(len(note_sequences))) self.note_sequences = self.partition(note_sequences) for mode, sequences in self.note_sequences.items(): print(f"Processing {mode} data...") print(f"{len(sequences):,} note sequences") if mode == "training": sequences = self.stretch_note_sequences(sequences) print(f"{len(sequences):,} stretched note sequences") samples = self.split_sequences(sequences) self.quantize(samples) print(f"{len(samples):,} quantized, split samples") if mode == "training": samples = self.transpose_samples(samples) print(f"{len(samples):,} transposed samples") self.split_samples[mode] = samples self.encoded_sequences[mode] = self.encoder.encode_sequences(samples) print(f"Encoded {mode} sequences!\n")
def read(midis, n_velocity_events=32, n_time_shift_events=125):
    """Convert parsed PrettyMIDI objects into quantized, vectorized note
    sequences. (Pairing of raw note-on/note-off messages lives in the
    live-input read() below.)"""
    note_sequences = []
    for m in midis:
        if m.instruments[0].program == 0:
            piano_data = m.instruments[0]
        else:
            raise PreprocessingError("Non-piano midi detected")
        # fold sustain-pedal events into note durations
        note_sequence = apply_sustain(piano_data)
        note_sequence = sorted(note_sequence, key=lambda x: (x.start, x.pitch))
        note_sequence = quantize(note_sequence, n_velocity_events,
                n_time_shift_events)
        note_sequences.append(vectorize(note_sequence))
    return note_sequences
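# --- Hedged usage sketch ------------------------------------------------
# One plausible way to feed read() above. File paths are placeholders;
# pretty_midi.PrettyMIDI accepts a path (or file object) directly.
import pretty_midi

def _load_midis_sketch(paths):
    """Parse MIDI files into PrettyMIDI objects, skipping any that fail."""
    midis = []
    for path in paths:
        try:
            midis.append(pretty_midi.PrettyMIDI(path))
        except Exception as e:
            print(f"Could not parse {path}: {e}")
    return midis

# vectorized = read(_load_midis_sketch(["data/example.mid"]))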
def read(n_velocity_events=32, n_time_shift_events=125):
    midiin = rtmidi.MidiIn()
    available_ports = midiin.get_ports()
    if available_ports:
        print("Connecting to midi-in port!")
        midiin.open_port(0)
    else:
        raise MidiInputError("No MIDI input ports available...")
    msg_sequence = []
    while True:
        proceed = input(
            "Play something on the keyboard and enter 'c' to continue or 'q' to quit.\n"
        )
        if proceed == "c":
            midiin.close_port()
            break
        elif proceed == "q":
            return
        else:
            print("Command not recognized")
            continue
    # collect the messages queued up while the user was playing
    while True:
        msg = midiin.get_message()
        if msg is None:
            break
        msg_sequence.append(msg)
    if len(msg_sequence) == 0:
        raise MidiInputError("No messages detected")
    note_sequence = []
    # notes that have started but not yet ended, keyed by pitch
    live_notes = {}
    time = 0
    for i, (info, time_delta) in enumerate(msg_sequence):
        if i > 0:
            # accumulate relative deltas; time tracking starts from zero
            time += time_delta
        pitch = info[1]
        velocity = info[2]
        if velocity > 0:
            # note-on: remember (velocity, start_time) until the matching note-off
            live_notes[pitch] = (velocity, time)
        else:
            note_info = live_notes.get(pitch)
            if note_info is None:
                raise MidiInputError(
                    "Note-off received for a pitch with no matching note-on")
            note_sequence.append(
                Note(pitch=pitch, velocity=note_info[0],
                     start=note_info[1], end=time))
            live_notes.pop(pitch)
    note_sequence = quantize(note_sequence, n_velocity_events,
            n_time_shift_events)
    note_sequence = vectorize(note_sequence)
    return note_sequence
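# --- Hedged sketch -------------------------------------------------------
# python-rtmidi's get_message() returns ([status, pitch, velocity], delta_t).
# The fabricated messages below exercise the same note-on/note-off pairing
# logic as read() without hardware attached; the values are illustrative.
def _pairing_sketch():
    fake_msgs = [
        ([0x90, 60, 100], 0.0),  # note-on, middle C, velocity 100
        ([0x90, 64, 90], 0.25),  # note-on, E4
        ([0x90, 60, 0], 0.25),   # velocity-0 note-on acts as note-off
        ([0x90, 64, 0], 0.5),
    ]
    time, live, notes = 0, {}, []
    for i, (info, dt) in enumerate(fake_msgs):
        if i > 0:
            time += dt
        pitch, velocity = info[1], info[2]
        if velocity > 0:
            live[pitch] = (velocity, time)
        else:
            vel, start = live.pop(pitch)
            notes.append((pitch, vel, start, time))
    print(notes)  # [(60, 100, 0.0, 0.5), (64, 90, 0.25, 1.0)]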
def main():
    parser = argparse.ArgumentParser(
        "Script to generate MIDI tracks by sampling from a trained model.")
    parser.add_argument("--model", type=str,
            help="name of a checkpoint file under saved_models/ to load")
    parser.add_argument("--sample_length", type=int, default=512,
            help="number of events to generate")
    parser.add_argument("--temps", nargs="+", type=float, default=[1.0],
            help="space-separated list of temperatures to use when sampling")
    parser.add_argument("--n_trials", type=int, default=3,
            help="number of MIDI samples to generate per experiment")
    parser.add_argument("--live_input", action='store_true', default=False,
            help="if true, seed generation with the notes of twinkle.midi")
    parser.add_argument("--play_live", action='store_true', default=False,
            help="play sample(s) at end of script if true")
    parser.add_argument("--keep_ghosts", action='store_true', default=False)
    parser.add_argument("--stuck_note_duration", type=int, default=1)
    args = parser.parse_args()

    model = args.model
    # model metadata could also be looked up in saved_models/model.yaml;
    # here the checkpoint path is built directly from the --model argument
    model_path = 'saved_models/' + model
    try:
        state = torch.load(model_path)
    except RuntimeError:
        # fall back to CPU when the checkpoint was saved on GPU
        state = torch.load(model_path, map_location="cpu")

    n_velocity_events = 32
    n_time_shift_events = 125
    decoder = SequenceEncoder(n_time_shift_events, n_velocity_events,
            min_events=0)

    if args.live_input:
        # seed the model with the notes of a fixed MIDI file
        pretty_midis = []
        m = 'twinkle.midi'
        with open(m, "rb") as f:
            try:
                midi_str = six.BytesIO(f.read())
                pretty_midis.append(pretty_midi.PrettyMIDI(midi_str))
            except Exception:
                print("Could not parse {}".format(m))
        pipeline = PreprocessingPipeline(input_dir="data")
        note_sequence = pipeline.get_note_sequences(pretty_midis)
        note_sequence = [vectorize(ns) for ns in note_sequence]
        # take a short slice of the first encoded sequence as the primer
        prime_sequence = decoder.encode_sequences(note_sequence)[0][1:6]
    else:
        prime_sequence = []

    model = MusicTransformer(256 + 125 + 32, 1024, d_model=64, n_heads=8,
            d_feedforward=256, depth=4, positional_encoding=True,
            relative_pos=True)
    model.load_state_dict(state, strict=False)

    temps = args.temps
    trial_key = str(uuid.uuid4())[:6]
    n_trials = args.n_trials
    keep_ghosts = args.keep_ghosts
    stuck_note_duration = None if args.stuck_note_duration == 0 \
            else args.stuck_note_duration

    for temp in temps:
        print(f"sampling temp={temp}")
        note_sequence = []
        for i in range(n_trials):
            print("generating sequence")
            output_sequence = sample(model, prime_sequence=prime_sequence,
                    sample_length=args.sample_length, temperature=temp)
            note_sequence = decoder.decode_sequence(output_sequence,
                    verbose=True, stuck_note_duration=stuck_note_duration,
                    keep_ghosts=keep_ghosts)
            output_dir = f"output/midis/{trial_key}/"
            file_name = f"sample{i+1}_{temp}"
            write_midi(note_sequence, output_dir, file_name)
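# --- Hedged sketch -------------------------------------------------------
# sample() is defined elsewhere in this repo; the snippet below only
# illustrates the standard temperature trick it presumably relies on:
# logits are divided by the temperature before softmax, so temp < 1.0
# sharpens the distribution and temp > 1.0 flattens it.
import torch

def _sample_next_event_sketch(logits, temperature=1.0):
    """Draw one event index from temperature-scaled logits (illustrative)."""
    probs = torch.softmax(logits / temperature, dim=-1)
    return torch.multinomial(probs, num_samples=1).item()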
def main():
    parser = argparse.ArgumentParser(
        "Script to generate MIDI tracks by sampling from a trained model.")
    parser.add_argument("--model_key", type=str,
            help="key in saved_models/model.yaml; looks up model arguments "
                 "and the path to a saved checkpoint")
    parser.add_argument("--sample_length", type=int, default=512,
            help="number of events to generate")
    parser.add_argument("--temps", nargs="+", type=float, default=[1.0],
            help="space-separated list of temperatures to use when sampling")
    parser.add_argument("--n_trials", type=int, default=3,
            help="number of MIDI samples to generate per experiment")
    parser.add_argument("--primer", type=str, default=None,
            help="path to a MIDI file used to prime generation")
    parser.add_argument("--play_live", action='store_true', default=False,
            help="play sample(s) at end of script if true")
    parser.add_argument("--keep_ghosts", action='store_true', default=False)
    parser.add_argument("--stuck_note_duration", type=int, default=0)
    args = parser.parse_args()

    model_key = args.model_key
    try:
        model_dict = yaml.safe_load(open('saved_models/model.yaml'))[model_key]
    except Exception:
        raise GeneratorError(
            f"could not find yaml information for key {model_key}")
    model_path = model_dict["path"]
    model_args = model_dict["args"]
    try:
        state = torch.load(model_path)
    except RuntimeError:
        # fall back to CPU when the checkpoint was saved on GPU
        state = torch.load(model_path, map_location="cpu")

    n_velocity_events = 32
    n_time_shift_events = 125
    decoder = SequenceEncoder(n_time_shift_events, n_velocity_events,
            min_events=0)

    if args.primer:
        # read the MIDI primer and encode it as an event sequence
        midi_str = six.BytesIO(open(args.primer, 'rb').read())
        p = pretty_midi.PrettyMIDI(midi_str)
        piano_data = p.instruments[0]
        notes = apply_sustain(piano_data)
        note_sequence = sorted(notes, key=lambda x: (x.start, x.pitch))
        ns = vectorize(note_sequence)
        prime_sequence = decoder.encode_sequences([ns])[0]
    else:
        prime_sequence = []

    model = MusicTransformer(**model_args)
    model.load_state_dict(state, strict=False)

    temps = args.temps
    trial_key = str(uuid.uuid4())[:6]
    n_trials = args.n_trials
    keep_ghosts = args.keep_ghosts
    stuck_note_duration = None if args.stuck_note_duration == 0 \
            else args.stuck_note_duration

    for temp in temps:
        print(f"sampling temp={temp}")
        note_sequence = []
        for i in range(n_trials):
            print("generating sequence")
            output_sequence = sample(model, prime_sequence=prime_sequence,
                    sample_length=args.sample_length, temperature=temp)
            note_sequence = decoder.decode_sequence(output_sequence,
                    verbose=True, stuck_note_duration=stuck_note_duration,
                    keep_ghosts=keep_ghosts)
            output_dir = f"output/{model_key}/{trial_key}/"
            file_name = f"sample{i+1}_{temp}"
            write_midi(note_sequence, output_dir, file_name)
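# --- Hedged usage sketch ------------------------------------------------
# Assuming the module ends with a standard `if __name__ == "__main__":
# main()` guard, a typical invocation might look like (file and key names
# are placeholders):
#   python generate.py --model_key my_transformer --sample_length 1024 \
#       --temps 0.8 1.0 1.2 --n_trials 3 --primer primers/example.mid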