else: # edge case if ALL voices rest satb[v].append(0) # was ordered btas satb = satb[::-1] for i in range(len(satb)): decoded_satb_midi_s = [ norm_to_midi_lu[j] if j in norm_to_midi_lu else 0 for j in satb[i] ] decoded_satb_note_s = [ norm_to_note_lu[j] if j in norm_to_note_lu else 0 for j in satb[i] ] decoded_satb_midi[i].extend(decoded_satb_midi_s) decoded_satb_notes[i].extend(decoded_satb_note_s) quantized_to_pretty_midi( [decoded_satb_midi], 0.25, save_dir="samples", name_tag="multichannel_sample_{}_seed_{}_temp_{}".format( offset, args.seed, args.temp) + "_{}.mid", default_quarter_length=220, voice_params="woodwinds") print("saved sample {}".format(offset)) from IPython import embed embed() raise ValueError()
# Record the event: a fresh all-rest row for this timestep, then the pitch
# for the current voice. (`to`, `v` come from the enclosing loop.)
events[to[1]] = [0, 0, 0, 0]
events[to[1]][v] = to[0]

# Gather per-voice streams; timesteps with no recorded event are rests (0).
satb = [[], [], [], []]
for v in range(x_rec_i.shape[-1]):
    for ts in range(measure_len):
        if ts in events:
            satb[v].append(events[ts][v])
        else:
            # edge case if ALL voices rest
            satb[v].append(0)

# was ordered btas
satb = satb[::-1]
for i in range(len(satb)):
    satb_midi[i].extend(satb[i])
    satb_notes[i].extend(midi_to_notes([satb[i]])[0])

# Fixed: identity comparison with None (`is None`), not equality (`== None`).
if args.chords is None:
    name_tag = "pianoroll_multichannel_sample_{}_seed_{}_temp_{}".format(
        offset, args.seed, args.temp) + "_{}.mid"
else:
    name_tag = "pianoroll_multichannel_sample_{}_seed_{}_temp_{}".format(
        args.chords, args.seed, args.temp) + "_{}.mid"

# Persist the raw piano roll alongside the decoded midi/note streams.
np.savez("samples/sample_{}_seed_{}.npz".format(offset, args.seed),
         pr=x_rec_i,
         midi=satb_midi,
         notes=satb_notes,
         labelnames=these_labelnames)
quantized_to_pretty_midi([satb_midi],
                         0.25,
                         save_dir="samples",
                         name_tag=name_tag,
                         default_quarter_length=220,
                         voice_params="woodwinds")
print("saved sample {}".format(offset))
# NOTE(review): debug leftover — drops into an IPython shell then aborts.
from IPython import embed
embed()
raise ValueError()
# Decode each measure of the reconstruction into four SATB voice streams.
prev = None
decoded_satb_midi = [[], [], [], []]
decoded_satb_notes = [[], [], [], []]
for n in range(len(x_rec_i)):
    # 48 x 48 measure in
    satb = decode_measure(x_rec_i[n][..., 0], prev)
    # carry each voice's last value into the next measure for continuity
    prev = [satb[i][-1] for i in range(len(satb))]
    for i in range(len(satb)):
        # Unknown normalized values map to 0 (rest); .get replaces the
        # original `lu[j] if j in lu else 0` membership-then-index pattern.
        decoded_satb_midi_s = [norm_to_midi_lu.get(j, 0) for j in satb[i]]
        decoded_satb_note_s = [norm_to_note_lu.get(j, 0) for j in satb[i]]
        decoded_satb_midi[i].extend(decoded_satb_midi_s)
        decoded_satb_notes[i].extend(decoded_satb_note_s)

# Write the decoded voices out as a MIDI file under samples/
quantized_to_pretty_midi([decoded_satb_midi],
                         0.25,
                         save_dir="samples",
                         name_tag="sample_{}".format(offset) + "_{}.mid",
                         default_quarter_length=220,
                         voice_params="piano")
print("saved sample {}".format(offset))
# NOTE(review): debug leftover — drops into an IPython shell then aborts.
from IPython import embed
embed()
raise ValueError()
# Starting indices of runs of num_to_plot consecutive entries in non_rest
# (max diff of 1 over the window means the indices are contiguous).
start_chunks = [
    i for i in range(len(non_rest) - num_to_plot)
    if np.max(np.diff(non_rest[i:i + num_to_plot])) == 1
]
# Pick one chunk at random. Fixed: shuffle was accidentally duplicated
# (two identical calls in a row); one shuffle gives the same distribution.
random_state.shuffle(start_chunks)
ii = non_rest[start_chunks[0]]
skeleton = np.array([sk for sk in ce[ii:(ii + num_to_plot)]])

# Broadcast the skeleton over the time axis, then add the decoded offsets.
joined = np.zeros((len(x_rec), x_rec.shape[2], skeleton.shape[-1]))
joined += skeleton[:, None, :]
idxs = np.argmax(x_rec, axis=1)
# class index -> semitone offset in [-23, 23]
lu = dict(enumerate(np.arange(-23, 24)))
res = np.zeros_like(idxs)
for kk in sorted(lu.keys()):
    res[idxs == kk] = lu[kk]
# use only top voice
res[:, :, 1:] *= 0
joined[:, :, :res.shape[-1]] += res
joined = joined.reshape(-1, joined.shape[-1])
joined_voices = [joined[:, c] for c in range(joined.shape[-1])]
quantized_to_pretty_midi([joined_voices],
                         0.25,
                         default_quarter_length=440,
                         voice_params="piano")
# NOTE(review): debug leftover — drops into an IPython shell then aborts.
from IPython import embed
embed()
raise ValueError()