コード例 #1
0
def _reconstruct():
    """Pick a random .npz sample, write it to MIDI, then write the
    autoencoder's reconstruction(s) next to it for comparison."""
    log.info("Choosing a random sample and autoencoding it...")
    samples = glob(cfg.Paths.samples + "/*sample*.npz")
    sample = np.load(np.random.choice(samples))['sample']

    # Write the original sample so it can be compared against the output.
    tracks = []
    for track in range(sample.shape[0]):
        t = pproll.Track(pianoroll=sample[track, :, :], program=0)
        tracks.append(t.binarize())

    song = pproll.Multitrack(tracks=tracks, resolution=midi_cfg.resolution)
    song = song.set_nonzeros(1)
    pproll.write(multitrack=song,
                 path=os.path.join(cfg.Paths.generated, "original.mid"))

    # Encode then decode the sample and write each reconstruction.
    model_input = dataset.preprocess_single(sample)
    e = best_encoder.predict(model_input)
    d = best_decoder.predict(e)
    d = d.reshape((1, 1, midi_cfg.phrase_size, 130))
    reconstructed = dataset.postprocess(d)
    for idx in range(reconstructed.shape[0]):
        # Bug fix: start a fresh track list for every sample.  Previously
        # `tracks` was created once before the loop, so each later file
        # also contained all tracks of the earlier samples.
        tracks = []
        for track in range(reconstructed.shape[1]):
            t = pproll.Track(pianoroll=reconstructed[idx, track, :, :],
                             program=0)
            tracks.append(t.binarize())

        song = pproll.Multitrack(tracks=tracks, resolution=midi_cfg.resolution)
        song = song.set_nonzeros(1)
        pproll.write(multitrack=song,
                     path=os.path.join(cfg.Paths.generated,
                                       f"reconstructed{idx}.mid"))
コード例 #2
0
ファイル: lpd_to_mid.py プロジェクト: pybnen/smg
def main():
    """Convert every .npz piano-roll under --input_dir to a MIDI file,
    mirroring the input directory structure under --output_dir."""
    parser = argparse.ArgumentParser(
        description="Converts the LPD 5 dataset to midi files")
    parser.add_argument("--input_dir", help="dataset directory")
    parser.add_argument("--output_dir", help="output directory")
    args = parser.parse_args()

    # Fail with a usage message instead of an AssertionError (asserts are
    # stripped when Python runs with -O).
    if not (args.input_dir and args.output_dir):
        parser.error("--input_dir and --output_dir are required")

    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)  # fixed typo: was `ouput_dir`
    mask = str(input_dir / "**/*.npz")

    filenames = glob(mask, recursive=True)
    for filename in filenames:
        # Bug fix: the f-string had no placeholder, so the file being
        # converted was never shown.
        print(f"Converting {filename}...")

        filename = Path(filename)
        file_output_dir = output_dir / Path(os.path.relpath(
            filename, input_dir)).parent
        # exist_ok avoids the check-then-create race of the original.
        file_output_dir.mkdir(parents=True, exist_ok=True)

        sample = pp.load(str(filename))
        pp.write(sample, str(file_output_dir / (filename.stem + ".mid")))
コード例 #3
0
def main():
    """Render a pypianoroll .npz file to an SVG plot, a MIDI file and a
    WAV file (synthesized with FluidSynth)."""
    args = parser()
    pypianoroll_file = abspath(args.input_file)
    loaded = pypianoroll.load(pypianoroll_file)

    if args.savepath is None:
        savepath = os.getcwd()
    else:
        savepath = args.savepath
        os.makedirs(savepath, exist_ok=True)

    # Derive the output basename from the input file unless one was given.
    if args.output is None:
        output_filename = os.path.basename(pypianoroll_file).split(".")[0]
    else:
        # Bug fix: `ags.output` raised NameError; the variable is `args`.
        output_filename = args.output
    output_filename = join(savepath, output_filename)

    print(output_filename)

    # Save the piano-roll plot.
    plot_multitrack(loaded, filename=output_filename + ".svg", preset="frame")

    # Save to MIDI, then synthesize a WAV from it.
    pypianoroll.write(loaded, (output_filename + ".mid"))

    fs = FluidSynth('FluidR3_GM.sf2')
    fs.midi_to_audio((output_filename + ".mid"), (output_filename + ".wav"))
コード例 #4
0
ファイル: demo.py プロジェクト: alex21th/music-generation
def main():
    """Render four randomly chosen generated songs (with chords) to MIDI,
    using an instrument and volume asked from the user."""
    data = np.load("output_{}_songs_augmented_19.npy".format(MODEL_NAME),
                   allow_pickle=True)
    chord = np.load("output_{}_chords_augmented_19.npy".format(MODEL_NAME),
                    allow_pickle=True)

    instrument = int(
        input(
            'which instrument you want to play? from 0 to 128, default = 0: '))
    volume = int(
        input('how loud you want to play? from 1 to 127, default = 40: '))

    chosen = random.sample(range(data.shape[0]), 4)
    for i in chosen:
        # Flatten each bar tensor into a (16, 128) numpy piano roll.
        song = [bar.detach().numpy().reshape(16, 128) for bar in data[i]]

        # Build the melody track from the binarized eight-bar roll.
        eight_bar = reshape_bar(song)
        eight_bar_binarized = find_pitch(eight_bar, volume)
        track = make_a_track(eight_bar_binarized, instrument)

        # Build the accompanying chord track and combine both into a demo.
        song_chord = chord_list(chord, i)
        chord_player = get_chord(song_chord)
        chord_track = make_chord_track(chord_player, instrument, volume)
        a = make_a_demo(track, chord_track, i)
        pypiano.write(a, "{}_augmented_19epoch_{}.mid".format(MODEL_NAME, i))
        print('saved')
コード例 #5
0
def numpy2midi(m, c, theta, filename):
	"""Decode melody matrix *m* and chord matrix *c* into a two-track MIDI file.

	m: per-step melody encoding; columns [:-4] reshape to a one-hot over
	   49 classes (48 pitches + "stop"), the last 4 columns are per-step
	   rhythm flags (shapes assumed from the reshape/argmax below —
	   TODO confirm against the caller).
	c: per-beat chord vectors matched against `chord_composition`.
	theta: threshold on the next step's rhythm flag; above it a note is
	   re-struck (one-tick gap), otherwise it is held.
	filename: output MIDI path.
	"""
	resolution = 12
	ratio = int(resolution/4) # 3
	bar = int(m.shape[0]/4)

	# Rhythm flags (last 4 columns) flattened to one value per 16th step.
	mr = m[:,-4:].flatten()
	# One pitch index (0..48) per 16th step; 48 means "stop" (rest).
	m = np.argmax(m[:,:-4].reshape(m.shape[0]*4, 49), 1)
	midi_m = np.zeros((resolution*bar*4, 128))
	for i in range(len(m)):
		if m[i] == 48: # stop
			continue

		if i+1 != len(m):
			if mr[i+1] > theta:
				# Next step starts a new onset: leave a one-tick gap.
				midi_m[i*ratio:(i+1)*ratio - 1, m[i]+48] = 100
			else:
				# Hold the note through the full step.
				midi_m[i*ratio:(i+1)*ratio, m[i]+48] = 100
		else: #i+1 != len(m) and mr[i+1] == 0:
			midi_m[i*ratio:(i+1)*ratio - 1, m[i]+48] = 100
		# else: #i+1 == len(m):
			# midi_m[i*ratio:(i+1)*ratio - 1, m[i]+48] = 100

	midi_c = np.zeros((resolution*bar*4, 128))
	nextchord = -1
	for i in range(len(c)):
		# round
		# midi_c[i*resolution:(i+1)*resolution-1, np.where(np.round(c[i])==1)[0]+48] = 100

		# dot
		# Pick the template chord with the highest (cosine-like)
		# similarity; an all-zero frame maps to the last template.
		if np.sum(c[i]) == 0:
			chord = len(chord_composition)-1
		else:
			chord = np.argmax( np.dot(chord_composition, c[i])/(np.linalg.norm(chord_composition, axis=1)+1e-5)/(np.linalg.norm(c)+1e-5) )
		# Look ahead one beat (within the bar) so repeated chords are tied.
		if i < len(c)-1 and i%4!=3:
			nextchord = np.argmax( np.dot(chord_composition, c[i+1])/(np.linalg.norm(chord_composition, axis=1)+1e-5)/(np.linalg.norm(c)+1e-5) )
		else:
			nextchord = -1

		main = int(chord/7)
		for j in np.where(chord_composition[chord]==1)[0]:
			# Chord tones below the root are voiced an octave higher
			# (+60 vs +48); tie into the next beat only when the chord
			# repeats, otherwise leave a one-tick gap.
			if j < main:
				if chord == nextchord:
					midi_c[i*resolution:(i+1)*resolution, j+60] = 100
				else:
					midi_c[i*resolution:(i+1)*resolution-1, j+60] = 100
			else:
				if chord == nextchord:
					midi_c[i*resolution:(i+1)*resolution, j+48] = 100
				else:
					midi_c[i*resolution:(i+1)*resolution-1, j+48] = 100


	track_m = Track(pianoroll=midi_m, program=0, is_drum=False)
	track_c = Track(pianoroll=midi_c, program=0, is_drum=False)
	multitrack = Multitrack(tracks=[track_m, track_c], tempo=80.0, beat_resolution=resolution)
	pypianoroll.write(multitrack, filename)
コード例 #6
0
    def train(self, epochs, train_data, batch_size=128, sample_interval=50):
        """Adversarially train generator and discriminator, then generate
        one sequence and write it out via pypianoroll.

        epochs: number of training iterations.
        train_data: array of real note sequences, indexed by sample on axis 0.
        batch_size: batch size for both discriminator and generator updates.
        sample_interval: print/record losses every this many epochs.
        """
        # Load and convert the data
        #notes = get_notes()
        #n_vocab = len(set(notes))
        #X_train, y_train = prepare_sequences(notes, n_vocab)
        X_train = train_data
        # Adversarial ground truths
        real = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # Training the model
        for epoch in range(epochs):

            # Training the discriminator
            # Select a random batch of note sequences
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            real_seqs = X_train[idx]

            # noise = np.random.choice(range(484), (batch_size, self.latent_dim))
            # noise = (noise-242)/242
            noise = np.random.normal(
                0, 1, (batch_size, self.seq_length, self.latent_dim))

            # Generate a batch of new note sequences
            gen_seqs = self.generator.predict(noise)

            # Train the discriminator on real and generated batches.
            d_loss_real = self.discriminator.train_on_batch(real_seqs, real)
            d_loss_fake = self.discriminator.train_on_batch(gen_seqs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            #  Training the Generator
            noise = np.random.normal(
                0, 1, (batch_size, self.seq_length, self.latent_dim))

            # Train the generator (to have the discriminator label samples as real)
            g_loss = self.combined.train_on_batch(noise, real)

            # Print the progress and save into loss lists
            if epoch % sample_interval == 0:
                print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                      (epoch, d_loss[0], 100 * d_loss[1], g_loss))
                self.disc_loss.append(d_loss[0])
                self.gen_loss.append(g_loss)
        # Generate one final sample from fresh noise after training.
        noise = np.random.normal(0, 1, (1, self.seq_length, self.latent_dim))
        predictions = self.generator.predict(noise)
        # NOTE(review): `predictions` keeps its leading batch axis here, a
        # Track (not a Multitrack) is passed to pypianoroll.write, and the
        # output path has no file extension — confirm this writes as
        # intended.
        track = pypianoroll.Track(pianoroll=predictions,
                                  program=0,
                                  is_drum=False,
                                  name='my awesome piano')
        pypianoroll.write(
            track,
            "C:\\Users\\10413\\Desktop\\deep_learning\\project\\midi-lstm-gan-master\\output"
        )
コード例 #7
0
def _generate_samples(decoder: k.Model):
    """Sample one latent vector, decode it, and write the result as a MIDI file."""
    # Draw a single 512-dimensional latent vector from a standard normal.
    z = np.random.randn(1, 512)
    predicted = decoder.predict(z)
    # NOTE(review): `x` is computed but never used below — the raw
    # `predicted` array is written instead; confirm which was intended.
    x = dataset.postprocess(predicted)

    guitar = pproll.Track(pianoroll=predicted[0, :, :], program=0)

    guitar = guitar.binarize()

    song = pproll.Multitrack(tracks=[guitar], resolution=midi_cfg.resolution)
    song = song.set_nonzeros(1)
    song.validate()
    pproll.write(multitrack=song,
                 path=os.path.join(cfg.Paths.generated, f"{0}.mid"))
コード例 #8
0
def sample2midi(path, sample, resolution):
    """Write a flat binary sample to *path* as a single-track MIDI file.

    The sample is reshaped to (512, 72) and its 72 pitch columns are
    placed into MIDI rows 24..95 of a full 128-pitch roll.
    """
    music = sample.reshape(512, 72)

    roll = np.zeros((512, 128), dtype=np.uint8)
    roll[:, 24:96] = music

    track = pypianoroll.BinaryTrack(program=0, is_drum=False, pianoroll=roll)
    multitrack = pypianoroll.Multitrack(resolution=resolution, tracks=[track])
    pypianoroll.write(path=path, multitrack=multitrack)
コード例 #9
0
def create_midi_from_piano_roll(
        roll: np.ndarray, midi_path: str, lowest_note: str, tempo: int,
        instrument: int, velocity: float
) -> None:
    """
    Render a piano-roll array to a MIDI file.

    :param roll:
        piano roll with pitches along axis 0 and time steps along axis 1
    :param midi_path:
        destination path of the resulting MIDI file
    :param lowest_note:
        pitch name (e.g. 'A0') of the roll's bottom row
    :param tempo:
        number of piano roll's time steps per minute
    :param instrument:
        General MIDI program number
    :param velocity:
        velocity applied uniformly to all notes
    :return:
        None
    """
    semitone_of = {
        'C': 0, 'C#': 1, 'D': 2, 'D#': 3, 'E': 4, 'F': 5, 'F#': 6,
        'G': 7, 'G#': 8, 'A': 9, 'A#': 10, 'B': 11
    }
    n_semitones_per_octave = 12
    n_pypianoroll_pitches = 128

    # MIDI row of the roll's lowest pitch: octave * 12 + semitone offset.
    octave = int(lowest_note[-1])
    n_rows_below = (
        n_semitones_per_octave * octave + semitone_of[lowest_note[:-1]]
    )

    # Embed the transposed roll into the full 128-pitch range, zero-padded
    # below and above.
    n_time_steps = roll.shape[1]
    resized_roll = np.zeros((n_time_steps, n_pypianoroll_pitches))
    resized_roll[:, n_rows_below:n_rows_below + roll.shape[0]] = roll.T

    track = pypianoroll.Track(velocity * resized_roll, instrument)
    multitrack = pypianoroll.Multitrack(
        tracks=[track],
        tempo=tempo,
        beat_resolution=1
    )
    pypianoroll.write(multitrack, midi_path)
コード例 #10
0
def save_midi_pypiano(m, midi_path, filename):
    """Write melody matrix *m* (its last column dropped) to
    `<midi_path><filename>.mid` as a single velocity-scaled melody track."""
    # Place the melody columns into MIDI pitch rows 48..95 of a full roll.
    full_roll = np.zeros((m.shape[0], 128))
    full_roll[:, 48:96] = m[:, :-1]

    # Scale every cell by the per-note velocity matrix.
    full_roll = full_roll * get_velocity_matrix(full_roll)

    resolution = DATA_CONFIG['data_tpb']
    melody_track = Track(pianoroll=full_roll, program=0, is_drum=False,
                         name='Melody')
    multitrack = Multitrack(tracks=[melody_track],
                            tempo=80.0,
                            beat_resolution=resolution)
    pypiano.write(multitrack, midi_path + filename + ".mid")
コード例 #11
0
def pianoroll_to_midi(snippet, filename="Sampled/sample.midi"):
    """Convert a piano-roll array to a MIDI file, widening 88/89-column
    rolls to the full 128-pitch range first."""
    # Scale binary cells to the maximum MIDI velocity (127).
    roll = np.asarray(snippet, dtype=np.uint8) * 127

    n_pitches = roll.shape[1]
    if n_pitches == 89:
        roll = one_hot_pianoroll_to_small_pianoroll(roll)
        roll = small_to_full_pianoroll(roll)
    elif n_pitches == 88:
        roll = small_to_full_pianoroll(roll)
    elif n_pitches != 128:
        raise ValueError(
            "input shape does not have 128 pitches (or 88, then it will be converted automatically) and cannot be converted to MIDI!"
        )

    track = ppr.Track(pianoroll=roll)
    multitrack = ppr.Multitrack(tracks=[track], tempo=120, beat_resolution=4)
    ppr.write(multitrack, path_to_root + filename)
コード例 #12
0
ファイル: decode.py プロジェクト: scmvp301135/SurpriseNet
def write_pianoroll(result_dir, melody_data, accompany_pianoroll_frame,
                    chord_groundtruth_frame, length, tempos, downbeats):
    """Write each song's generated and ground-truth accompaniment as
    numbered MIDI files plus matching piano-roll PNG plots."""
    print('write pianoroll...')
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    rolls = zip(melody_data, accompany_pianoroll_frame,
                chord_groundtruth_frame, length, tempos, downbeats)
    for counter, (melody_roll, chord_roll, truth_roll, l, tempo,
                  downbeat) in enumerate(tqdm(rolls, total=len(melody_data))):
        # Trim every roll to the song's true length.
        melody_roll = melody_roll[:l]
        chord_roll = chord_roll[:l]
        truth_roll = truth_roll[:l]

        melody_track = Track(pianoroll=melody_roll)
        generated_track = Track(pianoroll=chord_roll)
        truth_track = Track(pianoroll=truth_roll)

        # Melody paired with generated vs. ground-truth accompaniment.
        generate = Multitrack(tracks=[melody_track, generated_track],
                              tempo=tempo,
                              downbeat=downbeat,
                              beat_resolution=Constants.BEAT_RESOLUTION)
        truth = Multitrack(tracks=[melody_track, truth_track],
                           tempo=tempo,
                           downbeat=downbeat,
                           beat_resolution=Constants.BEAT_RESOLUTION)

        pr.write(generate, result_dir + '/generate_' + str(counter) + '.mid')
        pr.write(truth, result_dir + '/groundtruth_' + str(counter) + '.mid')

        # Save matching piano-roll plots next to the MIDI files.
        fig, axs = generate.plot()
        plt.savefig(result_dir + '/generate_' + str(counter) + '.png')
        plt.close()
        fig, axs = truth.plot()
        plt.savefig(result_dir + '/groundtruth_' + str(counter) + '.png')
        plt.close()

    print('Finished!')
コード例 #13
0
def numpy_to_pianoroll(folder):
    """Convert each .npy batch file in *folder* into 5-track pypianoroll songs.

    Every file is expected to hold a batch of 5-instrument samples; each
    instrument roll is binarized, zero-padded on the pitch axis, scored
    with note metrics (printed), and the assembled song is saved under
    gen_samples/ as a PNG plot and a pypianoroll file.
    """
    #Bass Drums Guitar Piano Strings
    programs = [34,0,30,1,51]
    names = ['Bass' ,'Drums' ,'Guitar' ,'Piano' ,'Strings']
    # Constant tempo array: one value per time step (96 steps).
    tempo = np.full((96), 105)
    for filename in os.listdir(folder):
        multisample = np.load(os.path.join(folder,filename))

        for sample,i  in zip(multisample,range(multisample.shape[0])):
            tracks = []
            classes = []
            for instrument,program,name in zip(sample,programs,names):

                print(instrument.shape)
                # Stack the instrument's bars into one long roll.
                track = np.vstack(instrument)
                print(track.shape)
                # Binarize to velocity 100 (cells exactly 0.5 untouched).
                track[track > 0.5] = 100
                track[track < 0.5] = 0
                print(track.shape)
                # Pad the pitch axis with 44 zero columns on the right
                # (assumes 84 input pitches -> 128 — TODO confirm).
                track = np.pad(track.astype(int),((0,0),(0,44)),mode='constant')
                if name !='Guitar':
                   print(ppr.metrics.qualified_note_rate((track),2))
                print(ppr.metrics.n_pitches_used((track)))
                classes.append(ppr.metrics.n_pitches_used((track)))
                print(track.shape)
                # Program 0 corresponds to 'Drums' in the lists above.
                isdrum = False
                if program == 0:
                    isdrum = True
                ppr_track = ppr.Track(track,program,isdrum,name)
                tracks.append(ppr_track)
            ppr_song = ppr.Multitrack(tracks=tracks, tempo=tempo, beat_resolution=24)
            # Report pitch-class counts per instrument.
            for instrument, clasnum in zip(names,classes):
                print(instrument+':'+str(clasnum))

            print(123)
            plot = ppr.plot_multitrack(ppr_song,mode='separate',ytick='off')
            plt.savefig('gen_samples/'+filename+str(i)+".png",dpi=400)
            ppr.write(ppr_song, 'gen_samples/'+filename+"song")
コード例 #14
0
ファイル: decode.py プロジェクト: scmvp301135/SurpriseNet
def write_one_pianoroll(result_dir, filename, melody_data,
                        accompany_pianoroll_frame, chord_groundtruth_frame,
                        length, tempo, downbeat):
    """Write one song's generated and ground-truth accompaniment as MIDI
    files plus matching piano-roll PNG plots."""
    print('write pianoroll...')
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    l = length
    # Only the first entry of each batch is written; trim to true length.
    melody_roll = melody_data[0][:l]
    chord_roll = accompany_pianoroll_frame[0][:l]
    truth_roll = chord_groundtruth_frame[0][:l]

    melody_track = Track(pianoroll=melody_roll)
    generated_track = Track(pianoroll=chord_roll)
    truth_track = Track(pianoroll=truth_roll)

    # Melody paired with generated vs. ground-truth accompaniment.
    generate = Multitrack(tracks=[melody_track, generated_track],
                          tempo=tempo[0],
                          downbeat=downbeat[0],
                          beat_resolution=Constants.BEAT_RESOLUTION)
    truth = Multitrack(tracks=[melody_track, truth_track],
                       tempo=tempo[0],
                       downbeat=downbeat[0],
                       beat_resolution=Constants.BEAT_RESOLUTION)

    pr.write(generate, result_dir + '/generate-' + filename + '.mid')
    pr.write(truth, result_dir + '/groundtruth-' + filename + '.mid')

    # Save matching plots next to the MIDI files.
    fig, axs = generate.plot()
    plt.savefig(result_dir + '/generate-' + filename + '.png')
    plt.close()
    fig, axs = truth.plot()
    plt.savefig(result_dir + '/groundtruth-' + filename + '.png')
    plt.close()

    print('Finished!')
コード例 #15
0
ファイル: postprocess.py プロジェクト: janyanti/MidiNetGan
def write_midi(sequence, output_path):
    """Flatten a sequence of bar encodings into one binary piano-roll
    track and save it as MIDI.

    sequence (List[torch.Tensor], each 1 x 1 x 128 x 96): bar encodings.
    output_path (str): where the MIDI file is written.
    """
    # Collect each bar as a (128 x 96) numpy array.
    bars = [bar[0][0].detach().cpu().numpy() for bar in sequence]

    # Concatenate bars along time, then transpose to (time x 128).
    melody = np.concatenate(bars, axis=1).transpose()

    # Nonzero cells become active notes in a binary track.
    melody_track = pypianoroll.BinaryTrack(pianoroll=melody > 0)
    multi_track = pypianoroll.Multitrack(resolution=RESOLUTION,
                                         tracks=[melody_track])
    pypianoroll.write(output_path, multi_track)
コード例 #16
0
def main():
    """Batch-convert every .npz in a directory to an SVG plot, a MIDI
    file and a WAV file (synthesized with FluidSynth)."""
    args = parser()
    pypianoroll_directory = abspath(args.input_directory)

    if args.savepath is None:
        savepath = os.getcwd()
    else:
        savepath = args.savepath
        os.makedirs(savepath, exist_ok=True)

    for pypianoroll_file in os.listdir(pypianoroll_directory):
        if os.path.basename(pypianoroll_file).split(".")[1] == "npz":
            loaded = pypianoroll.load(
                join(pypianoroll_directory, pypianoroll_file))

            # Output basename: the file's stem, optionally prefixed.
            base = os.path.basename(pypianoroll_file).split(".")[0]
            if args.output is None:
                output_filename = join(savepath, base)
            else:
                # Bug fix: `ags.output` raised NameError; variable is `args`.
                output_filename = join(savepath, args.output + base)

            fig, axs = plot_multitrack(loaded,
                                       filename=output_filename + ".svg",
                                       preset="frame")
            plt.close(fig)

            # Save to MIDI, then render WAV with FluidSynth.
            pypianoroll.write(loaded, (output_filename + ".mid"))

            fs = FluidSynth('FluidR3_GM.sf2')
            fs.midi_to_audio((output_filename + ".mid"),
                             (output_filename + ".wav"))
コード例 #17
0
    def train(self, epochs, train_data, batch_size=128, sample_interval=50):
        """Train the GAN, then write one generated 5-track piano-roll to disk.

        epochs: number of adversarial training iterations.
        train_data: array of real sequences, indexed by sample on axis 0.
        batch_size: batch size for discriminator and generator updates.
        sample_interval: print/record losses every this many epochs.
        """
        X_train = train_data
        # Adversarial ground truths.
        real = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):
            # --- Discriminator ---
            # Select a random batch of real note sequences.
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            real_seqs = X_train[idx]

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Generate a batch of fake sequences.
            gen_seqs = self.generator.predict(noise)

            # The discriminator is only updated on epoch 0 and every 5th
            # epoch after 99; d_loss keeps its last value in between.
            if epoch > 99 or epoch == 0:
                if epoch % 5 == 0:
                    d_loss_real = self.discriminator.train_on_batch(
                        real_seqs, real)
                    d_loss_fake = self.discriminator.train_on_batch(
                        gen_seqs, fake)
                    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # --- Generator ---
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            # Train the generator to make the discriminator label fakes real.
            g_loss = self.combined.train_on_batch(noise, real)

            # Print the progress and record the loss curves.
            if epoch % sample_interval == 0:
                print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                      (epoch, d_loss[0], 100 * d_loss[1], g_loss))
                self.disc_loss.append(d_loss[0])
                self.gen_loss.append(g_loss)

        # Generate one final sample and binarize it at 0.5.
        noise = np.random.normal(0, 1, (1, self.seq_length, self.latent_dim))
        predictions = self.generator.predict(noise)
        predictions[predictions > 0.5] = 1
        predictions[predictions <= 0.5] = 0

        track = pypianoroll.Track(pianoroll=np.squeeze(predictions),
                                  program=0,
                                  is_drum=False,
                                  name='Piano')
        # Empty placeholder rolls for the remaining LPD-5 instruments.
        # Bug fix: track3/track4 previously reused emp2 instead of their
        # own (identical, all-zero) buffers.
        emp0 = np.zeros(self.seq_shape)
        track0 = pypianoroll.Track(pianoroll=emp0,
                                   program=0,
                                   is_drum=True,
                                   name='Drums')
        emp2 = np.zeros(self.seq_shape)
        track2 = pypianoroll.Track(pianoroll=emp2,
                                   program=0,
                                   is_drum=False,
                                   name='Guitar')
        emp3 = np.zeros(self.seq_shape)
        track3 = pypianoroll.Track(pianoroll=emp3,
                                   program=0,
                                   is_drum=False,
                                   name='Bass')
        emp4 = np.zeros(self.seq_shape)
        track4 = pypianoroll.Track(pianoroll=emp4,
                                   program=0,
                                   is_drum=False,
                                   name='Strings')
        track_list = [track0, track, track2, track3, track4]
        multitrack = pypianoroll.Multitrack(filename=None,
                                            tracks=track_list,
                                            tempo=120.0,
                                            downbeat=None,
                                            beat_resolution=24,
                                            name='Piano')
        pypianoroll.write(
            multitrack,
            "C:\\Users\\10413\\Desktop\\deep_learning\\project\\midi-lstm-gan-master\\output\\output1.midi"
        )
コード例 #18
0
        print(j)
        xo = np.expand_dims(x_con[j], axis=1)
        yo = np.expand_dims(y_con[j], axis=1)
        for k in range(len(x_con[j])):
            print(model.train_on_batch(xo[k], xo[k]), end=' ')
        model.reset_states()
        print('\n')
    print('\n')
    print('Testing')
    print('\n')
    kolo = []
    for j in range(5):
        print(j)
        bolo = []
        xo = np.expand_dims(x_ton[j], axis=1)
        for k in range(len(x_ton[j])):
            bolo.extend(model.predict_on_batch(xo[k]))
        model.reset_states()
        bolo = np.concatenate(np.array(bolo))
        kolo.append(bolo)
    jojo = np.array(kolo)
    for j in range(5):
        r2 = pn.Track(pianoroll=jojo[j] * 300,
                      program=0,
                      is_drum=False,
                      name='my awesome piano')
        multitrack2 = pn.Multitrack(tracks=[r2])
        os.system('mkdir ./results/' + str(i))
        pn.write(multitrack2, './results/' + str(i) + '/' + str(j) + '.mid')
    model.save('stacked.h5')
コード例 #19
0
    for j in range(len(x_train)):
        temp_x = np.array(
            np.array_split(pad_along_axis(x_train[j], seq_len),
                           np.ceil(len(x_train[j]) / seq_len)))
        windows.append(temp_x)
    return np.array(windows)


# Parse every MIDI file given on the command line into a binarized
# piano-roll (first track only) and run the model over each.
x = []
for i in range(1, len(sys.argv)):
    a = pn.parse(sys.argv[i])
    # np.sign binarizes the velocities to 0/1.
    x.append(1 * np.sign(a.tracks[0].pianoroll))

x_final = np.array(x)
x_con = trunc_sequences(x_final)
for j in range(len(x_con)):
    bolo = []
    xo = np.expand_dims(x_con[j], axis=1)
    # Feed the stateful model window-by-window, collecting predictions,
    # then reset its state between songs.
    for k in range(len(x_con[j])):
        bolo.extend(model.predict_on_batch(xo[k]))
    model.reset_states()
    bolo = np.concatenate(np.array(bolo))
    # Scale predictions to velocities and binarize at a low threshold.
    result = pn.binarize(pn.Track(pianoroll=bolo * 100,
                                  program=0,
                                  is_drum=False,
                                  name='my awesome piano'),
                         threshold=0.05)
    multitracksam11 = pn.Multitrack(tracks=[result])
    # Output name derives from the input filename's leading token.
    pn.write(multitracksam11,
             './result/10_' + sys.argv[j + 1].split('_')[0] + '.mid')
コード例 #20
0
        plt.ylabel('Loss')
        plt.savefig('GAN_Loss_per_Epoch_final_1000.png', transparent=True)
        plt.close()


if __name__ == '__main__':
    path = "C:\\Users\\10413\\Desktop\\deep_learning\\project\\midi-lstm-gan-master\\lpd_5\\lpd_5_cleansed"  # 文件夹目录
    listOfFiles = getListOfFiles(path)
    piano_roll = pypianoroll.load(listOfFiles[1])
    piano_roll.tracks[2].pianoroll = piano_roll.tracks[2].pianoroll > 2
    piano_roll.tracks[0].pianoroll = piano_roll.tracks[0].pianoroll > 2
    piano_roll.tracks[3].pianoroll = piano_roll.tracks[3].pianoroll > 3
    piano_roll.tracks[4].pianoroll = piano_roll.tracks[4].pianoroll > 3
    data = piano_roll.tracks[2].pianoroll
    b = np.sum(data)
    pypianoroll.write(piano_roll, path)
    print(listOfFiles[1])
    data_train = np.empty([300, 20 * 96, 128])
    i = 0
    for files in listOfFiles:
        piano_roll = pypianoroll.load(files)
        data = piano_roll.tracks[1].pianoroll
        if data.shape[0] > (20 * 96 - 1):
            data_train[i, 0:20 * 96 - 1, 0:127] = data[0:20 * 96 - 1, 0:127]
            i = i + 1
        if i > 299:
            break
    gan = GAN(rows=20 * 96)
    gan.train(epochs=200,
              batch_size=32,
              train_data=data_train,