# Example #1
# 0
def generate_latent_walk(checkpoint_dir, dataset_name, output_size, n_epochs,
                         jump_length, output_file_name):
    """Generate a random walk through DCGAN latent space and render it.

    Builds an ``n_epochs * 64``-step walk in a 100-dim latent cube
    ``[-boundary, boundary]^100`` using heavy-tailed (Pareto-mixture)
    jumps with reflective boundaries, renders each latent point with a
    pre-trained DCGAN in batches of 64, saves the result, and returns it.

    Args:
        checkpoint_dir: directory holding the trained DCGAN checkpoint.
        dataset_name: dataset identifier passed through to ``DCGAN``.
        output_size: height/width of each generated RGB frame.
        n_epochs: number of 64-frame batches to generate.
        jump_length: unused here; kept for interface compatibility.
        output_file_name: destination passed to ``save_latent_walk``.

    Returns:
        ndarray of shape (n_epochs * 64, output_size, output_size, 3).
    """
    # 64 matches the per-call sampling batch size used in the loop below.
    n_jumps = n_epochs * 64

    boundary = 1.3
    z = np.zeros((n_jumps, 100))
    walk = np.zeros((n_jumps, output_size, output_size, 3))
    for t in range(1, n_jumps):
        jumps = mixture_pareto(mu=0.04, alpha=1.6, size=[1, 100])
        new_z = z[t - 1, :] + jumps
        # Reflect coordinates that stepped outside the cube back inside:
        # x -> -2b - x past the left wall, x -> 2b - x past the right wall.
        # (The original buffered this through `new_z_2 = new_z[:]`, which is
        # a NumPy *view*, so the copy-back lines were no-ops; the masks are
        # disjoint, so mutating new_z in place is equivalent and clearer.)
        # NOTE(review): a single reflection assumes jumps shorter than 2b;
        # a very heavy-tailed jump could still land outside — TODO confirm
        # that downstream sampling tolerates that.
        outside_left = new_z < -boundary
        outside_right = new_z > boundary
        new_z[outside_left] = -2.0 * boundary - new_z[outside_left]
        new_z[outside_right] = 2.0 * boundary - new_z[outside_right]
        z[t, :] = new_z

    with tf.Session() as sess:
        dcgan = DCGAN(sess,
                      checkpoint_dir=checkpoint_dir,
                      dataset_name=dataset_name,
                      output_size=output_size,
                      is_train=False)
        # Render the walk 64 latent vectors at a time (the model batch size).
        for e in range(n_epochs):
            this_start = e * 64
            this_end = (e + 1) * 64
            walk[this_start:this_end, :, :, :] = dcgan.sample(
                z[this_start:this_end, :])
    save_latent_walk(walk, output_file_name)
    return walk
# Example #2
# 0
def main(_):
    """Entry point: build a DCGAN from ``FLAGS`` and train or sample.

    Prints the parsed flags, ensures the checkpoint and sample
    directories exist, then either trains the model
    (``FLAGS.is_train``) or draws a batch of 64 samples.
    """
    pp.pprint(flags.FLAGS.__flags)

    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...): os.makedirs(...)` pattern (TOCTOU).
    os.makedirs(FLAGS.checkpoint_dir, exist_ok=True)
    os.makedirs(FLAGS.sample_dir, exist_ok=True)

    with tf.Session() as sess:
        dcgan = DCGAN(sess,
                      phase=FLAGS.phase,
                      image_size=FLAGS.image_size,
                      batch_size=FLAGS.batch_size,
                      output_size=FLAGS.output_size,
                      c_dim=FLAGS.c_dim,
                      is_crop=FLAGS.is_crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sample(64)
# Example #3
# 0
        # Restore a trained audio DCGAN and synthesize a waveform from it.
        # NOTE(review): this fragment sits inside an enclosing `with` block whose
        # header precedes this excerpt; `sess`, `wav_size`, `batch_size`,
        # `dataset`, `is_crop`, `checkpoint_dir` and `bitrate` are presumably
        # bound upstream — verify against the full file.
        dcgan = DCGAN(sess,
                      wav_size=wav_size,
                      batch_size=batch_size,
                      dataset_name=dataset,
                      is_crop=is_crop,
                      checkpoint_dir=checkpoint_dir)
        dcgan.load(checkpoint_dir)

        # Load one training wav; its dict is reused below as a metadata
        # template for the generated output (only 'data' is replaced).
        data = glob(os.path.join("./training", "*.wav"))
        sample_file = data[0]
        sample = tensorflow_wav.get_wav(sample_file)
        print(sample)

        full_audio = []
        # Single generation pass; the range(1) loop looks like a hook for
        # generating longer pieces by iterating more times.
        for i in range(1):
            audio = dcgan.sample()

            # Flatten the batch to a 1-D sample stream.
            audio = np.reshape(audio, [-1])
            print("Audio shape", np.shape(audio))
            # Keep only the first bitrate * batch_size samples of this batch.
            full_audio += audio[:bitrate * batch_size].tolist()
            print("Full audio shape", np.shape(full_audio))

        samplewav = sample.copy()
        samplewav  # NOTE(review): bare expression — a no-op; likely leftover debug code
        print("Generated stats 'min', 'max', 'mean', 'stddev'",
              np.min(full_audio), np.max(full_audio), np.mean(full_audio),
              np.std(full_audio))
        # Swap the template's payload for the generated audio, 64 samples per row.
        samplewav['data'] = np.reshape(np.array(full_audio), [-1, 64])
        print("samplewav shape", np.shape(samplewav['data']))

        filename = "./compositions/song.wav.stft"