Example #1
File: plot.py  Project: cai-mj/ddgm
def plot(filename="gen"):
    try:
        os.mkdir(args.plot_dir)
    except OSError:
        pass

    x_negative = ddgm.generate_x(100, test=True, as_numpy=True)
    # reshape the flat samples to (batchsize, 28, 28); a bare (28, 28)
    # cannot hold all 100 generated samples
    visualizer.tile_binary_images(x_negative.reshape((-1, 28, 28)),
                                  dir=args.plot_dir,
                                  filename=filename)
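visualizer is a module local to the cai-mj/ddgm project, so its internals are not shown in these snippets. As a rough guide, a tiling helper like tile_binary_images could be sketched with matplotlib as below; the grid layout, figure size, and file naming are assumptions for illustration, not the project's actual code.

import math
import os

import matplotlib
matplotlib.use("Agg")  # render without a display
import matplotlib.pyplot as plt


def tile_binary_images(images, dir=".", filename="gen"):
    # images: (batchsize, 28, 28) array of grayscale values in [0, 1]
    cols = int(math.ceil(math.sqrt(len(images))))
    rows = int(math.ceil(len(images) / float(cols)))
    fig, axes = plt.subplots(rows, cols, squeeze=False, figsize=(cols, rows))
    for ax in axes.flat:
        ax.axis("off")
    for ax, image in zip(axes.flat, images):
        ax.imshow(image, cmap="gray", interpolation="none")
    fig.savefig(os.path.join(dir, filename + ".png"))
    plt.close(fig)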
Example #2
File: plot.py  Project: cai-mj/ddgm
def plot(filename="gen"):
    try:
        os.mkdir(args.plot_dir)
    except OSError:
        pass

    x_negative = ddgm.generate_x(100, test=True, as_numpy=True)
    # x_negative = (x_negative + 1) / 2
    visualizer.tile_rgb_images(x_negative.transpose(0, 2, 3, 1),
                               dir=args.plot_dir,
                               filename=filename)
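The only differences from example #1 are the RGB tiling call and the transpose: Chainer produces images in (batch, channel, height, width) order, while most plotting code expects (batch, height, width, channel). The commented-out rescaling line would map a tanh-range output from [-1, 1] into [0, 1], which matters only if the visualizer expects that range. A quick shape check of the transpose, with the 3x28x28 sample shape assumed for illustration:

import numpy as np

x = np.zeros((100, 3, 28, 28), dtype=np.float32)  # assumed NCHW samples
print(x.transpose(0, 2, 3, 1).shape)              # (100, 28, 28, 3)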
Example #3
def main():
	try:
		os.mkdir(args.plot_dir)
	except OSError:
		pass

	x_positive = sampler.sample_from_gaussian_mixture(1000, 2, 10)
	visualizer.plot_z(x_positive, dir=args.plot_dir, filename="positive", xticks_range=4, yticks_range=4)

	x_negative = ddgm.generate_x(1000, test=True)
	if params.gpu_enabled:
		x_negative.to_cpu()
	visualizer.plot_z(x_negative.data, dir=args.plot_dir, filename="negative", xticks_range=4, yticks_range=4)
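sampler is another project-local module. Judging from the call sites, sample_from_gaussian_mixture(batchsize, ndim, num_clusters) draws 2-D points from a mixture of Gaussians. Placing the components evenly on a circle, as in the sketch below, is an assumption about its behavior, and the scale and standard deviation are made up for illustration.

import math

import numpy as np


def sample_from_gaussian_mixture(batchsize, ndim, num_clusters, scale=2.0, std=0.2):
    # pick a mixture component per sample, then add Gaussian noise around it
    assert ndim == 2, "this sketch only covers the 2-D case"
    labels = np.random.randint(0, num_clusters, size=batchsize)
    angles = 2.0 * math.pi * labels / num_clusters
    centers = scale * np.stack([np.cos(angles), np.sin(angles)], axis=1)
    return (centers + std * np.random.randn(batchsize, ndim)).astype(np.float32)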
Example #4
File: train.py  Project: cai-mj/ddgm
def main():
    # load training images as RGB arrays
    images = load_rgb_images(args.image_dir)

    # config
    config_energy_model = to_object(params_energy_model["config"])
    config_generative_model = to_object(params_generative_model["config"])

    # settings
    max_epoch = 1000
    n_trains_per_epoch = 500
    batchsize_positive = 128
    batchsize_negative = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if config_energy_model.use_weightnorm:
        print "initializing weight normalization layers of the energy model ..."
        x_positive = sample_from_data(images, batchsize_positive * 5)
        ddgm.compute_energy(x_positive)

    if config_generative_model.use_weightnorm:
        print "initializing weight normalization layers of the generative model ..."
        # the forward pass itself performs the data-dependent initialization,
        # so the generated samples are not used afterwards
        x_negative = ddgm.generate_x(batchsize_negative * 5)

    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_energy_positive = 0
        sum_energy_negative = 0
        sum_loss = 0
        sum_kld = 0

        for t in xrange(n_trains_per_epoch):
            # sample from data distribution
            x_positive = sample_from_data(images, batchsize_positive)

            # sample from generator
            x_negative = ddgm.generate_x(batchsize_negative)

            # train energy model
            energy_positive = ddgm.compute_energy_sum(x_positive)
            energy_negative = ddgm.compute_energy_sum(x_negative)
            loss = energy_positive - energy_negative
            ddgm.backprop_energy_model(loss)

            # train generative model
            # TODO: KLD must be greater than or equal to 0
            x_negative = ddgm.generate_x(batchsize_negative)
            kld = ddgm.compute_kld_between_generator_and_energy_model(
                x_negative)
            ddgm.backprop_generative_model(kld)

            sum_energy_positive += float(energy_positive.data)
            sum_energy_negative += float(energy_negative.data)
            sum_loss += float(loss.data)
            sum_kld += float(kld.data)
            progress.show(t, n_trains_per_epoch, {})

        progress.show(
            n_trains_per_epoch, n_trains_per_epoch, {
                "x+": int(sum_energy_positive / n_trains_per_epoch),
                "x-": int(sum_energy_negative / n_trains_per_epoch),
                "loss": sum_loss / n_trains_per_epoch,
                "kld": sum_kld / n_trains_per_epoch
            })
        ddgm.save(args.model_dir)

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(
                epoch, progress.get_total_time()))
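Each inner step first pushes the energy model to assign lower energy to data samples than to generated ones (loss = energy_positive - energy_negative), then resamples from the generator and updates it to shrink the KL divergence between the generator's distribution and the energy model's. sample_from_data is not shown in these snippets; a minimal sketch, assuming images is a numpy array indexable along its first axis:

import numpy as np


def sample_from_data(images, batchsize):
    # draw a random minibatch without replacement
    indices = np.random.choice(len(images), size=batchsize, replace=False)
    return images[indices]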
Example #5
def main():
    try:
        os.mkdir(args.plot_dir)
    except OSError:
        pass

    # settings
    max_epoch = 1000
    n_trains_per_epoch = 2
    batchsize_positive = 100
    batchsize_negative = 100
    plotsize = 400

    # config
    config_energy_model = to_object(params_energy_model["config"])
    config_generative_model = to_object(params_generative_model["config"])

    # seed
    np.random.seed(args.seed)
    if args.gpu_enabled:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if config_energy_model.use_weightnorm:
        print "initializing weight normalization layers of the energy model ..."
        x_positive = sampler.sample_from_swiss_roll(batchsize_positive * 10, 2,
                                                    10)
        ddgm.compute_energy(x_positive)

    if config_generative_model.use_weightnorm:
        print "initializing weight normalization layers of the generative model ..."
        x_negative = ddgm.generate_x(batchsize_negative * 10)

    # fix the latent vectors and target samples so the per-epoch plots are comparable
    fixed_z = ddgm.sample_z(plotsize)
    fixed_target = sampler.sample_from_swiss_roll(600, 2, 10)

    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_energy_positive = 0
        sum_energy_negative = 0
        sum_kld = 0

        for t in xrange(n_trains_per_epoch):
            # sample from data distribution
            x_positive = sampler.sample_from_swiss_roll(
                batchsize_positive, 2, 10)

            # sample from generator
            x_negative = ddgm.generate_x(batchsize_negative)

            # train energy model
            energy_positive = ddgm.compute_energy_sum(x_positive)
            energy_negative = ddgm.compute_energy_sum(x_negative)
            loss = energy_positive - energy_negative
            ddgm.backprop_energy_model(loss)

            # train generative model
            x_negative = ddgm.generate_x(batchsize_negative)
            kld = ddgm.compute_kld_between_generator_and_energy_model(
                x_negative)
            ddgm.backprop_generative_model(kld)

            sum_energy_positive += float(energy_positive.data)
            sum_energy_negative += float(energy_negative.data)
            sum_kld += float(kld.data)
            if t % 10 == 0:
                progress.show(t, n_trains_per_epoch, {})

        progress.show(
            n_trains_per_epoch, n_trains_per_epoch, {
                "x+": sum_energy_positive / n_trains_per_epoch,
                "x-": sum_energy_negative / n_trains_per_epoch,
                "KLD": int(sum_kld / n_trains_per_epoch)
            })
        ddgm.save(args.model_dir)

        # init
        fig = pylab.gcf()
        fig.set_size_inches(8.0, 8.0)
        pylab.clf()

        plot(fixed_target, color="#bec3c7", s=20)
        plot(ddgm.generate_x_from_z(fixed_z, as_numpy=True, test=True),
             color="#e84c3d",
             s=20)

        # save (the 100000 offset keeps the PNG filenames in epoch order)
        pylab.savefig("{}/{}.png".format(args.plot_dir, 100000 + epoch))
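sample_from_swiss_roll(batchsize, ndim, ...) is again project-local; the sketch below draws jittered points along a 2-D spiral. The exact parametrization, the noise level, and the meaning of the third argument (ignored here, though it looks like a label count) are all assumptions.

import numpy as np


def sample_from_swiss_roll(batchsize, ndim, num_labels=10, std=0.3):
    assert ndim == 2, "this sketch only covers the 2-D case"
    # uniform points along the spiral, plus Gaussian jitter
    t = np.pi * np.random.uniform(1.0, 3.0, size=batchsize)
    x = np.stack([t * np.cos(t), t * np.sin(t)], axis=1)
    return (x + std * np.random.randn(batchsize, ndim)).astype(np.float32)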
Example #6
def main():
    # settings
    max_epoch = 1000
    n_trains_per_epoch = 500
    batchsize_positive = 100
    batchsize_negative = 100

    # config
    config_energy_model = to_object(params_energy_model["config"])
    config_generative_model = to_object(params_generative_model["config"])

    # seed
    np.random.seed(args.seed)
    if args.gpu_enabled:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if config_energy_model.use_weightnorm:
        print "initializing weight normalization layers of the energy model ..."
        x_positive = sampler.sample_from_gaussian_mixture(
            batchsize_positive * 10, 2, 10)
        ddgm.compute_energy(x_positive)

    if config_generative_model.use_weightnorm:
        print "initializing weight normalization layers of the generative model ..."
        x_negative = ddgm.generate_x(batchsize_negative * 10)

    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_energy_positive = 0
        sum_energy_negative = 0
        sum_kld = 0

        for t in xrange(n_trains_per_epoch):
            # sample from data distribution
            x_positive = sampler.sample_from_gaussian_mixture(
                batchsize_positive, 2, 10)

            # sample from generator
            x_negative = ddgm.generate_x(batchsize_negative)

            # train energy model
            energy_positive = ddgm.compute_energy_sum(x_positive)
            energy_negative = ddgm.compute_energy_sum(x_negative)
            loss = energy_positive - energy_negative
            ddgm.backprop_energy_model(loss)

            # train generative model
            x_negative = ddgm.generate_x(batchsize_negative)
            kld = ddgm.compute_kld_between_generator_and_energy_model(
                x_negative)
            ddgm.backprop_generative_model(kld)

            sum_energy_positive += float(energy_positive.data)
            sum_energy_negative += float(energy_negative.data)
            sum_kld += float(kld.data)
            if t % 10 == 0:
                progress.show(t, n_trains_per_epoch, {})

        progress.show(
            n_trains_per_epoch, n_trains_per_epoch, {
                "x+": sum_energy_positive / n_trains_per_epoch,
                "x-": sum_energy_negative / n_trains_per_epoch,
                "KLD": int(sum_kld / n_trains_per_epoch)
            })
        ddgm.save(args.model_dir)
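to_object, used in examples #4 through #6, apparently wraps a plain config dict so that entries can be read as attributes (config.use_weightnorm rather than config["use_weightnorm"]). A minimal sketch of such a helper; the project's version may do more.

class Object(object):
    pass


def to_object(dictionary):
    # copy each key/value pair onto a fresh attribute-style object
    obj = Object()
    for key, value in dictionary.items():
        setattr(obj, key, value)
    return obj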