def visualize_walkthrough():
	x_batch = sample_x_from_data_distribution(20)
	z_batch = gen(x_batch, test=True)
	if use_gpu:
		z_batch.to_cpu()

	fig = pylab.gcf()
	fig.set_size_inches(16.0, 16.0)
	pylab.clf()
	if config.img_channel == 1:
		pylab.gray()
	
	z_a = z_batch.data[:10,:]
	z_b = z_batch.data[10:,:]
	for col in range(10):
		_z_batch = z_a * (1 - col / 9.0) + z_b * col / 9.0
		_z_batch = Variable(_z_batch)
		if use_gpu:
			_z_batch.to_gpu()
		_x_batch = dec(_z_batch, test=True)
		if use_gpu:
			_x_batch.to_cpu()
		for row in range(10):
			pylab.subplot(10, 10, row * 10 + col + 1)
			if config.img_channel == 1:
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_width, config.img_width)), interpolation="none")
			elif config.img_channel == 3:
				# imshow expects (height, width, channel), so transpose from (channel, height, width)
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_channel, config.img_width, config.img_width)).transpose(1, 2, 0), interpolation="none")
			pylab.axis("off")
				
	pylab.savefig("%s/walk_through.png" % args.visualization_dir)
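The walkthrough grid above is built by linearly interpolating, column by column, between two batches of latent codes. A minimal, self-contained NumPy sketch of the same interpolation scheme (the 2-D latent size here is purely illustrative):

import numpy as np

z_a = np.random.randn(10, 2).astype(np.float32)  # first batch of latent codes
z_b = np.random.randn(10, 2).astype(np.float32)  # second batch of latent codes

for col in range(10):
	t = col / 9.0  # 0.0 at the leftmost column, 1.0 at the rightmost
	z_mix = z_a * (1.0 - t) + z_b * t
	# z_mix would be decoded to produce one column of the 10x10 image grid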
def visualize_labeled_z():
	x_batch, label_batch = sample_x_and_label_from_data_distribution(len(dataset), sequential=True)
	z_batch = gen(x_batch, test=True)
	z_batch = z_batch.data
	# if z_batch[0].shape[0] != 2:
	# 	raise Exception("Latent code vector dimension must be 2.")

	fig = pylab.gcf()
	fig.set_size_inches(20.0, 16.0)
	pylab.clf()
	colors = ["#2103c8", "#0e960e", "#e40402","#05aaa8","#ac02ab","#aba808","#151515","#94a169", "#bec9cd", "#6a6551"]
	for n in xrange(z_batch.shape[0]):
		result = pylab.scatter(z_batch[n, 0], z_batch[n, 1], c=colors[label_batch[n]], s=40, marker="o", edgecolors='none')

	classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
	recs = []
	for i in range(0, len(colors)):
		recs.append(mpatches.Rectangle((0, 0), 1, 1, fc=colors[i]))

	ax = pylab.subplot(111)
	box = ax.get_position()
	ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
	ax.legend(recs, classes, loc="center left", bbox_to_anchor=(1.1, 0.5))
	pylab.xticks(pylab.arange(-4, 5))
	pylab.yticks(pylab.arange(-4, 5))
	pylab.xlabel("z1")
	pylab.ylabel("z2")
	pylab.savefig("%s/labeled_z.png" % args.visualization_dir)
Example #5
def visualize_reconstruction():
    x_batch = sample_x_from_data_distribution(100)

    z_batch = gen(x_batch, test=True)
    _x_batch = dec(z_batch, test=True)
    if use_gpu:
        _x_batch.to_cpu()

    fig = pylab.gcf()
    fig.set_size_inches(16.0, 16.0)
    pylab.clf()
    if config.img_channel == 1:
        pylab.gray()
    for m in range(100):
        pylab.subplot(10, 10, m + 1)
        if config.img_channel == 1:
            pylab.imshow(np.clip((_x_batch.data[m] + 1.0) / 2.0, 0.0,
                                 1.0).reshape(
                                     (config.img_width, config.img_width)),
                         interpolation="none")
        elif config.img_channel == 3:
            # imshow expects (height, width, channel), so transpose from (channel, height, width)
            pylab.imshow(np.clip(
                (_x_batch.data[m] + 1.0) / 2.0, 0.0, 1.0).reshape(
                    (config.img_channel, config.img_width, config.img_width)).transpose(1, 2, 0),
                         interpolation="none")
        pylab.axis("off")
    pylab.savefig("%s/reconstruction.png" % args.visualization_dir)
Example #6
def get(self):
    try:
        latent = get_latent_vector()
        image = gen(latent).detach().numpy()[0]
        # (C, H, W) -> (H, W, C), then rescale from [-1, 1] to [0, 1]
        image = np.transpose(image, (1, 2, 0)) * .5 + .5
        image = (image * 255).astype(np.uint8)
        image = Image.fromarray(image)
        buff = io.BytesIO()
        image.save(buff, format="PNG")
        img_str = base64.b64encode(buff.getvalue()).decode('utf-8')
        return {"image": img_str}
    except Exception as e:
        print(e)
        return {"error": "An error happened"}, 500
def visualize_reconstruction():
	x_batch = sample_x_from_data_distribution(100)
	x_batch = dec(gen(x_batch, test=True), test=True)
	if use_gpu:
		x_batch.to_cpu()

	fig = pylab.gcf()
	fig.set_size_inches(16.0, 16.0)
	pylab.clf()
	if config.img_channel == 1:
		pylab.gray()
	for m in range(100):
		pylab.subplot(10, 10, m + 1)
		if config.img_channel == 1:
			pylab.imshow(np.clip((x_batch.data[m] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_width, config.img_width)), interpolation="none")
		elif config.img_channel == 3:
			# imshow expects (height, width, channel), so transpose from (channel, height, width)
			pylab.imshow(np.clip((x_batch.data[m] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_channel, config.img_width, config.img_width)).transpose(1, 2, 0), interpolation="none")
		pylab.axis("off")
	pylab.savefig("%s/reconstruction.png" % args.visualization_dir)
def visualize_labeled_z():
	x_batch, label_batch = sample_x_and_label_from_data_distribution(len(dataset), sequential=True)
	z_batch = gen(x_batch, test=True)
	z_batch = z_batch.data
	if z_batch[0].shape[0] != 2:
		raise Exception("Latent code vector dimension must be 2.")
		
	pylab.rcParams["figure.figsize"] = (5.0, 5.0)
	pylab.clf()
	colors = ["#2103c8", "#0e960e", "#e40402","#05aaa8","#ac02ab","#aba808","#151515","#94a169", "#bec9cd", "#6a6551"]
	for n in xrange(z_batch.shape[0]):
		result = pylab.scatter(z_batch[n, 0], z_batch[n, 1], c=colors[label_batch[n]], s=40, marker="o", edgecolors='none')

	classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
	recs = []
	for i in range(0, len(colors)):
		recs.append(mpatches.Rectangle((0, 0), 1, 1, fc=colors[i]))
	pylab.legend(recs, classes, loc="lower right")
	# pylab.xticks(pylab.arange(-2, 3))
	# pylab.yticks(pylab.arange(-2, 3))
	pylab.xlabel("z1")
	pylab.ylabel("z2")
	pylab.savefig("%s/labeled_z.png" % args.visualization_dir)
def train(dataset, labels):
	if config.n_z % 2 != 0:
		raise Exception("The dimension of the latent code z must be a multiple of 2.")
	batchsize = 100
	n_epoch = 10000
	n_train_each_epoch = 2000
	total_time = 0

	xp = cuda.cupy if config.use_gpu else np

	# Number of discriminator updates per generator update
	## See [Generative Adversarial Networks](http://arxiv.org/abs/1406.2661) for details
	n_steps_to_optimize_dis = 1

	# Use Adam
	optimizer_dec = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen.setup(gen)
	optimizer_dec.setup(dec)
	optimizer_dis.setup(dis)
	optimizer_dec.add_hook(optimizer.WeightDecay(0.0001))
	optimizer_gen.add_hook(optimizer.WeightDecay(0.0001))
	optimizer_dis.add_hook(optimizer.WeightDecay(0.0001))

	start_epoch = 1 if args.load_epoch == 0 else args.load_epoch + 1

	for epoch in xrange(start_epoch, n_epoch):
		# Loss of the adversarial networks
		sum_loss_regularization = 0
		# Reconstruction loss
		sum_loss_reconstruction = 0

		start_time = time.time()

		for i in xrange(0, n_train_each_epoch):

			# Sample minibatch of examples
			x_batch, label_index_batch, label_one_hot = sample_x_and_label_from_data_distribution(batchsize)

			# Reconstruction phase
			z_fake_batch = gen(x_batch)
			_x_batch = dec(z_fake_batch)

			## Minimize the reconstruction loss
			loss_reconstruction = F.mean_squared_error(x_batch, _x_batch)
			sum_loss_reconstruction += loss_reconstruction.data

			optimizer_dec.zero_grads()
			optimizer_gen.zero_grads()
			loss_reconstruction.backward()
			optimizer_dec.update()
			optimizer_gen.update()

			# Adversarial phase
			for k in xrange(n_steps_to_optimize_dis):
				if k > 0:
					x_batch, label_index_batch, label_one_hot = sample_x_and_label_from_data_distribution(batchsize)

				z_real_batch = util.sample_z_from_n_2d_gaussian_mixture(batchsize, config.n_z, label_index_batch, 10, config.use_gpu)
				z_real_labeled_batch = util.add_label(z_real_batch, label_one_hot)

				## Discriminator loss
				p_real_batch = dis(z_real_labeled_batch)
				## p_real_batch[0] -> score for "real"
				## p_real_batch[1] -> score for "fake"
				loss_dis_real = F.softmax_cross_entropy(p_real_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))

				## z_fake_batch has already been computed above, so skip recomputing it on the first step
				if k > 0:
					z_fake_batch = gen(x_batch)
				z_fake_batch_labeled = util.add_label(z_fake_batch, label_one_hot)

				p_fake_batch = dis(z_fake_batch_labeled)
				## p_fake_batch[0] -> score for "real"
				## p_fake_batch[1] -> score for "fake"
				loss_dis_fake = F.softmax_cross_entropy(p_fake_batch, Variable(xp.ones(batchsize, dtype=np.int32)))

				loss_dis = loss_dis_fake + loss_dis_real
				sum_loss_regularization += loss_dis.data / float(k + 1)
				
				optimizer_dis.zero_grads()
				loss_dis.backward()
				optimizer_dis.update()

			## p_fake_batch[0] -> score for "real"
			## p_fake_batch[1] -> score for "fake"
			## When training the generator, try to make the discriminator believe the fake codes are real
			loss_gen = F.softmax_cross_entropy(p_fake_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))
			sum_loss_regularization += loss_gen.data

			optimizer_gen.zero_grads()
			loss_gen.backward()
			optimizer_gen.update()

		# Report statistics and save the models
		print "epoch", epoch
		print "	reconstruction_loss", (sum_loss_reconstruction / n_train_each_epoch)
		print "	regularization_loss", (sum_loss_regularization / n_train_each_epoch)
		p_real_batch.to_cpu()
		p_real_batch = p_real_batch.data.transpose(1, 0)
		p_real_batch = np.exp(p_real_batch)
		sum_p_real_batch = p_real_batch[0] + p_real_batch[1]
		win_real = p_real_batch[0] / sum_p_real_batch
		print "	D(real_z)", win_real.mean()
		p_fake_batch.to_cpu()
		p_fake_batch = p_fake_batch.data.transpose(1, 0)
		p_fake_batch = np.exp(p_fake_batch)
		sum_p_fake_batch = p_fake_batch[0] + p_fake_batch[1]
		win_fake = p_fake_batch[0] / sum_p_fake_batch
		print "	D(gen_z) ", win_fake.mean()
		serializers.save_hdf5("%s/gen_epoch_%d.model" % (args.model_dir, epoch), gen)
		serializers.save_hdf5("%s/dis_epoch_%d.model" % (args.model_dir, epoch), dis)
		serializers.save_hdf5("%s/dec_epoch_%d.model" % (args.model_dir, epoch), dec)
		elapsed_time = time.time() - start_time
		print "	time", elapsed_time
		total_time += elapsed_time
		print "	total_time", total_time
Example #10
def train(dataset, labels):
	if config.n_z != 12:
		raise Exception("The dimension of the latent code z must be 12 (2d for z, 10d for number label).")
	batchsize = 100
	n_epoch = 10000
	n_train_each_epoch = 2000
	total_time = 0

	xp = cuda.cupy if config.use_gpu else np

	# Number of discriminator updates per generator update
	## See [Generative Adversarial Networks](http://arxiv.org/abs/1406.2661) for details
	n_steps_to_optimize_dis = 1

	# Use Adam
	optimizer_dec = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen.setup(gen)
	optimizer_dec.setup(dec)
	optimizer_dis.setup(dis)

	start_epoch = 1 if args.load_epoch == 0 else args.load_epoch + 1

	for epoch in xrange(start_epoch, n_epoch):
		# Loss of the adversarial networks
		sum_loss_regularization = 0
		# Reconstruction loss
		sum_loss_reconstruction = 0

		start_time = time.time()

		z_mask_batch = xp.zeros((batchsize, 12), dtype=xp.float32)
		z_mask_batch[:,:2] = xp.ones((batchsize, 2), dtype=xp.float32)
		z_mask_batch = Variable(z_mask_batch)
		
		for i in xrange(0, n_train_each_epoch):

			# Sample minibatch of examples
			x_batch, label_index_batch, label_1ofK_batch_extended = sample_x_and_label_from_data_distribution(batchsize)

			# Reconstruction phase
			z_fake_batch = gen(x_batch)
			## 12d -> 2d: the mask zeroes out all but the first two dimensions
			z_fake_batch = z_fake_batch * z_mask_batch
			_x_batch = dec(z_fake_batch)

			## Minimize the reconstruction loss
			loss_reconstruction = F.mean_squared_error(x_batch, _x_batch)
			sum_loss_reconstruction += loss_reconstruction.data

			optimizer_dec.zero_grads()
			optimizer_gen.zero_grads()
			loss_reconstruction.backward()
			optimizer_dec.update()
			optimizer_gen.update()

			# Adversarial phase
			for k in xrange(n_steps_to_optimize_dis):
				if k > 0:
					x_batch, label_index_batch, label_1ofK_batch_extended = sample_x_and_label_from_data_distribution(batchsize)

				z_real_batch = sample_z_from_swiss_roll_distribution(batchsize, label_index_batch, 10, config.use_gpu)
				z_real_batch_labeled = xp.zeros((batchsize, 12), dtype=xp.float32)
				z_real_batch_labeled[:,:2] = z_real_batch.data[:,:]
				z_real_batch_labeled = z_real_batch_labeled + label_1ofK_batch_extended.data
				z_real_batch_labeled = Variable(z_real_batch_labeled)

				## Discriminator loss
				p_real_batch = dis(z_real_batch_labeled)
				## p_real_batch[0] -> score for "real"
				## p_real_batch[1] -> score for "fake"
				loss_dis_real = F.softmax_cross_entropy(p_real_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))

				## z_fake_batch has already been computed above, so skip recomputing it on the first step
				if k > 0:
					z_fake_batch = gen(x_batch)
				z_fake_batch_labeled = z_fake_batch * z_mask_batch + label_1ofK_batch_extended

				p_fake_batch = dis(z_fake_batch_labeled)
				## p_fake_batch[0] -> score for "real"
				## p_fake_batch[1] -> score for "fake"
				loss_dis_fake = F.softmax_cross_entropy(p_fake_batch, Variable(xp.ones(batchsize, dtype=np.int32)))

				loss_dis = loss_dis_fake + loss_dis_real
				sum_loss_regularization += loss_dis.data / float(k + 1)
				
				optimizer_dis.zero_grads()
				loss_dis.backward()
				optimizer_dis.update()


			## p_fake_batch[0] -> score for "real"
			## p_fake_batch[1] -> score for "fake"
			## When training the generator, try to make the discriminator believe the fake codes are real
			loss_gen = F.softmax_cross_entropy(p_fake_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))
			sum_loss_regularization += loss_gen.data

			optimizer_gen.zero_grads()
			loss_gen.backward()
			optimizer_gen.update()

		# Report statistics and save the models
		print "epoch", epoch
		print "	reconstruction_loss", (sum_loss_reconstruction / n_train_each_epoch)
		print "	regularization_loss", (sum_loss_regularization / n_train_each_epoch)
		p_real_batch.to_cpu()
		p_real_batch = p_real_batch.data.transpose(1, 0)
		p_real_batch = np.exp(p_real_batch)
		sum_p_real_batch = p_real_batch[0] + p_real_batch[1]
		win_real = p_real_batch[0] / sum_p_real_batch
		print "	D(real_z)", win_real.mean()
		p_fake_batch.to_cpu()
		p_fake_batch = p_fake_batch.data.transpose(1, 0)
		p_fake_batch = np.exp(p_fake_batch)
		sum_p_fake_batch = p_fake_batch[0] + p_fake_batch[1]
		win_fake = p_fake_batch[0] / sum_p_fake_batch
		print "	D(gen_z) ", win_fake.mean()
		serializers.save_hdf5("%s/gen_epoch_%d.model" % (args.model_dir, epoch), gen)
		serializers.save_hdf5("%s/dis_epoch_%d.model" % (args.model_dir, epoch), dis)
		serializers.save_hdf5("%s/dec_epoch_%d.model" % (args.model_dir, epoch), dec)
		elapsed_time = time.time() - start_time
		print "	time", elapsed_time
		total_time += elapsed_time
		print "	total_time", total_time
def train(dataset, labels):
	batchsize = 100
	n_epoch = 10000
	n_train_each_epoch = 500
	visualization_interval = 5

	xp = cuda.cupy if config.use_gpu else np

	# Number of discriminator updates per generator update
	## See [Generative Adversarial Networks](http://arxiv.org/abs/1406.2661) for details
	n_steps_to_optimize_dis = 3

	# Use Adam
	optimizer_dec = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
	optimizer_gen.setup(gen)
	optimizer_dec.setup(dec)
	optimizer_dis.setup(dis)

	start_epoch = 1 if args.load_epoch == 0 else args.load_epoch + 1

	for epoch in xrange(start_epoch, n_epoch):
		# Loss of the adversarial networks
		sum_loss_regularization = 0
		# Reconstruction loss
		sum_loss_reconstruction = 0

		start_time = time.time()
		
		for i in xrange(0, n_train_each_epoch):

			# Sample minibatch of examples
			x_batch, label_batch = sample_x_and_label_from_data_distribution(batchsize)

			# Reconstruction phase
			z_fake_batch = gen(x_batch)
			_x_batch = dec(z_fake_batch)

			## Minimize the reconstruction loss
			loss_reconstruction = F.mean_squared_error(x_batch, _x_batch)
			sum_loss_reconstruction += loss_reconstruction.data

			optimizer_dec.zero_grads()
			optimizer_gen.zero_grads()
			loss_reconstruction.backward()
			optimizer_dec.update()
			optimizer_gen.update()

			# Adversarial phase
			for k in xrange(n_steps_to_optimize_dis):
				if k > 0:
					x_batch, label_batch = sample_x_and_label_from_data_distribution(batchsize)

				z_real_batch = sample_z_from_noise_prior(batchsize, config.n_z, config.use_gpu)

				## Discriminator loss
				p_real_batch = dis(z_real_batch)
				## p_real_batch[0] -> score for "real"
				## p_real_batch[1] -> score for "fake"
				loss_dis_real = F.softmax_cross_entropy(p_real_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))

				## z_fake_batch has already been computed above, so skip recomputing it on the first step
				if k > 0:
					z_fake_batch = gen(x_batch)

				p_fake_batch = dis(z_fake_batch)
				## p_fake_batch[0] -> score for "real"
				## p_fake_batch[1] -> score for "fake"
				loss_dis_fake = F.softmax_cross_entropy(p_fake_batch, Variable(xp.ones(batchsize, dtype=np.int32)))

				loss_dis = loss_dis_fake + loss_dis_real
				sum_loss_regularization += loss_dis.data / float(k + 1)
				
				optimizer_dis.zero_grads()
				loss_dis.backward()
				optimizer_dis.update()


			## p_fake_batch[0] -> score for "real"
			## p_fake_batch[1] -> score for "fake"
			## When training the generator, try to make the discriminator believe the fake codes are real
			loss_gen = F.softmax_cross_entropy(p_fake_batch, Variable(xp.zeros(batchsize, dtype=np.int32)))
			sum_loss_regularization += loss_gen.data

			optimizer_gen.zero_grads()
			loss_gen.backward()
			optimizer_gen.update()

		# Report statistics and save the models
		print "epoch", epoch
		print "	reconstruction loss", (sum_loss_reconstruction / n_train_each_epoch)
		print "	regularization loss", (sum_loss_regularization / n_train_each_epoch)
		p_real_batch.to_cpu()
		p_real_batch = p_real_batch.data.transpose(1, 0)
		p_real_batch = np.exp(p_real_batch)
		sum_p_real_batch = p_real_batch[0] + p_real_batch[1]
		win_real = p_real_batch[0] / sum_p_real_batch
		lose_real = p_real_batch[1] / sum_p_real_batch
		print "	p_real_batch"
		print "		win  : ave", win_real.mean(), "std", win_real.std()
		print "		lose : ave", lose_real.mean(), "std", lose_real.std()
		p_fake_batch.to_cpu()
		p_fake_batch = p_fake_batch.data.transpose(1, 0)
		p_fake_batch = np.exp(p_fake_batch)
		sum_p_fake_batch = p_fake_batch[0] + p_fake_batch[1]
		win_fake = p_fake_batch[0] / sum_p_fake_batch
		lose_fake = p_fake_batch[1] / sum_p_fake_batch
		print "	p_fake_batch"
		print "		win  : ave", win_fake.mean(), "std", win_fake.std()
		print "		lose : ave", lose_fake.mean(), "std", lose_fake.std()
		# print "	x_p_real average", np.sum(x_p_real.data) / batchsize
		# print "	x_p_fake average", np.sum(x_p_fake.data) / batchsize
		serializers.save_hdf5("%s/gen_epoch_%d.model" % (args.model_dir, epoch), gen)
		serializers.save_hdf5("%s/dis_epoch_%d.model" % (args.model_dir, epoch), dis)
		serializers.save_hdf5("%s/dec_epoch_%d.model" % (args.model_dir, epoch), dec)
		print "	time", time.time() - start_time
Example #12
batch_size = 100
ep = 100000
zed = 256
print("test")

if __name__ == '__main__':
    datagen = ImageDataGenerator(
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
    )

    X_train_64 = images.load_images(FPATH64)
    X_train_128 = images.load_images(FPATH128)

    gen_model = model.gen(INPUT_SHAPE_64, 256, batch_size)
    dis_model = model.dis(INPUT_SHAPE_64)
    ups_gen_model = model.upsample_gen(INPUT_SHAPE_64, INPUT_SHAPE_128,
                                       batch_size)
    ups_dis_model = model.dis(INPUT_SHAPE_128)
    gan_feed = model.gan(gen_model,
                         dis_model,
                         batch_size,
                         ups_gen_model,
                         ups_dis_model,
                         upsample=True)

    print('Generator...')
    gen_model.summary()
    print('Discriminator...')
    dis_model.summary()
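The snippet above stops before any training steps. As a rough illustration (the gan_feed interface and training loop are not shown, so this is only an assumption about how the augmentation generator might be used), ImageDataGenerator.flow can supply augmented 64x64 batches:

    # Hypothetical usage sketch: pull augmented batches from the generator defined above.
    flow_64 = datagen.flow(X_train_64, batch_size=batch_size, shuffle=True)
    for step in range(10):
        x_real = next(flow_64)  # shape: (batch_size,) + INPUT_SHAPE_64
        # x_real would then be fed to the GAN training step (e.g. through gan_feed)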
Example #13
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(
        gen(),
        mimetype='multipart/x-mixed-replace; boundary=frame')
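Note that gen() in this route is a frame generator for the MJPEG stream, unrelated to the encoder gen in the earlier snippets. A minimal sketch of the conventional generator such a route expects (the camera source and JPEG encoding are assumptions, not part of the original code):

import cv2

def gen():
    # Hypothetical sketch: read frames from the default camera and yield them as
    # JPEG parts separated by the "frame" boundary declared in the Response above.
    camera = cv2.VideoCapture(0)
    while True:
        success, frame = camera.read()
        if not success:
            break
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')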