Example #1
    def test_adam(self):
        lr = 0.1
        n, m = 4, 6
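        # random parameters and small random gradients, with zero-initialized
        # first/second moment buffers for the NumPy reference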
        p1 = np.random.rand(n, m)
        p2 = np.random.rand(n, m)
        g1 = np.random.rand(n, m) * 0.01
        g2 = np.random.rand(n, m) * 0.01
        m1 = np.zeros((n, m))
        m2 = np.zeros((n, m))
        v1 = np.zeros((n, m))
        v2 = np.zeros((n, m))
        t1 = tensor.from_numpy(p1)
        t2 = tensor.from_numpy(p2)
        tg1 = tensor.from_numpy(g1)
        tg2 = tensor.from_numpy(g2)

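        # run the NumPy reference implementation for steps t = 1..9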
        for t in range(1, 10):
            np_adam([p1, p2], [g1, g2], [m1, m2], [v1, v2], lr, t)

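        # apply the Adam optimizer under test with the same step counter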
        adam = opt.Adam(lr=lr)
        for t in range(1, 10):
            adam.apply(0, tg1, t1, 'p1', t)
            adam.apply(0, tg2, t2, 'p2', t)

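        # copy the tensors back to NumPy and check elementwise agreement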
        t1 = tensor.to_numpy(t1)
        t2 = tensor.to_numpy(t2)
        for t, p in zip([t1, t2], [p1, p2]):
            for i in range(n):
                for j in range(m):
                    self.assertAlmostEqual(t[i, j], p[i, j], 6)
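np_adam is the NumPy reference implementation that the test compares against; it is called but not shown above. A minimal sketch of such a helper, assuming the standard Adam defaults (beta1=0.9, beta2=0.999, eps=1e-8) and in-place updates of the parameter and moment arrays:

    import numpy as np

    def np_adam(params, grads, moment1, moment2, lr, t,
                beta1=0.9, beta2=0.999, eps=1e-8):
        # one Adam step for each (parameter, gradient, moments) tuple; t starts at 1
        for p, g, m, v in zip(params, grads, moment1, moment2):
            m[:] = beta1 * m + (1 - beta1) * g            # biased first moment
            v[:] = beta2 * v + (1 - beta2) * g * g        # biased second moment
            m_hat = m / (1 - beta1 ** t)                  # bias-corrected estimates
            v_hat = v / (1 - beta2 ** t)
            p -= lr * m_hat / (np.sqrt(v_hat) + eps)      # in-place parameter update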
Example #2
	def train(self):
		train_data, _, _, _, _, _ = load_data(self.dataset_filepath)
		opt_0 = optimizer.Adam(lr=self.learning_rate)  # optimizer for the discriminator
		opt_1 = optimizer.Adam(lr=self.learning_rate)  # optimizer for the generator, i.e. the combined model
		for (p, specs) in zip(self.dis_net.param_names(), self.dis_net.param_specs()):
			opt_0.register(p, specs)
		for (p, specs) in zip(self.gen_net.param_names(), self.gen_net.param_specs()):
			opt_1.register(p, specs)

		for epoch in range(self.epochs):
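			# sample a random batch of real images from the training data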
			idx = np.random.randint(0, train_data.shape[0], self.batch_size)
			real_imgs = train_data[idx]
			real_imgs = tensor.from_numpy(real_imgs)
			real_imgs.to_device(self.dev)
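			# sample noise and run the generator to produce a batch of fake images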
			noise = tensor.Tensor((self.batch_size, self.noise_size))
			noise.uniform(-1, 1)
			noise.to_device(self.dev)
			fake_imgs = self.gen_net.forward(flag=False, x=noise)
			real_labels = tensor.Tensor((self.batch_size, 1))
			fake_labels = tensor.Tensor((self.batch_size, 1))
			real_labels.set_value(1.0)
			fake_labels.set_value(0.0)
			real_labels.to_device(self.dev)
			fake_labels.to_device(self.dev)
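			# train the discriminator on the real batch and the fake batch, updating with opt_0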
			grads, (d_loss_real, _) = self.dis_net.train(real_imgs, real_labels)
			for (s, p, g) in zip(self.dis_net.param_names(), self.dis_net.param_values(), grads):
				opt_0.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
			grads, (d_loss_fake, _) = self.dis_net.train(fake_imgs, fake_labels)
			for (s, p, g) in zip(self.dis_net.param_names(), self.dis_net.param_values(), grads):
				opt_0.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
			d_loss = d_loss_real + d_loss_fake
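			# re-sample noise and train the generator through the combined model so its fakes are scored as real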
			noise = tensor.Tensor((self.batch_size, self.noise_size))
			noise.uniform(-1, 1)
			noise.to_device(self.dev)
			real_labels = tensor.Tensor((self.batch_size, 1))
			real_labels.set_value(1.0)
			real_labels.to_device(self.dev)
			grads, (g_loss, _) = self.combined_net.train(noise, real_labels)
			for (s, p, g) in zip(self.gen_net.param_names(), self.gen_net.param_values(), grads):
				opt_1.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
			
			if epoch % self.interval == 0:
				self.save_image(epoch)
				print_log('Epoch {}: G_LOSS: {}, D_LOSS: {}'.format(epoch, g_loss, d_loss))