"""Train a conditional DC-GAN on 56x56 single-channel images (CUDA).

BUG FIX: the original script referenced ``torch``, ``nn`` and ``DCGAN``
without importing them; the missing imports are added below.
"""
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable

import DCGAN

# Hyper parameters
EPOCH = 100            # number of training epochs
Z_DIMENSION = 110      # generator input size; the last ten entries are the label
G_EPOCH = 1            # discriminator epochs per step
NUM_IMG = 100          # batch size (images per batch)
LR = 0.0003            # learning rate
OPTIMIZER = torch.optim.Adam   # optimizer class used for both networks
CRITERION = nn.BCELoss()       # loss function
NUM_OF_WORKERS = 10    # data-loader worker count
N_IDEAS = 100          # noise size; Z_DIMENSION exceeds it by the number of tags

D = DCGAN.Discriminator()
G = DCGAN.Generator(Z_DIMENSION, 1 * 56 * 56)  # generator emits 1x56x56 images

# NOTE(review): in the mangled original this call appeared to share a line with
# a '#' marker; Training_Loader is used below, so the call must be live.
Training_Set, Testing_Set, Training_Loader, Testing_Loader = DCGAN.load_image(
    NUM_IMG, NUM_OF_WORKERS)

D = D.cuda()
G = G.cuda()

d_optimizer = OPTIMIZER(D.parameters(), lr=LR)
g_optimizer = OPTIMIZER(G.parameters(), lr=LR)

if __name__ == '__main__':
    for count, i in enumerate(range(EPOCH)):
        for (img, label) in Training_Loader:
            # One-hot encode the batch labels (10 classes) for conditioning.
            labels_one_hot = np.zeros((NUM_IMG, 10))
            labels_one_hot[np.arange(NUM_IMG), label.numpy()] = 1
            img = Variable(img).cuda()
            real_label = Variable(
                torch.from_numpy(labels_one_hot).float()).cuda()
            # NOTE(review): the remainder of the training step (discriminator
            # and generator updates) is not visible in this chunk — confirm
            # against the full file before relying on this script.
"""Generate digit samples from a trained conditional DC-GAN (CPU weights)."""
import torch
import DCGAN
import numpy as np
import matplotlib.pyplot as plt

NUM_IMG = 9           # images generated per digit class
Z_DEMENSION = 110     # full latent size: noise plus the 10-way one-hot label
N_IDEAS = 100         # pure-noise portion of the latent vector

# Rebuild both networks and restore the CPU checkpoints from disk.
D = DCGAN.Discriminator()
G = DCGAN.Generator(Z_DEMENSION, 3136)
D.load_state_dict(torch.load(r'./DC-GAN-Networks/discriminator_cpu_.pkl'))
G.load_state_dict(torch.load(r'./DC-GAN-Networks/generator_cpu_.pkl'))

lis = []
for digit in range(10):
    # Latent input: Gaussian noise followed by the one-hot class label.
    noise = torch.randn((NUM_IMG, N_IDEAS))
    one_hot = np.zeros((NUM_IMG, Z_DEMENSION - N_IDEAS))
    one_hot[:, digit] = 1
    z = torch.from_numpy(np.concatenate((noise.numpy(), one_hot), 1)).float()

    fake_img = G(z)
    lis.append(fake_img.detach().numpy())
    output = D(fake_img)  # discriminator score; not used further here

    # Save a per-class figure of the generated batch.
    DCGAN.show(fake_img)
    plt.savefig('./GAN_IMAGE/Test %d.png' % digit, bbox_inches='tight')

# Summary grid covering every class, then display everything.
DCGAN.show_all(lis)
plt.savefig('./GAN_IMAGE/Test_All.png', bbox_inches='tight')
plt.show()
"""Load a trained MXNet DC-GAN generator and plot four sampled images.

BUG FIX: the original script called ``dcgan.Generator()`` without ever
importing ``dcgan``; the import is added after the dependency path is set up.
"""
from mxnet import nd
from mxnet import random
from matplotlib import pyplot as plt
import numpy as np

# Re-seed from the wall clock so repeated runs draw different noise;
# without this the default seed would reproduce identical samples.
import time
seed = int(time.time() * 100)
random.seed(seed)

import sys
sys.path.append('./dependencies')
import utils
import dcgan  # was missing in the original, causing a NameError below

ctx = utils.try_gpu()

# Restore the trained generator parameters.
filename = './params/dcgan.netG.save'
netG = dcgan.Generator()
netG.load_params(filename, ctx=ctx)

# Four latent vectors of shape (100, 1, 1), pushed through the generator.
z = nd.random_normal(0, 1, shape=(4, 100, 1, 1), ctx=ctx)
output = netG(z)

# Map the generator output from [-1, 1] back to uint8 pixels and plot a row.
for i in range(4):
    plt.subplot(1, 4, i + 1)
    plt.imshow(((output[i].asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(
        np.uint8))
    plt.axis('off')
plt.show()
# NOTE(review): this chunk begins mid-loop — the `for fname in ...` header (and
# the definitions of `path`, `img_list`, `transform`, `ctx`, `lr`, `beta1`,
# `batch_size`, and the `mx`/`nd`/`gluon`/`dc` imports) lie outside the visible
# span, and the original newlines/indentation have been stripped. The code is
# therefore left byte-identical rather than reformatted.
#
# What the visible statements do:
#   * skip non-.jpg files, read each image with mx.image.imread, apply
#     `transform`, and collect the results in `img_list`;
#   * build an NDArrayIter batching the concatenated images;
#   * create a SigmoidBinaryCrossEntropyLoss (standard GAN loss);
#   * construct generator/discriminator from the `dc` module and initialize
#     them with Normal(0.02) weights (checkpoint loading is commented out —
#     note the load paths end in `.get`, presumably on purpose; verify);
#   * create one Adam gluon.Trainer per network with the shared lr/beta1.
if not fname.endswith('.jpg'): continue img = os.path.join(path, fname) img_arr = mx.image.imread(img) img_arr = transform(img_arr) img_list.append(img_arr) train_data = mx.io.NDArrayIter(data=nd.concatenate(img_list), batch_size=batch_size) # Initial Model ## loss loss = gluon.loss.SigmoidBinaryCrossEntropyLoss() ## generator and discriminator filename1 = './params/dcgan.netG.get' filename2 = './params/dcgan.netD.get' netG = dc.Generator() netD = dc.Discriminator() #netG.load_params(filename1, ctx = ctx) #netD.load_params(filename2, ctx = ctx) netG.initialize(mx.init.Normal(0.02), ctx=ctx) netD.initialize(mx.init.Normal(0.02), ctx=ctx) ## trainer for the generator and the discriminator trainerG = gluon.Trainer(netG.collect_params(), 'adam', { 'learning_rate': lr, 'beta1': beta1 }) trainerD = gluon.Trainer(netD.collect_params(), 'adam', { 'learning_rate': lr, 'beta1': beta1 })