import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable

import DCGAN

# Hyper Parameters
EPOCH = 100                       # number of training epochs
Z_DIMENSION = 110                 # size of the generator's input vector; the last ten entries carry the one-hot label
G_EPOCH = 1                       # generator updates per discriminator update
NUM_IMG = 100                     # batch size
LR = 0.0003                       # learning rate
OPTIMIZER = torch.optim.Adam      # optimizer
CRITERION = nn.BCELoss()          # loss function
NUM_OF_WORKERS = 10               # number of data-loading workers
N_IDEAS = 100                     # size of the random noise; Z_DIMENSION exceeds it by the number of tags

D = DCGAN.Discriminator()
G = DCGAN.Generator(Z_DIMENSION, 1 * 56 * 56)

# Load data sets and loaders
Training_Set, Testing_Set, Training_Loader, Testing_Loader = DCGAN.load_image(
    NUM_IMG, NUM_OF_WORKERS)

D = D.cuda()
G = G.cuda()

d_optimizer = OPTIMIZER(D.parameters(), lr=LR)
g_optimizer = OPTIMIZER(G.parameters(), lr=LR)

if __name__ == '__main__':
    for count, i in enumerate(range(EPOCH)):
        for (img, label) in Training_Loader:
            # Build one-hot label vectors for the batch
            labels_one_hot = np.zeros((NUM_IMG, 10))
            labels_one_hot[np.arange(NUM_IMG), label.numpy()] = 1
            img = Variable(img).cuda()
            real_label = Variable(torch.ones(NUM_IMG)).cuda()  # real images labeled 1 (assumed completion of the truncated line)
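            # The original loop body is truncated here. What follows is a hypothetical sketch of the
            # standard alternating D/G update such a loop performs, reusing D, G, CRITERION,
            # d_optimizer, g_optimizer, N_IDEAS and NUM_IMG from above; the noise/label concatenation,
            # the reshape of G's output and the assumption that D ends in a sigmoid are guesses,
            # not the author's confirmed code.
            fake_label = Variable(torch.zeros(NUM_IMG)).cuda()        # fake images labeled 0

            # --- discriminator step: real vs. fake ---
            real_out = D(img).squeeze()
            d_loss_real = CRITERION(real_out, real_label)

            noise = np.random.randn(NUM_IMG, N_IDEAS)                 # random "ideas"
            z = np.concatenate((noise, labels_one_hot), axis=1)       # append one-hot label -> Z_DIMENSION
            z = Variable(torch.from_numpy(z).float()).cuda()
            fake_img = G(z).view(NUM_IMG, 1, 56, 56)                  # reshape if G returns a flat vector (depends on DCGAN's definition)
            fake_out = D(fake_img.detach()).squeeze()                 # detach: do not update G in this step
            d_loss_fake = CRITERION(fake_out, fake_label)

            d_loss = d_loss_real + d_loss_fake
            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()

            # --- generator step: make D score the fakes as real ---
            g_loss = CRITERION(D(fake_img).squeeze(), real_label)
            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()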
# (os, mx, nd, gluon, dc, transform, ctx, path, batch_size, lr and beta1 are defined earlier in the script)
img_list = []
for fname in os.listdir(path):
    if not fname.endswith('.jpg'):   # assumed loop header and filter; the original fragment starts mid-loop
        continue
    img = os.path.join(path, fname)
    img_arr = mx.image.imread(img)
    img_arr = transform(img_arr)
    img_list.append(img_arr)
train_data = mx.io.NDArrayIter(data=nd.concatenate(img_list), batch_size=batch_size)

# Initial Model
## loss
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()

## generator and discriminator
filename1 = './params/dcgan.netG.get'
filename2 = './params/dcgan.netD.get'
netG = dc.Generator()
netD = dc.Discriminator()
# netG.load_params(filename1, ctx=ctx)
# netD.load_params(filename2, ctx=ctx)
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)

## trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {
    'learning_rate': lr,
    'beta1': beta1
})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {
    'learning_rate': lr,
    'beta1': beta1
})

# Training Loop
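# The fragment stops at the header above. Below is a minimal sketch of a typical Gluon DCGAN
# training loop under this setup, reusing netD, netG, loss, trainerD, trainerG, train_data, ctx
# and batch_size from above; the latent shape, the epochs value and the variable names introduced
# here are assumptions, not the author's confirmed loop.
from mxnet import autograd

epochs = 100                 # assumed value
latent_z_size = 100          # assumed latent size; depends on dc.Generator's definition

for epoch in range(epochs):
    train_data.reset()
    for batch in train_data:
        data = batch.data[0].as_in_context(ctx)
        latent_z = mx.nd.random.normal(0, 1, shape=(batch_size, latent_z_size, 1, 1), ctx=ctx)
        real_label = nd.ones((batch_size,), ctx=ctx)
        fake_label = nd.zeros((batch_size,), ctx=ctx)

        # update D: maximize log(D(x)) + log(1 - D(G(z)))
        with autograd.record():
            errD_real = loss(netD(data).reshape((-1, 1)), real_label)
            fake = netG(latent_z)
            errD_fake = loss(netD(fake.detach()).reshape((-1, 1)), fake_label)
            errD = errD_real + errD_fake
        errD.backward()
        trainerD.step(batch_size)

        # update G: maximize log(D(G(z)))
        with autograd.record():
            fake = netG(latent_z)
            errG = loss(netD(fake).reshape((-1, 1)), real_label)
        errG.backward()
        trainerG.step(batch_size)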