Example #1
File: shiyan.py Project: hduyuanfu/GAN
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # Data preprocessing
    transform = transforms.Compose([
                                    transforms.Resize(opt.image_size),  # resize the image; opt.image_size defaults to 96
                                    transforms.CenterCrop(opt.image_size),  # crop an opt.image_size patch from the center
                                    transforms.ToTensor(),  # convert to a Tensor with values in [0, 1]
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # normalize to values in [-1, 1]
                                    ])
    dataset = datasets.ImageFolder(opt.data_path, transform=transform)  # read images from data_path; the class label comes from the folder name, faces
    dataloader = torch.utils.data.DataLoader(dataset,  # batch the images; by default 256 per batch, read with 4 worker processes
                                            batch_size=opt.batch_size,
                                            shuffle=True,
                                            num_workers=opt.num_workers,
                                            drop_last=True  # drop the last incomplete batch so every batch holds exactly batch_size images
                                            )


    # Networks: gnet is the generator, dnet the discriminator
    gnet, dnet = GNet(opt), DNet(opt)
    map_location = lambda storage, loc: storage  # deserialize checkpoint tensors onto the CPU
    if opt.dnet_path:
        dnet.load_state_dict(torch.load(opt.dnet_path, map_location=map_location))
    if opt.gnet_path:
        gnet.load_state_dict(torch.load(opt.gnet_path, map_location=map_location))
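
The `lambda storage, loc: storage` idiom above loads every saved tensor back into CPU memory regardless of the GPU it was serialized from. A minimal sketch of the equivalent, more readable spelling (the file name is hypothetical):

import torch

# map_location='cpu' behaves like lambda storage, loc: storage:
# every tensor in the checkpoint is deserialized onto the CPU.
state = torch.load('checkpoint.pth', map_location='cpu')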
Example #2
File: shiyan.py Project: hduyuanfu/GAN
def generate(**kwargs):  # run inference
    """
    Randomly generate anime avatars and keep the better ones according to dnet's scores.
    """
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    
    #device=torch.device('cuda') if opt.gpu else torch.device('cpu')

    gnet, dnet = GNet(opt).eval(), DNet(opt).eval()

    noises = torch.randn(opt.get_search_num, opt.nd, 1, 1).normal_(opt.noise_mean, opt.noise_std)
    #noises = noises.to(device)
    noises = noises.cuda()
    
    map_location = lambda storage, loc: storage
    dnet.load_state_dict(torch.load(opt.dnet_path, map_location=map_location))
    gnet.load_state_dict(torch.load(opt.gnet_path, map_location=map_location))
    dnet.cuda()
    gnet.cuda()

    # Generate images and score them with the discriminator
    fake_img = gnet(noises)
    scores = dnet(fake_img).detach()

    # Pick the best few (opt.get_num, 64 by default) and get their indices
    indexs = scores.topk(opt.get_num)[1]  # topk() returns a tuple: one element holds the scores, the other the indices
    result = []
    for i in indexs:
        result.append(fake_img.data[i])
    # Save the images
    tv.utils.save_image(torch.stack(result), opt.get_img, normalize=True, range=(-1, 1))
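
The indexing into topk's return value above relies on it being a (values, indices) pair; a minimal illustration with toy scores:

import torch

scores = torch.tensor([0.2, 0.9, 0.5, 0.7])
values, indices = scores.topk(2)  # both sorted by descending score
print(values)   # tensor([0.9000, 0.7000])
print(indices)  # tensor([1, 3])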
Example #3
File: shiyan2.py Project: hduyuanfu/GAN
def generate():
    opt = Config()
    criterion = nn.BCELoss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # training may span several GPUs, but one card is enough for inference, hence the small difference
    #dnet = torch.load('dnet1.pth').to(device)  # may need to move from another GPU to card 0; a no-op if already there
    #gnet = torch.load('gnet1.pth').to(device)
    dnet = DNet(opt).to(device)
    gnet = GNet(opt).to(device)
    
    state_dict = torch.load('dd.pth')
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # strip the 'module.' prefix that DataParallel adds
        new_state_dict[name] = v
    dnet.load_state_dict(new_state_dict)
    
    state_dict = torch.load('gg.pth')
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # strip the 'module.' prefix
        new_state_dict[name] = v
    gnet.load_state_dict(new_state_dict)

    
    dnet.eval()
    gnet.eval()
    noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
    #with torch.no_grad():
    fake = gnet(noise)
    output = dnet(fake)
    label = torch.full((opt.batch_size, ), opt.real_label, device=device)
    d_err_fake = criterion(output, label)  # loss on the generated images; still a tensor
    mean_score = output.mean()  # mean score of the generated images; still a tensor
    fake_img = vutils.make_grid(fake, normalize=True)

    writer = SummaryWriter(log_dir='generate_result')
    writer.add_image('fake_img', fake_img)
    writer.close()
    print('Average loss of the generated images: %.4f' % d_err_fake.item())
    print('Average score of the generated images: %.4f' % mean_score.item())
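
The key-renaming loops above assume every key begins with the 'module.' prefix that nn.DataParallel adds when a wrapped model is saved. A compact equivalent, as a sketch:

# Behaves exactly like name = k[7:] for prefixed keys and leaves others untouched.
new_state_dict = {(k[7:] if k.startswith('module.') else k): v
                  for k, v in state_dict.items()}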
Example #4
File: generate.py Project: hduyuanfu/GAN
def generate(opt, device):

    criterion = nn.BCELoss()

    dnet = DNet(opt).to(device)  # may need to move from another GPU to card 0; a no-op if already there
    gnet = GNet(opt).to(device)

    state_dict = torch.load('dnet.pth')
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove 'module.'
        new_state_dict[name] = v
    dnet.load_state_dict(new_state_dict)

    state_dict = torch.load('gnet.pth')
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]
        new_state_dict[name] = v
    gnet.load_state_dict(new_state_dict)

    dnet.eval()
    gnet.eval()

    noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
    with torch.no_grad():
        fake = gnet(noise)
        output = dnet(fake)
    label = torch.full((opt.batch_size, ), opt.real_label, device=device)
    d_err_fake = criterion(output, label)  # loss on the generated images; still a tensor
    mean_score = output.mean()  # mean score of the generated images; still a tensor
    fake_img = vutils.make_grid(fake, normalize=True)

    writer = SummaryWriter(log_dir='generate_result')
    writer.add_image('fake_img', fake_img)
    writer.close()
    print('Average loss of the generated images: %.4f' % d_err_fake.item())
    print('Average score of the generated images: %.4f' % mean_score.item())
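
Unlike Example #3, this version wraps the forward passes in torch.no_grad(). A minimal demonstration of what that buys (toy tensor, not the model):

import torch

x = torch.ones(3, requires_grad=True)
with torch.no_grad():
    y = x * 2  # no autograd graph is recorded inside the block
print(y.requires_grad)  # False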
Example #5
File: shiyan2.py Project: hduyuanfu/GAN
def train():
    opt = Config()  # config instance
    dataloader = data_loader(opt)
    criterion = nn.BCELoss()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # use card 0 if the CUDA stack is available, else the CPU; DataParallel below spreads the work across cards
    gnet = GNet(opt).to(device)
    dnet = DNet(opt).to(device)
    #writer.add_graph(gnet)  # experiment with the second argument
    #writer.add_graph(dnet)
    if device.type == 'cuda':
        gnet = nn.DataParallel(gnet, [0, 1, 2])
        dnet = nn.DataParallel(dnet, [0, 1, 2])
    gnet.apply(weight_init)
    dnet.apply(weight_init)
    print('Generative Network:')
    print(gnet)
    print('')
    print('Discriminative Network:')
    print(dnet)

    g_optimizer = optim.Adam(gnet.parameters(), lr=opt.lr1, betas=(opt.beta1, 0.999))
    d_optimizer = optim.Adam(dnet.parameters(), lr=opt.lr2, betas=(opt.beta1, 0.999))
    print('g_optimizer:')
    print(g_optimizer)
    print('d_optimizer:')
    print(d_optimizer)

    writer = SummaryWriter(log_dir='result_shiyan')
    #dummy1_input = torch.rand(opt.batch_size, 3, 96,96)
    #dummy2_input = torch.rand(opt.batch_size, opt.nd,1,1)
    #writer.add_graph(dnet, dummy1_input.detach())
    #writer.add_graph(gnet, dummy2_input.detach())
    # Training Loop
    # Lists to keep track of progress
    img_list = []
    G_losses = []
    D_losses = []
    iters = 0
    fixed_noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(1, opt.max_epoch + 1):
        # For each batch in the dataloader
        print(len(dataloader))
        print(type(dataloader))
        for i, (imgs, _) in enumerate(dataloader, 1):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ############################
            ## Train with all-real batch
            dnet.zero_grad()
            '''Train the discriminator first, then the generator'''
            # Format batch
            real_img = imgs.to(device)
            label = torch.full((opt.batch_size, ), opt.real_label, device=device)
            # Forward pass real batch through D
            output = dnet(real_img)  # already flattened to 1-D inside the model module
            # Calculate loss on all-real batch
            d_err_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            d_err_real.backward()
            D_x = output.mean().item()  # mean score on real images; the closer to 1 the better

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
            # Generate fake image batch with G
            fake = gnet(noise)
            label.fill_(opt.fake_label)
            # Classify all fake batch with D
            output = dnet(fake.detach())
            # Calculate D's loss on the all-fake batch
            d_err_fake = criterion(output, label)
            # Calculate the gradients for this batch
            d_err_fake.backward()
            D_G_z1 = output.mean().item()  # mean score on the fake images; the closer to 0 the better
            # Add the gradients from the all-real and all-fake batches
            d_err = d_err_real + d_err_fake  # tensors such as tensor(1.272) and tensor(0.183) add directly; no need to extract the values first
            # tensor-with-tensor arithmetic behaves just like scalar arithmetic (and tensors also broadcast with plain Python scalars)
            # Update D
            d_optimizer.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            gnet.zero_grad()
            label.fill_(opt.real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = dnet(fake)  # after one update of D, the same batch of fakes should score lower than before, which enlarges g_err below
            # Calculate G's loss based on this output
            g_err = criterion(output, label)
            '''The generator pushes fake images toward the real label; comparing fakes against real labels, the smaller the loss the better'''
            # Calculate gradients for G
            g_err.backward()
            D_G_z2 = output.mean().item()  # since D was just updated, this mean output on fakes should sit closer to 0 than D_G_z1 if training is healthy
            # Update G
            g_optimizer.step()

            # Output training stats
            if i % 50 == 0:
                print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\treal_img_mean_score: %.4f\tfake_img_mean_score_1/2: %.4f / %.4f'
                    % (epoch, opt.max_epoch, i, len(dataloader),d_err.item(), g_err.item(), D_x, D_G_z1, D_G_z2))

            # Save Losses for plotting later
            G_losses.append(g_err.item())
            D_losses.append(d_err.item())

            writer.add_scalars('dnet_gnet_loss', {'G_losses': G_losses[iters], 'D_losses': D_losses[iters]}, iters)
            # Check how the generator is doing by saving G's output on fixed_noise
            if (iters % 500 == 0) or ((epoch == opt.max_epoch) and (i == len(dataloader))):
                with torch.no_grad():
                    fake = gnet(fixed_noise)#.detach().cpu()
                img_list.append(vutils.make_grid(fake, normalize=True))
                '''not yet sure how many small images the grid contains'''
                writer.add_image('fake%d'%(iters/500), img_list[int(iters/500)], int(iters/500))

            iters += 1
    
    #torch.save(dnet, 'dnet1.pth')
    #torch.save(gnet, 'gnet1.pth')
    torch.save(dnet.state_dict(), 'dd.pth')
    torch.save(gnet.state_dict(), 'gg.pth')

    #writer = SummaryWriter(log_dir='result_')
    print('Final iters: %d' % iters)
    print('Length of G_losses: %d' % len(G_losses))
    print('Length of D_losses: %d' % len(D_losses))
    #for i in range(iters):
        #writer.add_scalars('dnet_gnet_loss', {'G_losses': G_losses[i], 'D_losses': D_losses[i]}, i)
    print('Length of img_list: %d' % len(img_list))
    #for i in range(len(img_list)):
        #writer.add_image('fake%d'%i, img_list[i], i)
    #writer.add_graph(dnet, input_to_model=(torch.rand(opt.batch_size, 3, 96, 96), ), verbose=False)
    #writer.add_graph(gnet, input_to_model=(torch.rand(opt.batch_size, op.nd, 1, 1), ), verbose=False)
    writer.close()
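
The generator step above scores fake images against real labels with BCELoss, which reduces to the non-saturating loss -log(D(G(z))). A toy sanity check of that identity:

import torch
import torch.nn as nn

output = torch.tensor([0.3, 0.8])  # hypothetical discriminator scores in (0, 1)
label = torch.ones_like(output)    # fakes labeled as real, as in the G update
loss = nn.BCELoss()(output, label)
print(torch.allclose(loss, -torch.log(output).mean()))  # True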
Example #6
                    default=None,
                    metavar='G',
                    help='if not None, generate meshes to this folder')
parser.add_argument('--show_img',
                    type=bool,  # caution: argparse's type=bool treats any non-empty string as True
                    default=False,
                    metavar='S',
                    help='whether or not to show the images')
parser.add_argument('--load',
                    type=str,
                    metavar='M',
                    help='model file to load for evaluating.')
args = parser.parse_args()

# Model
model_gcn = GNet()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
state_dict = torch.load(args.load, map_location=device)
model_gcn.load_state_dict(state_dict)

# Turn batch norm into eval mode
# for child in model_gcn.feat_extr.children():
#     for ii in range(len(child)):
#         if type(child[ii]) == torch.nn.BatchNorm2d:
#             child[ii].track_running_stats = False
model_gcn.eval()

# Cuda
use_cuda = torch.cuda.is_available()
if use_cuda:
    model_gcn.cuda()
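
The commented-out loop above toggled track_running_stats by hand, but model_gcn.eval() already switches every BatchNorm layer to its running statistics. A quick check on a standalone layer:

import torch.nn as nn

bn = nn.BatchNorm2d(4)
bn.eval()
print(bn.training)  # False: forward now uses running_mean/running_var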
Example #7
parser.add_argument('--experiment', type=str, default='./model/', metavar='E',
                    help='folder where model and optimizer are saved.')
parser.add_argument('--load_model', type=str, default=None, metavar='M',
                    help='model file to load to continue training.')
parser.add_argument('--load_optimizer', type=str, default=None, metavar='O',
                    help='optimizer state file to load to continue training.')
args = parser.parse_args()

# Cuda
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Model
if args.load_model is not None: # Continue training
    state_dict = torch.load(args.load_model, map_location=device)
    model_gcn = GNet()
    model_gcn.load_state_dict(state_dict)
else:
    model_gcn = GNet()

# Optimizer
if args.load_optimizer is not None:
    state_dict_opt = torch.load(args.load_optimizer, map_location=device)
    optimizer = optim.Adam(model_gcn.parameters(), lr=args.lr)
    optimizer.load_state_dict(state_dict_opt)
else:
    optimizer = optim.Adam(model_gcn.parameters(), lr=args.lr)
model_gcn.train()

# Graph
graph = Graph("./ellipsoid/init_info.pickle")
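
For reference, a minimal sketch of the save side that these two load branches assume (file names are hypothetical; both objects restore cleanly via load_state_dict):

torch.save(model_gcn.state_dict(), 'model_gcn.pth')
torch.save(optimizer.state_dict(), 'optimizer.pth')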
Example #8
def train():
    learning_rate = tf.Variable(initial_lrate, trainable=False)
    g_net = GNet()
    grad_net = GradNet()
    g_checkpoint = tf.train.Checkpoint(g_model=g_net)
    grad_checkpoint = tf.train.Checkpoint(grad_model=grad_net)
    # restore weights
    if restore_model:
        # tf.train.Checkpoint(grad_model=grad_net).restore(grad_weights)
        tf.train.Checkpoint(g_model=g_net).restore(g_weights)
        print('restore grad model from %s, g model from %s' %
              (grad_weights, g_weights))
        # print('restore model from %s'%grad_weights)
    g_net.build(input_shape=(batch_size, 256, 256, 10))
    grad_net.build(input_shape=(batch_size, 256, 256, 10))

    # Adam optimizer
    g_optimizer = tf.keras.optimizers.Adam(beta_1=0.5, lr=learning_rate)
    grad_optimizer = tf.keras.optimizers.Adam(beta_1=0.5, lr=learning_rate)
    # tensorboard
    summary_writer = tf.summary.create_file_writer(log_dir)

    best_g_loss = inf
    best_grad_loss = inf
    # training and validation sets
    training_data = iter(
        data_generator("%s%s_*.tfrecords" % (train_dir, 'train'), batch_size))
    val_data = iter(
        data_generator("%s%s_*.tfrecords" % (val_dir, 'train'), batch_size))

    # train
    for epoch in range(1, max_epochs):
        # log loss
        all_loss, g_loss, val_loss, fst_loss = [], [], [], []

        # =========================================================
        # grad branch
        # =========================================================
        epoch_start_time = time.time()
        for _ in range(steps_per_epoch):
            start_time = time.time()
            batch_x = next(training_data)
            # input image
            origin_i = batch_x[:, :, :, :1]
            # input grad x and grad y
            grad_x = batch_x[:, :, :, 8:9]
            grad_y = batch_x[:, :, :, 9:10]

            with tf.GradientTape() as tape:

                pred = grad_net(batch_x, training=True)

                # revert hdr compression to calculate gradient
                re_hdr = revert_hdr_tf(pred)
                dx, dy = hdr_compression_tf(
                    calc_grad_x(re_hdr)), hdr_compression_tf(
                        calc_grad_y(re_hdr))

                if epoch > 5:
                    # epoch > 5  add first_order_loss to loss
                    lamda = min(0.1 * (1.1**epoch), 2)
                    # depth normal albedo
                    features = batch_x[:, :, :, 1:8]
                    # color loss and grad loss
                    loss = alpha * data_loss(pred, origin_i) +\
                        grad_loss(dx, dy, grad_x, grad_y)

                    # first order loss
                    G = g_net(batch_x, training=False)
                    f_loss = lamda * first_order_loss(pred, features, origin_i,
                                                      G)
                    loss = loss + f_loss

                    fst_loss.append(tf.reduce_mean(f_loss))

                else:
                    # only color loss and grad loss
                    loss = alpha * data_loss(pred, origin_i) + \
                            grad_loss(dx, dy, grad_x, grad_y)
                    fst_loss.append(0)

            grads = tape.gradient(loss, grad_net.trainable_variables)
            grad_optimizer.apply_gradients(
                zip(grads, grad_net.trainable_variables))
            all_loss.append(tf.reduce_mean(loss))
            print('grad step:%d/%d all_loss:%f 1st_loss:%f %fs' %
                  (_, steps_per_epoch, all_loss[-1], fst_loss[-1],
                   time.time() - start_time))

        print('epoch: %d grad_loss: %f 1st_loss:%f time: %fs' % (
            epoch,
            tf.reduce_mean(all_loss),
            tf.reduce_mean(fst_loss),
            (time.time() - epoch_start_time),
        ))

        # image = denoiseImage(grad_net, test_data, epoch, outputDir)

        if best_grad_loss > tf.reduce_mean(all_loss):
            print('grad loss improve from %f to %f' %
                  (best_grad_loss, tf.reduce_mean(all_loss)))
            best_grad_loss = tf.reduce_mean(all_loss)
        else:
            print('grad loss did not improve from %f' % (best_grad_loss))

        # save model weight
        grad_checkpoint.save(save_dir + 'grad_net-%d-%f.ckpt' %
                             (epoch, tf.reduce_mean(all_loss)))
        print('saving checkpoint to %sgrad_net-%d-%f.ckpt' %
              (save_dir, epoch, tf.reduce_mean(all_loss)))

        # =========================================================
        # g branch
        # =========================================================
        # train g branch
        epoch_start_time = time.time()
        for _ in range(steps_per_epoch):
            start_time = time.time()
            batch_x = next(training_data)
            # depth normal albedo
            features = batch_x[:, :, :, 1:8]
            origin_i = batch_x[:, :, :, :1]

            with tf.GradientTape() as tape:
                G = g_net(batch_x, training=True)
                loss = first_order_loss(origin_i, features, origin_i, G)
            grads = tape.gradient(loss, g_net.trainable_variables)
            g_optimizer.apply_gradients(zip(grads, g_net.trainable_variables))
            g_loss.append(tf.reduce_mean(loss))
            print('g step:%d/%d g_loss:%f %fs' %
                  (_, steps_per_epoch, g_loss[-1], time.time() - start_time))

        print('epoch: %d g_loss: %f time: %fs' %
              (epoch, tf.reduce_mean(g_loss),
               (time.time() - epoch_start_time)))
        if best_g_loss > tf.reduce_mean(g_loss):
            print('g_loss improve from %f to %f' %
                  (best_g_loss, tf.reduce_mean(g_loss)))
            best_g_loss = tf.reduce_mean(g_loss)
        else:
            print('g_loss did not improve from %f' % (best_g_loss))
        # save g branch model
        g_checkpoint.save('%sg_net-%d-%f.ckpt' %
                          (save_dir, epoch, tf.reduce_mean(g_loss)))
        print('saving checkpoint to %sg_net-%d-%f.ckpt' %
              (save_dir, epoch, tf.reduce_mean(g_loss)))

        # =========================================================
        # val
        # =========================================================
        if val:
            val_loss_tmp = []
            for _ in range(val_steps):
                batch_x = next(val_data)
                # input image
                origin_i = batch_x[:, :, :, :1]
                # input grad x and grad y
                grad_x = batch_x[:, :, :, 8:9]
                grad_y = batch_x[:, :, :, 9:10]

                pred = grad_net(batch_x, training=False)

                re_hdr = revert_hdr_tf(pred)
                dx, dy = hdr_compression_tf(
                    calc_grad_x(re_hdr)), hdr_compression_tf(
                        calc_grad_y(re_hdr))
                if epoch > 5:
                    lamda = min(0.1 * (1.1**epoch), 2)
                    features = batch_x[:, :, :, 1:8]
                    G = g_net(batch_x, training=False)
                    loss = alpha * data_loss(pred, origin_i) + \
                           grad_loss(dx, dy, grad_x, grad_y) + \
                           lamda * first_order_loss(pred, features, origin_i, G)
                else:
                    loss = alpha * data_loss(pred, origin_i) + \
                           grad_loss(dx, dy, grad_x, grad_y)
                val_loss_tmp.append(loss)
            val_loss.append(tf.reduce_mean(val_loss_tmp))
            print('val_loss: %f' % (tf.reduce_mean(val_loss)))

        # =========================================================
        # tensorboard
        # =========================================================
        with summary_writer.as_default():
            tf.summary.scalar("g loss", tf.reduce_mean(g_loss), step=epoch)
            tf.summary.scalar("all loss", tf.reduce_mean(all_loss), step=epoch)
            tf.summary.scalar('learning_rate', learning_rate, step=epoch)
            # tf.summary.image("image_%s" % epoch, image, step=epoch)
            tf.summary.scalar('1st loss', tf.reduce_mean(fst_loss), step=epoch)
            if val:
                tf.summary.scalar("val loss",
                                  tf.reduce_mean(val_loss),
                                  step=epoch)

        # update learning rate
        lrate = initial_lrate * 0.95 ** epoch
        # if lrate < 1e-6:
        #     lrate = 1e-6
        tf.keras.backend.set_value(learning_rate, lrate)
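
The manual schedule above sets lr = initial_lrate * 0.95**epoch. A sketch of the same decay using Keras's built-in schedule (the initial rate here is a placeholder):

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3,  # hypothetical initial_lrate
    decay_steps=1,               # step the schedule once per epoch
    decay_rate=0.95)
print(float(schedule(3)))  # 1e-3 * 0.95**3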
Example #9
    default=8,
    help='number of cpu threads to use during batch generation')

parser.add_argument('--frequency',
                    type=int,
                    default=5,
                    help='frequency of saving model\'s parameters')

opt = parser.parse_args()

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

Gnet_AB = GNet(opt.G_init_filter, opt.G_depth, opt.G_width)
Gnet_BA = GNet(opt.G_init_filter, opt.G_depth, opt.G_width)
Dnet_A = DNet(opt.D_init_filter, opt.D_depth)
Dnet_B = DNet(opt.D_init_filter, opt.D_depth)

if opt.cuda:
    Gnet_AB.cuda()
    Gnet_BA.cuda()
    Dnet_A.cuda()
    Dnet_B.cuda()

# Weight initialization from a Gaussian distribution N(0, 0.02)
Gnet_AB.apply(weights_init_normal)
Gnet_BA.apply(weights_init_normal)
Dnet_A.apply(weights_init_normal)
Dnet_B.apply(weights_init_normal)
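
weights_init_normal is defined elsewhere in the project; a minimal sketch of the usual DCGAN-style initializer it presumably matches (an assumption, not the repo's actual code):

import torch.nn as nn

def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)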
Example #10
File: train.py Project: hduyuanfu/GAN
def train(opt, device):

    dataloader = data_loader(opt)

    gnet = GNet(opt).to(device)
    dnet = DNet(opt).to(device)
    #writer.add_graph(gnet)  # experiment with the second argument
    #writer.add_graph(dnet)
    if device.type == 'cuda':  # even if several GPUs are visible, only card 0 runs unless DataParallel distributes the work
        gnet = nn.DataParallel(gnet, [0, 1, 2])  # list(range(ngpu)) did not work here; only the first few cards are used
        dnet = nn.DataParallel(dnet, [0, 1, 2])
    gnet.apply(weight_init)  # initializes gnet.parameters(); without an explicit init the framework applies its own random one
    dnet.apply(weight_init)
    print('Generative Network:')
    print(gnet)
    print('')
    print('Discriminative Network:')
    print(dnet)

    criterion = nn.BCELoss()
    '''
    params (iterable): iterable of parameters to optimize or dicts defining parameter groups
    Besides the whole-model assignment below, you can hand the optimizer parameter groups and set a
    value for each part of the model separately, for example:
    optimizer = optim.SGD([
                            {'params': model.features.parameters(), 'lr': 0.1 * lr},
                            {'params': model.sample_128.parameters(), 'lr': lr},
                            {'params': model.sample_256.parameters(), 'lr': lr},
                            {'params': model.fc_concat.parameters(), 'lr': lr}
                        ], lr=1e-1, momentum=0.9, weight_decay=1e-5)
    '''
    g_optimizer = optim.Adam(gnet.parameters(),
                             lr=opt.lr1,
                             betas=(opt.beta1, 0.999))
    d_optimizer = optim.Adam(dnet.parameters(),
                             lr=opt.lr2,
                             betas=(opt.beta1, 0.999))
    # the optimizer is assigned once up front; every later change comes from the backward passes
    print('g_optimizer:')
    print(g_optimizer)
    print('d_optimizer:')
    print(d_optimizer)

    writer = SummaryWriter(log_dir='train_result')
    # creating the writer generates the events file; when tensorboard runs, it scans every path under the parent directory and picks up the files it needs
    #dummy1_input = torch.rand(opt.batch_size, 3, 96,96)
    #dummy2_input = torch.rand(opt.batch_size, opt.nd,1,1)
    #writer.add_graph(dnet, dummy1_input)
    #writer.add_graph(gnet, dummy2_input)

    # Training Loop
    # Lists to keep track of progress
    '''The lists are not strictly necessary, but they are kept in case they come in handy later'''
    img_list = []
    G_losses = []
    D_losses = []
    iters = 0
    fixed_noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
    print("Starting Training Loop...")
    dnet.train()
    gnet.train()
    # train mode is the default anyway; the switch matters once BN or dropout layers are present, since those layers behave differently during training and must stay fixed during validation: eval() freezes them, train() lets them vary
    # For each epoch
    for epoch in range(1, opt.max_epoch + 1):
        # For each batch in the dataloader
        print(len(dataloader))
        print(type(dataloader))
        for i, (imgs, _) in enumerate(dataloader, 1):
            # torch.utils.data.DataLoader() yields pairs from a special iterable object (not a list; it cannot be sliced);
            # for MNIST you would write img, label = data; these avatar images carry no real labels, and printing them shows tensor([0, 0, ..., 0])
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ############################
            ## Train with all-real batch
            dnet.zero_grad()
            '''Train the discriminator first, then the generator'''
            # Format batch
            real_img = imgs.to(device)  # move each batch to the device
            # torch.full((2, 3), 1.2): the first argument must be a tuple of any dimensionality; even a 1-D fill needs a tuple, and a one-element tuple needs its trailing comma
            label = torch.full((opt.batch_size, ),
                               opt.real_label,
                               device=device)
            # Forward pass real batch through D
            output = dnet(real_img)  # already flattened to 1-D inside the model module
            # Calculate loss on all-real batch
            d_err_real = criterion(output, label)  # mean loss
            # Calculate gradients for D in backward pass
            d_err_real.backward()
            D_x = output.mean().item()  # mean score on real images; the closer to 1 the better

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(opt.batch_size, opt.nd, 1, 1, device=device)
            # gnet generates opt.batch_size images, one per (opt.nd, 1, 1) noise vector; inside gnet each input has opt.nd
            # feature maps of size 1 x 1, so each of the opt.nd values controls one feature of the generated image
            # Generate fake image batch with G
            fake = gnet(noise)
            label.fill_(opt.fake_label)
            # Classify all fake batch with D
            output = dnet(fake.detach())
            # Calculate D's loss on the all-fake batch
            d_err_fake = criterion(output, label)
            # Calculate the gradients for this batch
            d_err_fake.backward()
            D_G_z1 = output.mean().item()  # mean score on the fake images; the closer to 0 the better
            # Add the gradients from the all-real and all-fake batches
            d_err = d_err_real + d_err_fake  # tensors such as tensor(1.272) and tensor(0.183) add directly; no need to extract the values first
            # tensor-with-tensor arithmetic behaves just like scalar arithmetic (and tensors also broadcast with plain Python scalars)
            # Update D
            d_optimizer.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            gnet.zero_grad()
            label.fill_(
                opt.real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = dnet(fake)  # after one update of D, the same batch of fakes should score lower than before, which enlarges g_err below
            # Calculate G's loss based on this output
            g_err = criterion(output, label)
            '''The generator pushes fake images toward the real label; comparing fakes against real labels, the smaller the loss the better'''
            # Calculate gradients for G
            g_err.backward()
            D_G_z2 = output.mean().item()  # since D was just updated, this mean output on fakes should sit closer to 0 than D_G_z1 if training is healthy
            # Update G
            g_optimizer.step()

            # Output training stats
            if i % 50 == 0:
                print(
                    '[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\treal_img_mean_score: %.4f\tfake_img_mean_score_1/2: %.4f / %.4f'
                    % (epoch, opt.max_epoch, i, len(dataloader), d_err.item(),
                       g_err.item(), D_x, D_G_z1, D_G_z2))

            # Save Losses for plotting later
            G_losses.append(g_err.item())
            D_losses.append(d_err.item())

            writer.add_scalars('dnet_gnet_loss', {
                'G_losses': G_losses[iters],
                'D_losses': D_losses[iters]
            }, iters)

            # Check how the generator is doing by saving G's output on fixed_noise
            if (iters % 500 == 0) or ((epoch == opt.max_epoch) and
                                      (i == len(dataloader))):
                with torch.no_grad():  # context manager: no autograd graph is built inside the block, so the forward pass skips the per-layer gradient bookkeeping and saves computation
                    fake = gnet(fixed_noise)  # .detach().cpu() adds nothing here; it would only copy the fakes onto the CPU
                img_list.append(vutils.make_grid(fake, normalize=True))
                '''not yet sure how many small images the grid contains'''
                writer.add_image('fake%d' % (iters / 500),
                                 img_list[int(iters / 500)], int(iters / 500))

            iters += 1

    torch.save(dnet.state_dict(), 'dnet.pth')
    torch.save(gnet.state_dict(), 'gnet.pth')

    writer.close()
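
On the "how many small images" question in the comment above: vutils.make_grid lays the batch out in rows of nrow images, 8 by default. A quick shape check with a toy batch:

import torch
import torchvision.utils as vutils

batch = torch.rand(16, 3, 96, 96)
grid = vutils.make_grid(batch, normalize=True)  # nrow defaults to 8, so 2 rows of 8
print(grid.shape)  # torch.Size([3, 198, 786]) with the default padding of 2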