Example no. 1
0
def main_worker(args):
    """Build the multi-scale SinGAN networks and run the progressive
    training loop, one stage per scale (coarse to fine).

    Args:
        args: namespace carrying at least ``img_size_min``,
            ``img_size_max``, ``log_dir`` and ``gantype``.  As side
            effects, ``num_scale``, ``size_list``, ``stage`` and
            ``batch_size`` are (re)written on it.
    """
    args.stage = 0
    ################
    # Define model #
    ################
    # 4/3 : scale factor used in the SinGAN paper
    scale_factor = 4 / 3
    tmp_scale = args.img_size_max / args.img_size_min
    args.num_scale = int(np.round(np.log(tmp_scale) / np.log(scale_factor)))

    # Image side length at every scale, coarsest first.
    args.size_list = [
        int(args.img_size_min * scale_factor**i)
        for i in range(args.num_scale + 1)
    ]

    discriminator = Discriminator()
    generator = Generator(args.img_size_min, args.num_scale, scale_factor)

    networks = [discriminator, generator]

    ######################
    # Loss and Optimizer #
    ######################
    # Optimizers for the coarsest-scale sub-networks; they are rebuilt
    # every time training progresses to a finer scale (see loop below).
    d_opt = mindspore.nn.Adam(
        discriminator.sub_discriminators[0].get_parameters(), 5e-4, 0.5, 0.999)
    g_opt = mindspore.nn.Adam(generator.sub_generators[0].get_parameters(),
                              5e-4, 0.5, 0.999)

    #############
    # Set stage #
    #############
    args.stage = 0

    ###########
    # Dataset #
    ###########
    dataset = create_dataset(args)  # MSP: result is a Tensor

    ######################
    # Validate and Train #
    ######################
    args.batch_size = 1
    # Zero-pad 5 pixels on each spatial border (NCHW layout assumed).
    pad = mindspore.ops.Pad(((0, 0), (0, 0), (5, 5), (5, 5)))

    # Fixed reconstruction noise for the coarsest scale (randn-filled).
    z_fix_list = [
        pad(
            Tensor(
                np.random.randn(args.batch_size, 3, args.size_list[0],
                                args.size_list[0]), mindspore.float32))
    ]
    # Zero-filled tensors holding the reconstruction input of every
    # finer scale.
    zero_list = [
        pad(
            Tensor(
                np.zeros((args.batch_size, 3, args.size_list[zeros_idx],
                          args.size_list[zeros_idx])), mindspore.float32))
        for zeros_idx in range(1, args.num_scale + 1)
    ]

    z_fix_list = z_fix_list + zero_list

    # Touch/create the log files via context managers so the handles are
    # always closed (the original leaked the "checkpoint.txt" handle).
    with open(os.path.join(args.log_dir, "checkpoint.txt"), "a+"):
        pass
    with open(os.path.join(args.log_dir, "record.txt"), "a+") as record_txt:
        record_txt.write('GANTYPE\t:\t{}\n'.format(args.gantype))

    for stage in range(args.stage, args.num_scale + 1):
        trainSinGAN(dataset, networks, {
            "d_opt": d_opt,
            "g_opt": g_opt
        }, stage, args, {"z_rec": z_fix_list})
        validateSinGAN(dataset, networks, stage, args, {"z_rec": z_fix_list})
        discriminator.progress()
        generator.progress()

        # Rebuild the optimizers for the newly-added finest-scale
        # sub-networks.
        # BUGFIX: MindSpore Cells expose get_parameters() /
        # trainable_params(), not PyTorch's .parameters(); the original
        # raised AttributeError here after the first stage.
        d_opt = mindspore.nn.Adam(
            discriminator.sub_discriminators[
                discriminator.current_scale].get_parameters(), 5e-4, 0.5,
            0.999)
        g_opt = mindspore.nn.Adam(
            generator.sub_generators[
                generator.current_scale].get_parameters(), 5e-4, 0.5, 0.999)
Example no. 2
0
def main_worker(args):
    """Build the multi-scale SinGAN networks and run the progressive
    training loop (the validation pass is intentionally disabled in
    this variant).

    Args:
        args: namespace carrying at least ``img_size_min`` and
            ``img_size_max``.  As side effects, ``num_scale``,
            ``size_list``, ``stage`` and ``batch_size`` are
            (re)written on it.
    """
    args.stage = 0
    ################
    # Define model #
    ################
    # 4/3 : scale factor used in the SinGAN paper
    scale_factor = 4 / 3
    tmp_scale = args.img_size_max / args.img_size_min
    args.num_scale = int(np.round(np.log(tmp_scale) / np.log(scale_factor)))
    # Image side length at every scale, coarsest first.
    args.size_list = [
        int(args.img_size_min * scale_factor**i)
        for i in range(args.num_scale + 1)
    ]

    # Create the GAN's discriminator and generator.
    discriminator = Discriminator()
    generator = Generator(args.img_size_min, args.num_scale, scale_factor)

    # Bundle them together as the network list passed to train/validate.
    networks = [discriminator, generator]

    ######################
    # Loss and Optimizer #
    ######################
    # Optimizers for the coarsest-scale sub-networks of D and G; they
    # are rebuilt every time training progresses to a finer scale.
    d_opt = mindspore.nn.Adam(
        discriminator.sub_discriminators[0].trainable_params(),
        learning_rate=5e-4,
        beta1=0.5,
        beta2=0.999)
    g_opt = mindspore.nn.Adam(generator.sub_generators[0].trainable_params(),
                              learning_rate=5e-4,
                              beta1=0.5,
                              beta2=0.999)
    #############
    # Set stage #
    #############
    # NOTE: args.stage is already 0 from the top of the function; kept
    # as a section marker only.

    ###########
    # Dataset #
    ###########
    # Load the dataset; result is a Tensor of shape
    # (1, 3, max_size, max_size).
    dataset = create_dataset(args)
    ######################
    # Validate and Train #
    ######################
    # Batch size 1: a single image is processed at a time.
    args.batch_size = 1
    # Zero-pad 5 pixels on each spatial border (NCHW layout assumed).
    pad = mindspore.nn.Pad(paddings=((0, 0), (0, 0), (5, 5), (5, 5)),
                           mode="CONSTANT")

    # Fixed reconstruction noise for the coarsest scale (randn-filled).
    z_fix_list = [
        pad(
            Tensor(
                np.random.randn(args.batch_size, 3, args.size_list[0],
                                args.size_list[0]), mindspore.float32))
    ]
    # Zero-filled tensors holding the reconstruction input of every
    # finer scale.
    zero_list = [
        pad(
            Tensor(
                np.zeros((args.batch_size, 3, args.size_list[zeros_idx],
                          args.size_list[zeros_idx])), mindspore.float32))
        for zeros_idx in range(1, args.num_scale + 1)
    ]

    z_fix_list = z_fix_list + zero_list

    # Train stage by stage, from the coarsest to the finest scale.
    for stage in range(args.stage, args.num_scale + 1):
        trainSinGAN(dataset, networks, {
            "d_opt": d_opt,
            "g_opt": g_opt
        }, stage, args, {"z_rec": z_fix_list})
        # validateSinGAN(dataset, networks, stage,
        #                args, {"z_rec": z_fix_list})
        discriminator.progress()
        generator.progress()

        # Rebuild the optimizers for the newly-added finest-scale
        # sub-networks (keyword form, matching the constructions above).
        d_opt = mindspore.nn.Adam(
            discriminator.sub_discriminators[
                discriminator.current_scale].trainable_params(),
            learning_rate=5e-4,
            beta1=0.5,
            beta2=0.999)
        g_opt = mindspore.nn.Adam(
            generator.sub_generators[
                generator.current_scale].trainable_params(),
            learning_rate=5e-4,
            beta1=0.5,
            beta2=0.999)