Example 1
import os

import torch
import torch.nn as nn

# Project-local helpers assumed importable here: root_dir, make_logger,
# make_model, Optim, DADataset, DATrainer.


def train(opt):

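    # Resolve all output paths under the experiment directory and set up the logger.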
    opt.experiment = os.path.join(root_dir, opt.experiment)
    if not os.path.exists(opt.experiment):
        os.makedirs(opt.experiment)

    opt.save_model = os.path.join(opt.experiment, opt.save_model)
    if opt.load_model is not None:
        opt.load_model = os.path.join(opt.experiment, opt.load_model)
    opt.log_path = os.path.join(opt.experiment, 'log.train')
    opt.logger = make_logger(opt.log_path)

    # vocabulary sizes
    print("encoder vocabulary size: {}".format(opt.enc_word_vocab_size))
    print("decoder vocabulary size: {}".format(opt.dec_word_vocab_size))

    # Model definition
    model = make_model(opt)

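    # Optionally initialize encoder/decoder embeddings from pre-trained vectors
    # (GloVe-enlarged encoder vocabulary if requested), frozen when fix_word_emb is set.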
    if opt.load_word_emb:
        if opt.enlarge_word_vocab:
            enc_emb = opt.memory['word2idx_w_glove_emb']
        else:
            enc_emb = opt.memory['word2idx_emb']
        dec_emb = opt.memory['word2idx_emb']
        model.enc_word_emb.init_weight_from_pre_emb(enc_emb, opt.fix_word_emb)
        model.dec_word_emb.init_weight_from_pre_emb(dec_emb, opt.fix_word_emb)

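    # Share one embedding matrix between encoder and decoder when their vocab sizes match.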
    if opt.enc_word_vocab_size == opt.dec_word_vocab_size:
        model.dec_word_emb.embedding.weight.data = model.enc_word_emb.embedding.weight.data
        model.dec_word_emb.embedding.weight.requires_grad = model.enc_word_emb.embedding.weight.requires_grad

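    # Optionally resume from a saved checkpoint.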
    if opt.load_model is not None:
        chkpt = torch.load(opt.load_model, map_location=lambda storage, loc: storage)
        model.load_state_dict(chkpt)
    if opt.cuda:
        model = model.cuda()
    print(model)

    # optimizer details
    optimizer = Optim(opt.optim, opt.lr, max_grad_norm=opt.max_norm)
    optimizer.set_parameters(model.named_parameters())
    print("training parameters number: {}".format(len(optimizer.params)))

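    # Negative log-likelihood summed over tokens rather than averaged.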
    nll_criterion = nn.NLLLoss(reduction='sum')
    if opt.cuda:
        nll_criterion = nll_criterion.cuda()

    # training procedure
    train_iter = DADataset(opt.data_root + opt.train_file, opt.memory, opt.cuda, True)
    valid_iter = DADataset(opt.data_root + opt.valid_file, opt.memory, opt.cuda, False)

    trainer = DATrainer(model, nll_criterion, optimizer, opt.logger, cuda=opt.cuda)
    trainer.train(opt.epochs, opt.batch_size, train_iter, valid_iter, opt.save_model)
Example 2
import os
import sys
import time

import numpy as np
import paddle
import paddle.fluid as fluid
from scipy.misc import imsave

# Project-local modules assumed importable here: data_reader, ImagePool,
# GATrainer, GBTrainer, DATrainer, DBTrainer.


def train(args):

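    # Continuous-evaluation (CE) mode fixes the random seeds and trains on a
    # single image so repeated runs are comparable.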
    max_images_num = data_reader.max_images_num()
    shuffle = True
    if args.run_ce:
        np.random.seed(10)
        fluid.default_startup_program().random_seed = 90
        max_images_num = 1
        shuffle = False
    data_shape = [-1] + data_reader.image_shape()

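    # Input placeholders: real images from both domains plus pooled fake
    # images for the discriminators.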
    input_A = fluid.layers.data(name='input_A',
                                shape=data_shape,
                                dtype='float32')
    input_B = fluid.layers.data(name='input_B',
                                shape=data_shape,
                                dtype='float32')
    fake_pool_A = fluid.layers.data(name='fake_pool_A',
                                    shape=data_shape,
                                    dtype='float32')
    fake_pool_B = fluid.layers.data(name='fake_pool_B',
                                    shape=data_shape,
                                    dtype='float32')

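    # Build the four training graphs: both generators and both discriminators.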
    g_A_trainer = GATrainer(input_A, input_B)
    g_B_trainer = GBTrainer(input_A, input_B)
    d_A_trainer = DATrainer(input_A, fake_pool_A)
    d_B_trainer = DBTrainer(input_B, fake_pool_B)

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
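    # Image pools replay earlier generated fakes to stabilize discriminator training.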
    A_pool = ImagePool()
    B_pool = ImagePool()

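    # Batched readers over the two unpaired image domains.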
    A_reader = paddle.batch(data_reader.a_reader(shuffle=shuffle),
                            args.batch_size)()
    B_reader = paddle.batch(data_reader.b_reader(shuffle=shuffle),
                            args.batch_size)()
    if not args.run_ce:
        A_test_reader = data_reader.a_test_reader()
        B_test_reader = data_reader.b_test_reader()

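    # Run inference on the test sets and dump fake, cycle-reconstructed, and
    # input images for the given epoch.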
    def test(epoch):
        out_path = args.output + "/test"
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        for data_A, data_B in zip(A_test_reader(), B_test_reader()):
            A_name = data_A[1]
            B_name = data_B[1]
            tensor_A = fluid.LoDTensor()
            tensor_B = fluid.LoDTensor()
            tensor_A.set(data_A[0], place)
            tensor_B.set(data_B[0], place)
            fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = exe.run(
                g_A_trainer.infer_program,
                fetch_list=[
                    g_A_trainer.fake_A, g_A_trainer.fake_B, g_A_trainer.cyc_A,
                    g_A_trainer.cyc_B
                ],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })
            fake_A_temp = np.squeeze(fake_A_temp[0]).transpose([1, 2, 0])
            fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
            cyc_A_temp = np.squeeze(cyc_A_temp[0]).transpose([1, 2, 0])
            cyc_B_temp = np.squeeze(cyc_B_temp[0]).transpose([1, 2, 0])
            input_A_temp = np.squeeze(data_A[0]).transpose([1, 2, 0])
            input_B_temp = np.squeeze(data_B[0]).transpose([1, 2, 0])

            imsave(out_path + "/fakeB_" + str(epoch) + "_" + A_name,
                   ((fake_B_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/fakeA_" + str(epoch) + "_" + B_name,
                   ((fake_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/cycA_" + str(epoch) + "_" + A_name,
                   ((cyc_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/cycB_" + str(epoch) + "_" + B_name,
                   ((cyc_B_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/inputA_" + str(epoch) + "_" + A_name,
                   ((input_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/inputB_" + str(epoch) + "_" + B_name,
                   ((input_B_temp + 1) * 127.5).astype(np.uint8))

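    # Save the persistable variables of all four programs under a per-epoch directory.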
    def checkpoints(epoch):
        out_path = args.output + "/checkpoints/" + str(epoch)
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        fluid.io.save_persistables(exe,
                                   out_path + "/g_a",
                                   main_program=g_A_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/g_b",
                                   main_program=g_B_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/d_a",
                                   main_program=d_A_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/d_b",
                                   main_program=d_B_trainer.program)
        print("saved checkpoint to {}".format(out_path))
        sys.stdout.flush()

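    # Restore all four sub-networks from a previously saved checkpoint directory.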
    def init_model():
        assert os.path.exists(
            args.init_model), "[%s] cannot be found." % args.init_model
        fluid.io.load_persistables(exe,
                                   args.init_model + "/g_a",
                                   main_program=g_A_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/g_b",
                                   main_program=g_B_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/d_a",
                                   main_program=d_A_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/d_b",
                                   main_program=d_B_trainer.program)
        print("Load model from {}".format(args.init_model))

    if args.init_model:
        init_model()
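    # Track generator/discriminator losses and total batch time for the CE summary below.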
    losses = [[], []]
    t_time = 0
    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = False
    build_strategy.memory_optimize = False

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 1
    exec_strategy.use_experimental_executor = True

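    # Compile each program for data-parallel execution with the strategies above.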
    g_A_trainer_program = fluid.CompiledProgram(
        g_A_trainer.program).with_data_parallel(
            loss_name=g_A_trainer.g_loss_A.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    g_B_trainer_program = fluid.CompiledProgram(
        g_B_trainer.program).with_data_parallel(
            loss_name=g_B_trainer.g_loss_B.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    d_B_trainer_program = fluid.CompiledProgram(
        d_B_trainer.program).with_data_parallel(
            loss_name=d_B_trainer.d_loss_B.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    d_A_trainer_program = fluid.CompiledProgram(
        d_A_trainer.program).with_data_parallel(
            loss_name=d_A_trainer.d_loss_A.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
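    # Main training loop: for each image pair, update G_A, D_B, G_B, then D_A.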
    for epoch in range(args.epoch):
        batch_id = 0
        for i in range(max_images_num):
            data_A = next(A_reader)
            data_B = next(B_reader)
            tensor_A = fluid.LoDTensor()
            tensor_B = fluid.LoDTensor()
            tensor_A.set(data_A, place)
            tensor_B.set(data_B, place)
            s_time = time.time()
            # optimize the g_A network
            g_A_loss, fake_B_tmp = exe.run(
                g_A_trainer_program,
                fetch_list=[g_A_trainer.g_loss_A, g_A_trainer.fake_B],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })

            fake_pool_B = B_pool.pool_image(fake_B_tmp)

            # optimize the d_B network
            d_B_loss = exe.run(d_B_trainer_program,
                               fetch_list=[d_B_trainer.d_loss_B],
                               feed={
                                   "input_B": tensor_B,
                                   "fake_pool_B": fake_pool_B
                               })[0]

            # optimize the g_B network
            g_B_loss, fake_A_tmp = exe.run(
                g_B_trainer_program,
                fetch_list=[g_B_trainer.g_loss_B, g_B_trainer.fake_A],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })

            fake_pool_A = A_pool.pool_image(fake_A_tmp)

            # optimize the d_A network
            d_A_loss = exe.run(d_A_trainer_program,
                               fetch_list=[d_A_trainer.d_loss_A],
                               feed={
                                   "input_A": tensor_A,
                                   "fake_pool_A": fake_pool_A
                               })[0]
            batch_time = time.time() - s_time
            t_time += batch_time
            print(
                "epoch{}; batch{}; g_A_loss: {}; d_B_loss: {}; g_B_loss: {}; d_A_loss: {}; "
                "Batch_time_cost: {}".format(epoch, batch_id, g_A_loss[0],
                                             d_B_loss[0], g_B_loss[0],
                                             d_A_loss[0], batch_time))
            losses[0].append(g_A_loss[0])
            losses[1].append(d_A_loss[0])
            sys.stdout.flush()
            batch_id += 1

        if args.run_test and not args.run_ce:
            test(epoch)
        if args.save_checkpoints and not args.run_ce:
            checkpoints(epoch)
    if args.run_ce:
        print("kpis,g_train_cost,{}".format(np.mean(losses[0])))
        print("kpis,d_train_cost,{}".format(np.mean(losses[1])))
        print("kpis,duration,{}".format(t_time / args.epoch))