Code Example #1
import paddle.fluid as fluid


def train():

    ckpt_config = fluid.CheckpointConfig(checkpoint_dir="/workspace/ckpt", max_num_checkpoints=2)

    trainer = fluid.Trainer(
        train_func=train_network,
        place=get_place(),
        optimizer=get_optimizer(),
        param_path="/workspace/models",
        checkpoint_config=ckpt_config)

    train_reader, test_reader = get_reader()

    def event_handler(event):
        if isinstance(event, fluid.BeginStepEvent):
            pass

        if isinstance(event, fluid.EndStepEvent):
            loss, acc1, acc5 = event.metrics

            print("Epoch {0}, Step {1}, loss {2}, acc1 {3}, acc5 {4} time {5}".format(
                event.epoch,event.step, loss[0], acc1[0], acc5[0], "%2.2f sec" % 0.00))

    trainer.train(reader=train_reader, num_epochs=conf.num_passes,
                  event_handler=event_handler, feed_order=['image', 'label'])
    trainer.save_params("/workspace/models")
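Example #1 leaves train_network, get_place, get_optimizer, get_reader, and the conf module to the surrounding project. A minimal sketch of what those helpers could look like follows; every name, dataset, and hyperparameter in it is a hypothetical stand-in, not the original project's code.

import paddle
import paddle.fluid as fluid


def train_network():
    # Must return the three metrics event_handler unpacks: loss, acc1, acc5.
    img = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    predict = fluid.layers.fc(input=img, size=10, act='softmax')  # stand-in model
    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=predict, label=label))
    acc1 = fluid.layers.accuracy(input=predict, label=label, k=1)
    acc5 = fluid.layers.accuracy(input=predict, label=label, k=5)
    return [loss, acc1, acc5]


def get_place():
    return fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()


def get_optimizer():
    return fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)


def get_reader():
    train_reader = paddle.batch(paddle.dataset.cifar.train10(), batch_size=128)
    test_reader = paddle.batch(paddle.dataset.cifar.test10(), batch_size=128)
    return train_reader, test_reader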
Code Example #2
    def save_checkpoint(self):
        config = fluid.CheckpointConfig(self.dirname, self.max_num_checkpoints,
                                        self.epoch_interval,
                                        self.step_interval)

        trainer_args = {}
        trainer_args["epoch_id"] = self.epoch_id
        trainer_args["step_id"] = self.step_id

        program = fluid.Program()
        with fluid.program_guard(program):
            program.global_block().create_var(name="scale_0",
                                              persistable=True,
                                              dtype="float32",
                                              shape=[32, 32])

            exe = fluid.Executor(self.place)
            # Save ten checkpoints in a row; only the newest
            # max_num_checkpoints of them should be kept on disk.
            for i in range(10):
                fluid.io.save_checkpoint(exe, config.checkpoint_dir,
                                         self.trainer_id, trainer_args,
                                         program, config.max_num_checkpoints)
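The loop above exercises checkpoint rotation: save_checkpoint runs ten times, but only the newest max_num_checkpoints survive. A small check that could follow the loop (still inside save_checkpoint, where config is in scope); the assumption that each checkpoint appears as its own entry under checkpoint_dir is mine, not the source's.

import os

# At most config.max_num_checkpoints entries should remain; older
# serials are pruned by each subsequent save.
kept = sorted(os.listdir(config.checkpoint_dir))
print("checkpoint entries kept:", kept)
assert len(kept) <= config.max_num_checkpoints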
Code Example #3
import time

import numpy as np
import paddle.fluid as fluid


def train(dict_path):
    word_dict, dict_dim = get_worddict(dict_path)
    print("[get_worddict] The dictionary size is : %d" % dict_dim)

    cfg = fluid.CheckpointConfig(
        checkpoint_dir="/accuracy/checkpoint/text_classification/adam",
        epoch_interval=1,
        step_interval=1)
    # Uncomment to run without checkpointing:
    # cfg = None

    trainer = fluid.Trainer(train_func=train_network(dict_dim),
                            place=get_place(),
                            parallel=conf.parallel,
                            optimizer_func=get_optimizer,
                            checkpoint_config=cfg)

    def event_handler(event):
        samples = 25000  # training-set size, used to compute epoch throughput
        global step_start_time, epoch_start_time, speeds
        global accuracies, losses, t_accuracies, t_losses

        if isinstance(event, fluid.BeginEpochEvent):
            epoch_start_time = time.time()
            losses = []
            accuracies = []
            t_losses = []
            t_accuracies = []

        if isinstance(event, fluid.BeginStepEvent):
            if event.epoch == 0 and event.step == 0:
                speeds = []
            step_start_time = time.time()

        if isinstance(event, fluid.EndStepEvent):
            loss, accuracy = event.metrics
            losses.append(loss.mean())
            accuracies.append(accuracy.mean())

            # Per-step evaluation is disabled; zeros are logged as placeholders.
            # t_loss, t_accuracy = trainer.test(reader=test_reader, feed_order=['words', 'label'])
            t_loss = np.array([0.0])
            t_accuracy = np.array([0.0])

            print(
                "Epoch: {0}, Step: {1}, Time: {2}, Loss: {3}, Accuracy: {4}, Test Loss: {5}, Test Accuracy: {6}"
                .format(event.epoch, event.step,
                        time.time() - step_start_time, loss.mean(),
                        accuracy.mean(), t_loss.mean(), t_accuracy.mean()))

            t_losses.append(t_loss.mean())
            t_accuracies.append(t_accuracy.mean())

        if isinstance(event, fluid.EndEpochEvent):
            epoch_end_time = time.time()
            time_consuming = epoch_end_time - epoch_start_time
            speed = samples / time_consuming
            speeds.append(speed)

            t_loss, t_accuracy = trainer.test(reader=test_reader,
                                              feed_order=['words', 'label'])
            t_losses.append(t_loss.mean())
            t_accuracies.append(t_accuracy.mean())

            print(
                "Epoch: {0},Time: {1},  Speed: {2}, Avg Speed: {3}, Avg Loss: {4}, Avg accuracy: {5}, Test Avg Loss: {6}, Test Avg accuracy: {7}"
                .format(event.epoch, time_consuming, speed,
                        np.array(speeds).mean(),
                        np.array(losses).mean(),
                        np.array(accuracies).mean(),
                        np.array(t_losses).mean(),
                        np.array(t_accuracies).mean()))

    train_reader, test_reader = get_reader(word_dict)

    trainer.train(reader=train_reader,
                  num_epochs=conf.num_passes,
                  event_handler=event_handler,
                  feed_order=['words', 'label'])
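The samples = 25000 constant in event_handler matches the size of the IMDB training set, so the get_reader helper this example assumes presumably wraps paddle.dataset.imdb. A sketch under that assumption, with batch and buffer sizes made up for illustration:

import paddle


def get_reader(word_dict):
    # Shuffle the whole IMDB training set, then batch both splits.
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=128)
    test_reader = paddle.batch(paddle.dataset.imdb.test(word_dict), batch_size=128)
    return train_reader, test_reader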
Code Example #4
import paddle.fluid as fluid


def create_model():
    def create_discriminator(discrim_inputs, discrim_targets):
        n_layers = 3
        layers = []

        # 2x [batch, in_channels, height, width] => [batch, in_channels * 2, height, width]
        input = fluid.layers.concat(input=[discrim_inputs, discrim_targets], axis=1)

        # layer_1: [batch, in_channels * 2, 256, 256] => [batch, ndf, 128, 128]
        convolved = discrim_conv(input, a.ndf, stride=2)
        rectified = utils.lrelu(convolved, 0.2)
        layers.append(rectified)

        # layer_2: [batch, ndf, 128, 128] => [batch, ndf * 2, 64, 64]
        # layer_3: [batch, ndf * 2, 64, 64] => [batch, ndf * 4, 32, 32]
        # layer_4: [batch, ndf * 4, 32, 32] => [batch, ndf * 8, 31, 31]
        for i in range(n_layers):
            out_channels = a.ndf * min(2 ** (i + 1), 8)
            stride = 1 if i == n_layers - 1 else 2  # last layer here has stride 1
            convolved = discrim_conv(layers[-1], out_channels, stride=stride)
            normalized = utils.batchnorm(convolved)
            rectified = utils.lrelu(normalized, 0.2)
            layers.append(rectified)

        # layer_5: [batch, ndf * 8, 31, 31] => [batch, 1, 30, 30]
        convolved = discrim_conv(rectified, out_channels=1, stride=1)
        output = fluid.layers.sigmoid(convolved)
        layers.append(output)

        return layers[-1]

    def discrim_train_program():
        inputs = fluid.layers.data(name='input_images', shape=[3, CROP_SIZE, CROP_SIZE], dtype='float32')
        targets = fluid.layers.data(name='target_images', shape=[3, CROP_SIZE, CROP_SIZE], dtype='float32')

        out_channels = 3  # number of image channels (the TF original read this from targets)
        outputs = create_generator(inputs, out_channels)

        # create two copies of discriminator, one for real pairs and one for fake pairs
        # they share the same underlying variables
        # 2x [batch, channels, height, width] => [batch, 1, 30, 30]
        predict_real = create_discriminator(inputs, targets)

        # with tf.variable_scope("discriminator", reuse=True):
        # 2x [batch, channels, height, width] => [batch, 1, 30, 30]
        predict_fake = create_discriminator(inputs, outputs)

        # minimizing -log will try to drive its argument to 1:
        # predict_real => 1
        # 1 - predict_fake => 1, i.e. predict_fake => 0
        discrim_loss = fluid.layers.reduce_mean(
            fluid.layers.scale(
                x=fluid.layers.sum([
                    fluid.layers.log(predict_real + EPS),
                    fluid.layers.log(1.0 - predict_fake + EPS),
                ]),
                scale=-1.0))
        return [discrim_loss]

    def discrim_optimizer_program():
        return fluid.optimizer.AdamOptimizer(learning_rate=a.lr, beta1=a.beta1)

    use_cuda = a.use_cuda
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    checkpoint_config = fluid.CheckpointConfig("./checkpoints")
    discrim_trainer = fluid.Trainer(train_func=discrim_train_program, place=place,
                                    optimizer_func=discrim_optimizer_program, checkpoint_config=checkpoint_config)

    def gen_train_program():
        inputs = fluid.layers.data(name='input_images', shape=[3, CROP_SIZE, CROP_SIZE], dtype='float32')
        targets = fluid.layers.data(name='target_images', shape=[3, CROP_SIZE, CROP_SIZE], dtype='float32')

        out_channels = 3
        outputs = create_generator(inputs, out_channels)

        # create two copies of discriminator, one for real pairs and one for fake pairs
        # they share the same underlying variables
        # 2x [batch, channels, height, width] => [batch, 1, 30, 30]
        predict_real = create_discriminator(inputs, targets)  # not used by gen_loss below

        # 2x [batch, channels, height, width] => [batch, 1, 30, 30]
        predict_fake = create_discriminator(inputs, outputs)

        # predict_fake => 1
        # abs(targets - outputs) => 0
        gen_loss_GAN = fluid.layers.reduce_mean(
            fluid.layers.scale(
                x=fluid.layers.log(predict_fake + EPS),
                scale=-1.0
            )
        )
        gen_loss_L1 = fluid.layers.reduce_mean(
            fluid.layers.abs(targets - outputs))
        gen_loss = (fluid.layers.scale(x=gen_loss_GAN, scale=a.gan_weight)
                    + fluid.layers.scale(x=gen_loss_L1, scale=a.l1_weight))
        return [gen_loss]

    def gen_optimizer_program():
        return fluid.optimizer.AdamOptimizer(learning_rate=a.lr, beta1=a.beta1)

    gen_trainer = fluid.Trainer(train_func=gen_train_program, place=place, optimizer_func=gen_optimizer_program,
                                checkpoint_config=checkpoint_config)

    # TODO: https://github.com/PaddlePaddle/Paddle/issues/7785
    # ExponentialMovingAverage()

    return Model(
        discrim_trainer=discrim_trainer,
        gen_trainer=gen_trainer,
    )
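Example #4 depends on discrim_conv, utils.lrelu, and utils.batchnorm from the surrounding project. Hypothetical versions consistent with the shape comments are sketched below; pix2pix conventionally uses 4x4 filters with padding 1, and that choice is an assumption here, as is every name.

import paddle.fluid as fluid


def discrim_conv(x, out_channels, stride):
    # 4x4 convolution, padding 1: stride 2 halves H and W
    # (256 -> 128), stride 1 shrinks them by one (31 -> 30).
    return fluid.layers.conv2d(input=x, num_filters=out_channels,
                               filter_size=4, stride=stride, padding=1)


# In the project these would live in a utils module.
def lrelu(x, alpha):
    return fluid.layers.leaky_relu(x, alpha=alpha)


def batchnorm(x):
    return fluid.layers.batch_norm(input=x)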