Example #1
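A standalone training driver on top of the legacy PaddlePaddle v1 SWIG bindings (py_paddle). It parses a trainer config, builds a GradientMachine with one ParameterOptimizer per parameter, and trains a single pass over MNIST, applying updates from inside the forwardBackward callback. The init_params and init_optimizers helpers are defined elsewhere in the same test file; util is the local test helper module. The imports shown at the top are assumed stand-ins for the original file's.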
import paddle.trainer.config_parser
from py_paddle import swig_paddle  # assumed import path for the legacy v1 SWIG API
import util  # local test helper providing loadMNISTTrainData


def main():
    trainer_config = paddle.trainer.config_parser.parse_config(
        "./testTrainConfig.py", "")
    opt_config = trainer_config.opt_config
    print "========Optimization Config ======="
    print opt_config
    print "==================================="
    opt_config = swig_paddle.OptimizationConfig.createFromProto(opt_config)
    _temp_optimizer_ = swig_paddle.ParameterOptimizer.create(opt_config)
    enable_types = _temp_optimizer_.getParameterTypes()
    m = swig_paddle.GradientMachine.createFromConfigProto(
        trainer_config.model_config, swig_paddle.CREATE_MODE_NORMAL,
        enable_types)
    assert m is not None
    assert isinstance(m, swig_paddle.GradientMachine)
    init_params(m.getParameters())

    optimizers = init_optimizers(opt_config, m.getParameters())

    # Train One Pass.
    for optimizer in optimizers:
        optimizer.startPass()
    batch_id = 0
    while True:  # Train one batch
        batch_size = 1000
        inArgs, atEnd = util.loadMNISTTrainData(batch_size)
        if atEnd:
            break
        outArgs = swig_paddle.Arguments.createArguments(0)

        for optimizer in optimizers:
            optimizer.startBatch(batch_size)

        def update_callback(param):
            try:
                bufs = list(param.getBufs())
                opt = optimizers[param.getID()]
                opt.update(bufs, param.getConfig())
                callback = opt.needSpecialTraversal(param.getConfig())
                if callback is not None:
                    callback(bufs, param.getConfig(), swig_paddle.NO_SPARSE_ID)

            except Exception as e:
                print(e)

        m.forwardBackward(inArgs, outArgs, swig_paddle.PASS_TRAIN,
                          update_callback)

        for optimizer in optimizers:
            optimizer.finishBatch()

        cost_vec = outArgs.getSlotValue(0)
        assert isinstance(cost_vec, swig_paddle.Matrix)
        cost_vec = cost_vec.copyToNumpyMat()
        print('Finish Batch', batch_id, 'with cost', cost_vec.sum() / batch_size)
        batch_id += 1

    for optimizer in optimizers:
        optimizer.finishPass()
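Note the update flow above: forwardBackward invokes update_callback once per parameter as soon as that parameter's gradient is ready, and needSpecialTraversal lets the optimizer request an extra traversal of the parameter buffers after the regular update.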
Example #2
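A unittest.TestCase method covering the simplest path: build a GradientMachine directly from the model config and run forward/backward over the MNIST training batches. No optimizer or update callback is attached, so this only exercises the forward/backward computation. It assumes swig_paddle (from py_paddle) and the local util helper are in scope.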
    def test_train_one_pass(self):
        conf_file_path = './testTrainConfig.py'
        trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
            conf_file_path)
        model_config = trainer_config.getModelConfig()
        machine = swig_paddle.GradientMachine.createByModelConfig(model_config)

        at_end = False

        output = swig_paddle.Arguments.createArguments(0)
        while not at_end:  # iterate over every batch in the pass
            input_, at_end = util.loadMNISTTrainData(1000)
            machine.forwardBackward(input_, output, swig_paddle.PASS_TRAIN)
Example #3
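Instead of driving optimizers by hand, this example delegates everything to the built-in Trainer: two passes, each with a training phase and a test period, logging the average cost per sample. Note that the test period reuses loadMNISTTrainData, so it evaluates on the training batches. The imports and logger setup at the top are assumed stand-ins for the original test file's.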
import logging

from paddle.trainer.config_parser import parse_config
from py_paddle import swig_paddle  # assumed import path for the legacy v1 SWIG API
import util  # local test helper providing loadMNISTTrainData

logger = logging.getLogger(__name__)  # stand-in for the test file's logger


def main():
    trainer_config = parse_config("./testTrainConfig.py", "")
    model = swig_paddle.GradientMachine.createFromConfigProto(
        trainer_config.model_config)
    trainer = swig_paddle.Trainer.create(trainer_config, model)
    trainer.startTrain()
    for train_pass in range(2):
        trainer.startTrainPass()
        num = 0
        cost = 0
        while True:  # Train one batch
            batch_size = 1000
            data, atEnd = util.loadMNISTTrainData(batch_size)
            if atEnd:
                break
            trainer.trainOneDataBatch(batch_size, data)
            outs = trainer.getForwardOutput()
            cost += sum(outs[0]['value'])
            num += batch_size
        trainer.finishTrainPass()
        logger.info('train cost=%f' % (cost / num))

        trainer.startTestPeriod()
        num = 0
        cost = 0
        while True:  # Test one batch
            batch_size = 1000
            data, atEnd = util.loadMNISTTrainData(batch_size)
            if atEnd:
                break
            trainer.testOneDataBatch(batch_size, data)
            outs = trainer.getForwardOutput()
            cost += sum(outs[0]['value'])
            num += batch_size
        trainer.finishTestPeriod()
        logger.info('test cost=%f' % (cost / num))

    trainer.finishTrain()
Example #4
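A more thorough test that wires the pieces together manually: every parameter is initialized to 0.1, one ParameterOptimizer is created per parameter, a single forward/backward runs over one MNIST batch, and a backward callback both verifies the pre-update values and applies the update itself. Assumed context: a unittest.TestCase with numpy, paddle.proto.ParameterConfig_pb2, swig_paddle (from py_paddle), and the local util helper imported.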
    def test_create_gradient_machine(self):
        conf_file_path = "./testTrainConfig.py"
        trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
            conf_file_path)
        self.assertIsNotNone(trainer_config)
        opt_config = trainer_config.getOptimizationConfig()
        model_config = trainer_config.getModelConfig()
        self.assertIsNotNone(model_config)
        machine = swig_paddle.GradientMachine.createByModelConfig(
            model_config, swig_paddle.CREATE_MODE_NORMAL,
            swig_paddle.ParameterOptimizer.create(
                opt_config).getParameterTypes())
        self.assertIsNotNone(machine)
        ipt, _ = util.loadMNISTTrainData()
        output = swig_paddle.Arguments.createArguments(0)

        optimizers = {}

        # Initialize all machine parameters to 0.1
        for param in machine.getParameters():
            assert isinstance(param, swig_paddle.Parameter)
            val = param.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(val, swig_paddle.Vector)
            arr = numpy.full((len(val), ), 0.1, dtype="float32")
            val.copyFromNumpyArray(arr)
            param_config = param.getConfig().toProto()
            assert isinstance(param_config,
                              paddle.proto.ParameterConfig_pb2.ParameterConfig)
            opt = swig_paddle.ParameterOptimizer.create(opt_config)
            optimizers[param.getID()] = opt
            num_rows = param_config.dims[1]
            opt.init(num_rows, param.getConfig())

        for opt in optimizers.values():
            opt.startPass()

        batch_size = ipt.getSlotValue(0).getHeight()
        for opt in optimizers.values():
            opt.startBatch(batch_size)

        machine.forward(ipt, output, swig_paddle.PASS_TRAIN)
        self.assertEqual(1, output.getSlotNum())
        self.isCalled = False

        def backward_callback(param_):
            self.isCalled = isinstance(param_, swig_paddle.Parameter)
            assert isinstance(param_, swig_paddle.Parameter)
            vec = param_.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(vec, swig_paddle.Vector)
            vec = vec.copyToNumpyArray()
            for val_ in vec:
                self.assertTrue(util.doubleEqual(val_, 0.1))  # every value is still 0.1

            vecs = list(param_.getBufs())
            opt_ = optimizers[param_.getID()]
            opt_.update(vecs, param_.getConfig())

        machine.backward(backward_callback)

        for opt in optimizers.values():
            opt.finishBatch()

        for opt in optimizers.values():
            opt.finishPass()

        self.assertTrue(self.isCalled)
Example #5
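Identical to the previous example except that it also exercises parameter serialization: each parameter is saved with param.save(param.getName()) right after initialization and reloaded with param.load(param.getName()) once training finishes. Same assumed imports and TestCase context as above.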
    def test_create_gradient_machine(self):
        conf_file_path = "./testTrainConfig.py"
        trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
            conf_file_path)
        self.assertIsNotNone(trainer_config)
        opt_config = trainer_config.getOptimizationConfig()
        model_config = trainer_config.getModelConfig()
        self.assertIsNotNone(model_config)
        machine = swig_paddle.GradientMachine.createByModelConfig(
            model_config, swig_paddle.CREATE_MODE_NORMAL,
            swig_paddle.ParameterOptimizer.create(
                opt_config).getParameterTypes())
        self.assertIsNotNone(machine)
        ipt, _ = util.loadMNISTTrainData()
        output = swig_paddle.Arguments.createArguments(0)

        optimizers = {}

        # Initialize all machine parameters to 0.1
        for param in machine.getParameters():
            assert isinstance(param, swig_paddle.Parameter)
            val = param.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(val, swig_paddle.Vector)
            arr = numpy.full((len(val), ), 0.1, dtype="float32")
            val.copyFromNumpyArray(arr)
            self.assertTrue(param.save(param.getName()))
            param_config = param.getConfig().toProto()
            assert isinstance(param_config,
                              paddle.proto.ParameterConfig_pb2.ParameterConfig)
            opt = swig_paddle.ParameterOptimizer.create(opt_config)
            optimizers[param.getID()] = opt
            num_rows = param_config.dims[1]
            opt.init(num_rows, param.getConfig())

        for opt in optimizers.values():
            opt.startPass()

        batch_size = ipt.getSlotValue(0).getHeight()
        for opt in optimizers.values():
            opt.startBatch(batch_size)

        machine.forward(ipt, output, swig_paddle.PASS_TRAIN)
        self.assertEqual(1, output.getSlotNum())
        self.isCalled = False

        def backward_callback(param_):
            self.isCalled = isinstance(param_, swig_paddle.Parameter)
            assert isinstance(param_, swig_paddle.Parameter)
            vec = param_.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(vec, swig_paddle.Vector)
            vec = vec.copyToNumpyArray()
            for val_ in vec:
                self.assertTrue(util.doubleEqual(val_, 0.1))  # every value is still 0.1

            vecs = list(param_.getBufs())
            opt_ = optimizers[param_.getID()]
            opt_.update(vecs, param_.getConfig())

        machine.backward(backward_callback)

        for opt in optimizers.values():
            opt.finishBatch()

        for opt in optimizers.values():
            opt.finishPass()

        self.assertTrue(self.isCalled)

        for param in machine.getParameters():
            self.assertTrue(param.load(param.getName()))