# Example 1
    def run_with_executor(self):
        """Build a simple FC net, run one training step with a plain
        Executor, persist it via ``save_inference_model``, and return the
        fetched loss values."""
        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            loss = simple_fc_net()

        # Look up the feed variables on the main program's global block.
        feed_vars = []
        for var_name in ["image", "label"]:
            feed_vars.append(main_prog.global_block().var(var_name))
        feeder = fluid.DataFeeder(place=self.place, feed_list=feed_vars)

        # Initialize parameters, then execute one forward pass.
        self.exe.run(startup_prog)
        loss_data = self.exe.run(main_prog,
                                 feed=feeder.feed(self.batch_data),
                                 fetch_list=[loss.name])

        # Export the trained program for later inference-time loading.
        fluid.io.save_inference_model(self.save_dirname, ["image", "label"],
                                      [loss],
                                      self.exe,
                                      model_filename=self.model_filename,
                                      params_filename=self.params_filename,
                                      main_program=main_prog)

        return loss_data
# Example 2
    def build_network(self, call_interface):
        """Construct an FC net with an Adam optimizer inside fresh programs.

        When ``call_interface`` is truthy, ``self.method`` is invoked on the
        main program before it is returned.
        """
        init_prog = fluid.Program()
        train_prog = fluid.Program()
        with fluid.program_guard(train_prog, init_prog):
            with fluid.unique_name.guard():
                net_loss = simple_fc_net()
                optimizer = fluid.optimizer.Adam(learning_rate=1e-3)
                optimizer.minimize(net_loss)

                if call_interface:
                    self.method(train_prog)

        return train_prog
 def test_get_memory_info(self):
     """Check ``get_max_memory_info`` scaling behavior.

     Peak temporary (activation) memory must grow with batch size, while
     persistable (parameter) memory must be batch-size independent.
     """
     loss = simple_fc_net()
     optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
     optimizer.minimize(loss)
     main_prog = paddle.static.default_main_program()
     # Fixed typo in local names: "persitable" -> "persistable".
     max_tmp_mem_1, max_persistable_mem_1 = get_max_memory_info(
         main_prog, batch_size=32)
     self.assertGreater(max_tmp_mem_1, 0)
     self.assertGreater(max_persistable_mem_1, 0)
     max_tmp_mem_2, max_persistable_mem_2 = get_max_memory_info(
         main_prog, batch_size=64)
     # Parameter memory does not depend on the batch size...
     self.assertEqual(max_persistable_mem_1, max_persistable_mem_2)
     # ...but temporary memory must be strictly larger for the bigger batch.
     self.assertLess(max_tmp_mem_1, max_tmp_mem_2)
# Example 4
    def check_network_convergence(self, use_cuda, build_strategy=None):
        """Train and evaluate an FC net with data-parallel CompiledPrograms
        and assert the train/test losses stay numerically identical.

        Args:
            use_cuda (bool): run on CUDAPlace(0) when True, else CPUPlace.
            build_strategy: optional BuildStrategy forwarded to both the
                train and test compiled programs.
        """
        # Limit CPU data-parallel replicas to 4 devices.
        os.environ['CPU_NUM'] = str(4)
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            loss = simple_fc_net()
            # Clone for evaluation BEFORE the optimizer adds backward ops,
            # so the test program contains only the forward pass.
            test_program = main.clone(for_test=True)

            opt = fluid.optimizer.SGD(learning_rate=0.001)
            opt.minimize(loss)

            # Fixed random batch reused for every iteration.
            batch_size = 32
            image = np.random.normal(size=(batch_size, 784)).astype('float32')
            label = np.random.randint(0, 10, (batch_size, 1), dtype="int64")

            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(startup)
            feed_dict = {'image': image, 'label': label}

            # The test program must share parameter variables with the
            # training program via share_vars_from.
            train_cp = compiler.CompiledProgram(main).with_data_parallel(
                loss_name=loss.name, build_strategy=build_strategy)
            test_cp = compiler.CompiledProgram(test_program).with_data_parallel(
                loss_name=loss.name,
                build_strategy=build_strategy,
                share_vars_from=train_cp)

            for i in range(5):
                # One discarded training step, then an eval and another
                # training step whose losses are compared below.
                _ = exe.run(train_cp, fetch_list=[loss.name], feed=feed_dict)
                test_loss, = exe.run(test_cp,
                                     fetch_list=[loss.name],
                                     feed=feed_dict)
                # NOTE(review): unlike test_loss, train_loss is kept as the
                # full fetch list (no unpacking) — np.allclose below handles
                # both shapes, but the asymmetry looks unintentional.
                train_loss = exe.run(train_cp,
                                     fetch_list=[loss.name],
                                     feed=feed_dict)

                avg_test_loss_val = np.array(test_loss).mean()
                if math.isnan(float(avg_test_loss_val)):
                    sys.exit("got NaN loss, testing failed.")

                avg_train_loss_val = np.array(train_loss).mean()
                if math.isnan(float(avg_train_loss_val)):
                    sys.exit("got NaN loss, training failed.")

                self.assertTrue(
                    np.allclose(
                        train_loss, test_loss, atol=1e-8),
                    "Train loss: " + str(train_loss) + "\n Test loss:" +
                    str(test_loss))
    def build_program(self, main_program, startup_program):
        """Build an FC net from explicit image/label inputs, attach an SGD
        optimizer, and record image/label/loss into a tensor array.

        Returns:
            (loss, array): the loss variable and the array holding
            [image, label, loss] at indices 0, 1, 2.
        """
        with fluid.unique_name.guard():
            with fluid.program_guard(main_program, startup_program):
                i = layers.zeros(shape=[1], dtype='int64')
                img = fluid.data(name='image', shape=[-1, 784], dtype='float32')
                label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
                loss = simple_fc_net_with_inputs(img, label, class_num=10)
                # Bug fix: removed a stray ``loss = simple_fc_net()`` that
                # rebuilt the network with its own duplicate 'image'/'label'
                # feed vars, discarding the loss computed from the explicit
                # inputs declared above.
                opt = fluid.optimizer.SGD(learning_rate=0.001)
                opt.minimize(loss)

                # Write image, label, loss at consecutive array positions.
                array = layers.array_write(x=img, i=i)
                i = layers.increment(i)
                layers.array_write(x=label, i=i, array=array)
                i = layers.increment(i)
                layers.array_write(x=loss, i=i, array=array)

                return loss, array
    def check_feed_persistable_var(self, feed_dict, use_cuda=False):
        """Run one data-parallel step feeding ``feed_dict``, which may
        include persistable variables; skipped when CUDA is requested but
        unavailable."""
        # Guard clause: nothing to check without a CUDA build.
        if use_cuda and not core.is_compiled_with_cuda():
            return
        run_place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        executor = fluid.Executor(run_place)

        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            loss = simple_fc_net()
            optimizer = self.optimizer()
            optimizer.minimize(loss)

            executor.run(program=startup_prog)
            compiled = fluid.compiler.CompiledProgram(
                main_prog).with_data_parallel(loss_name=loss.name)
            executor.run(program=compiled, feed=feed_dict)
# Example 7
    def test_compiled_program_base(self):
        """A bare CompiledProgram must reproduce the reference loss computed
        in setUp under the same seed."""
        with new_program_scope():
            # Seed both the global generator and the program-level RNG so
            # parameter init matches the reference run.
            paddle.manual_seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            if core.is_compiled_with_cuda():
                run_place = fluid.CUDAPlace(0)
            else:
                run_place = fluid.CPUPlace()
            executor = fluid.Executor(run_place)

            loss = simple_fc_net()
            executor.run(fluid.default_startup_program())
            compiled = fluid.CompiledProgram(fluid.default_main_program())

            loss_data, = executor.run(compiled,
                                      feed={
                                          "image": self.img,
                                          "label": self.label
                                      },
                                      fetch_list=[loss.name])
            self.assertTrue(np.array_equal(loss_data[0], self.loss))
# Example 8
    def test_compiled_program_with_data_parallel(self):
        """A data-parallel CompiledProgram on a single place must reproduce
        the reference loss computed in setUp under the same seed."""
        with new_program_scope():
            # Seed via program attributes (older-style seeding API).
            fluid.default_startup_program().random_seed = self.seed
            fluid.default_main_program().random_seed = self.seed
            if core.is_compiled_with_cuda():
                run_place = fluid.CUDAPlace(0)
            else:
                run_place = fluid.CPUPlace()
            executor = fluid.Executor(run_place)

            loss = simple_fc_net()
            executor.run(fluid.default_startup_program())
            # Restrict data parallelism to exactly one place so results are
            # comparable with the single-device reference.
            compiled = fluid.CompiledProgram(
                fluid.default_main_program()).with_data_parallel(
                    loss_name=loss.name, places=[run_place])

            loss_data, = executor.run(compiled,
                                      feed={
                                          "image": self.img,
                                          "label": self.label
                                      },
                                      fetch_list=[loss.name])
            self.assertTrue(np.array_equal(loss_data[0], self.loss))
    def build_program_and_scope(self):
        """Create seeded programs with an FC net + Adam, initialize the
        parameters inside a fresh scope, and return
        (main_program, scope, executor, loss)."""
        if self.use_cuda:
            self.place = fluid.CUDAPlace(0)
        else:
            self.place = fluid.CPUPlace()
        init_prog = fluid.Program()
        train_prog = fluid.Program()
        # Fix both program seeds for reproducible parameter init.
        init_prog.random_seed = 1
        train_prog.random_seed = 1

        scope = fluid.Scope()
        with fluid.program_guard(train_prog, init_prog):
            with fluid.unique_name.guard():
                loss = simple_fc_net()
                adam = fluid.optimizer.Adam(learning_rate=1e-3)
                adam.minimize(loss)

                # Run parameter initialization inside the isolated scope.
                with fluid.scope_guard(scope):
                    exe = fluid.Executor(self.place)
                    exe.run(init_prog)

        return train_prog, scope, exe, loss
# Example 10
    def check_backward(self, use_cuda):
        """Verify that paddle.static.Print inserted before the optimizer
        yields exactly two print ops (forward + backward), then run one
        data-parallel step."""
        main = paddle.static.Program()
        startup = paddle.static.Program()

        with program_guard(main, startup):
            loss = simple_fc_net()
            loss = paddle.static.Print(loss)
            paddle.optimizer.Adam().minimize(loss)

        # Count print ops in block 0: one forward, one from the backward pass.
        print_ops = []
        for op in main.blocks[0].ops:
            if op.type == u'print':
                print_ops.append(op)
        assert len(print_ops) == 2, "The number of print op should be 2"

        if use_cuda:
            place = paddle.CUDAPlace(0)
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(startup)

        binary = paddle.static.CompiledProgram(main).with_data_parallel(
            loss_name=loss.name)

        img, label = init_data()
        feed_dict = {"image": img, "label": label}
        exe.run(binary, feed_dict)
# Example 11
    def setUp(self):
        """Compute a reference loss with the plain (uncompiled) program so
        later tests can compare CompiledProgram results against it."""
        self.seed = 100
        # Fixed random batch shared by every test in this case.
        self.img = np.random.random(size=(16, 784)).astype('float32')
        self.label = np.random.randint(low=0,
                                       high=10,
                                       size=[16, 1],
                                       dtype=np.int64)
        with new_program_scope():
            paddle.manual_seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            if core.is_compiled_with_cuda():
                run_place = fluid.CUDAPlace(0)
            else:
                run_place = fluid.CPUPlace()
            executor = fluid.Executor(run_place)

            loss = simple_fc_net()
            executor.run(fluid.default_startup_program())

            loss_data, = executor.run(fluid.default_main_program(),
                                      feed={
                                          "image": self.img,
                                          "label": self.label
                                      },
                                      fetch_list=[loss.name])
            # Reference value for array_equal checks in the tests.
            self.loss = loss_data[0]