Example #1
    def test_api(self):
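        # Check that fc with num_flatten_dims=-1 produces the same result as
        # num_flatten_dims=2 for a 3-D input (-1 is resolved to rank - 1).
        # The snippet assumes the usual test imports: numpy as np, paddle.fluid
        # as fluid, fluid.core as core, Program, program_guard, and a SEED
        # constant defined at module level.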
        startup_program = Program()
        main_program = Program()
        startup_program.random_seed = SEED
        main_program.random_seed = SEED

        with program_guard(main_program, startup_program):
            input = np.random.random([2, 2, 25]).astype("float32")
            x = fluid.layers.data(
                name="x",
                shape=[2, 2, 25],
                append_batch_size=False,
                dtype="float32")

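            # Two fc layers that differ only in num_flatten_dims.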
            out_1 = fluid.layers.fc(input=x, size=1, num_flatten_dims=-1)
            out_2 = fluid.layers.fc(input=x, size=1, num_flatten_dims=2)

        place = (fluid.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else fluid.CPUPlace())
        exe = fluid.Executor(place=place)
        exe.run(startup_program)
        res_1, res_2 = exe.run(main_program,
                               feed={"x": input},
                               fetch_list=[out_1, out_2])

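        # The two fc configurations must produce identical outputs.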
        assert np.array_equal(res_1, res_2)
Example #2
    def check_network_convergence(self,
                                  use_cuda=True,
                                  use_mem_opt=False,
                                  iter_num=5):
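        # Train a small IfElse network on MNIST and return the per-iteration
        # loss values; stops early after iter_num iterations.
        # The snippet assumes the usual test imports: numpy as np, paddle,
        # paddle.fluid as fluid, fluid.layers as layers, fluid.compiler as
        # compiler, Program, program_guard, Executor, and MomentumOptimizer.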
        prog = Program()
        startup_prog = Program()
        prog.random_seed = 100
        startup_prog.random_seed = 100
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)
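            # Samples with label < 5 take the true branch (100-unit hidden
            # layer); the remaining samples take the false branch (200 units).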

            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

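            # Merge the outputs of both branches; prob[0] is the combined
            # softmax prediction.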
            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
            train_reader = paddle.batch(
                paddle.dataset.mnist.train(), batch_size=200)

            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = Executor(place)

            exec_strategy = fluid.ExecutionStrategy()
            exec_strategy.use_cuda = use_cuda

            build_strategy = fluid.BuildStrategy()
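            # Toggle the build-time memory optimization pass under test.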
            build_strategy.memory_optimize = use_mem_opt

            train_cp = compiler.CompiledProgram(fluid.default_main_program())
            train_cp = train_cp.with_data_parallel(
                loss_name=avg_loss.name,
                exec_strategy=exec_strategy,
                build_strategy=build_strategy)
            fetch_list = [avg_loss.name]

            exe.run(startup_prog)
            PASS_NUM = 100
            loop = 0
            ret = []
            for pass_id in range(PASS_NUM):
                for data in train_reader():
                    x_data = np.array([x[0] for x in data]).astype("float32")
                    y_data = np.array([x[1] for x in data]).astype("int64")
                    y_data = y_data.reshape((y_data.shape[0], 1))

                    outs = exe.run(train_cp,
                                   feed={'x': x_data,
                                         'y': y_data},
                                   fetch_list=[avg_loss])

                    loop += 1
                    ret.append(outs[0])
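                    # Stop once the requested number of iterations has run.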
                    if iter_num == loop:
                        return ret
            return ret