Code Example #1
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(name='x',
                        shape=[1],
                        dtype='float32',
                        main_program=program,
                        stop_gradient=False)
        table = layers.lod_rank_table(x, level=0, main_program=program)
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        result = layers.array_to_lod_tensor(array, table, main_program=program)

        mean = layers.mean(x=result, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum() for item in exe.run(program,
                                                        feed={'x': tensor},
                                                        fetch_list=[g_vars],
                                                        return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Code Example #2
 def test_mult_input(self):
     program = Program()
     block = program.current_block()
     sum_x1 = block.create_var(dtype="int",
                               shape=[3, 4],
                               lod_level=0,
                               name="sum.x1")
     sum_x2 = block.create_var(dtype="int",
                               shape=[3, 4],
                               lod_level=0,
                               name="sum.x2")
     sum_x3 = block.create_var(dtype="int",
                               shape=[3, 4],
                               lod_level=0,
                               name="sum.x3")
     sum_out = block.create_var(dtype="int",
                                shape=[3, 4],
                                lod_level=0,
                                name="sum.out")
     sum_op = block.append_op(type="sum",
                              inputs={"X": [sum_x1, sum_x2, sum_x3]},
                              outputs={"Out": sum_out})
     self.assertEqual(sum_op.type, "sum")
     self.assertEqual(sum_op.input_names, ["X"])
     self.assertEqual(sum_op.input("X"), ["sum.x1", "sum.x2", "sum.x3"])
     self.assertEqual(sum_op.output_names, ["Out"])
     self.assertEqual(sum_op.output("Out"), ["sum.out"])
     self.assertEqual(sum_op.idx, 0)
     self.assertEqual(sum_out.op, sum_op)
Code Example #3
    def test_recognize_digits_conv(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(name='pixel',
                                 shape=[1, 28, 28],
                                 dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            conv_pool_1 = nets.simple_img_conv_pool(input=images,
                                                    filter_size=5,
                                                    num_filters=2,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")
            conv_pool_2 = nets.simple_img_conv_pool(input=conv_pool_1,
                                                    filter_size=5,
                                                    num_filters=4,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(x=cost)

            program.append_backward(avg_cost)

        print(str(program))
Code Example #4
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        y = layers.data(
            name='y',
            shape=[1],
            dtype='bool',
            main_program=program,
            stop_gradient=False)

        level = 0

        out_true, out_false = layers.split_lod_tensor(
            input=x, mask=y, level=level, main_program=program)
        out = layers.merge_lod_tensor(
            in_true=out_true,
            in_false=out_false,
            mask=y,
            x=x,
            level=level,
            main_program=program)
        mean = layers.mean(x=out, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        mask_np = np.array([0, 1, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)

        mask = core.LoDTensor()
        mask.set(mask_np, place)

        exe = Executor(place)
        scope = core.Scope()

        g_vars = program.global_block().var(x.name + "@GRAD")
        g_out = [
            item.sum()
            for item in map(np.array,
                            exe.run(program,
                                    feed={'x': tensor,
                                          'y': mask},
                                    fetch_list=[g_vars],
                                    scope=scope,
                                    return_numpy=False))
        ]

        g_out_sum = np.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Code Example #5
def save_vars(executor, dirname, main_program=None, vars=None, predicate=None):
    """
    Save variables to directory by executor.

    :param executor: executor that save variable
    :param dirname: directory path
    :param main_program: program. If vars is None, then filter all variables in this
    program which fit `predicate`. Default g_program.
    :param predicate: The Predicate describes a callable that returns a variable
    as a bool. If it returns true, the variables will be saved.
    :param vars: variables need to be saved. If specify vars, program & predicate
    will be ignored
    :return: None
    """
    if vars is None:
        if main_program is None:
            main_program = default_main_program()
        if not isinstance(main_program, Program):
            raise TypeError("program should be as Program type or None")

        save_vars(executor,
                  dirname=dirname,
                  vars=filter(predicate, main_program.list_vars()))
    else:
        save_program = Program()
        save_block = save_program.global_block()
        for each_var in vars:
            new_var = _clone_var_in_block_(save_block, each_var)
            save_block.append_op(
                type='save',
                inputs={'X': [new_var]},
                outputs={},
                attrs={'file_path': os.path.join(dirname, new_var.name)})
        executor.run(save_program)
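
A minimal usage sketch for save_vars (a hedged example, not confirmed API usage: the paddle.v2.fluid import layout is assumed from the era of these snippets, and "./model_dir" is a hypothetical path):

    import paddle.v2.fluid.core as core
    from paddle.v2.fluid.executor import Executor
    from paddle.v2.fluid.io import save_vars

    exe = Executor(core.CPUPlace())
    # Save every persistable variable of the default main program;
    # `predicate` receives each Variable and returns a bool.
    save_vars(exe,
              dirname="./model_dir",
              predicate=lambda var: var.persistable)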
Code Example #6
    def setUp(self):
        self.program = Program()
        self.fake_program = Program()
        self.place = core.CPUPlace()

        self.input_names = ['X', 'Out']
        self.input_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.input_names
        }
        self.input_vars["Out@GRAD"] = \
            self.fake_program.global_block().create_var(
                name="Out@GRAD", shape=[2, 3], dtype='float32')

        self.output_names = ['X@GRAD']
        self.output_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.output_names
        }

        self.program.global_block().append_op(type='rnn_memory_helper_grad',
                                              inputs=self.input_vars,
                                              outputs=self.output_vars,
                                              attrs={})
Code Example #7
class RNNMemoryHelperOpTest(unittest.TestCase):
    def setUp(self):
        self.program = Program()
        self.place = core.CPUPlace()

        self.X = self.program.global_block().create_var(name='X',
                                                        shape=[2, 3],
                                                        dtype='float32')
        self.Out = self.program.global_block().create_var(name='Out',
                                                          shape=[2, 3],
                                                          dtype='float32')
        self.program.global_block().append_op(type='rnn_memory_helper',
                                              inputs={"X": self.X},
                                              outputs={"Out": self.Out},
                                              attrs={})

    def test_forward(self):
        x_np = np.random.normal(size=(2, 3)).astype("float32")
        self.feed_map = {'X': x_np}
        self.fetch_list = [self.Out]
        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5))
Code Example #8
    def setUp(self):
        self.program = Program()
        self.fake_program = Program()
        self.place = core.CPUPlace()

        self.input_names = ['X', 'Out']
        self.input_vars = {
            name: self.program.global_block().create_var(
                name=name, shape=[2, 3], dtype='float32')
            for name in self.input_names
        }
        self.input_vars["Out@GRAD"] = \
            self.fake_program.global_block().create_var(
                name="Out@GRAD", shape=[2, 3], dtype='float32')

        self.output_names = ['X@GRAD']
        self.output_vars = {
            name: self.program.global_block().create_var(
                name=name, shape=[2, 3], dtype='float32')
            for name in self.output_names
        }

        self.program.global_block().append_op(
            type='rnn_memory_helper_grad',
            inputs=self.input_vars,
            outputs=self.output_vars,
            attrs={})
Code Example #9
File: test_layers.py Project: youmingwei/Paddle
    def test_recognize_digits_conv(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(
                name='pixel', shape=[1, 28, 28], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu")
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(x=cost)

            program.append_backward(avg_cost)

        print(str(program))
Code Example #10
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        table = layers.lod_rank_table(x, level=0, main_program=program)
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        result = layers.array_to_lod_tensor(array, table, main_program=program)

        mean = layers.mean(x=result, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum()
            for item in exe.run(program,
                                feed={'x': tensor},
                                fetch_list=[g_vars],
                                return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Code Example #11
 def setup_program(self):
     self.main_program = Program()
     self.startup_program = Program()
     self.p_info = {
         "main_program": self.main_program,
         "startup_program": self.startup_program
     }
     self.place = core.CPUPlace()
Code Example #12
    def test_simple_conv2d(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(name='pixel',
                                 shape=[3, 48, 48],
                                 dtype='int32')
            layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])

        print(str(program))
Code Example #13
 def test_dropout_layer(self):
     main_program = Program()
     startup_program = Program()
     images = fluid.layers.data(name='pixel',
                                shape=[3, 48, 48],
                                dtype='float32',
                                main_program=main_program)
     fluid.layers.dropout(x=images,
                          dropout_prob=0.5,
                          main_program=main_program,
                          startup_program=startup_program)
Code Example #14
File: test_layers.py Project: youmingwei/Paddle
    def test_fit_a_line(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)
            program.append_backward(avg_cost)

        print(str(program))
Code Example #15
    def test_fit_a_line(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)
            program.append_backward(avg_cost)

        print(str(program))
Code Example #16
File: test_program.py Project: youmingwei/Paddle
    def test_append_backward(self):
        prog = Program()
        block = prog.global_block()

        mul_x = block.create_var(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        mul_op = block.append_op(
            type="mul",
            inputs={"X": [mul_x],
                    "Y": mul_y},
            outputs={"Out": [mul_out]},
            attrs={"x_num_col_dims": 1})

        add_y = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="add.y")
        add_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="add.out")
        add_op = block.append_op(
            type="elementwise_add",
            inputs={"X": mul_out,
                    "Y": add_y},
            outputs={"Out": add_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": add_out}, outputs={"Out": mean_out})

        self.assertEqual(mul_op.idx, 0)
        self.assertEqual(add_op.idx, 1)
        param_to_grad = prog.append_backward(mean_out, set())

        def grad_name(name):
            return name + "@GRAD"

        for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out",
                         "mean.out"):
            self.assertEqual(param_to_grad[var_name][0], grad_name(var_name))
            self.assertEqual(param_to_grad[var_name][1], 0)

        expect_ops = [
            "mul", "elementwise_add", "mean", "fill_constant", "mean_grad",
            "elementwise_add_grad", "mul_grad"
        ]
        actual_ops = []
        for op in block.ops:
            actual_ops.append(op.type)
        self.assertEqual(actual_ops, expect_ops)
Code Example #17
File: test_program.py Project: youmingwei/Paddle
    def test_program_clone_with_parameter(self):
        main_program = Program()
        startup_program = Program()
        kwargs = {
            'main_program': main_program,
            'startup_program': startup_program
        }
        d = layers.data(name='x', shape=[784], dtype='float32', **kwargs)
        hidden = layers.fc(input=d, size=100, **kwargs)
        layers.fc(input=hidden, size=100, **kwargs)

        new_program = main_program.clone()
        self.assertNotEqual(0, len(new_program.blocks[0].all_parameters()))
Code Example #18
    def test_img_conv_group(self):
        main_program = Program()
        startup_program = Program()

        images = fluid.layers.data(name='pixel',
                                   shape=[3, 48, 48],
                                   dtype='float32',
                                   main_program=main_program,
                                   startup_program=startup_program)
        conv1 = conv_block(images, 64, 2, [0.3, 0], main_program,
                           startup_program)
        conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], main_program,
                           startup_program)
Code Example #19
    def test_program_clone_with_parameter(self):
        main_program = Program()
        startup_program = Program()
        kwargs = {
            'main_program': main_program,
            'startup_program': startup_program
        }
        d = layers.data(name='x', shape=[784], dtype='float32', **kwargs)
        hidden = layers.fc(input=d, size=100, **kwargs)
        layers.fc(input=hidden, size=100, **kwargs)

        new_program = main_program.clone()
        self.assertNotEqual(0, len(new_program.blocks[0].all_parameters()))
Code Example #20
def get_parameter_value(para, executor):
    """
    Get the LoDTensor for the parameter

    :param executor: executor for retrieving the value
    :param para: the given parameter
    :return: the LoDTensor for the parameter
    """
    assert is_parameter(para)

    get_program = Program()
    block = get_program.global_block()
    new_var = _clone_var_in_block_(block, para)
    return executor.run(get_program, feed={}, fetch_list=[new_var])[0]
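
A short usage sketch (hedged: it assumes Executor and core are imported as in the other snippets and that main_program holds a trained network; the parameter name "fc_0.w_0" is hypothetical):

    exe = Executor(core.CPUPlace())
    w = main_program.global_block().var("fc_0.w_0")  # hypothetical parameter name
    w_value = get_parameter_value(w, exe)  # LoDTensor holding the weights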
Code Example #21
    def test_recognize_digits_mlp(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            # Change g_program so that the remaining layers use `g_program`
            images = layers.data(name='pixel', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
            predict = layers.fc(input=hidden2, size=10, act='softmax')
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
Code Example #22
    def setUp(self):
        self.program = Program()
        self.place = core.CPUPlace()

        self.X = self.program.global_block().create_var(name='X',
                                                        shape=[2, 3],
                                                        dtype='float32')
        self.Out = self.program.global_block().create_var(name='Out',
                                                          shape=[2, 3],
                                                          dtype='float32')
        self.program.global_block().append_op(type='rnn_memory_helper',
                                              inputs={"X": self.X},
                                              outputs={"Out": self.Out},
                                              attrs={})
Code Example #23
def load_inference_model(dirname, executor):
    """
    Load inference model from a directory

    :param dirname: directory path
    :param executor: executor that load inference model

    :return: [program, feed_var_names, fetch_var_names]
             program: program especially for inference.
             feeded_var_names: Names of variables that need to feed data
             fetch_vars: Variables from which we can get inference results.
    """
    if not os.path.isdir(dirname):
        raise ValueError("There is no directory named '%s'", dirname)

    model_file_name = dirname + "/__model__"
    with open(model_file_name, "rb") as f:
        model = pickle.load(f)
    program_desc_str = model["program_desc_str"]
    feed_var_names = model["feed_var_names"]
    fetch_var_names = model["fetch_var_names"]
    program = Program.parse_from_string(program_desc_str)
    load_persistables_if_exist(executor, dirname, program)
    fetch_vars = [program.global_block().var(name) for name in fetch_var_names]

    return [program, feed_var_names, fetch_vars]
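
A hedged inference sketch built on the return values above ("./model_dir" and the 784-wide input batch are hypothetical; feed_var_names is assumed to contain a single input name):

    import numpy as np

    exe = Executor(core.CPUPlace())
    program, feed_var_names, fetch_vars = load_inference_model("./model_dir", exe)
    # Feed one batch; assumes a single float32 input of width 784 was
    # declared when the model was saved.
    batch = np.random.random((1, 784)).astype("float32")
    results = exe.run(program,
                      feed={feed_var_names[0]: batch},
                      fetch_list=fetch_vars)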
Code Example #24
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        place = self.place()
        program = Program()
        x = layers.data(name='x', shape=[10], main_program=program)
        x.persistable = True
        table = layers.lod_rank_table(x, level=level, main_program=program)
        max_len = layers.max_sequence_len(table, main_program=program)
        max_len.persistable = True
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        array.persistable = True

        result = layers.array_to_lod_tensor(array, table, main_program=program)
        result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        var = scope.find_var(array.name)
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        self.check_tensor_same(
            scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
Code Example #25
 def test_sigmoid_cross_entropy(self):
     program = Program()
     with program_guard(program):
         dat = layers.data(name='data', shape=[10], dtype='float32')
         lbl = layers.data(name='label', shape=[10], dtype='float32')
         self.assertIsNotNone(
             layers.sigmoid_cross_entropy_with_logits(x=dat, label=lbl))
     print(str(program))
Code Example #26
 def setup_program(self):
     self.main_program = Program()
     self.startup_program = Program()
     self.p_info = {
         "main_program": self.main_program,
         "startup_program": self.startup_program
     }
     self.place = core.CPUPlace()
Code Example #27
class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):
    def setUp(self):
        self.program = Program()
        self.fake_program = Program()
        self.place = core.CPUPlace()

        self.input_names = ['X', 'Out']
        self.input_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.input_names
        }
        self.input_vars["Out@GRAD"] = \
            self.fake_program.global_block().create_var(
                name="Out@GRAD", shape=[2, 3], dtype='float32')

        self.output_names = ['X@GRAD']
        self.output_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.output_names
        }

        self.program.global_block().append_op(type='rnn_memory_helper_grad',
                                              inputs=self.input_vars,
                                              outputs=self.output_vars,
                                              attrs={})

    def test_backward(self):
        self.feed_map = {
            name: np.random.normal(size=(2, 3)).astype("float32")
            for name in ['X', 'Out']
        }
        self.fetch_list = [self.output_vars['X@GRAD']]

        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(
            np.allclose(out[0],
                        np.zeros(shape=(2, 3)).astype("float32"),
                        rtol=1e-5))
Code Example #28
 def test_elementwise_add_with_act(self):
     main_program = Program()
     startup_program = Program()
     image1 = fluid.layers.data(name='pixel1',
                                shape=[3, 48, 48],
                                dtype='float32',
                                main_program=main_program,
                                startup_program=startup_program)
     image2 = fluid.layers.data(name='pixel2',
                                shape=[3, 48, 48],
                                dtype='float32',
                                main_program=main_program,
                                startup_program=startup_program)
     out = fluid.layers.elementwise_add(x=image1,
                                        y=image2,
                                        act='relu',
                                        main_program=main_program,
                                        startup_program=startup_program)
Code Example #29
    def test_batch_norm_layer(self):
        main_program = Program()
        startup_program = Program()
        images = fluid.layers.data(name='pixel',
                                   shape=[3, 48, 48],
                                   dtype='float32',
                                   main_program=main_program)
        hidden1 = fluid.layers.batch_norm(input=images,
                                          main_program=main_program,
                                          startup_program=startup_program)
        hidden2 = fluid.layers.fc(input=hidden1,
                                  size=128,
                                  act='relu',
                                  main_program=main_program)
        hidden3 = fluid.layers.batch_norm(input=hidden2,
                                          main_program=main_program,
                                          startup_program=startup_program)

        print(str(main_program))
Code Example #30
class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase):
    def setUp(self):
        self.program = Program()
        self.fake_program = Program()
        self.place = core.CPUPlace()

        self.input_names = ['X', 'Out']
        self.input_vars = {
            name: self.program.global_block().create_var(
                name=name, shape=[2, 3], dtype='float32')
            for name in self.input_names
        }
        self.input_vars["Out@GRAD"] = \
            self.fake_program.global_block().create_var(
                name="Out@GRAD", shape=[2, 3], dtype='float32')

        self.output_names = ['X@GRAD']
        self.output_vars = {
            name: self.program.global_block().create_var(
                name=name, shape=[2, 3], dtype='float32')
            for name in self.output_names
        }

        self.program.global_block().append_op(
            type='rnn_memory_helper_grad',
            inputs=self.input_vars,
            outputs=self.output_vars,
            attrs={})

    def test_backward(self):
        self.feed_map = {
            name: np.random.normal(size=(2, 3)).astype("float32")
            for name in ['X', 'Out']
        }
        self.fetch_list = [self.output_vars['X@GRAD']]

        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(
            np.allclose(
                out[0], np.zeros(shape=(2, 3)).astype("float32"), rtol=1e-5))
Code Example #31
    def test_word_embedding(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            dict_size = 10000
            embed_size = 32
            first_word = layers.data(name='firstw', shape=[1], dtype='int64')
            second_word = layers.data(name='secondw', shape=[1], dtype='int64')
            third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
            forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
            next_word = layers.data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(input=first_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_second = layers.embedding(input=second_word,
                                            size=[dict_size, embed_size],
                                            dtype='float32',
                                            param_attr='shared_w')

            embed_third = layers.embedding(input=third_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_forth = layers.embedding(input=forth_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
                axis=1)

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(input=hidden1,
                                     size=dict_size,
                                     act='softmax')
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
Code Example #32
    def test_linear_chain_crf(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            label_dict_len = 10
            images = layers.data(name='pixel', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            hidden = layers.fc(input=images, size=128)
            crf = layers.linear_chain_crf(input=hidden,
                                          label=label,
                                          param_attr=ParamAttr(name="crfw"))
            crf_decode = layers.crf_decoding(input=hidden,
                                             param_attr=ParamAttr(name="crfw"))
            layers.chunk_eval(input=crf_decode,
                              label=label,
                              chunk_scheme="IOB",
                              num_chunk_types=(label_dict_len - 1) // 2)
            self.assertNotEqual(crf, None)
            self.assertNotEqual(crf_decode, None)

        print(str(program))
Code Example #33
    def test_op_desc_creation(self):
        program = Program()
        block = program.current_block()
        mul_x = block.create_var(dtype="float32",
                                 shape=[5, 10],
                                 lod_level=0,
                                 name="mul.x")
        mul_y = block.create_var(dtype="float32",
                                 shape=[10, 8],
                                 lod_level=0,
                                 name="mul.y")
        mul_out = block.create_var(dtype="float32",
                                   shape=[5, 8],
                                   lod_level=0,
                                   name="mul.out")
        mul_op = block.append_op(type="mul",
                                 inputs={
                                     "X": [mul_x],
                                     "Y": mul_y
                                 },
                                 outputs={"Out": [mul_out]},
                                 attrs={"x_num_col_dims": 1})

        self.assertNotEqual(str(mul_op), "")
        self.assertEqual(mul_op.type, "mul")
        self.assertEqual(mul_op.input_names, ["X", "Y"])
        self.assertEqual(mul_op.input("X"), ["mul.x"])
        self.assertEqual(mul_op.input("Y"), ["mul.y"])
        self.assertEqual(mul_op.output_names, ["Out"])
        self.assertEqual(mul_op.output("Out"), ["mul.out"])
        self.assertEqual(set(mul_op.attr_names),
                         set(["x_num_col_dims", "y_num_col_dims"]))
        self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
        self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
        self.assertEqual(mul_op.attr("x_num_col_dims"), 1)
        self.assertEqual(mul_op.has_attr("y_num_col_dims"), True)
        self.assertEqual(mul_op.attr_type("y_num_col_dims"), core.AttrType.INT)
        self.assertEqual(mul_op.attr("y_num_col_dims"), 1)
        self.assertEqual(mul_op.idx, 0)
        self.assertEqual(mul_out.op, mul_op)
Code Example #34
    def setUp(self):
        self.program = Program()
        self.place = core.CPUPlace()

        self.X = self.program.global_block().create_var(
            name='X', shape=[2, 3], dtype='float32')
        self.Out = self.program.global_block().create_var(
            name='Out', shape=[2, 3], dtype='float32')
        self.program.global_block().append_op(
            type='rnn_memory_helper',
            inputs={"X": self.X},
            outputs={"Out": self.Out},
            attrs={})
Code Example #35
    def main(self,
             tensor,
             mask,
             expect_true,
             expect_false,
             expect_out,
             level=0):
        place = self.place()
        program = Program()
        x = layers.data(name='x', shape=[1], main_program=program)
        x.persistable = True

        y = layers.data(name='y', shape=[1], main_program=program)
        y.persistable = True

        out_true, out_false = layers.split_lod_tensor(input=x,
                                                      mask=y,
                                                      level=level,
                                                      main_program=program)
        out_true.persistable = True
        out_false.persistable = True

        out = layers.merge_lod_tensor(in_true=out_true,
                                      in_false=out_false,
                                      mask=y,
                                      x=x,
                                      level=level,
                                      main_program=program)

        out.persistable = True

        exe = Executor(place)
        scope = core.Scope()
        exe.run(program,
                feed={
                    'x': tensor,
                    'y': mask
                },
                scope=scope,
                return_numpy=False)

        var_true = scope.find_var(out_true.name).get_tensor()

        var_false = scope.find_var(out_false.name).get_tensor()

        var_out = scope.find_var(out.name).get_tensor()

        self.check_tensor_same(var_true, expect_true)
        self.check_tensor_same(var_false, expect_false)
        self.check_tensor_same(var_out, expect_out)
Code Example #36
def load_vars(executor, dirname, main_program=None, vars=None, predicate=None):
    """
    Load variables from directory by executor.

    :param executor: executor that save variable
    :param dirname: directory path
    :param main_program: program. If vars is None, then filter all variables in this
    program which fit `predicate`. Default default_main_program().
    :param predicate: The Predicate describes a callable that returns a variable
    as a bool. If it returns true, the variables will be loaded.
    :param vars: variables need to be loaded. If specify vars, program &
    predicate will be ignored
    :return: None
    """
    if vars is None:
        if main_program is None:
            main_program = default_main_program()
        if not isinstance(main_program, Program):
            raise TypeError("program's type should be Program")

        load_vars(executor,
                  dirname=dirname,
                  vars=filter(predicate, main_program.list_vars()))
    else:
        load_prog = Program()
        load_block = load_prog.global_block()
        for each_var in vars:
            assert isinstance(each_var, Variable)
            new_var = _clone_var_in_block_(load_block, each_var)
            load_block.append_op(
                type='load',
                inputs={},
                outputs={"Out": [new_var]},
                attrs={'file_path': os.path.join(dirname, new_var.name)})

        executor.run(load_prog)
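
Mirroring the save_vars sketch above, a minimal (assumed) way to restore the same set of variables from the hypothetical "./model_dir":

    exe = Executor(core.CPUPlace())
    # Load every persistable variable of the default main program.
    load_vars(exe,
              dirname="./model_dir",
              predicate=lambda var: var.persistable)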
Code Example #37
class RNNMemoryHelperOpTest(unittest.TestCase):
    def setUp(self):
        self.program = Program()
        self.place = core.CPUPlace()

        self.X = self.program.global_block().create_var(
            name='X', shape=[2, 3], dtype='float32')
        self.Out = self.program.global_block().create_var(
            name='Out', shape=[2, 3], dtype='float32')
        self.program.global_block().append_op(
            type='rnn_memory_helper',
            inputs={"X": self.X},
            outputs={"Out": self.Out},
            attrs={})

    def test_forward(self):
        x_np = np.random.normal(size=(2, 3)).astype("float32")
        self.feed_map = {'X': x_np}
        self.fetch_list = [self.Out]
        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5))
Code Example #38
File: test_program.py Project: youmingwei/Paddle
    def test_program_clone(self):
        prog = Program()

        x = prog.global_block().create_var(
            name='X', shape=[1000, 784], dtype='float32')

        y = prog.global_block().create_var(
            name='Y', shape=[784, 100], dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(
            type="mul", inputs={'X': [x],
                                'Y': [y]}, outputs={'Out': [out]})

        # FIXME(yuyang18): We manually compare the output strings, since the
        # order of variables could change.
        print(prog)
        print(prog.clone())
Code Example #39
File: test_program.py Project: youmingwei/Paddle
    def test_parse_program_from_string(self):
        prog = Program()

        x = prog.global_block().create_var(
            name='X', shape=[1000, 784], dtype='float32')

        y = prog.global_block().create_var(
            name='Y', shape=[784, 100], dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(
            type="mul", inputs={'X': [x],
                                'Y': [y]}, outputs={'Out': [out]})

        binary_str = prog.desc.serialize_to_string()
        prog_restored = Program.parse_from_string(binary_str)

        print(prog)
        print(prog_restored)
Code Example #40
    def test_program_clone(self):
        prog = Program()

        x = prog.global_block().create_var(name='X',
                                           shape=[1000, 784],
                                           dtype='float32')

        y = prog.global_block().create_var(name='Y',
                                           shape=[784, 100],
                                           dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(type="mul",
                                      inputs={
                                          'X': [x],
                                          'Y': [y]
                                      },
                                      outputs={'Out': [out]})

        # FIXME(yuyang18): We manually compare the output strings, since the
        # order of variables could change.
        print(prog)
        print(prog.clone())
Code Example #41
    def test_parse_program_from_string(self):
        prog = Program()

        x = prog.global_block().create_var(name='X',
                                           shape=[1000, 784],
                                           dtype='float32')

        y = prog.global_block().create_var(name='Y',
                                           shape=[784, 100],
                                           dtype='float32')
        out = prog.global_block().create_var(name='Out', dtype='float32')
        prog.global_block().append_op(type="mul",
                                      inputs={
                                          'X': [x],
                                          'Y': [y]
                                      },
                                      outputs={'Out': [out]})

        binary_str = prog.desc.serialize_to_string()
        prog_restored = Program.parse_from_string(binary_str)

        print(prog)
        print(prog_restored)
Code Example #42
File: op_test.py Project: youmingwei/Paddle
    def check_output_with_place(self, place, atol):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

        program = Program()
        block = program.global_block()

        inputs = append_input_output(block, op_proto, self.inputs, True)
        outputs = append_input_output(block, op_proto, self.outputs, False)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        fetch_list = []
        for var_name, var in outputs.items():
            if var_name in self.outputs:
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)

        feed_map = self.feed_var(inputs, place)

        exe = Executor(place)
        outs = exe.run(program,
                       feed=feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)

        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for sub_out_name, expect in sub_out:
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.lod(), expect[1], "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))
Code Example #43
File: op_test.py Project: youmingwei/Paddle
    def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
        prog = Program()
        block = prog.global_block()
        inputs_with_np = {
            key: value
            for (key, value) in OpTest._create_var_descs_(
                block, getattr(self, 'inputs', {}))
        }
        outputs_with_np = {
            key: val
            for (key, val) in OpTest._create_var_descs_(
                block, getattr(self, 'outputs', {}))
        }
        inputs = {
            k: [item[0] for item in inputs_with_np[k]]
            for k in inputs_with_np
        }
        outputs = {
            k: [item[0] for item in outputs_with_np[k]]
            for k in outputs_with_np
        }

        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=getattr(self, 'attrs', {}))

        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        mean_inputs = list(map(block.var, output_names))

        if len(mean_inputs) == 1:
            loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
            op = block.append_op(
                inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
            op.desc.infer_var_type(block.desc)
            op.desc.infer_shape(block.desc)
        else:
            avg_sum = []
            for cur_loss in mean_inputs:
                cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
                op = block.append_op(
                    inputs={"X": [cur_loss]},
                    outputs={"Out": [cur_avg_loss]},
                    type="mean")
                op.desc.infer_var_type(block.desc)
                op.desc.infer_shape(block.desc)
                avg_sum.append(cur_avg_loss)

            loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
            op_sum = block.append_op(
                inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
            op_sum.desc.infer_var_type(block.desc)
            op_sum.desc.infer_shape(block.desc)

            loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
            op_loss = block.append_op(
                inputs={"X": loss_sum},
                outputs={"Out": loss},
                type='scale',
                attrs={'scale': 1.0 / float(len(avg_sum))})
            op_loss.desc.infer_var_type(block.desc)
            op_loss.desc.infer_shape(block.desc)

        param_grad_list = append_backward_ops(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        feed_dict = {
            item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place)
            for p_name in inputs_with_np for item in inputs_with_np[p_name]
        }

        fetch_list = [g for p, g in param_grad_list]
        executor = Executor(place)
        return list(
            map(np.array,
                executor.run(prog, feed_dict, fetch_list, return_numpy=False)))
Code Example #44
class RecurrentOpTest1(unittest.TestCase):
    '''
    Test RNNOp
    equation:
        h_t = ( x_t + h_{t-1} ) / scale
    vars:
        - x
    memories:
        - h
    outputs:
        - h
    '''

    input_dim = 2
    batch_size = 1
    sent_len = 1

    def setup_program(self):
        self.main_program = Program()
        self.startup_program = Program()
        self.p_info = {
            "main_program": self.main_program,
            "startup_program": self.startup_program
        }
        self.place = core.CPUPlace()

    def setUp(self):
        self.setup_program()
        self.data_field = {"x", "h_boot"}

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)

        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)

    def create_rnn_op(self):
        x = layers.data(
            shape=[self.sent_len, self.batch_size, self.input_dim],
            dtype='float32',
            name='x',
            append_batch_size=False,
            **self.p_info)
        x.stop_gradient = False
        h_boot = layers.data(
            shape=[self.input_dim],
            dtype='float32',
            name='h_boot',
            **self.p_info)
        h_boot.stop_gradient = False

        rnn = layers.StaticRNN(main_program=self.main_program)
        with rnn.step():
            h_pre = rnn.memory(init=h_boot)
            x_t = rnn.step_input(x)

            h = layers.scale(
                x=layers.elementwise_add(
                    x=h_pre, y=x_t, **self.p_info),
                scale=self.py_rnn.scale,
                **self.p_info)

            rnn.update_memory(h_pre, h)
            rnn.output(h)

        return rnn()

    def forward(self):
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.data_field
        }
        exe = Executor(self.place)
        out = exe.run(self.main_program,
                      feed=self.feed_map,
                      fetch_list=[self.output])

        return out[0]

    def backward(self):
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.data_field
        }
        fetch_list = [
            self.main_program.global_block().var(x + "@GRAD")
            for x in self.data_field
        ]

        exe = Executor(self.place)
        return exe.run(self.main_program,
                       feed=self.feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)

    def test_backward(self):
        self.check_forward()

        append_backward_ops(self.output)

        ana_grad = [np.array(x) for x in self.backward()]

        num_grad = self.get_numerical_gradient()
        for idx, name in enumerate(self.data_field):
            self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape)
            self.assertTrue(
                np.isclose(
                    num_grad[idx], ana_grad[idx], rtol=0.1).all())

    def check_forward(self):
        print('test recurrent op forward')
        pd_output = self.forward()
        py_output = self.py_rnn.forward()
        print('pd_output', pd_output)
        print()
        print('py_output', py_output)
        self.assertEqual(pd_output.shape, py_output.shape)
        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())

    def get_numerical_gradient(self, delta=0.005):
        dloss_dout = 1.0
        feed_list = [getattr(self.py_rnn, x) for x in self.data_field]
        grad_list = [np.zeros_like(x) for x in feed_list]
        for feed, grad in zip(feed_list, grad_list):
            for f, g in np.nditer([feed, grad], op_flags=['readwrite']):
                o = float(f)
                f[...] = o + delta
                y_pos = self.forward()

                f[...] = o - delta
                y_neg = self.forward()

                f[...] = o
                dout_dfeed = (y_pos - y_neg) / (delta * 2)
                g[...] = dout_dfeed[0]

        return grad_list
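
For reference, the recurrence stated in the class docstring, h_t = (x_t + h_{t-1}) / scale, fits in a few lines of standalone NumPy (a sketch for checking intuition against the docstring's equation, not part of the test):

    import numpy as np

    def simple_rnn(x, h_boot, scale):
        # x: [sent_len, batch_size, input_dim]; h_boot: [batch_size, input_dim]
        h = h_boot
        outs = []
        for x_t in x:
            h = (x_t + h) / scale  # one step of the stated recurrence
            outs.append(h)
        return np.stack(outs)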
Code Example #45
    def test_raw_api(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(input=label,
                                                     dtype='int64',
                                                     shape=[1],
                                                     value=5.0,
                                                     **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)
        true_image, false_image = layers.split_lod_tensor(input=image,
                                                          mask=cond,
                                                          **kwargs)

        true_out = layers.create_tensor(dtype='float32', **kwargs)
        true_cond = layers.ConditionalBlock([true_image], **kwargs)

        with true_cond.block():
            hidden = layers.fc(input=true_image,
                               size=100,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=true_out, **kwargs)

        false_out = layers.create_tensor(dtype='float32', **kwargs)
        false_cond = layers.ConditionalBlock([false_image], **kwargs)

        with false_cond.block():
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=false_out, **kwargs)

        prob = layers.merge_lod_tensor(in_true=true_out,
                                       in_false=false_out,
                                       mask=cond,
                                       x=image,
                                       **kwargs)
        loss = layers.cross_entropy(input=prob, label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])

        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(kwargs['main_program'],
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)
Code Example #46
    def test_ifelse(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(input=label,
                                                     dtype='int64',
                                                     shape=[1],
                                                     value=5.0,
                                                     **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)

        ie = layers.IfElse(cond, **kwargs)

        with ie.true_block():
            true_image = ie.input(image)
            hidden = layers.fc(input=true_image,
                               size=100,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        with ie.false_block():
            false_image = ie.input(image)
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        prob = ie()
        loss = layers.cross_entropy(input=prob[0], label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(kwargs['main_program'],
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)