Example #1
 def test_op(self):
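     # Build a 128-dim dense data layer, run it through every unary math op,
     # then mix in scalar and layer arithmetic before parsing the network.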
     x = layer.data(name='data', type=data_type.dense_vector(128))
     x = op.exp(x)
     x = op.sqrt(x)
     x = op.reciprocal(x)
     x = op.log(x)
     x = op.abs(x)
     x = op.sigmoid(x)
     x = op.tanh(x)
     x = op.square(x)
     x = op.relu(x)
     y = 1 + x
     y = y + 1
     y = x + y
     y = y - x
     y = y - 2
     y = 2 - y
     y = 2 * y
     y = y * 3
     z = layer.data(name='data_2', type=data_type.dense_vector(1))
     y = y * z
     y = z * y
     y = y + z
     y = z + y
     print layer.parse_network(y)
Example #2
        def parse_new_rnn():
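            # Build a recurrent_group whose step function combines the outer
            # embedding, an inner embedding of the word ids, and a memory cell
            # bootstrapped from a small fc layer, then classify its last state.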
            reset_parser()
            data = layer.data(
                name="word", type=data_type.dense_vector(dict_dim))
            label = layer.data(
                name="label", type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(
                name="boot", type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(
                    name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(
                name="rnn", step=step, input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #3
    def test_operator(self):
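        # Feed a dotmul_operator and a conv_operator into mixed layers, both
        # through the input= argument and through the context-manager form.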
        ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
        ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
        dotmul0 = layer.mixed(input=dotmul_op)
        with layer.mixed() as dotmul1:
            dotmul1 += dotmul_op

        conv = layer.conv_operator(img=ipt0,
                                   filter=ipt1,
                                   filter_size=1,
                                   num_channels=1,
                                   num_filters=128,
                                   stride=1,
                                   padding=0)
        conv0 = layer.mixed(input=conv)
        with layer.mixed() as conv1:
            conv1 += conv

        print layer.parse_network(dotmul0)
        print layer.parse_network(dotmul1)
        print layer.parse_network(conv0)
        print layer.parse_network(conv1)
Example #4
        def parse_new_rnn():
            data = layer.data(name="word",
                              type=data_type.dense_vector(dict_dim))
            label = layer.data(name="label",
                               type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(name="boot",
                                    type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(name="rnn_state",
                                   size=hidden_dim,
                                   boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(name="rnn",
                                        step=step,
                                        input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #5
    def test_operator(self):
        ipt0 = layer.data(name='data1', type=data_type.dense_vector(784))
        ipt1 = layer.data(name='word1', type=data_type.dense_vector(128))
        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
        dotmul0 = layer.mixed(input=dotmul_op)
        with layer.mixed() as dotmul1:
            dotmul1 += dotmul_op

        conv = layer.conv_operator(
            img=ipt0,
            filter=ipt1,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv)
        with layer.mixed() as conv1:
            conv1 += conv

        print layer.parse_network(dotmul0)
        print layer.parse_network(dotmul1)
        print layer.parse_network(conv0)
        print layer.parse_network(conv1)
Example #6
    def test_multiple_features(self):
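        # Each sample is [int label, sparse binary vector, dense vector]; check
        # that DataFeeder maps the features onto the requested slots for three
        # configurations: all features, a subset, and one with a duplicate slot.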
        batch_size = 2
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(np.random.randint(10))
            each_sample.append(
                self.sparse_binary_reader(20000, 40, non_empty=True))
            each_sample.append(self.dense_reader(100))
            data.append(each_sample)

        # test multiple features
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_sparse = arg.getSlotValue(1)
        output_index = arg.getSlotIds(2).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_sparse.getSparseRowCols(i), data[i][1])
            self.assertEqual(output_index[i], data[i][0])

        # reader returns 3 features, but only 2 of them are used
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_index = arg.getSlotIds(1).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_index[i], data[i][0])

        # reader returns 3 features; one of them is fed to two slots
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10)),
                      ('fea3', data_type.dense_vector(100))]
        feeder = DataFeeder(data_types, {
            'fea0': 2,
            'fea1': 1,
            'fea2': 0,
            'fea3': 2
        })
        arg = feeder(data)
        fea0 = arg.getSlotValue(0).copyToNumpyMat()
        fea1 = arg.getSlotValue(1)
        fea2 = arg.getSlotIds(2).copyToNumpyArray()
        fea3 = arg.getSlotValue(3).copyToNumpyMat()
        for i in xrange(batch_size):
            self.assertEqual(fea0[i].all(), data[i][2].all())
            self.assertEqual(fea1.getSparseRowCols(i), data[i][1])
            self.assertEqual(fea2[i], data[i][0])
            self.assertEqual(fea3[i].all(), data[i][2].all())
Example #7
    def test_multiple_features(self):
        batch_size = 2
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(np.random.randint(10))
            each_sample.append(
                self.sparse_binary_reader(
                    20000, 40, non_empty=True))
            each_sample.append(self.dense_reader(100))
            data.append(each_sample)

        # test multiple features
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_sparse = arg.getSlotValue(1)
        output_index = arg.getSlotIds(2).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_sparse.getSparseRowCols(i), data[i][1])
            self.assertEqual(output_index[i], data[i][0])

        # reader returns 3 features, but only 2 of them are used
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_index = arg.getSlotIds(1).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_index[i], data[i][0])

        # reader returns 3 features; one of them is fed to two slots
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10)),
                      ('fea3', data_type.dense_vector(100))]
        feeder = DataFeeder(data_types,
                            {'fea0': 2,
                             'fea1': 1,
                             'fea2': 0,
                             'fea3': 2})
        arg = feeder(data)
        fea0 = arg.getSlotValue(0).copyToNumpyMat()
        fea1 = arg.getSlotValue(1)
        fea2 = arg.getSlotIds(2).copyToNumpyArray()
        fea3 = arg.getSlotValue(3).copyToNumpyMat()
        for i in xrange(batch_size):
            self.assertEqual(fea0[i].all(), data[i][2].all())
            self.assertEqual(fea1.getSparseRowCols(i), data[i][1])
            self.assertEqual(fea2[i], data[i][0])
            self.assertEqual(fea3[i].all(), data[i][2].all())
Example #8
 def compare(input):
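     # Feed the raw input through a single dense slot and check that the
     # matrix copied back out matches the numpy-converted input.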
     feeder = DataFeeder([('image', data_type.dense_vector(784))],
                         {'image': 0})
     arg = feeder(input)
     output = arg.getSlotValue(0).copyToNumpyMat()
     input = np.array(input, dtype='float32')
     self.assertAlmostEqual(input.all(), output.all())
Example #9
    def test_projection(self):
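        # Exercise every projection type (full_matrix, identity, table,
        # scaling, dotmul, context and conv) inside mixed layers built both
        # eagerly via input= and incrementally via the context manager.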
        input = layer.data(name='data2', type=data_type.dense_vector(784))
        word = layer.data(
            name='word2', type=data_type.integer_value_sequence(10000))
        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
        mixed0 = layer.mixed(
            size=256,
            input=[
                layer.full_matrix_projection(input=fc0),
                layer.full_matrix_projection(input=fc1)
            ])
        with layer.mixed(size=200) as mixed1:
            mixed1 += layer.full_matrix_projection(input=fc0)
            mixed1 += layer.identity_projection(input=fc1)

        table = layer.table_projection(input=word)
        emb0 = layer.mixed(size=512, input=table)
        with layer.mixed(size=512) as emb1:
            emb1 += table

        scale = layer.scaling_projection(input=fc0)
        scale0 = layer.mixed(size=100, input=scale)
        with layer.mixed(size=100) as scale1:
            scale1 += scale

        dotmul = layer.dotmul_projection(input=fc0)
        dotmul0 = layer.mixed(size=100, input=dotmul)
        with layer.mixed(size=100) as dotmul1:
            dotmul1 += dotmul

        context = layer.context_projection(input=fc0, context_len=5)
        context0 = layer.mixed(size=500, input=context)
        with layer.mixed(size=500) as context1:
            context1 += context

        conv = layer.conv_projection(
            input=input,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv, bias_attr=True)
        with layer.mixed(bias_attr=True) as conv1:
            conv1 += conv

        print layer.parse_network(mixed0)
        print layer.parse_network(mixed1)
        print layer.parse_network(emb0)
        print layer.parse_network(emb1)
        print layer.parse_network(scale0)
        print layer.parse_network(scale1)
        print layer.parse_network(dotmul0)
        print layer.parse_network(dotmul1)
        print layer.parse_network(conv0)
        print layer.parse_network(conv1)
Example #10
    def test_projection(self):
        input = layer.data(name='data', type=data_type.dense_vector(784))
        word = layer.data(
            name='word', type=data_type.integer_value_sequence(10000))
        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
        mixed0 = layer.mixed(
            size=256,
            input=[
                layer.full_matrix_projection(input=fc0),
                layer.full_matrix_projection(input=fc1)
            ])
        with layer.mixed(size=200) as mixed1:
            mixed1 += layer.full_matrix_projection(input=fc0)
            mixed1 += layer.identity_projection(input=fc1)

        table = layer.table_projection(input=word)
        emb0 = layer.mixed(size=512, input=table)
        with layer.mixed(size=512) as emb1:
            emb1 += table

        scale = layer.scaling_projection(input=fc0)
        scale0 = layer.mixed(size=100, input=scale)
        with layer.mixed(size=100) as scale1:
            scale1 += scale

        dotmul = layer.dotmul_projection(input=fc0)
        dotmul0 = layer.mixed(size=100, input=dotmul)
        with layer.mixed(size=100) as dotmul1:
            dotmul1 += dotmul

        context = layer.context_projection(input=fc0, context_len=5)
        context0 = layer.mixed(size=100, input=context)
        with layer.mixed(size=100) as context1:
            context1 += context

        conv = layer.conv_projection(
            input=input,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv, bias_attr=True)
        with layer.mixed(bias_attr=True) as conv1:
            conv1 += conv

        print layer.parse_network(mixed0)
        print layer.parse_network(mixed1)
        print layer.parse_network(emb0)
        print layer.parse_network(emb1)
        print layer.parse_network(scale0)
        print layer.parse_network(scale1)
        print layer.parse_network(dotmul0)
        print layer.parse_network(dotmul1)
        print layer.parse_network(conv0)
        print layer.parse_network(conv1)
Example #11
    def _declare_input_layers(self):
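        # Declare the sparse binary inputs for the DNN and LR branches plus the
        # dense click label.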
        self.dnn_merged_input = layer.data(
            name='dnn_input',
            type=paddle.data_type.sparse_binary_vector(self.dnn_input_dim))

        self.lr_merged_input = layer.data(
            name='lr_input',
            type=paddle.data_type.sparse_binary_vector(self.lr_input_dim))

        self.click = paddle.layer.data(name='click',
                                       type=dtype.dense_vector(1))
Example #12
    def test_evaluator(self):
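        # Attach a classification_error evaluator to a softmax fc classifier
        # and parse both the cost network and the bare output network.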
        img = layer.data(name='pixel2', type=data_type.dense_vector(784))
        output = layer.fc(input=img,
                          size=10,
                          act=activation.Softmax(),
                          name='fc_here')
        lbl = layer.data(name='label2', type=data_type.integer_value(10))
        cost = layer.cross_entropy_cost(input=output, label=lbl)

        evaluator.classification_error(input=output, label=lbl)
        print layer.parse_network(cost)
        print layer.parse_network(output)
Example #13
 def test_get_layer(self):
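     # Wrap a small classifier in a Topology and check that get_layer()
     # returns the original data layers by name.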
     pixel = layer.data(name='pixel2', type=data_type.dense_vector(784))
     label = layer.data(name='label2', type=data_type.integer_value(10))
     hidden = layer.fc(input=pixel,
                       size=100,
                       act=conf_helps.SigmoidActivation())
     inference = layer.fc(input=hidden,
                          size=10,
                          act=conf_helps.SoftmaxActivation())
     cost = layer.classification_cost(input=inference, label=label)
     topo = topology.Topology(cost)
     pixel_layer = topo.get_layer("pixel2")
     label_layer = topo.get_layer("label2")
     self.assertEqual(pixel_layer, pixel)
     self.assertEqual(label_layer, label)
Example #14
    def test_parse(self):
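        # Topology should accept a single cost layer, a list of costs, and a
        # mix of inference outputs, and produce a protobuf config for each.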
        pixel = layer.data(name='pixel3', type=data_type.dense_vector(784))
        label = layer.data(name='label3', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        maxid = layer.max_id(input=inference)
        cost1 = layer.classification_cost(input=inference, label=label)
        cost2 = layer.cross_entropy_cost(input=inference, label=label)

        topology.Topology(cost2).proto()
        topology.Topology([cost1]).proto()
        topology.Topology([cost1, cost2]).proto()
        topology.Topology([inference, maxid]).proto()
Example #15
    def _declare_input_layers(self):
        self.dnn_merged_input = layer.data(
            name='dnn_input',
            # type: InputType(dim=61, seq_type=SequenceType.NO_SEQUENCE, type=DataType.SparseNonValue)
            # sparse_binary_vector: a sparse 0/1 vector; most entries are 0 and
            # any non-zero entry must be 1.
            type=paddle.data_type.sparse_binary_vector(
                self.dnn_input_dim))  # dnn_input_dim = 61

        self.lr_merged_input = layer.data(
            name='lr_input',
            # type: InputType(dim=10040001, seq_type=SequenceType.NO_SEQUENCE, type=DataType.SparseValue)
            type=paddle.data_type.sparse_float_vector(self.lr_input_dim))

        if not self.is_infer:
            self.click = paddle.layer.data(
                name='click',
                type=dtype.dense_vector(1))  # dense_vector: a dense float vector
Example #16
    def test_initializer(self):
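        # A custom initializer fills fc.w with ones except element [1, 1] = 2;
        # parameters.create() should invoke it and expose the resulting matrix.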
        def initializer(name):
            assert name == "fc.w"
            mat = numpy.ones((3, 2), dtype=numpy.float32)
            mat[1, 1] = 2
            return mat

        x = layer.data(name="x", type=data_type.dense_vector(3))
        y = layer.fc(x,
                     size=2,
                     bias_attr=False,
                     param_attr=ParamAttr(
                         name="fc.w", initializer=initializer))
        params = parameters.create(y)
        val = params["fc.w"]
        assert val.shape == (3, 2)
        expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32)
        assert numpy.logical_and.reduce(numpy.reshape(val == expected, 6))
Example #17
    def test_multiple_features_tuple(self):
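        # Same multiple-feature check as above, but each sample is a tuple
        # instead of a list.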
        batch_size = 2
        data = []
        for i in xrange(batch_size):
            a = np.random.randint(10)
            b = self.sparse_binary_reader(20000, 40, non_empty=True)
            c = self.dense_reader(100)
            each_sample = (a, b, c)
            data.append(each_sample)

        # test multiple features
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
        arg = feeder(data)
        out_dense = arg.getSlotValue(0).copyToNumpyMat()
        out_sparse = arg.getSlotValue(1)
        out_index = arg.getSlotIds(2).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(out_dense[i].all(), data[i][2].all())
            self.assertEqual(out_sparse.getSparseRowCols(i), data[i][1])
            self.assertEqual(out_index[i], data[i][0])
Example #18
    def test_data_type(self):
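        # Topology.data_type() should report both data layers with the correct
        # DataType and dimension.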
        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
        label = layer.data(name='label', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        cost = layer.classification_cost(input=inference, label=label)
        topo = topology.Topology(cost)
        data_types = topo.data_type()
        self.assertEqual(len(data_types), 2)
        pixel_data_type = filter(lambda type: type[0] == "pixel", data_types)
        self.assertEqual(len(pixel_data_type), 1)
        pixel_data_type = pixel_data_type[0]
        self.assertEqual(pixel_data_type[1].type, pydp2.DataType.Dense)
        self.assertEqual(pixel_data_type[1].dim, 784)

        label_data_type = filter(lambda type: type[0] == "label", data_types)
        self.assertEqual(len(label_data_type), 1)
        label_data_type = label_data_type[0]
        self.assertEqual(label_data_type[1].type, pydp2.DataType.Index)
        self.assertEqual(label_data_type[1].dim, 10)
Example #19
 def test_vgg(self):
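     # Instantiate the small_vgg network helper on a dense image input and
     # parse the resulting network.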
     img = layer.data(name='pixel', type=data_type.dense_vector(784))
     vgg_out = networks.small_vgg(input_image=img,
                                  num_channels=1,
                                  num_classes=2)
     print layer.parse_network(vgg_out)
Example #20
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
import paddle.v2.networks as networks
import paddle.v2.evaluator as evaluator

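# Module-level input layers and a small example network definition.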
pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(1))
combine_weight = layer.data(
    name='weight_combine', type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

hidden = layer.fc(input=pixel,
                  size=100,
                  act=activation.Sigmoid(),
                  param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
conv = layer.img_conv(
    input=pixel,
    filter_size=1,
    filter_size_y=1,
Example #21
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
import paddle.v2.networks as networks

pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(1))
combine_weight = layer.data(name='weight_combine',
                            type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

hidden = layer.fc(input=pixel,
                  size=100,
                  act=activation.Sigmoid(),
                  param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
conv = layer.img_conv(input=pixel,
                      filter_size=1,
                      filter_size_y=1,
                      num_channels=8,
Example #22
 def test_vgg(self):
     img = layer.data(name='pixel1', type=data_type.dense_vector(784))
     vgg_out = networks.small_vgg(
         input_image=img, num_channels=1, num_classes=2)
     print layer.parse_network(vgg_out)