# dict_dim, label_dim, word_dim and hidden_dim are module-level constants
# defined elsewhere in the test file.
def parse_new_rnn():
    data = layer.data(
        name="word", type=data_type.dense_vector(dict_dim))
    label = layer.data(
        name="label", type=data_type.dense_vector(label_dim))
    emb = layer.embedding(input=data, size=word_dim)
    boot_layer = layer.data(
        name="boot", type=data_type.dense_vector(10))
    boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

    def step(y, wid):
        z = layer.embedding(input=wid, size=word_dim)
        mem = layer.memory(
            name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
        # The fc layer is also named "rnn_state", so its output becomes the
        # value of the memory at the next time step.
        out = layer.fc(
            input=[y, z, mem],
            size=hidden_dim,
            act=activation.Tanh(),
            bias_attr=True,
            name="rnn_state")
        return out

    out = layer.recurrent_group(name="rnn", step=step, input=[emb, data])
    rep = layer.last_seq(input=out)
    prob = layer.fc(
        size=label_dim,
        input=rep,
        act=activation.Softmax(),
        bias_attr=True)
    cost = layer.classification_cost(input=prob, label=label)
    return str(layer.parse_network(cost))
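# Minimal usage sketch; the helper below and its name are assumptions, not
# taken from the test above. parse_new_rnn() returns the text form of the
# parsed network protobuf, so a smoke check only needs to verify that it is
# non-empty.
def check_new_rnn_config():
    config_str = parse_new_rnn()
    assert len(config_str) > 0
    return config_str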
def test_evaluator(self):
    img = layer.data(name='pixel2', type=data_type.dense_vector(784))
    output = layer.fc(
        input=img, size=10, act=activation.Softmax(), name='fc_here')
    lbl = layer.data(name='label2', type=data_type.integer_value(10))
    cost = layer.cross_entropy_cost(input=output, label=lbl)

    # Attach a classification-error evaluator to the topology, then print the
    # parsed configuration both with and without the cost layer.
    evaluator.classification_error(input=output, label=lbl)
    print layer.parse_network(cost)
    print layer.parse_network(output)
def network(self):
    """
    Implements the detail of the model: the question and answer encodings are
    matched by a pair of recurrent groups (forward and reversed), and the
    concatenated match sequence is regularized with dropout, max-pooled, and
    fed to a softmax classifier.
    """
    self.check_and_create_data()
    self.create_shared_params()
    q_enc = self.get_enc(self.q_ids, type='q')
    a_enc = self.get_enc(self.a_ids, type='q')

    q_proj_left = layer.fc(
        size=self.emb_dim * 2,
        bias_attr=False,
        param_attr=Attr.Param(self.name + '_left.wq'),
        input=q_enc)
    q_proj_right = layer.fc(
        size=self.emb_dim * 2,
        bias_attr=False,
        param_attr=Attr.Param(self.name + '_right.wq'),
        input=q_enc)
    left_match = self.recurrent_group(
        self.name + '_left',
        [layer.StaticInput(q_enc),
         layer.StaticInput(q_proj_left), a_enc],
        reverse=False)
    right_match = self.recurrent_group(
        self.name + '_right',
        [layer.StaticInput(q_enc),
         layer.StaticInput(q_proj_right), a_enc],
        reverse=True)

    match_seq = layer.concat(input=[left_match, right_match])
    # Dropout is applied through a mixed layer with an identity projection.
    with layer.mixed(
            size=match_seq.size,
            act=Act.Identity(),
            layer_attr=Attr.ExtraLayerAttribute(drop_rate=0.2),
            bias_attr=False) as dropped:
        dropped += layer.identity_projection(match_seq)
    match_result = layer.pooling(
        input=dropped, pooling_type=paddle.pooling.Max())
    cls = layer.fc(
        input=match_result, act=Act.Softmax(), size=self.label_dim)
    return cls
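# Hedged usage sketch: the helper below is an assumption about how network()
# is consumed, not part of the class above (it also assumes layer and
# data_type are imported as in the other snippets). The softmax output is
# typically paired with an integer label and a classification cost.
def build_cost(model):
    lbl = layer.data(
        name='cls_label', type=data_type.integer_value(model.label_dim))
    return layer.classification_cost(input=model.network(), label=lbl)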
import unittest

import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
import paddle.v2.networks as networks
import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type

pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(1))
combine_weight = layer.data(
    name='weight_combine', type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

hidden = layer.fc(
    input=pixel,
    size=100,
    act=activation.Sigmoid(),
    param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
conv = layer.img_conv(
    input=pixel,
    filter_size=1,
    filter_size_y=1,
    num_channels=8,
    num_filters=16,
    act=activation.Linear())


class ImageLayerTest(unittest.TestCase):
    def test_conv_layer(self):
        conv_shift = layer.conv_shift(a=pixel, b=score)
        print layer.parse_network(conv, conv_shift)

    def test_pooling_layer(self):
        maxpool = layer.img_pool(
            input=conv,