def parse_new_rnn():
    """Build a new-style RNN classifier and return its serialized config.

    Relies on dict_dim, label_dim, word_dim and hidden_dim being defined in
    the enclosing scope.
    """
    word = layer.data(name="word", type=data_type.dense_vector(dict_dim))
    lbl = layer.data(name="label", type=data_type.dense_vector(label_dim))
    emb = layer.embedding(input=word, size=word_dim)
    boot = layer.data(name="boot", type=data_type.dense_vector(10))
    boot = layer.fc(name='boot_fc', input=boot, size=10)

    def step(y, wid):
        # One recurrent step: embed the word id, read the previous state,
        # and produce the next state through a tanh fc layer.
        z = layer.embedding(input=wid, size=word_dim)
        state = layer.memory(name="rnn_state", size=hidden_dim,
                             boot_layer=boot)
        return layer.fc(input=[y, z, state],
                        size=hidden_dim,
                        act=activation.Tanh(),
                        bias_attr=True,
                        name="rnn_state")

    rnn = layer.recurrent_group(name="rnn", step=step, input=[emb, word])
    rep = layer.last_seq(input=rnn)
    prob = layer.fc(size=label_dim,
                    input=rep,
                    act=activation.Softmax(),
                    bias_attr=True)
    cost = layer.classification_cost(input=prob, label=lbl)
    return str(layer.parse_network(cost))
def test_op(self):
    """Every unary op wrapper and arithmetic overload should build a network."""
    x = layer.data(name='data', type=data_type.dense_vector(128))
    # Apply each unary wrapper once, in the original order.
    for unary in (op.exp, op.sqrt, op.reciprocal, op.log, op.abs,
                  op.sigmoid, op.tanh, op.square, op.relu):
        x = unary(x)
    # Scalar arithmetic, both operand orders.
    y = 1 + x
    y = y + 1
    y = x + y
    y = y - x
    y = y - 2
    y = 2 - y
    y = 2 * y
    y = y * 3
    # Layer-with-layer arithmetic against a second data input.
    z = layer.data(name='data_2', type=data_type.dense_vector(1))
    y = y * z
    y = z * y
    y = y + z
    y = z + y
    print(layer.parse_network(y))
def test_operator(self):
    """dotmul and conv operators work both as mixed() input and via +=."""
    ipt0 = layer.data(name='data1', type=data_type.dense_vector(784))
    ipt1 = layer.data(name='word1', type=data_type.dense_vector(128))
    fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

    dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
    dotmul0 = layer.mixed(input=dotmul_op)
    with layer.mixed() as dotmul1:
        dotmul1 += dotmul_op

    conv = layer.conv_operator(
        img=ipt0,
        filter=ipt1,
        filter_size=1,
        num_channels=1,
        num_filters=128,
        stride=1,
        padding=0)
    conv0 = layer.mixed(input=conv)
    with layer.mixed() as conv1:
        conv1 += conv

    for net in (dotmul0, dotmul1, conv0, conv1):
        print(layer.parse_network(net))
def parse_new_rnn():
    """Reset the parser, build the new-style RNN net and serialize it.

    dict_dim, label_dim, word_dim and hidden_dim come from the enclosing
    scope.
    """
    reset_parser()
    data = layer.data(name="word", type=data_type.dense_vector(dict_dim))
    label = layer.data(name="label", type=data_type.dense_vector(label_dim))
    emb = layer.embedding(input=data, size=word_dim)
    boot_layer = layer.data(name="boot", type=data_type.dense_vector(10))
    boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

    def step(y, wid):
        # Per-step cell: combine input, embedded word id and previous state.
        z = layer.embedding(input=wid, size=word_dim)
        mem = layer.memory(name="rnn_state",
                           size=hidden_dim,
                           boot_layer=boot_layer)
        return layer.fc(input=[y, z, mem],
                        size=hidden_dim,
                        act=activation.Tanh(),
                        bias_attr=True,
                        name="rnn_state")

    out = layer.recurrent_group(name="rnn", step=step, input=[emb, data])
    rep = layer.last_seq(input=out)
    prob = layer.fc(size=label_dim,
                    input=rep,
                    act=activation.Softmax(),
                    bias_attr=True)
    cost = layer.classification_cost(input=prob, label=label)
    return str(layer.parse_network(cost))
def test_operator(self):
    """dotmul and conv operators parse both as mixed() input and via +=."""
    ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
    ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
    fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

    dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
    dotmul0 = layer.mixed(input=dotmul_op)
    with layer.mixed() as dotmul1:
        dotmul1 += dotmul_op

    conv = layer.conv_operator(img=ipt0,
                               filter=ipt1,
                               filter_size=1,
                               num_channels=1,
                               num_filters=128,
                               stride=1,
                               padding=0)
    conv0 = layer.mixed(input=conv)
    with layer.mixed() as conv1:
        conv1 += conv

    print(layer.parse_network(dotmul0))
    print(layer.parse_network(dotmul1))
    print(layer.parse_network(conv0))
    print(layer.parse_network(conv1))
def check_and_create_data(self):
    """
    Checks that the input fields match the expected schema and creates
    the data layers: q_ids, a_ids, and (when training) the label.
    """
    if self.is_infer:
        expected, min_fields = ['q_ids', 'a_ids'], 2
    else:
        expected, min_fields = ['q_ids', 'a_ids', 'label'], 3
    if len(self.inputs) < min_fields:
        raise ValueError('''Input schema: expected vs given: {} vs {}'''.format(expected, self.inputs))
    if not self.is_infer:
        # Labels are only present outside inference mode.
        self.label = layer.data(name=self.inputs[2],
                                type=data_type.integer_value(4))
    self.q_ids = layer.data(
        name=self.inputs[0],
        type=data_type.integer_value_sequence(self.vocab_size))
    self.a_ids = layer.data(
        name=self.inputs[1],
        type=data_type.integer_value_sequence(self.vocab_size))
def check_and_create_data(self):
    """
    Checks that the input fields match the expected schema and creates
    the data layers (q_ids, a_ids, plus the label when training).
    """
    if self.is_infer:
        expected = ['q_ids', 'a_ids']
        if len(self.inputs) < 2:
            raise ValueError('''Input schema: expected vs given: {} vs {}'''.format(expected, self.inputs))
    else:
        expected = ['q_ids', 'a_ids', 'label']
        if len(self.inputs) < 3:
            raise ValueError('''Input schema: expected vs given: {} vs {}'''.format(expected, self.inputs))
        # The label field only exists outside inference mode.
        self.label = layer.data(name=self.inputs[2],
                                type=data_type.integer_value(4))
    self.q_ids = layer.data(
        name=self.inputs[0],
        type=data_type.integer_value_sequence(self.vocab_size))
    self.a_ids = layer.data(
        name=self.inputs[1],
        type=data_type.integer_value_sequence(self.vocab_size))
def test_projection(self):
    """Every projection type parses both as mixed() input and via +=."""
    img = layer.data(name='data', type=data_type.dense_vector(784))
    word = layer.data(name='word',
                      type=data_type.integer_value_sequence(10000))
    fc0 = layer.fc(input=img, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=img, size=200, act=activation.Sigmoid())

    mixed0 = layer.mixed(
        size=256,
        input=[
            layer.full_matrix_projection(input=fc0),
            layer.full_matrix_projection(input=fc1)
        ])
    with layer.mixed(size=200) as mixed1:
        mixed1 += layer.full_matrix_projection(input=fc0)
        mixed1 += layer.identity_projection(input=fc1)

    table = layer.table_projection(input=word)
    emb0 = layer.mixed(size=512, input=table)
    with layer.mixed(size=512) as emb1:
        emb1 += table

    scale = layer.scaling_projection(input=fc0)
    scale0 = layer.mixed(size=100, input=scale)
    with layer.mixed(size=100) as scale1:
        scale1 += scale

    dotmul = layer.dotmul_projection(input=fc0)
    dotmul0 = layer.mixed(size=100, input=dotmul)
    with layer.mixed(size=100) as dotmul1:
        dotmul1 += dotmul

    context = layer.context_projection(input=fc0, context_len=5)
    context0 = layer.mixed(size=100, input=context)
    with layer.mixed(size=100) as context1:
        context1 += context

    conv = layer.conv_projection(input=img,
                                 filter_size=1,
                                 num_channels=1,
                                 num_filters=128,
                                 stride=1,
                                 padding=0)
    conv0 = layer.mixed(input=conv, bias_attr=True)
    with layer.mixed(bias_attr=True) as conv1:
        conv1 += conv

    for net in (mixed0, mixed1, emb0, emb1, scale0, scale1,
                dotmul0, dotmul1, conv0, conv1):
        print(layer.parse_network(net))
def test_projection(self):
    """Every projection type parses both as mixed() input and via +=."""
    data_in = layer.data(name='data2', type=data_type.dense_vector(784))
    word_in = layer.data(name='word2',
                         type=data_type.integer_value_sequence(10000))
    fc0 = layer.fc(input=data_in, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=data_in, size=200, act=activation.Sigmoid())

    mixed0 = layer.mixed(
        size=256,
        input=[
            layer.full_matrix_projection(input=fc0),
            layer.full_matrix_projection(input=fc1)
        ])
    with layer.mixed(size=200) as mixed1:
        mixed1 += layer.full_matrix_projection(input=fc0)
        mixed1 += layer.identity_projection(input=fc1)

    table = layer.table_projection(input=word_in)
    emb0 = layer.mixed(size=512, input=table)
    with layer.mixed(size=512) as emb1:
        emb1 += table

    scale = layer.scaling_projection(input=fc0)
    scale0 = layer.mixed(size=100, input=scale)
    with layer.mixed(size=100) as scale1:
        scale1 += scale

    dotmul = layer.dotmul_projection(input=fc0)
    dotmul0 = layer.mixed(size=100, input=dotmul)
    with layer.mixed(size=100) as dotmul1:
        dotmul1 += dotmul

    # Note: context projections here use size 500 (context_len 5 * fc0's 100).
    context = layer.context_projection(input=fc0, context_len=5)
    context0 = layer.mixed(size=500, input=context)
    with layer.mixed(size=500) as context1:
        context1 += context

    conv = layer.conv_projection(input=data_in,
                                 filter_size=1,
                                 num_channels=1,
                                 num_filters=128,
                                 stride=1,
                                 padding=0)
    conv0 = layer.mixed(input=conv, bias_attr=True)
    with layer.mixed(bias_attr=True) as conv1:
        conv1 += conv

    print(layer.parse_network(mixed0))
    print(layer.parse_network(mixed1))
    print(layer.parse_network(emb0))
    print(layer.parse_network(emb1))
    print(layer.parse_network(scale0))
    print(layer.parse_network(scale1))
    print(layer.parse_network(dotmul0))
    print(layer.parse_network(dotmul1))
    print(layer.parse_network(conv0))
    print(layer.parse_network(conv1))
def _declare_input_layers(self):
    """Declare the sparse DNN/LR feature inputs and the dense click label."""
    # Sparse binary feature vector feeding the DNN branch.
    self.dnn_merged_input = layer.data(
        name='dnn_input',
        type=paddle.data_type.sparse_binary_vector(self.dnn_input_dim))
    # Sparse binary feature vector feeding the LR branch.
    self.lr_merged_input = layer.data(
        name='lr_input',
        type=paddle.data_type.sparse_binary_vector(self.lr_input_dim))
    # Single-value dense label.
    self.click = paddle.layer.data(name='click',
                                   type=dtype.dense_vector(1))
def test_evaluator(self):
    """Attaching a classification_error evaluator keeps the net parseable."""
    img = layer.data(name='pixel2', type=data_type.dense_vector(784))
    out = layer.fc(input=img,
                   size=10,
                   act=activation.Softmax(),
                   name='fc_here')
    lbl = layer.data(name='label2', type=data_type.integer_value(10))
    cost = layer.cross_entropy_cost(input=out, label=lbl)
    evaluator.classification_error(input=out, label=lbl)
    print(layer.parse_network(cost))
    print(layer.parse_network(out))
def test_get_layer(self):
    """Topology.get_layer must return the exact layer objects by name."""
    pix = layer.data(name='pixel2', type=data_type.dense_vector(784))
    lbl = layer.data(name='label2', type=data_type.integer_value(10))
    hidden = layer.fc(input=pix,
                      size=100,
                      act=conf_helps.SigmoidActivation())
    inference = layer.fc(input=hidden,
                         size=10,
                         act=conf_helps.SoftmaxActivation())
    cost = layer.classification_cost(input=inference, label=lbl)
    topo = topology.Topology(cost)
    self.assertEqual(topo.get_layer("pixel2"), pix)
    self.assertEqual(topo.get_layer("label2"), lbl)
def check_and_create_data(self):
    """
    Checks that the input fields match the expected schema and creates the
    data layers: question ids, per-document paragraph ids and lengths,
    plus concatenated start/end label sequences when training.
    """
    if self.is_infer:
        expected = ['q_ids', 'p_ids', 'para_length',
                    '[start_label, end_label, ...]']
        if len(self.inputs) < 2 * self.doc_num + 1:
            raise ValueError(r'''Input schema: expected vs given: {} vs {}'''.format(expected, self.inputs))
    else:
        expected = ['q_ids', 'p_ids', 'para_length',
                    'start_label', 'end_label', '...']
        if len(self.inputs) < 4 * self.doc_num + 1:
            raise ValueError(r'''Input schema: expected vs given: {} vs {}'''.format(expected, self.inputs))
        # One start/end label sequence per document, concatenated along
        # the sequence axis via pairwise seq_concat.
        self.start_labels = [
            layer.data(name=self.inputs[i],
                       type=data_type.dense_vector_sequence(1))
            for i in range(1 + 2 * self.doc_num, 1 + 3 * self.doc_num)
        ]
        self.start_label = reduce(lambda x, y: layer.seq_concat(a=x, b=y),
                                  self.start_labels)
        self.end_labels = [
            layer.data(name=self.inputs[i],
                       type=data_type.dense_vector_sequence(1))
            for i in range(1 + 3 * self.doc_num, 1 + 4 * self.doc_num)
        ]
        self.end_label = reduce(lambda x, y: layer.seq_concat(a=x, b=y),
                                self.end_labels)
    self.q_ids = layer.data(
        name=self.inputs[0],
        type=data_type.integer_value_sequence(self.vocab_size))
    self.p_ids = [
        layer.data(name=self.inputs[i],
                   type=data_type.integer_value_sequence(self.vocab_size))
        for i in range(1, 1 + self.doc_num)
    ]
    self.para_lens = [
        layer.data(name=self.inputs[i],
                   type=data_type.dense_vector_sequence(1))
        for i in range(1 + self.doc_num, 1 + 2 * self.doc_num)
    ]
    self.para_len = reduce(lambda x, y: layer.seq_concat(a=x, b=y),
                           self.para_lens)
def __declare_input_layers__(self):
    '''
    Define the input layers: the image as a flat float vector and, when
    not inferring, the label as an ID sequence.
    '''
    # Image input as a float vector with explicit height/width metadata.
    self.image = layer.data(
        name='image',
        type=paddle.data_type.dense_vector(self.image_vector_size),
        height=self.shape[0],
        width=self.shape[1])

    # Label input as an ID list; only needed outside inference.
    if not self.is_infer:
        self.label = layer.data(
            name='label',
            type=paddle.data_type.integer_value_sequence(self.num_classes))
def get_cnn_input(name, size, channel):
    """Create an image data layer and return it keyed by its name.

    :param name: name of the data layer.
    :param size: (height, width) pair of the image.
    :param channel: number of image channels.
    :return: dict mapping ``name`` to the created data layer.
    """
    # Renamed from `input`, which shadowed the builtin of the same name.
    image = pd.data(
        name=name,
        type=pd.data_type.dense_vector(channel * size[0] * size[1]),
        height=size[0],
        width=size[1])
    return {name: image}
def __declare_input_layers__(self): ''' 定义输入层 ''' # 图像输入为一个浮动向量 self.image = layer.data(name='image', type=paddle.data_type.dense_vector( self.image_vector_size), height=self.shape[1], width=self.shape[0]) # 将标签输入为ID列表 if not self.is_infer: self.label = layer.data( name='label', type=paddle.data_type.integer_value_sequence(self.num_classes))
def _declare_input_layers(self):
    # Sparse binary (0/1) vector feeding the DNN branch: most entries are
    # zero and present entries must be 1.
    # Type is InputType(dim=self.dnn_input_dim, seq_type=NO_SEQUENCE,
    # type=DataType.SparseNonValue); dnn_input_dim was 61 in the original.
    self.dnn_merged_input = layer.data(
        name='dnn_input',
        type=paddle.data_type.sparse_binary_vector(
            self.dnn_input_dim))
    # Sparse float vector feeding the LR branch.
    # Type is InputType(dim=self.lr_input_dim, seq_type=NO_SEQUENCE,
    # type=DataType.SparseValue); lr_input_dim was 10040001 in the original.
    self.lr_merged_input = layer.data(
        name='lr_input',
        type=paddle.data_type.sparse_float_vector(self.lr_input_dim))
    # Dense float vector holding the click label; only declared when
    # not running inference.
    if not self.is_infer:
        self.click = paddle.layer.data(
            name='click', type=dtype.dense_vector(1))
def test_parse(self):
    """Topology.proto accepts single outputs, lists and mixed output lists."""
    pixel = layer.data(name='pixel3', type=data_type.dense_vector(784))
    label = layer.data(name='label3', type=data_type.integer_value(10))
    hidden = layer.fc(input=pixel,
                      size=100,
                      act=conf_helps.SigmoidActivation())
    inference = layer.fc(input=hidden,
                         size=10,
                         act=conf_helps.SoftmaxActivation())
    maxid = layer.max_id(input=inference)
    cost1 = layer.classification_cost(input=inference, label=label)
    cost2 = layer.cross_entropy_cost(input=inference, label=label)

    # Same output combinations as before, checked in the same order.
    for outputs in (cost2, [cost1], [cost1, cost2], [inference, maxid]):
        topology.Topology(outputs).proto()
def test_data_type(self):
    """Topology.data_type reports the name, kind and dim of each input.

    The original used py2's list-returning ``filter`` (``len()`` on its
    result breaks on py3) with a lambda whose parameter shadowed the
    builtin ``type``; list comprehensions avoid both problems with
    identical behavior.
    """
    pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
    label = layer.data(name='label', type=data_type.integer_value(10))
    hidden = layer.fc(input=pixel,
                      size=100,
                      act=conf_helps.SigmoidActivation())
    inference = layer.fc(input=hidden,
                         size=10,
                         act=conf_helps.SoftmaxActivation())
    cost = layer.classification_cost(input=inference, label=label)
    topo = topology.Topology(cost)
    data_types = topo.data_type()
    self.assertEqual(len(data_types), 2)

    pixel_data_type = [t for t in data_types if t[0] == "pixel"]
    self.assertEqual(len(pixel_data_type), 1)
    pixel_data_type = pixel_data_type[0]
    self.assertEqual(pixel_data_type[1].type, pydp2.DataType.Dense)
    self.assertEqual(pixel_data_type[1].dim, 784)

    label_data_type = [t for t in data_types if t[0] == "label"]
    self.assertEqual(len(label_data_type), 1)
    label_data_type = label_data_type[0]
    self.assertEqual(label_data_type[1].type, pydp2.DataType.Index)
    self.assertEqual(label_data_type[1].dim, 10)
def parse_new_rnn():
    """Build a minimal recurrent group over an embedding; return its config.

    dict_dim, word_dim and hidden_dim come from the enclosing scope.
    """
    def new_step(y):
        # Read the previous state and produce the next one via a tanh fc.
        mem = layer.memory(name="rnn_state", size=hidden_dim)
        return layer.fc(input=[y, mem],
                        size=hidden_dim,
                        act=activation.Tanh(),
                        bias_attr=True,
                        name="rnn_state")

    data = layer.data(name="word", type=data_type.integer_value(dict_dim))
    embd = layer.embedding(input=data, size=word_dim)
    rnn_layer = layer.recurrent_group(name="rnn",
                                      step=new_step,
                                      input=embd)
    return str(layer.parse_network(rnn_layer))
def parse_new_rnn():
    """Build a minimal recurrent group over an embedding; return its config.

    dict_dim, word_dim and hidden_dim come from the enclosing scope.
    """
    def step_fn(y):
        prev = layer.memory(name="rnn_state", size=hidden_dim)
        out = layer.fc(input=[y, prev],
                       size=hidden_dim,
                       act=activation.Tanh(),
                       bias_attr=True,
                       name="rnn_state")
        return out

    words = layer.data(name="word", type=data_type.integer_value(dict_dim))
    embedded = layer.embedding(input=words, size=word_dim)
    rnn = layer.recurrent_group(name="rnn", step=step_fn, input=embedded)
    return str(layer.parse_network(rnn))
def test_initializer(self):
    """A custom ParamAttr initializer supplies the created parameter values."""

    def initializer(name):
        # Paddle invokes this once per parameter; only "fc.w" exists here.
        assert name == "fc.w"
        mat = numpy.ones((3, 2), dtype=numpy.float32)
        mat[1, 1] = 2
        return mat

    x = layer.data(name="x", type=data_type.dense_vector(3))
    y = layer.fc(x,
                 size=2,
                 bias_attr=False,
                 param_attr=ParamAttr(name="fc.w", initializer=initializer))
    params = parameters.create(y)
    val = params["fc.w"]
    assert val.shape == (3, 2)
    expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32)
    # array_equal checks both shape and values; clearer than the former
    # reshape + logical_and.reduce chain.
    assert numpy.array_equal(val, expected)
def test_vgg(self):
    """small_vgg on a 784-dim single-channel input should parse cleanly."""
    img = layer.data(name='pixel', type=data_type.dense_vector(784))
    vgg_out = networks.small_vgg(input_image=img,
                                 num_channels=1,
                                 num_classes=2)
    print(layer.parse_network(vgg_out))
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.v2.activation as activation import paddle.v2.attr as attr import paddle.v2.data_type as data_type import paddle.v2.layer as layer import paddle.v2.pooling as pooling import paddle.v2.networks as networks import paddle.v2.evaluator as evaluator pixel = layer.data(name='pixel', type=data_type.dense_vector(128)) label = layer.data(name='label', type=data_type.integer_value(10)) weight = layer.data(name='weight', type=data_type.dense_vector(1)) combine_weight = layer.data( name='weight_combine', type=data_type.dense_vector(10)) score = layer.data(name='score', type=data_type.dense_vector(1)) hidden = layer.fc(input=pixel, size=100, act=activation.Sigmoid(), param_attr=attr.Param(name='hidden')) inference = layer.fc(input=hidden, size=10, act=activation.Softmax()) conv = layer.img_conv( input=pixel, filter_size=1, filter_size_y=1,
def test_recurrent_layer(self):
    """recurrent, lstmemory and grumemory layers all parse together."""
    word = layer.data(name='word', type=data_type.integer_value(12))
    rnn = layer.recurrent(input=word)
    lstm = layer.lstmemory(input=word)
    gru = layer.grumemory(input=word)
    print(layer.parse_network(rnn, lstm, gru))
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import paddle.v2.activation as activation import paddle.v2.attr as attr import paddle.v2.data_type as data_type import paddle.v2.layer as layer import paddle.v2.pooling as pooling import paddle.v2.networks as networks pixel = layer.data(name='pixel', type=data_type.dense_vector(128)) label = layer.data(name='label', type=data_type.integer_value(10)) weight = layer.data(name='weight', type=data_type.dense_vector(1)) combine_weight = layer.data(name='weight_combine', type=data_type.dense_vector(10)) score = layer.data(name='score', type=data_type.dense_vector(1)) hidden = layer.fc(input=pixel, size=100, act=activation.Sigmoid(), param_attr=attr.Param(name='hidden')) inference = layer.fc(input=hidden, size=10, act=activation.Softmax()) conv = layer.img_conv(input=pixel, filter_size=1, filter_size_y=1, num_channels=8,
def test_recurrent_layer(self):
    """recurrent, lstmemory and grumemory layers parse as one output list."""
    word = layer.data(name='word', type=data_type.integer_value(12))
    rnn = layer.recurrent(input=word)
    lstm = layer.lstmemory(input=word)
    gru = layer.grumemory(input=word)
    print(layer.parse_network([rnn, lstm, gru]))
def test_vgg(self):
    """small_vgg on a 784-dim single-channel input should parse cleanly."""
    img = layer.data(name='pixel1', type=data_type.dense_vector(784))
    vgg_out = networks.small_vgg(input_image=img,
                                 num_channels=1,
                                 num_classes=2)
    print(layer.parse_network(vgg_out))