def test_shape(self):
    dims = turret.Dimensions(((13, turret.DimensionType.SEQUENCE),
                              (7, turret.DimensionType.CHANNEL),
                              (19, turret.DimensionType.SPATIAL)))
    self.assertEqual((13, 7, 19), dims.shape)
    dims = turret.Dimensions(((19, turret.DimensionType.SPATIAL), ))
    self.assertEqual((19, ), dims.shape)
def build_network(network):
    d = network.add_constant(data, turret.Dimensions.CHW(C, H, W))
    i = network.add_input(
        "indices", turret.DataType.INT32,
        turret.Dimensions(((K, turret.DimensionType.INDEX), )))
    h = L.gather(d, i, 0)
    network.mark_output("output", h)
def test_init_from_dimension(self):
    dims = turret.Dimensions(
        (turret.Dimension(11, turret.DimensionType.CHANNEL),
         turret.Dimension(17, turret.DimensionType.SPATIAL)))
    self.assertEqual(2, len(dims))
    self.assertEqual(11, dims[0].size)
    self.assertEqual(17, dims[1].size)
    self.assertEqual(turret.DimensionType.CHANNEL, dims[0].type)
    self.assertEqual(turret.DimensionType.SPATIAL, dims[1].type)
def build_network(network):
    d = network.add_input("data", turret.DataType.FLOAT,
                          turret.Dimensions.CHW(C, H, W))
    i = network.add_input(
        "indices", turret.DataType.INT32,
        turret.Dimensions(((K1, turret.DimensionType.INDEX),
                           (K2, turret.DimensionType.INDEX))))
    h = L.gather(d, i, 1)
    network.mark_output("output", h)
def test_init_from_tuple(self):
    dims = turret.Dimensions(((13, turret.DimensionType.INDEX),
                              (7, turret.DimensionType.CHANNEL),
                              (19, turret.DimensionType.SPATIAL)))
    self.assertEqual(3, len(dims))
    self.assertEqual(13, dims[0].size)
    self.assertEqual(7, dims[1].size)
    self.assertEqual(19, dims[2].size)
    self.assertEqual(turret.DimensionType.INDEX, dims[0].type)
    self.assertEqual(turret.DimensionType.CHANNEL, dims[1].type)
    self.assertEqual(turret.DimensionType.SPATIAL, dims[2].type)
def test_iterator(self):
    dims = turret.Dimensions(((13, turret.DimensionType.SEQUENCE),
                              (7, turret.DimensionType.CHANNEL),
                              (19, turret.DimensionType.SPATIAL)))
    it = iter(dims)
    dim = next(it)
    self.assertEqual(13, dim.size)
    self.assertEqual(turret.DimensionType.SEQUENCE, dim.type)
    dim = next(it)
    self.assertEqual(7, dim.size)
    self.assertEqual(turret.DimensionType.CHANNEL, dim.type)
    dim = next(it)
    self.assertEqual(19, dim.size)
    self.assertEqual(turret.DimensionType.SPATIAL, dim.type)
    self.assertRaises(StopIteration, lambda: next(it))
def build_encodeengine(encoder, batch_size, dtype, logger,
                       max_sequence_length=16, workspace_size=2**30):
    sys.stderr.write("------------------------------\n")
    sys.stderr.write(" encoder\n")
    sys.stderr.write("------------------------------\n")
    builder = turret.InferenceEngineBuilder(logger)
    network = builder.create_network(dtype)
    # extract parameters
    emb = encoder["embedding.weight"]
    weights = []
    bias = []
    weights_rev = []
    bias_rev = []
    bidirect = ("lstm.weight_hh_l0_reverse" in encoder)
    weights.append(
        _reorg_lstm_parameters(encoder["lstm.weight_ih_l0"][:],
                               encoder["lstm.weight_hh_l0"][:]))
    bias.append(
        _reorg_lstm_parameters(encoder["lstm.bias_ih_l0"][:],
                               encoder["lstm.bias_hh_l0"][:]))
    if bidirect:
        weights_rev.append(
            _reorg_lstm_parameters(encoder["lstm.weight_ih_l0_reverse"][:],
                                   encoder["lstm.weight_hh_l0_reverse"][:]))
        bias_rev.append(
            _reorg_lstm_parameters(encoder["lstm.bias_ih_l0_reverse"][:],
                                   encoder["lstm.bias_hh_l0_reverse"][:]))
    # define a network
    src = network.add_constant(emb)
    h = network.add_input(
        "words", turret.DataType.INT32,
        turret.Dimensions(((1, turret.DimensionType.INDEX),
                           (max_sequence_length, turret.DimensionType.INDEX))))
    h = L.gather(src, h, 0)
    h_lengths = network.add_input(
        "lengths", turret.DataType.INT32,
        turret.Dimensions(((1, turret.DimensionType.INDEX), )))
    if bidirect:
        context, hidden, cell = L.blstm_v2(
            h, max_sequence_length, weights, weights_rev, bias, bias_rev,
            sequence_lengths=h_lengths)
    else:
        context, hidden, cell = L.lstm_v2(
            h, max_sequence_length, weights, bias,
            sequence_lengths=h_lengths)
    network.mark_output("context", context)
    network.mark_output("hidden", hidden)
    network.mark_output("cell", cell)
    builder.max_batch_size = batch_size
    builder.max_workspace_size = workspace_size
    # build
    engine = builder.build(network)
    return engine
def build_decodeengine(decoder, batch_size, dtype, logger,
                       max_sequence_length=16, workspace_size=2**30):
    sys.stderr.write("------------------------------\n")
    sys.stderr.write(" decoder\n")
    sys.stderr.write("------------------------------\n")
    builder = turret.InferenceEngineBuilder(logger)
    network = builder.create_network(dtype)
    emb = decoder["embedding.weight"]
    hidden_size = decoder["lstm.weight_hh_l0"].shape[1]
    weights = []
    bias = []
    weights.append(
        _reorg_lstm_parameters(decoder["lstm.weight_ih_l0"],
                               decoder["lstm.weight_hh_l0"]))
    bias.append(
        _reorg_lstm_parameters(decoder["lstm.bias_ih_l0"],
                               decoder["lstm.bias_hh_l0"]))
    tgt = network.add_constant(emb)
    # Embedding and LSTM.
    h_indices_in = network.add_input(
        "indices_in", turret.DataType.INT32,
        turret.Dimensions(((1, turret.DimensionType.INDEX), )))
    h_indices_in = L.gather(tgt, h_indices_in, 0)
    h_indices_in = L.reshape(h_indices_in,
                             turret.Dimensions.CHW(1, 1, hidden_size))
    h_hidden = network.add_input("hidden_in", turret.DataType.FLOAT,
                                 turret.Dimensions.CHW(1, 1, hidden_size))
    h_cell = network.add_input("cell_in", turret.DataType.FLOAT,
                               turret.Dimensions.CHW(1, 1, hidden_size))
    h, h_hidden, h_cell = L.lstm_v2(h_indices_in, 1, weights, bias,
                                    hidden_state=h_hidden, cell_state=h_cell)
    network.mark_output("hidden_out", h_hidden)
    network.mark_output("cell_out", h_cell)
    # Attention.
    h_hidden_enc = network.add_input(
        "enc_hidden", turret.DataType.FLOAT,
        turret.Dimensions.CHW(1, max_sequence_length, hidden_size))
    h_attn_w = L.elementwise(h_hidden_enc, h_hidden,
                             turret.ElementWiseOperation.PROD)
    h_attn_w = L.reduce(h_attn_w, turret.ReduceOperation.SUM, axes=2)
    h_hidden_enc = L.reshape(
        h_hidden_enc,
        turret.Dimensions.HW(max_sequence_length, hidden_size))
    h_context = L.matrix_multiply(h_attn_w, False, h_hidden_enc, False)
    h_context = L.softmax(h_context)
    h_context = L.reshape(
        h_context,
        turret.Dimensions.CHW(1, h_context.dimensions.shape[0],
                              h_context.dimensions.shape[1]))
    h = L.concat([h, h_context], axis=2)
    # Out, softmax, and log.
    out_weights = decoder["out.weight"][:]
    out_bias = decoder["out.bias"][:]
    h = L.fully_connected(h, out_weights, out_bias)
    h = L.softmax(h)
    h = L.unary(h, turret.UnaryOperation.LOG)
    h = L.reshape(
        h,
        turret.Dimensions(
            ((h.dimensions.shape[0], turret.DimensionType.SPATIAL), )))
    _, h_indices_out = L.top_k(h, turret.TopKOperation.MAX, 1, 1)
    h_indices_out.dimensions  # If this line is removed, an error occurs.
    network.mark_output("indices_out", h_indices_out)
    builder.max_batch_size = batch_size
    builder.max_workspace_size = workspace_size
    # build
    engine = builder.build(network)
    return engine
def test_size(self):
    dims = turret.Dimensions(((13, turret.DimensionType.SEQUENCE),
                              (7, turret.DimensionType.CHANNEL),
                              (19, turret.DimensionType.SPATIAL)))
    self.assertEqual(13 * 7 * 19, dims.size)
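# Illustrative sketch (not part of the original suite): cross-check that the
# size property equals the product of the per-dimension sizes obtained by
# iterating over the Dimensions object, tying together the iterator and size
# behaviors exercised above.  The test name below is hypothetical.
def test_size_matches_product_of_iterated_sizes(self):
    import functools
    import operator
    dims = turret.Dimensions(((13, turret.DimensionType.SEQUENCE),
                              (7, turret.DimensionType.CHANNEL),
                              (19, turret.DimensionType.SPATIAL)))
    product = functools.reduce(operator.mul, (d.size for d in dims), 1)
    self.assertEqual(product, dims.size)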
def from_too_small_tuple():
    return turret.Dimensions(((1, ), (2, )))
def from_too_large_tuple():
    return turret.Dimensions(((3, turret.DimensionType.SPATIAL, None), ))
def from_valid_and_int():
    return turret.Dimensions(((2, turret.DimensionType.SPATIAL), 3))
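# Illustrative sketch (an assumption, not from the original tests): the three
# helpers above are presumably exercised with assertRaises inside a
# unittest.TestCase.  The exact exception type raised by turret.Dimensions for
# malformed input is assumed here to be TypeError or ValueError.
def test_init_from_invalid_tuples(self):
    for factory in (from_too_small_tuple,
                    from_too_large_tuple,
                    from_valid_and_int):
        with self.assertRaises((TypeError, ValueError)):
            factory()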