def test_scalar_shape():
    from utensor_cgen.frontend.tensorflow import GraphDefParser

    graph = tf.Graph()
    with graph.as_default():
        tf.constant(1, dtype=tf.float32, name='x')
    parser = GraphDefParser({})
    ugraph = parser.parse(graph.as_graph_def(), output_nodes=['x'])
    # the shape of a scalar tensor should be an empty list
    out_tensor = ugraph.ops_info['x'].output_tensors[0]
    assert out_tensor.shape == []
    assert out_tensor.dtype is np.dtype('float32')

def test_normal_tensor_shape():
    from utensor_cgen.frontend.tensorflow import GraphDefParser

    shape = np.random.randint(1, 10, size=(10,)).tolist()
    graph = tf.Graph()
    with graph.as_default():
        tf.constant(np.random.rand(*shape), dtype=tf.float32, name='x')
    parser = GraphDefParser({})
    ugraph = parser.parse(graph.as_graph_def(), output_nodes=['x'])
    # deterministic shape: the parsed shape should match exactly
    out_tensor = ugraph.ops_info['x'].output_tensors[0]
    assert out_tensor.shape == shape, \
        'expecting {}, got {}'.format(shape, out_tensor.shape)
    assert out_tensor.dtype is np.dtype('float32')

def test_dropout_trans_1_1(dropout_graph_tuple):
    (graph_def,
     (rate_name, dropout_output_name),
     output_nodes) = dropout_graph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes=output_nodes)
    transformer = DropoutTransformer()
    assert transformer.prune_graph
    new_ugraph = transformer.transform(ugraph)
    for op in new_ugraph.ops_info.values():
        assert op.ugraph
    out_op = new_ugraph.ops_info[output_nodes[0]]
    # all dropout nodes should be gone
    assert set([str(op.name) for op in out_op.input_nodes]) == set(['x', 'bias'])
    graph_1 = tf.Graph()
    graph_2 = tf.Graph()
    with graph_1.as_default():
        tf.import_graph_def(ugraph.graph_def, name='')
    with graph_2.as_default():
        tf.import_graph_def(new_ugraph.graph_def, name='')
    with tf.Session(graph=graph_1):
        rate = graph_1.get_tensor_by_name(rate_name)
        dropout_output = graph_1.get_tensor_by_name(dropout_output_name)
        output = graph_1.get_tensor_by_name(output_nodes[0] + ':0')
        # the dropout ops should not survive the transformation
        assert rate.op.name not in new_ugraph.ops_info
        assert dropout_output.op.name not in new_ugraph.ops_info
        output_1 = output.eval({rate: 0.0})
    with tf.Session(graph=graph_2):
        output = graph_2.get_tensor_by_name(output_nodes[0] + ':0')
        output_2 = output.eval()
    # rate == 0.0 is keep_prob == 1.0, so both graphs should
    # produce identical outputs
    assert (output_1 == output_2).all()

def __call__(self, match):
    graph = tf.Graph()
    subj_pool_name = match.patrn2subj_op_map['max_pool'].name
    subj_pool_op = match.subject_ugraph[subj_pool_name]
    ksize = subj_pool_op.op_attr['ksize'].value.ints_value[:]
    strides = subj_pool_op.op_attr['strides'].value.ints_value[:]
    padding = subj_pool_op.op_attr['padding'].value
    with graph.as_default():
        dummy_input = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 3])
        max_pool = tf.nn.max_pool(
            dummy_input, ksize=ksize, strides=strides,
            padding=padding, name='max_pool'
        )
        tf.nn.relu(max_pool, name='relu')
    ugraph = GraphDefParser(config={}).parse(graph.as_graph_def(), output_nodes=['relu'])
    ugraph['max_pool'].replace_with_null_input_tensor(0)
    ugraph = prune_graph(ugraph)
    topologic_order_graph(ugraph)
    input_map = {
        match.pattern_ugraph['relu'].input_tensors[0]: ugraph['max_pool'].input_tensors[0]
    }
    output_map = {
        match.pattern_ugraph['max_pool'].output_tensors[0]: ugraph['relu'].output_tensors[0]
    }
    return ugraph, input_map, output_map

def subject_ugraph_1():
    graph = tf.Graph()
    with graph.as_default():
        input_1 = tf.placeholder(dtype=tf.float32, shape=[None, 512, 512, 10], name='input_1')
        relu_1 = tf.nn.relu(input_1, name='relu_1')
        max_pool_1 = tf.nn.max_pool(
            relu_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
            padding='VALID', name='pool_1'
        )
        input_2 = tf.placeholder(dtype=tf.float32, shape=[None, 512, 512, 10], name='input_2')
        relu_2 = tf.nn.relu(input_2, name='relu_2')
        max_pool_2 = tf.nn.max_pool(
            relu_2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
            padding='VALID', name='pool_2'
        )
        output = tf.add(max_pool_1, max_pool_2, name='output')
    subj_ugraph = GraphDefParser(config={}).parse(
        graph.as_graph_def(), output_nodes=[output.op.name]
    )
    return subj_ugraph

def pattern_ugraph(self):
    graph = tf.Graph()
    with graph.as_default():
        dummy_input = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 3], name='dummy_input')
        dummy_weight = tf.zeros([32, 32, 3, 10], dtype=tf.float32, name='dummy_weight')
        conv = tf.nn.conv2d(
            dummy_input, dummy_weight,
            strides=[1, 2, 2, 1], padding='VALID', name='conv'
        )
        maxpool = tf.nn.max_pool(
            conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
            padding='VALID', name='maxpool'
        )
    ugraph = GraphDefParser(config={}).parse(
        graph.as_graph_def(), output_nodes=[maxpool.op.name]
    )
    quant_ugraph = QuantizeTransformer().transform(ugraph)
    patrn_ugraph = deepcopy(quant_ugraph)
    quant_conv_op = patrn_ugraph['conv/eightbit']
    # detach all inputs of the quantized conv op so the pattern
    # matches regardless of what feeds it
    for i, _ in enumerate(quant_conv_op.input_tensors):
        quant_conv_op.replace_with_null_input_tensor(i)
    patrn_ugraph.output_nodes = ['maxpool/eightbit']
    patrn_ugraph = prune_graph(patrn_ugraph)
    topologic_order_graph(patrn_ugraph)
    return patrn_ugraph

def gen_vgg_graph():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(dtype=tf.float32, shape=[None, 2048, 2048, 3], name='input_x')
        in_feat = x
        num_layers = sample([3, 4, 5], 1)[0]
        for i in range(1, num_layers + 1):
            ksize = sample([2, 3, 5], 1)[0]
            in_channel = in_feat.shape.as_list()[-1]
            out_channel = sample([3, 5, 10], 1)[0]
            stride = sample([1, 2], 1)[0]
            kernel = tf.constant(
                np.random.rand(ksize, ksize, in_channel, out_channel),
                dtype=tf.float32,
                name='kernel_{}'.format(i)
            )
            in_feat = tf.nn.conv2d(
                in_feat,
                kernel,
                strides=[1, stride, stride, 1],
                padding='VALID',
                name='feat_map_{}'.format(i)
            )
            in_feat = tf.nn.relu(in_feat, name='relu_{}'.format(i))
            in_feat = tf.nn.max_pool(
                in_feat,
                ksize=[1, ksize, ksize, 1],
                strides=[1, stride, stride, 1],
                name='pool_{}'.format(i),
                padding='SAME',
            )
    ugraph = GraphDefParser(config={}).parse(
        graph.as_graph_def(), output_nodes=[in_feat.op.name]
    )
    return ugraph

def test_dropout_trans_2(dropout_graph_tuple2):
    graph_def, output_nodes = dropout_graph_tuple2
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes=output_nodes)
    trans = DropoutTransformerV2()
    new_ugraph = trans.transform(ugraph)
    # everything but the input op 'x' should be pruned away
    assert len(new_ugraph.ops_info) == 1
    assert 'x' in new_ugraph.ops_info

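# For reference, a minimal sketch of what a `dropout_graph_tuple2` fixture
# could look like. The exact graph is an assumption consistent with the
# assertions above (a single op 'x' feeding a dropout block, so that removing
# dropout leaves exactly one op), not the repo's actual fixture. It assumes
# the module-level `import pytest`, `import tensorflow as tf` and
# `import numpy as np` that the tests above already rely on.
@pytest.fixture
def dropout_graph_tuple2():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.constant(np.random.rand(10, 10), dtype=tf.float32, name='x')
        rate = tf.placeholder(dtype=tf.float32, name='rate')
        dropout = tf.nn.dropout(x, rate=rate, name='dropout')
    # 'rate' only feeds the dropout block, so it is pruned along with it
    return graph.as_graph_def(), [dropout.op.name]
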
def test_ugraph_topo_order(graph_tuple):
    graph_def, output_nodes = graph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes)
    first_out, second_out = output_nodes
    meet_first = False
    for node_name in ugraph.topo_order:
        if node_name == first_out:
            meet_first = True
        if node_name == second_out:
            # the first output must precede the second in topological order
            assert meet_first

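# A guess at the `graph_tuple` fixture, consistent with the topo-order test
# above and with test_in_out_nodes further below ('x3' consumes 'x2' and
# 'bias2'; two output nodes with 'x2' upstream of 'x3'). Names and shapes
# are illustrative assumptions, not the repo's actual fixture.
@pytest.fixture
def graph_tuple():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='x')
        w1 = tf.constant(np.random.rand(5, 5), dtype=tf.float32, name='w1')
        bias1 = tf.constant(np.random.rand(5), dtype=tf.float32, name='bias1')
        x2 = tf.add(tf.matmul(x, w1, name='matmul1'), bias1, name='x2')
        bias2 = tf.constant(np.random.rand(5), dtype=tf.float32, name='bias2')
        x3 = tf.add(x2, bias2, name='x3')
    return graph.as_graph_def(), [x2.op.name, x3.op.name]
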
def test_id_rm_transform_1(id_graph_def_1):
    ugraph = GraphDefParser(config={}).parse(id_graph_def_1, output_nodes=['z'])
    optimizer = IdOpRemoveOptimizer()
    new_ugraph = optimizer.transform(ugraph)
    for op in new_ugraph.ops_info.values():
        assert op.op_type != 'Identity'
    op_z = new_ugraph.ops_info['z']
    in_op_names = set([op.name for op in op_z.input_nodes])
    assert set(['x', 'y']) == in_op_names

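# A guess at the `id_graph_def_1` fixture: 'z' consumes 'x' and 'y' through
# Identity ops, which the optimizer above is expected to remove. The graph
# below is an illustrative assumption, not the repo's actual fixture.
@pytest.fixture
def id_graph_def_1():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.constant(np.random.rand(3, 3), dtype=tf.float32, name='x')
        y = tf.constant(np.random.rand(3, 3), dtype=tf.float32, name='y')
        id_x = tf.identity(x, name='id_x')
        id_y = tf.identity(y, name='id_y')
        tf.add(id_x, id_y, name='z')
    return graph.as_graph_def()
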
def test_inline_optimizer(inlinegraph_tuple):
    (graph_def, inline_ans, output_nodes) = inlinegraph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes)
    transformer = InlineTransformer()
    assert not transformer.prune_graph
    ugraph = transformer.transform(ugraph)
    for node_name in ugraph.topo_order:
        if node_name in inline_ans:
            op_type = ugraph.ops_info[node_name].op_type
            assert op_type == 'Inline'

def test_in_out_nodes(graph_tuple):
    graph_def, output_nodes = graph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes)
    x3 = ugraph.ops_info['x3']
    assert x3.ugraph is ugraph
    # input nodes should be unique
    assert len(x3.input_nodes) == len(set([op.name for op in x3.input_nodes]))
    assert all([str(op.name) in ['x2', 'bias2'] for op in x3.input_nodes])
    assert x3.output_nodes == []
    x2 = ugraph.ops_info['x2']
    assert [str(op.name) for op in x2.output_nodes] == ['x3']

def patrn_ugraph():
    graph = tf.Graph()
    with graph.as_default():
        ptrn_input0 = tf.placeholder(dtype=tf.float32, name='input0')
        ptrn_input1 = tf.placeholder(dtype=tf.float32, name='input1')
        ptrn_add0 = tf.add(ptrn_input0, ptrn_input1, name='add0')
        ptrn_out = tf.add(ptrn_add0, ptrn_input1, name='output')
    ugraph = GraphDefParser(config={}).parse(graph.as_graph_def(), [ptrn_out.op.name])
    # ugraph.ops_info[ptrn_input0.op.name].add_null_input_tensor()
    return ugraph

def subject_ugraph1():
    graph = tf.Graph()
    with graph.as_default():
        sub_input0 = tf.placeholder(name='sub_input0', dtype=tf.int32)
        sub_input1 = tf.placeholder(name='sub_input1', dtype=tf.int32)
        sub_input2 = tf.constant(list(range(10)), name='sub_input2')
        sub_add0 = tf.add(sub_input0, sub_input1, name='sub_add0')
        sub_add1 = tf.add(sub_add0, sub_input1, name='sub_add1')
        sub_output = tf.add(sub_add1, sub_input2, name='sub_output')
    ugraph = GraphDefParser(config={}).parse(graph.as_graph_def(), [sub_output.op.name])
    return ugraph

def subject_ugraph_1():
    subj_graph = tf.Graph()
    with subj_graph.as_default():
        x = tf.constant(np.random.rand(3, 3), name='x', dtype=tf.float32)
        y = tf.constant(np.random.rand(3, 3), name='y', dtype=tf.float32)
        z = tf.add(x, y, name='z')
        w = tf.constant(np.random.rand(3, 3), name='w', dtype=tf.float32)
        a = tf.matmul(z, w, name='a')
        r = tf.nn.relu(a, name='r')
        out = tf.add(x, r, name='out')
    subj_ugraph = GraphDefParser(config={}).parse(
        subj_graph.as_graph_def(), output_nodes=[out.op.name]
    )
    return subj_ugraph

def test_refcnt_optimizer(refgraph_tuple):
    (graph_def, refcnt_ans, output_nodes) = refgraph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes=output_nodes)
    transformer = RefCntOptimizer()
    assert not transformer.prune_graph
    ugraph = transformer.transform(ugraph)
    for node_name in ugraph.topo_order:
        if node_name in refcnt_ans:
            op_info = ugraph.ops_info[node_name]
            refcnts = op_info.op_attr["%s__ref_counts" % transformer.KWARGS_NAMESCOPE]
            assert refcnts == refcnt_ans[node_name]

def transform(self, ugraph):
    if ugraph.lib_name != 'tensorflow':
        raise ValueError('only tensorflow graphs are supported')
    graph_def = ugraph.graph_def
    if TransformGraph is None:
        raise RuntimeError('quantization is temporarily not supported')
    quant_graph_def = TransformGraph(
        input_graph_def=graph_def,
        inputs=[],
        outputs=ugraph.output_nodes,
        transforms=["quantize_weights", "quantize_nodes"]
    )
    return GraphDefParser(config={}).parse(
        quant_graph_def, output_nodes=ugraph.output_nodes
    )

def fully_connect_pattern1():
    patrn_graph = tf.Graph()
    with patrn_graph.as_default():
        z_prime = tf.placeholder(name='z_prime', dtype=tf.float32)
        w_prime = tf.constant(np.random.rand(3, 3), name='w_prime', dtype=tf.float32)
        a_prime = tf.matmul(z_prime, w_prime, name='a_prime')
        r_prime = tf.nn.relu(a_prime, name='r_prime')
    patrn_ugraph = GraphDefParser(config={}).parse(
        patrn_graph.as_graph_def(), output_nodes=[r_prime.op.name]
    )
    # detach both inputs of the matmul so the pattern is input-agnostic
    for i in range(2):
        patrn_ugraph.ops_info['a_prime'].replace_with_null_input_tensor(i)
    patrn_ugraph = prune_graph(patrn_ugraph)
    topologic_order_graph(patrn_ugraph)
    return patrn_ugraph

def pattern_ugraph(self):
    graph = tf.Graph()
    with graph.as_default():
        dummy_input = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 3])
        relu = tf.nn.relu(dummy_input, name='relu')
        tf.nn.max_pool(
            relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
            padding='SAME', name='max_pool'
        )
    pattern_ugraph = GraphDefParser(config={}).parse(
        graph.as_graph_def(), output_nodes=['max_pool']
    )
    pattern_ugraph['relu'].replace_with_null_input_tensor(0)
    pattern_ugraph = prune_graph(pattern_ugraph)
    topologic_order_graph(pattern_ugraph)
    return pattern_ugraph

def callback(match):
    graph = tf.Graph()
    with graph.as_default():
        a = tf.placeholder(dtype=tf.float32, name='a')
        b = tf.placeholder(dtype=tf.float32, name='b')
        out = tf.add(a, b, name='fused_node')
    ugraph = GraphDefParser(config={}).parse(graph.as_graph_def(), output_nodes=[out.op.name])
    ugraph.ops_info['fused_node'].replace_with_null_input_tensor(0)
    ugraph.ops_info['fused_node'].replace_with_null_input_tensor(1)
    topologic_order_graph(ugraph)
    ugraph = prune_graph(ugraph)
    patrn_ugraph = match.pattern_ugraph
    # map the pattern's dangling inputs/outputs onto the replacement graph
    input_map = {
        patrn_ugraph.ops_info['a_prime'].input_tensors[0]: ugraph.ops_info['fused_node'].input_tensors[0],
        patrn_ugraph.ops_info['a_prime'].input_tensors[1]: ugraph.ops_info['fused_node'].input_tensors[1],
    }
    output_map = {
        patrn_ugraph.ops_info['r_prime'].output_tensors[0]: ugraph.ops_info['fused_node'].output_tensors[0]
    }
    return ugraph, input_map, output_map

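# How the pieces above could plug together: a hedged usage sketch, assuming
# utensor_cgen exposes a graph matcher roughly like uTensorGraphMatcher with
# a match/replace_with flow. The import path, method names, and signatures
# below are assumptions, not verified against the repo.
def fuse_fully_connect(subject_ugraph, fully_connect_pattern):
    from utensor_cgen.matcher import uTensorGraphMatcher  # assumed import path

    matcher = uTensorGraphMatcher(pattern_ugraph=fully_connect_pattern)
    matches = matcher.match(subject_ugraph)  # assumed to return a list of matches
    if matches:
        # rewrite the subject graph: the matched matmul+relu subgraph is
        # replaced by the single 'fused_node' built in callback() above
        return matches[0].replace_with(callback)
    return subject_ugraph
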
def pattern_ugraph(self):
    graph = tf.Graph()
    with graph.as_default():
        dummy_x = tf.constant(np.random.rand(10, 10), dtype=tf.float32, name='dummy_x')
        dummy_rate = tf.placeholder(dtype=tf.float32, name='dummy_rate')
        dropout = tf.nn.dropout(dummy_x, rate=dummy_rate, name='dropout')
    patrn_ugraph = GraphDefParser(config={}).parse(
        graph.as_graph_def(), output_nodes=[dropout.op.name]
    )
    # replace dummy_x
    patrn_ugraph['dropout/truediv'].replace_with_null_input_tensor(0)
    # replace dummy_rate
    patrn_ugraph['dropout/sub'].replace_with_null_input_tensor(1)
    # replace the Shape op
    patrn_ugraph['dropout/random_uniform/RandomUniform'].replace_with_null_input_tensor(0)
    patrn_ugraph = prune_graph(patrn_ugraph)
    topologic_order_graph(patrn_ugraph)
    return patrn_ugraph

def test_placeholder_shape():
    from utensor_cgen.frontend.tensorflow import GraphDefParser

    graph = tf.Graph()
    with graph.as_default():
        tf.placeholder(dtype=tf.float32, name='x')
    parser = GraphDefParser({})
    ugraph = parser.parse(graph.as_graph_def(), output_nodes=['x'])
    # fully unknown shape: the parsed shape should be None
    out_tensor = ugraph.ops_info['x'].output_tensors[0]
    assert out_tensor.shape is None
    assert out_tensor.dtype is np.dtype('float32')

    graph = tf.Graph()
    with graph.as_default():
        tf.placeholder(dtype=tf.float32, name='x', shape=[None, 5])
    parser = GraphDefParser({})
    ugraph = parser.parse(graph.as_graph_def(), output_nodes=['x'])
    # partially known shape: unknown dimensions are parsed as None
    out_tensor = ugraph.ops_info['x'].output_tensors[0]
    assert out_tensor.shape == [None, 5]
    assert out_tensor.dtype is np.dtype('float32')

def test_ugraph_copy(graph_tuple):
    graph_def, output_nodes = graph_tuple
    ugraph_1 = GraphDefParser(config={}).parse(graph_def, output_nodes)
    ugraph_2 = deepcopy(ugraph_1)
    assert ugraph_1 is not ugraph_2
    assert ugraph_1.graph_def == ugraph_2.graph_def

def test_tensor_ops(graph_tuple):
    graph_def, output_nodes = graph_tuple
    ugraph = GraphDefParser(config={}).parse(graph_def, output_nodes)
    for op in ugraph.ops_info.values():
        for tensor in op.output_tensors:
            # every output tensor should hold a reference back to its op
            assert tensor.op is op