def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Placeholder->Add       =>      Placeholder->Eltwise or Power or ScaleShift
                    /                               /
        Const------/                   Const-------/
    """

    #
    #   Create Tensorflow model
    #

    import numpy as np
    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = x_shape.copy()
        tf_y_shape = y_shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
        tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        constant_value = np.random.randint(-256, 256, tf_y_shape).astype(np.float32)
        if (constant_value == 0).all():
            # Avoid elimination of the layer from IR
            constant_value = constant_value + 1
        y = tf.constant(constant_value)

        add = tf.add(x, y, name="Operation")
        add_shape = add.shape.as_list()

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def create_tf_scatternd_placeholder_const_net(self, x_shape, indices, updates, ir_version,
                                              use_new_frontend):
    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = x_shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        tf_indices = tf.constant(indices)
        tf_updates = tf.constant(updates)

        scatter_nd = tf.scatter_nd(tf_indices, tf_updates, tf.shape(x), name="Operation")
        res = tf.add(x, scatter_nd)

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    ref_net = None

    return tf_net, ref_net
def create_select_net(self, shape_condition, shape_input, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Condition --|                  Condition --|
                    v                              v
        Input_1-> Select               Input_1-> Select
                    ^                              ^
        Input_2-----|                  Input_2-----|
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        # Permute shapes NCHW -> NHWC for TF network creation
        shape_condition_net = permute_nchw_to_nhwc(shape_condition)
        shape_input_net = permute_nchw_to_nhwc(shape_input)

        condition = tf.compat.v1.placeholder(tf.bool, shape_condition_net, 'Input_condition')
        input_1 = tf.compat.v1.placeholder(tf.float32, shape_input_net, 'Input_1')
        input_2 = tf.compat.v1.placeholder(tf.float32, shape_input_net, 'Input_2')

        tf.compat.v1.where(condition, input_1, input_2, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def create_bias_add_2_consts_net(self, shape, ir_version, use_new_frontend):
    """
        Tensorflow net                     IR net

        Const->BiasAdd-->Concat    =>      Const---->Concat
              /         /                           /
        Const--/       /                 Placeholder-/
        Placeholder---/
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf
    import numpy as np

    tf.compat.v1.reset_default_graph()

    tf_concat_axis = -1

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
        tf_y_shape = tf_x_shape[-1:]

        constant_value_x = np.random.randint(-256, 256, tf_x_shape).astype(np.float32)
        x = tf.constant(constant_value_x)
        constant_value_y = np.random.randint(-256, 256, tf_y_shape).astype(np.float32)
        y = tf.constant(constant_value_y)

        add = tf.nn.bias_add(x, y, name="Operation")

        placeholder = tf.compat.v1.placeholder(tf.float32, tf_x_shape,
                                               'Input')  # Input_1 in graph_def

        concat = tf.concat([placeholder, add], axis=tf_concat_axis, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def create_relu6_net(self, shape, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Input->ReLU6       =>       Input->Clamp
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        tf.nn.relu6(input, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None
    if check_ir_version(10, None, ir_version) and not use_new_frontend:
        nodes_attributes = {
            'input': {'kind': 'op', 'type': 'Parameter'},
            'input_data': {'shape': shape, 'kind': 'data'},
            'ReLU6': {'kind': 'op', 'type': 'Clamp', "max": 6, "min": 0},
            'ReLU6_data': {'shape': shape, 'kind': 'data'},
            'result': {'kind': 'op', 'type': 'Result'}
        }
        ref_net = build_graph(nodes_attributes,
                              [('input', 'input_data'),
                               ('input_data', 'ReLU6'),
                               ('ReLU6', 'ReLU6_data'),
                               ('ReLU6_data', 'result')
                               ])

    return tf_net, ref_net
def create_one_hot_net(shape, depth, on_value, off_value, axis, ir_version, use_new_frontend):
    """
        Tensorflow net

        Input -> OneHot

        IR net (can contain Permutes for input/output of OneHot, depending on shapes),
        all cases are:

        Input (< 3D) -> OneHot
        Input (3D)   -> OneHot -> Permute (NHWC -> NCHW)
        Input (> 3D) -> Permute (NCHW -> NHWC) -> OneHot -> Permute (NHWC -> NCHW)
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        # Permute NCHW -> NHWC for TF network creation
        net_shape = permute_nchw_to_nhwc(shape)

        indices = tf.compat.v1.placeholder(tf.int32, shape=net_shape, name='input_indices')

        result = tf.one_hot(indices, depth, on_value, off_value, axis, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #

    ref_net = None

    return tf_net, ref_net
def create_eltwise_net(self, shape, operation, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Inputs->Eltwise       =>       Inputs->Eltwise
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')  # Input_1 in graph_def

        if operation == 'sum':
            tf.add(x, y, name='Operation')
        elif operation == 'max':
            tf.maximum(x, y, name='Operation')
        elif operation == 'mul':
            tf.multiply(x, y, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def create_concat_net(self, shape, axis, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Input->Concat       =>       Input->Concat
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        ax = axis

        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        # TODO: add concat with const inputs to check fusing (as in ONNX)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')  # Input_1 in graph_def

        concat = tf.concat([x, y], axis=ax, name='Operation')
        concat_shape = concat.shape.as_list()

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def build_tf_graph(shape, axes):
    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        # Permute NCHW -> NHWC for TF network creation
        net_shape = permute_nchw_to_nhwc(shape)

        data = tf.compat.v1.placeholder(tf.float32, shape=net_shape, name='data')

        result = tf.math.l2_normalize(data, axes, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    return tf_net
def create_reduce_net(self, shape, operation, keep_dims, axis, ir_version, use_new_frontend):
    import tensorflow as tf

    fn_mapping = {'sum': tf.reduce_sum,
                  'max': tf.reduce_max,
                  'min': tf.reduce_min,
                  'mean': tf.reduce_mean,
                  'prod': tf.reduce_prod,
                  }

    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        fn_mapping[operation](x, axis=axis, keepdims=keep_dims, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    return tf_net, None
def create_tf_roll_net(self, shift, axis, x_shape, input_type, ir_version, use_new_frontend):
    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = x_shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
        roll = tf.roll(x, shift=shift, axis=axis)

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    # TODO: add reference IR net. For now it is omitted and the test only checks
    # the inference result, which is the more important part.
    ref_net = None

    return tf_net, ref_net
def create_squeeze_net(self, shape, axis, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Input->Squeeze       =>       Input->[Permute]->Reshape
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        squeeze = tf.squeeze(x, axis=axis, name="Operation")

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, max_val,
                                 input_type, precision, ir_version, use_new_frontend):
    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = x_shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
        if global_seed is not None:
            tf.compat.v1.random.set_random_seed(global_seed)
        random_uniform = tf.random.uniform(x_shape, seed=op_seed, dtype=input_type,
                                           minval=min_val, maxval=max_val) + x

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    ref_net = None

    if check_ir_version(10, None, ir_version) and not use_new_frontend:
        const_for_layer_tests = lambda name, value, shape, shape1: {
            **{name + '_dd': {'kind': 'data', 'value': value, 'shape': shape1}},
            **{name: {'kind': 'op', 'type': 'Const'}},
            **shaped_data(name + '_d', shape)
        }

        connect_const_for_layer_tests = lambda first_tensor_name, second_tensor_name: [
            *connect_front(first_tensor_name + '_dd', first_tensor_name),
            *connect(first_tensor_name, second_tensor_name)
        ]

        nodes_attributes = {
            **regular_op_with_shaped_data('input', x_shape, {'type': 'Parameter'}),
            **const_for_layer_tests('shape', x_shape, int64_array([len(x_shape)]),
                                    int64_array([len(x_shape)])),
            **const_for_layer_tests('min_val', min_val, int64_array([]), int64_array([1])),
            **const_for_layer_tests('max_val', max_val, int64_array([]), int64_array([1])),
            **regular_op_with_shaped_data('random_uniform', x_shape, {'type': 'RandomUniform'}),
            **regular_op_with_shaped_data('convert', x_shape, {'type': 'Convert'}),
            **regular_op_with_shaped_data('add', x_shape, {'type': 'Add'}),
            **regular_op_with_shaped_data('result', x_shape, {'type': 'Result'}),
        }

        if precision == 'FP16' and input_type == tf.float32:
            ref_net = build_graph(nodes_attributes,
                                  [*connect_const_for_layer_tests('shape', '0:random_uniform'),
                                   *connect_const_for_layer_tests('min_val', '1:random_uniform'),
                                   *connect_const_for_layer_tests('max_val', '2:random_uniform'),
                                   *connect('random_uniform', 'convert'),
                                   *connect('convert', '0:add'),
                                   *connect('input', '1:add'),
                                   *connect('add', 'result')])
        else:
            ref_net = build_graph(nodes_attributes,
                                  [*connect_const_for_layer_tests('shape', '0:random_uniform'),
                                   *connect_const_for_layer_tests('min_val', '1:random_uniform'),
                                   *connect_const_for_layer_tests('max_val', '2:random_uniform'),
                                   *connect('random_uniform', '0:add'),
                                   *connect('input', '1:add'),
                                   *connect('add', 'result')])

    return tf_net, ref_net
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type,
                                     use_new_frontend):
    """
        Tensorflow net                 IR net

        Placeholder->BinaryOp   =>     Placeholder->Eltwise or Power or ScaleShift
                    /                               /
        Const------/                   Const-------/
    """
    self.current_op_type = op_type

    #
    #   Create Tensorflow model
    #

    import numpy as np
    import tensorflow as tf

    op_type_to_tf = {
        'Add': tf.math.add,
        'Sub': tf.math.subtract,
        'Mul': tf.math.multiply,
        'Div': tf.math.divide,
        'RealDiv': tf.realdiv,
        'SquaredDifference': tf.math.squared_difference,
        'Pow': tf.math.pow,
        'Maximum': tf.math.maximum,
        'Minimum': tf.math.minimum,
        'Equal': tf.math.equal,
        'NotEqual': tf.math.not_equal,
        'Mod': tf.math.mod,
        'Greater': tf.math.greater,
        'GreaterEqual': tf.math.greater_equal,
        'Less': tf.math.less,
        'LessEqual': tf.math.less_equal,
        'LogicalAnd': tf.math.logical_and,
        'LogicalOr': tf.math.logical_or,
        'LogicalXor': tf.math.logical_xor,
        'FloorMod': tf.math.floormod,
    }

    type = np.float32
    if op_type in ["LogicalAnd", "LogicalOr", "LogicalXor"]:
        type = np.bool_

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = x_shape.copy()
        tf_y_shape = y_shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
        tf_y_shape = permute_nchw_to_nhwc(tf_y_shape, use_new_frontend)

        x = tf.compat.v1.placeholder(type, tf_x_shape, 'Input')
        constant_value = generate_input(op_type, tf_y_shape)
        if (constant_value == 0).all():
            # Avoid elimination of the layer from IR
            constant_value = constant_value + 1
        y = tf.constant(constant_value, dtype=type)

        op = op_type_to_tf[op_type](x, y, name="Operation")

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None

    return tf_net, ref_net
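# The builder above relies on a module-level generate_input(op_type, shape) helper that is
# not shown in this section. A minimal sketch of what such a helper could look like follows;
# the exact value ranges are an assumption, the only hard requirements being boolean data for
# the logical ops and a safe non-negative range for 'Pow'.
def generate_input(op_type, size):
    import numpy as np

    logical_ops = ['LogicalAnd', 'LogicalOr', 'LogicalXor']
    narrow_borders = ['Pow']  # assumption: keep Pow operands small and non-negative

    if op_type in logical_ops:
        # Logical ops need boolean operands
        return np.random.randint(0, 2, size).astype(bool)
    if op_type in narrow_borders:
        # Avoid NaNs from fractional powers of negative numbers
        return np.random.uniform(1, 3, size).astype(np.float32)
    # Default domain for the remaining arithmetic/comparison ops; the caller bumps an
    # all-zero constant by one, which also guards division/mod against a zero operand
    return np.random.uniform(-10, 10, size).astype(np.float32)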
def create_log_softmax_net(self, shape, reduction_axis, ir_version, use_new_frontend):
    """
        Tensorflow net                 IR net

        Input->LogSoftmax       =>       Input->Softmax->Log
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        input = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
        if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
            tf.nn.log_softmax(input, name='Operation', axis=reduction_axis)
        else:
            tf.nn.log_softmax(input, axis=reduction_axis, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    ref_net = None

    reduce_sum_shape = np.copy(shape)
    rank = len(shape)
    if rank in {4, 5}:
        reduction_axis = reduction_axis if reduction_axis >= 0 else rank + reduction_axis
        if rank == 4:
            reduction_axis = {0: 0, 1: 2, 2: 3, 3: 1}[reduction_axis]
        else:
            reduction_axis = {0: 0, 1: 2, 2: 3, 3: 4, 4: 1}[reduction_axis]

    reduce_sum_shape[reduction_axis] = 1

    converted_shape = shape if rank != 1 else shape[0]
    if check_ir_version(10, None, ir_version) and not use_new_frontend:
        ref_nodes_attributes = {
            'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
            'input_data': {'shape': shape, 'kind': 'data', 'value': None},
            'reduce_max_axis_val': {'shape': int64_array([reduction_axis]).shape,
                                    'kind': 'data',
                                    'value': int64_array([reduction_axis])},
            'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
            'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
            'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'keep_dims': True},
            'reduce_max_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
            'sub_first': {'type': 'Subtract', 'kind': 'op'},
            'sub_first_data': {'shape': shape, 'kind': 'data', 'value': None},
            'reduce_sum_axis_val': {'shape': int64_array([reduction_axis]).shape,
                                    'kind': 'data',
                                    'value': int64_array([reduction_axis])},
            'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
            'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
            'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'keep_dims': True},
            'reduce_sum_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
            'exp': {'type': 'Exp', 'kind': 'op'},
            'exp_data': {'shape': shape, 'kind': 'data', 'value': None},
            'log': {'type': 'Log', 'kind': 'op'},
            'log_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
            'sub_second': {'type': 'Subtract', 'kind': 'op'},
            'sub_second_data': {'shape': shape, 'kind': 'data', 'value': None},
            'result': {'kind': 'op', 'type': 'Result'},
        }

        ref_edges = [
            ('input', 'input_data'),
            ('reduce_max_axis_val', 'reduce_max_axis'),
            ('reduce_max_axis', 'reduce_max_axis_data'),
            ('reduce_max_axis_data', 'reduce_max', {'in': 1}),
            ('reduce_max', 'reduce_max_data'),
            ('input_data', 'reduce_max', {'out': 0, 'in': 0}),
            ('input_data', 'sub_first', {'out': 0, 'in': 0}),
            ('reduce_max_data', 'sub_first', {'in': 1}),
            ('sub_first', 'sub_first_data'),
            ('reduce_sum_axis_val', 'reduce_sum_axis'),
            ('reduce_sum_axis', 'reduce_sum_axis_data'),
            ('reduce_sum_axis_data', 'reduce_sum', {'in': 1}),
            ('reduce_sum', 'reduce_sum_data'),
            ('sub_first_data', 'exp'),
            ('exp', 'exp_data'),
            ('exp_data', 'reduce_sum', {'in': 0}),
            ('reduce_sum_data', 'log'),
            ('log', 'log_data'),
            ('log_data', 'sub_second', {'in': 1}),
            ('sub_second', 'sub_second_data'),
            ('sub_first_data', 'sub_second', {'out': 0, 'in': 0}),
            ('sub_second_data', 'result'),
        ]

        ref_net = build_graph(ref_nodes_attributes, ref_edges)

    return tf_net, ref_net
def create_normalize_l2_net_non_fusable(shape, axes, output_axes, ir_version, use_new_frontend):
    tf_net = TestNormalizeL2.build_tf_graph(shape, axes)

    reduced_shape = permute_nchw_to_nhwc(shape).copy()
    for axis in axes:
        reduced_shape[axis] = 1
    reduced_shape = permute_nchw_to_nhwc(reduced_shape)

    eltwise_shapes = int64_array(np.ones(len(shape)))
    nodes_attributes = {
        'input': {'kind': 'op', 'type': 'Parameter'},
        'input_data': {'shape': shape, 'kind': 'data'},

        'power_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
                                   'value': np.array([2.0])},
        'power_const': {'kind': 'op', 'type': 'Const'},
        'power_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
        'power': {'kind': 'op', 'type': 'Power'},
        'power_data': {'shape': shape, 'kind': 'data'},

        'reduce': {'kind': 'op', 'type': 'ReduceSum', 'keep_dims': True},
        'reduce_data': {'shape': reduced_shape, 'kind': 'data'},
        'reduce_axes_input_data': {'shape': int64_array([len(axes)]), 'kind': 'data',
                                   'value': int64_array(output_axes)},
        'reduce_axes': {'kind': 'op', 'type': 'Const'},
        'reduce_axes_data': {'shape': int64_array([len(axes)]), 'kind': 'data'},

        'maximum_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
                                     'value': np.array([1e-12])},
        'maximum_const': {'kind': 'op', 'type': 'Const'},
        'maximum_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
        'maximum': {'kind': 'op', 'type': 'Maximum'},
        'maximum_data': {'shape': reduced_shape, 'kind': 'data'},

        'power2_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
                                    'value': np.array([-0.5])},
        'power2_const': {'kind': 'op', 'type': 'Const'},
        'power2_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
        'power2': {'kind': 'op', 'type': 'Power'},
        'power2_data': {'shape': reduced_shape, 'kind': 'data'},

        'multiply': {'kind': 'op', 'type': 'Multiply'},
        'multiply_data': {'shape': shape, 'kind': 'data'},
        'result': {'kind': 'op', 'type': 'Result'},
    }

    ref_net = build_graph(nodes_attributes,
                          [('input', 'input_data'),
                           ('input_data', 'power', {'out': 0, 'in': 0}),
                           ('power_const_input_data', 'power_const'),
                           ('power_const', 'power_const_data'),
                           ('power_const_data', 'power', {'out': 0, 'in': 1}),
                           ('power', 'power_data'),
                           ('power_data', 'reduce', {'out': 0, 'in': 0}),
                           ('reduce_axes_input_data', 'reduce_axes'),
                           ('reduce_axes', 'reduce_axes_data'),
                           ('reduce_axes_data', 'reduce', {'out': 0, 'in': 1}),
                           ('reduce', 'reduce_data'),
                           ('reduce_data', 'maximum', {'out': 0, 'in': 0}),
                           ('maximum_const_input_data', 'maximum_const'),
                           ('maximum_const', 'maximum_const_data'),
                           ('maximum_const_data', 'maximum', {'out': 0, 'in': 1}),
                           ('maximum', 'maximum_data'),
                           ('maximum_data', 'power2', {'out': 0, 'in': 0}),
                           ('power2_const_input_data', 'power2_const'),
                           ('power2_const', 'power2_const_data'),
                           ('power2_const_data', 'power2', {'out': 0, 'in': 1}),
                           ('power2', 'power2_data'),
                           ('input_data', 'multiply', {'out': 0, 'in': 0}),
                           ('power2_data', 'multiply', {'out': 0, 'in': 1}),
                           ('multiply', 'multiply_data'),
                           ('multiply_data', 'result'),
                           ])

    if use_new_frontend:
        ref_net = None

    return tf_net, ref_net
def create_topK_net(shape, k, ir_version, use_new_frontend):
    """
        Tensorflow net:

                        |-> Values
        Input -> TopK   |
                        |-> Indices

        IR net:

                        |-> Values
        Input -> TopK   |
                        |-> Indices
    """

    #
    #   Create Tensorflow model
    #

    import tensorflow as tf

    tf.compat.v1.reset_default_graph()

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        shape_net = permute_nchw_to_nhwc(shape)

        input_tensor = tf.compat.v1.placeholder(tf.int32, shape=shape_net, name='Input')
        values, indices = tf.nn.top_k(input_tensor, k=k, sorted=True, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #

    topk_output_shape = shape.copy()
    inverse_nhwc_nchw = PermuteAttrs.get_nhwc_to_nchw_permutation(len(topk_output_shape)).inv
    topk_axis = permute_axis(len(topk_output_shape) - 1,
                             inverse_nhwc_nchw)  # we need to permute axis attribute
    topk_output_shape[topk_axis] = k

    ref_net = None

    if check_ir_version(10, None, ir_version) and not use_new_frontend:
        nodes_attributes = {
            'input': {'kind': 'op', 'type': 'Parameter'},
            'input_data': {'shape': shape, 'kind': 'data'},
            'Const_k_input_data': {'shape': [], 'kind': 'data'},
            'Const_k': {'kind': 'op', 'type': 'Const'},
            'Const_k_data': {'shape': [], 'kind': 'data'},
            'TopK': {'kind': 'op', 'type': 'TopK', 'axis': topk_axis, 'mode': 'max',
                     'sort': 'value'},
            'TopK_data_1': {'shape': topk_output_shape, 'kind': 'data'},
            'TopK_data_2': {'shape': topk_output_shape, 'kind': 'data'},
            'result_1': {'kind': 'op', 'type': 'Result'},
            'result_2': {'kind': 'op', 'type': 'Result'},
        }

        ref_net = build_graph(nodes_attributes,
                              [('input', 'input_data'),
                               ('input_data', 'TopK', {'in': 0}),
                               ('Const_k_input_data', 'Const_k'),
                               ('Const_k', 'Const_k_data'),
                               ('Const_k_data', 'TopK', {'in': 1}),
                               ('TopK', 'TopK_data_1', {'out': 0}),
                               ('TopK', 'TopK_data_2', {'out': 1}),
                               ('TopK_data_1', 'result_1'),
                               ('TopK_data_2', 'result_2'),
                               ])

    return tf_net, ref_net
def create_net_with_unary_op(self, shape, ir_version, op_type, use_new_frontend):
    """
        Tensorflow net                 IR net

        Input->UnaryOp       =>       Input->UnaryOp
    """
    import tensorflow as tf

    self.current_op_type = op_type

    op_type_to_tf = {
        'Abs': tf.math.abs,
        'Acos': tf.math.acos,
        'Acosh': tf.math.acosh,
        'Asin': tf.math.asin,
        'Asinh': tf.math.asinh,
        'Atan': tf.math.atan,
        'Atanh': tf.math.atanh,
        'Ceiling': tf.math.ceil,
        'Cos': tf.math.cos,
        'Cosh': tf.math.cosh,
        'Elu': tf.nn.elu,
        'Exp': tf.math.exp,
        'Floor': tf.math.floor,
        'Log': tf.math.log,
        'LogicalNot': tf.math.logical_not,
        'Negative': tf.math.negative,
        'Sigmoid': tf.nn.sigmoid,
        'Sign': tf.math.sign,
        'Sin': tf.math.sin,
        'Sinh': tf.math.sinh,
        'SoftPlus': tf.nn.softplus,
        'Tan': tf.math.tan,
        'Tanh': tf.math.tanh,
        'ReLU': tf.nn.relu,
    }

    #
    #   Create Tensorflow model
    #

    tf.compat.v1.reset_default_graph()

    type = tf.float32
    if op_type == "LogicalNot":
        type = tf.bool

    # Create the graph and model
    with tf.compat.v1.Session() as sess:
        tf_x_shape = shape.copy()

        tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)

        input = tf.compat.v1.placeholder(type, tf_x_shape, 'Input')
        op_type_to_tf[self.current_op_type](input, name='Operation')

        tf.compat.v1.global_variables_initializer()
        tf_net = sess.graph_def

    #
    #   Create reference IR net
    #   Please, specify 'type': 'Input' for input node
    #   Moreover, do not forget to validate ALL layer attributes!!!
    #

    ref_net = None
    if check_ir_version(10, None, ir_version) and not use_new_frontend:
        nodes_attributes = {
            'input': {'kind': 'op', 'type': 'Parameter'},
            'input_data': {'shape': shape, 'kind': 'data'},
            'testing_op': {'kind': 'op', 'type': self.current_op_type},
            'testing_data': {'shape': shape, 'kind': 'data'},
            'result': {'kind': 'op', 'type': 'Result'}
        }

        ref_net = build_graph(nodes_attributes,
                              [('input', 'input_data'),
                               ('input_data', 'testing_op'),
                               ('testing_op', 'testing_data'),
                               ('testing_data', 'result')])

    return tf_net, ref_net
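# Hedged usage sketch (assumption): the classes that define the create_*_net builders above
# are expected to derive from the suite's common TF layer-test base class, whose _test(...)
# runner converts the TF GraphDef, runs inference, and compares results (and IR structure
# when ref_net is not None). A parametrized pytest method would then look roughly like this;
# the _test signature and fixture names are assumptions, not confirmed by this section.
import pytest

relu6_test_params = [dict(shape=[1, 3, 100, 224])]

@pytest.mark.parametrize("params", relu6_test_params)
def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
    # Build the TF model plus reference IR, then hand both to the common runner
    self._test(*self.create_relu6_net(**params, ir_version=ir_version,
                                      use_new_frontend=use_new_frontend),
               ie_device, precision, ir_version, temp_dir=temp_dir,
               use_new_frontend=use_new_frontend)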