def generate_feed_dict(graph: tf_v1.Graph, node: Node):
    """
    The first value in the return tuple is True if all inputs for the node have constant values.
    The second returned value is a mapping of placeholder tensors to the numpy arrays with the
    values for these placeholders.
    :param graph: the TensorFlow Graph to generate the feed dictionary for.
    :param node: the node which represents the TensorFlow sub-graph of operations.
    :return: pair where the first element is a flag that specifies that all node inputs are constants
    and a dictionary where the key is the input Tensor object and the value is the tensor value.
    """
    all_constants = True
    feed_dict = dict()
    for in_data_node_name, edge_attrs in node.get_inputs():
        if 'control_flow_edge' in edge_attrs and edge_attrs['control_flow_edge']:
            continue
        value = node.in_node(edge_attrs['in']).value
        if value is None:
            all_constants = False
            placeholder_pb = node['pbs'][edge_attrs['placeholder_name']]
            value = np.ones(shape=tf_tensor_shape(placeholder_pb.attr['shape'].shape),
                            dtype=tf_dtype_extractor(placeholder_pb.attr['dtype'].type))
        feed_dict[graph.get_tensor_by_name(edge_attrs['placeholder_name'] + ":0")] = value
    return all_constants, feed_dict
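# A minimal, self-contained sketch (not from the original sources) of how a feed
# dictionary like the one built above drives TF 1.x session execution. The tiny
# demo graph stands in for the sub-graph wrapped by `node`; all names here are
# illustrative only.
import numpy as np
import tensorflow.compat.v1 as tf_v1

demo_graph = tf_v1.Graph()
with demo_graph.as_default():
    x = tf_v1.placeholder(tf_v1.float32, shape=[2, 2], name='x')
    y = tf_v1.identity(x * 2.0, name='y')

# Mirrors generate_feed_dict: a non-constant input gets a dummy np.ones() value.
feed_dict = {demo_graph.get_tensor_by_name('x:0'): np.ones([2, 2], dtype=np.float32)}
with tf_v1.Session(graph=demo_graph) as sess:
    result = sess.run(demo_graph.get_tensor_by_name('y:0'), feed_dict=feed_dict)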
def extract(cls, node: Node):
    attrs = {
        'op': cls.op,
        'element_shape': tf_tensor_shape(node.pb.attr["element_shape"].shape),
    }
    TensorArrayGather.update_node_stat(node, attrs)
    return cls.enabled
def update_placeholder_shape_and_add_transpose(node: Node):
    """
    The function changes placeholder shapes from the NHWC to the NCHW format and adds transpose
    operations if needed.
    :param node: node to operate on.
    :return: None
    """
    import tensorflow as tf
    from mo.front.common.layout import convert_shape, nhwc_to_nchw_permute, nchw_to_nhwc_permute
    from mo.front.tf.extractors.utils import tf_tensor_shape
    from mo.front.tf.partial_infer.tf import add_node_def_to_subgraph, update_input_in_pbs

    tf.reset_default_graph()

    inputs_replacements = list()

    # transpose permutation constants
    nchw_to_nhwc_constant = tf.constant(nchw_to_nhwc_permute, dtype=tf.int32, name=nchw_to_nhwc_constant_name)
    nhwc_to_nchw_constant = tf.constant(nhwc_to_nchw_permute, dtype=tf.int32, name=nhwc_to_nchw_constant_name)

    for placeholder_name in node['input_nodes_names']:
        # dummy node which we can refer to as input in the transpose for the output node;
        # the dummy node should be unique for each placeholder
        dummy_node = tf.constant(value=[[[[1]]]], dtype=tf.float32, name='random_dummy_name_' + placeholder_name)

        placeholder = node['pbs'][placeholder_name]
        cur_shape = tf_tensor_shape(placeholder.attr['shape'].shape)
        if len(cur_shape) == 4:  # TODO think about a better check that the transpose is required
            nchw_shape = convert_shape(cur_shape, nhwc_to_nchw_permute)
            for ind in range(len(cur_shape)):
                placeholder.attr['shape'].shape.dim[ind].size = nchw_shape[ind]
            transpose_name = placeholder.name + '_transpose'
            transpose = tf.transpose(dummy_node, nchw_to_nhwc_constant, transpose_name)  # NCHW -> NHWC

            # add transpose operations to the GraphDef after the placeholders
            add_node_def_to_subgraph(node, transpose.op.node_def, transpose_name, len(node['input_nodes_names']))
            inputs_replacements.append((placeholder.name, transpose_name))
            inputs_replacements.append((dummy_node.name, placeholder.name))
            node['real_input_dims'].append(nchw_shape)
        else:
            node['real_input_dims'].append(cur_shape)

    add_node_def_to_subgraph(node, nchw_to_nhwc_constant.op.node_def)
    add_node_def_to_subgraph(node, nhwc_to_nchw_constant.op.node_def)

    # update the initial input names to the transposed ones
    for old_input_tensor_name, new_name in inputs_replacements:
        update_input_in_pbs(node, old_input_tensor_name, new_name)
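# A self-contained sketch of the shape permutation performed above, assuming the
# standard NHWC<->NCHW permutation vectors from mo.front.common.layout
# ([0, 3, 1, 2] and [0, 2, 3, 1]); convert_shape is modelled here as numpy
# fancy indexing over the shape array.
import numpy as np

nhwc_to_nchw_permute = np.array([0, 3, 1, 2], dtype=np.int64)
nchw_to_nhwc_permute = np.array([0, 2, 3, 1], dtype=np.int64)

nhwc_shape = np.array([1, 224, 224, 3], dtype=np.int64)
nchw_shape = nhwc_shape[nhwc_to_nchw_permute]                  # -> [1, 3, 224, 224]
assert (nchw_shape[nchw_to_nhwc_permute] == nhwc_shape).all()  # round trip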
def tf_placeholder_ext(pb):
    return {
        'data_type': tf_dtype_extractor(pb.attr["dtype"].type),
        'shape': tf_tensor_shape(pb.attr["shape"].shape),
        'type': 'Input',
        'infer': lambda node: single_output_infer(node, lambda n: n.shape),
        'permute_attrs': PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])
    }
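# A reduced sketch of the dtype mapping tf_dtype_extractor is assumed to perform;
# the real table in mo.front.tf.extractors.utils covers many more TF types.
# demo_dtype_extractor and _tf_to_np_dtype are illustrative names only.
import numpy as np
from tensorflow.core.framework import types_pb2

_tf_to_np_dtype = {
    types_pb2.DT_FLOAT: np.float32,
    types_pb2.DT_INT32: np.int32,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool_,
}

def demo_dtype_extractor(pb_type):
    # Returns the numpy dtype for a TF DataType enum value, as the extractors
    # above expect from tf_dtype_extractor.
    return _tf_to_np_dtype.get(pb_type)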
def extract(node):
    attrs = {
        'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type),
        'shape': tf_tensor_shape(node.pb.attr["shape"].shape),
        'permute_attrs': PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])
    }
    Parameter.update_node_stat(node, attrs)
    return __class__.enabled
def extract(node):
    attrs = {
        'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type),
        'shape': tf_tensor_shape(node.pb.attr["shape"].shape),
        'identity': True,
    }
    Op.update_node_stat(node, attrs)
    return __class__.enabled
def extract(cls, node):
    pb_tensor = node.pb.attr["value"].tensor
    shape = tf_tensor_shape(pb_tensor.tensor_shape)
    attrs = {
        'shape': shape,
        'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor),
        'data_type': tf_dtype_extractor(pb_tensor.dtype),
    }
    Const.update_node_stat(node, attrs)
    return cls.enabled
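# A self-contained sketch of the protobuf structures the Const extractor reads.
# The shape recovery mirrors what tf_tensor_shape is assumed to do: collect the
# dim sizes of a TensorShapeProto into a numpy array.
import numpy as np
from tensorflow.core.framework import tensor_pb2, tensor_shape_pb2, types_pb2

pb_tensor = tensor_pb2.TensorProto(
    dtype=types_pb2.DT_FLOAT,
    tensor_shape=tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=2),
             tensor_shape_pb2.TensorShapeProto.Dim(size=3)]),
    float_val=[1.0] * 6)

shape = np.array([d.size for d in pb_tensor.tensor_shape.dim], dtype=np.int64)
assert (shape == [2, 3]).all()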
def extract(cls, node):
    attrs = {
        'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type),
        'shape': tf_tensor_shape(node.pb.attr["shape"].shape),
        'identity': True,
        'infer': lambda node: copy_shape_infer(node, value_infer=copy_value),
    }
    Op.update_node_stat(node, attrs)
    return cls.enabled
def tf_const_ext(pb):
    pb_tensor = pb.attr["value"].tensor
    result = {
        'data_type': tf_dtype_extractor(pb_tensor.dtype),
        'shape': tf_tensor_shape(pb_tensor.tensor_shape),
        'infer': tf_const_infer
    }
    result['value'] = tf_tensor_content(pb_tensor.dtype, result['shape'], pb_tensor)
    log.debug('Constant extractor for node gives shape = {} and value.shape = {}'.format(result['shape'],
                                                                                         result['value'].shape))
    return result
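# tf_tensor_content recovers the numpy value from a TensorProto, similar in
# spirit to TensorFlow's own make_ndarray; a self-contained round trip:
import numpy as np
import tensorflow.compat.v1 as tf_v1

value = np.arange(6, dtype=np.float32).reshape(2, 3)
proto = tf_v1.make_tensor_proto(value)   # numpy -> TensorProto
restored = tf_v1.make_ndarray(proto)     # TensorProto -> numpy
assert (restored == value).all()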
def extract(cls, node):
    shapes = node.pb.attr['output_shapes'].list.shape
    tf_types = node.pb.attr['output_types'].list.type
    extracted_types = []
    for t in tf_types:
        extracted_types.append(tf_dtype_extractor(t))
    result_shapes = []
    for shape_pb in shapes:
        result_shapes.append(tf_tensor_shape(shape_pb))
    Op.update_node_stat(node, {'shapes': result_shapes, 'types': extracted_types, 'out_ports_count': 1})
    return cls.enabled
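# A self-contained sketch of the NodeDef attributes the extractor above walks:
# 'output_types' and 'output_shapes' are parallel lists, one entry per output.
from tensorflow.core.framework import node_def_pb2, tensor_shape_pb2, types_pb2

node_def = node_def_pb2.NodeDef(name='iterator', op='IteratorGetNext')
node_def.attr['output_types'].list.type.extend([types_pb2.DT_FLOAT, types_pb2.DT_INT32])
batch_shape = tensor_shape_pb2.TensorShapeProto(
    dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=-1),   # dynamic batch dimension
         tensor_shape_pb2.TensorShapeProto.Dim(size=28)])
node_def.attr['output_shapes'].list.shape.extend([batch_shape, batch_shape])

# The extractor pairs these element-wise: one (shape, dtype) per iterator output.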