Example #1
def _tensors(detection_graph: tf.Graph) -> tuple:
    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    return image_tensor, detection_boxes, detection_scores, detection_classes, num_detections
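A minimal usage sketch for the helper above, assuming detection_graph is an already-loaded frozen object-detection graph and frame is an RGB image as a NumPy array (both names are assumptions, not part of the original snippet):

import numpy as np
import tensorflow as tf

def run_detection(detection_graph: tf.Graph, frame: np.ndarray):
    # Look up the input and output tensors defined by the helper above.
    image_tensor, boxes, scores, classes, num = _tensors(detection_graph)
    with tf.compat.v1.Session(graph=detection_graph) as sess:
        # The model expects a batch dimension: [1, height, width, 3].
        return sess.run([boxes, scores, classes, num],
                        feed_dict={image_tensor: np.expand_dims(frame, axis=0)})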
Example #2
    def from_dict(graph: tf.Graph, dictionary: dict) -> 'EvaluationSpec':

        # Metric ops
        eval_metric_ops = {
            key:
            (graph.get_tensor_by_name(tensor), graph.get_tensor_by_name(op))
            for key, (tensor, op) in dictionary['metric_ops'].items()
        }

        # Loss
        loss = graph.get_tensor_by_name(dictionary['loss'])

        return EvaluationSpec(loss=loss, eval_metric_ops=eval_metric_ops)
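The layout of the dictionary argument is not shown here; a plausible shape, with illustrative tensor names (an assumption, not the original project's naming), could look like this:

spec_dict = {
    'loss': 'loss/total_loss:0',
    'metric_ops': {
        # metric key -> (value tensor name, update op tensor name)
        'accuracy': ('metrics/accuracy/value:0', 'metrics/accuracy/update_op:0'),
    },
}
# eval_spec = EvaluationSpec.from_dict(graph, spec_dict)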
Example #3
def _set_signature_dtypes(
        graph: tf.Graph, signature_def: util.SignatureDef
) -> Tuple[tf.Graph, util.SignatureDef]:
    """Set the dtype of each input and output to match the graph and return
       both
    """
    for key, value in signature_def.inputs.items():
        node = graph.get_tensor_by_name(value.name)
        value.dtype = node.dtype.as_datatype_enum
    for key, value in signature_def.outputs.items():
        node = graph.get_tensor_by_name(value.name)
        value.dtype = node.dtype.as_datatype_enum
    return graph, signature_def
Example #4
class TensorflowFaceDetector(object):
    def __init__(self, PATH_TO_CKPT):
        """Tensorflow detector
        """

        self.detection_graph = Graph()
        with self.detection_graph.as_default():
            od_graph_def = compat.v1.GraphDef()
            with io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                import_graph_def(od_graph_def, name='')


        with self.detection_graph.as_default():
            config = compat.v1.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = compat.v1.Session(graph=self.detection_graph, config=config)
            self.windowNotSet = True


    def run(self, image):
        """image: bgr image
        return (boxes, scores, classes, num_detections)
        """

        image_np = cvtColor(image, COLOR_BGR2RGB)

        # the array based representation of the image will be used later in order to prepare the
        # result image with boxes and labels on it.
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = expand_dims(image_np, axis=0)
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Actual detection.
        start_time = time()
        (boxes, scores, classes, num_detections) = self.sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        elapsed_time = time() - start_time
        print('inference time cost: {}'.format(elapsed_time))

        return (boxes, scores, classes, num_detections)
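A short usage sketch, assuming a frozen face-detection model on disk and OpenCV installed (the file names are illustrative):

import cv2

detector = TensorflowFaceDetector('frozen_inference_graph.pb')
frame = cv2.imread('face.jpg')  # BGR image, as run() expects
boxes, scores, classes, num_detections = detector.run(frame)
print('detections:', int(num_detections[0]))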
Example #5
def generate_feed_dict(graph: tf.Graph, node: Node):
    """
    The first value in the return tuple is True if all inputs for the node have constant values.
    The second returned value is a mapping from placeholder tensors to the numpy arrays with the values for these
    placeholders.
    :param graph: the TensorFlow Graph to generate the feed dictionary for.
    :param node: the node which represents the TensorFlow sub-graph of operations.
    :return: pair where the first element is a flag that specifies whether all node inputs are constants and the second
    element is a dictionary where the key is the input Tensor object and the value is the tensor value.
    """
    all_constants = True
    feed_dict = dict()
    for in_data_node_name, edge_attrs in get_inputs(node.graph, node.id):
        if 'control_flow_edge' in edge_attrs and edge_attrs[
                'control_flow_edge']:
            continue
        value = node.in_node(edge_attrs['in']).value
        if value is None:
            all_constants = False
            placeholder_pb = node['pbs'][edge_attrs['placeholder_name']]
            value = np.ones(
                shape=tf_tensor_shape(placeholder_pb.attr['shape'].shape),
                dtype=tf_dtype_extractor(placeholder_pb.attr['dtype'].type))
        feed_dict[graph.get_tensor_by_name(edge_attrs['placeholder_name'] +
                                           ":0")] = value
    return all_constants, feed_dict
Example #6
def _get_tensor_by_parameter_name(graph: tf.Graph,
                                  parameter_name: str) -> tf.Tensor:
    nucleotide_var_scopes_with_parameters = [
        ScopeNames.MODEL,
        ScopeNames.POSTPROCESSING,
    ]
    nucleotide_var_scope = parameter_name.split("//", maxsplit=1)[0]
    tensor_name = ":".join([parameter_name, "0"])
    tensor = None
    for each_var_scope in nucleotide_var_scopes_with_parameters:
        tensor_name_with_var_scope = "/".join(
            [each_var_scope, nucleotide_var_scope, tensor_name])
        try:
            tensor = graph.get_tensor_by_name(tensor_name_with_var_scope)
            break

        except KeyError:
            continue
    if tensor is None:
        raise ValueError("Model parameter with name {} does not exist!".format(
            parameter_name))

    if tensor.op.type != 'PlaceholderWithDefault':
        msg = ("Model parameter tensor operation should be of type "
               "PlaceholderWithDefault! (found {})").format(tensor.op.type)
        raise ValueError(msg)
    return tensor
Example #7
def input_to_feed_dict(graph: tf.Graph, input_data: Union[dict, xr.Dataset]) \
        -> Dict[Union[Union[tf.Tensor, tf.Operation], Any], Any]:
    """
    Converts some input data to a feed dict for TensorFlow sessions, based on the placeholders in a tf.Graph.

    :param graph: tf.Graph object
    :param input_data: either an xr.Dataset or a dict{"placeholder": data}
    :return: dict{"placeholder:0": data} for all placeholder names in `input_data`
    """
    placeholders = {
        op.name: op
        for op in graph.get_operations()
        if op.type.lower().startswith("placeholder")
    }

    if isinstance(input_data, xr.Dataset):
        keys = input_data.variables.keys()
    else:
        keys = input_data.keys()
    keys = set(keys).intersection(placeholders.keys())

    retval = {}
    for k in keys:
        retval[graph.get_tensor_by_name(k + ":0")] = input_data[k]

    return retval
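A self-contained sketch exercising input_to_feed_dict with a plain dict (the placeholder names are illustrative):

import numpy as np
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    tf.compat.v1.placeholder(tf.float32, shape=[None, 3], name='x')
    tf.compat.v1.placeholder(tf.float32, shape=[None, 1], name='y')

feed = input_to_feed_dict(g, {'x': np.zeros((2, 3)), 'y': np.ones((2, 1)), 'ignored': 42})
# 'ignored' is dropped because the graph has no placeholder with that name.
assert set(t.name for t in feed) == {'x:0', 'y:0'}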
Example #8
def _get_transformed_tensor(src_tensor: tf.Tensor,
                            dst_graph: tf.Graph,
                            dst_scope: str = '') -> tf.Tensor:
    dst_tensor_name = src_tensor.name
    if dst_scope:
        dst_tensor_name = f'{dst_scope}/{dst_tensor_name}'

    return dst_graph.get_tensor_by_name(dst_tensor_name)
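A sketch of the intended use: import one graph into another under a name scope, then look up the copied tensor (the scope and tensor names are illustrative):

import tensorflow as tf

src_graph = tf.Graph()
with src_graph.as_default():
    a = tf.constant(1.0, name='a')

dst_graph = tf.Graph()
with dst_graph.as_default():
    # Copy the source graph into dst_graph under the 'copy' scope.
    tf.compat.v1.import_graph_def(src_graph.as_graph_def(), name='copy')

copied = _get_transformed_tensor(a, dst_graph, dst_scope='copy')
print(copied.name)  # copy/a:0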
Example #9
    def get_loss_tensor(self, graph: tf.Graph) -> tf.Tensor:
        """
        Note: if you give your loss component a different name, you must override this method.

        :param graph:
        :return:
        """
        return graph.get_tensor_by_name('{}:0'.format(_Labels.LOSS_LABEL))
Example #10
def _build_tensor_info(graph: tf.Graph, info: NodeInfo) -> TensorInfo:
    if info is not None:
        tensor = graph.get_tensor_by_name(info.tensor)
        return TensorInfo(dtype=tf.dtypes.as_dtype(
            tensor.dtype).as_datatype_enum,
                          tensor_shape=tf.TensorShape(tensor.shape).as_proto(),
                          name=info.tensor)
    else:
        return None
Example #11
def create_tensor_dict(detection_graph: tensorflow.Graph) -> dict:
    tensor_dict = {}
    with tensorflow.compat.v1.Session(graph=detection_graph):
        ops = detection_graph.get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = detection_graph.get_tensor_by_name(tensor_name)
    return tensor_dict
Example #12
def label_image(detection_graph: tensorflow.Graph, tensor_dict: dict, image_np: numpy.ndarray) -> dict:
    with tensorflow.compat.v1.Session(graph=detection_graph) as sess:
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        image_np_expanded = numpy.expand_dims(image_np, axis=0)
        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np_expanded})
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(numpy.int64)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    return output_dict
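A sketch chaining the two helpers above, assuming detection_graph is an already-loaded frozen object-detection graph and image is an RGB array; the 0.5 score threshold is illustrative:

import numpy
import tensorflow

def detect(detection_graph: tensorflow.Graph, image: numpy.ndarray) -> dict:
    tensor_dict = create_tensor_dict(detection_graph)
    output_dict = label_image(detection_graph, tensor_dict, image)
    keep = output_dict['detection_scores'] >= 0.5
    return {
        'boxes': output_dict['detection_boxes'][keep],
        'classes': output_dict['detection_classes'][keep],
        'scores': output_dict['detection_scores'][keep],
    }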
Example #13
def get_io_tensor_by_node_name(graph: tf.Graph, input_nodes, output_nodes,
                               input_indices=None, output_indices=None):
    """
    Get graph input and output tensors by their corresponding operation names
    Args:
        graph: A tf.Graph object.
        input_nodes: A list of the names of all the input nodes.
        output_nodes: A list of the names of all the output nodes.
        input_indices: A list of integers with the same length as input_nodes,
            specifying which tensor should be returned from the outputs of an
            operation. Defaults to None, which converts to a list of zeroes.
            e.g. If input_nodes = ['node_a', 'node_b']
                    input_indices = [0, 1]
                 Then ['node_a:0', 'node_b:1'] will be the tensors searched.
        output_indices: A list of integers with the same length as output_nodes.

    Returns:
        Two lists of tf.Tensor objects, corresponding to inputs and outputs.
    """
    input_tensors = []
    output_tensors = []
    if not input_indices:
        input_indices = [0] * len(input_nodes)
    if not output_indices:
        output_indices = [0] * len(output_nodes)

    assert len(input_nodes) == len(input_indices)
    assert len(output_nodes) == len(output_indices)

    for node, index in zip(input_nodes, input_indices):
        tensor_name = node_to_tensor(node, index)
        try:
            input_tensors.append(graph.get_tensor_by_name(tensor_name))
        except KeyError as e:
            print(e)
    for node, index in zip(output_nodes, output_indices):
        tensor_name = node_to_tensor(node, index)
        try:
            output_tensors.append(graph.get_tensor_by_name(tensor_name))
        except KeyError as e:
            print(e)
    return input_tensors, output_tensors
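The node_to_tensor helper is not shown above; a plausible stand-in (an assumption, not the original implementation) simply appends the output index to the operation name:

def node_to_tensor(node_name: str, index: int) -> str:
    # 'node_a', 0 -> 'node_a:0'
    return '{}:{}'.format(node_name, index)

# inputs, outputs = get_io_tensor_by_node_name(
#     graph, input_nodes=['image_tensor'], output_nodes=['detection_boxes'])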
Example #14
def _parse_graph_info(graph_def):
    """Parse GraphDef
  Fetch input and output tensor names for reconstructing the
  graph in a uTensor Context object

  Argument
  ========
  - graph_def <tf.GraphDef>: a GraphDef object

  Return
  ======
  - graph_nodes <defaultdict>: a dict with key as operation name and
    value as a defaultdict with keys 'input_tensor' and 'output_tensor'
    which maps to a set of input/output tensor names respectively

  Note
  ====
  - though the output tensor names are irrelevant for TensorFlow, they
    are necessary for uTensor
  """
    OperationInfo = namedtuple('OperationInfo',
                               field_names=[
                                   'input_tensor', 'output_tensor', 'op_type',
                                   'output_content', 'op_attr'
                               ])
    graph = Graph()
    with graph.as_default():  # pylint: disable=E1129
        import_graph_def(graph_def, name="")
    graph_info = {}
    with Session(graph=graph):
        for node in graph_def.node:
            op = graph.get_operation_by_name(node.name)
            input_tensor = [(t.name, t.dtype, _parse_shape(t.shape))
                            for t in op.inputs]
            output_tensor = [(t.name, t.dtype, _parse_shape(t.shape))
                             for t in op.outputs]
            op_type = node.op
            output_content = {}
            op_attr = node.attr
            if node.op in ["Const"]:
                for tensor_name, _, _ in output_tensor:
                    tensor = graph.get_tensor_by_name(tensor_name)
                    output_content[tensor_name] = tensor.eval()
            graph_info[node.name] = OperationInfo(input_tensor, output_tensor,
                                                  op_type, output_content,
                                                  op_attr)
    return graph_info
Example #15
def create_input_feed_dict(graph: tf.Graph,
                           input_op_names_list: List,
                           input_data: Union[np.ndarray, List, Tuple],
                           training=False) -> Dict:
    """
    Creates feed dictionary [op_name] = data for session.run
    :param graph: tf.Graph
    :param input_op_names_list: list of input op names
    :param input_data: either single numpy array, list or tuple of numpy array
    :param training: True if graph is in training mode, false otherwise
    :return: feed_dict
    """

    feed_dict = {}

    # single input
    if isinstance(input_data, np.ndarray):
        input_data_list = [input_data]

    # list of multiple inputs
    elif isinstance(input_data, list):
        input_data_list = input_data

    # tuple of multiple inputs
    elif isinstance(input_data, tuple):
        input_data_list = list(input_data)

    else:
        raise ValueError(
            'Input data should be either a numpy array, a list or a tuple'
        )

    if len(input_op_names_list) != len(input_data_list):
        raise ValueError(
            'There is a mismatch between the number of input op names and input data!'
        )

    for inp_op_name, inp_data in zip(input_op_names_list, input_data_list):

        inp_tensor = graph.get_tensor_by_name(inp_op_name + ':0')
        feed_dict[inp_tensor] = inp_data

    # Identify and set all training tensors to True or False depending on training parameter
    for training_tensor in get_training_tensors(graph):
        feed_dict[training_tensor] = training

    return feed_dict
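The get_training_tensors helper is not shown above; a plausible stand-in (an assumption, not the original implementation) that treats boolean placeholders as training flags:

import tensorflow as tf

def get_training_tensors(graph: tf.Graph):
    # Collect boolean placeholders, e.g. an 'is_training' flag.
    return [op.outputs[0] for op in graph.get_operations()
            if op.type == 'Placeholder' and op.outputs[0].dtype == tf.bool]

# feed_dict = create_input_feed_dict(graph, ['input'], np.zeros((1, 28, 28, 1)))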
Example #16
def tensor_dependency(graph: tf.Graph, name: str, ops: List[str],
                      dtypes: List[str]):
    """
	Given a Tensorflow graph, a tensor name in the graph, and list of ops and dtypes to prune, return if this
	tensor depends on any of the given ops and dtypes.

	Recursive search over the graph starting from this tensor to determine dependency on any of the ops or dtypes.
	"""
    tensor = graph.get_tensor_by_name(name)
    # check if this tensor depends on any of the listed dtypes, or if the op that created it is in the list of ops
    if tensor.dtype.name.lower() in dtypes or tensor.op.type.lower() in ops:
        return True

    # if this tensor's op has inputs, traverse the graph to see if it depends on any of the dtypes or ops
    for op_input in tensor.op.inputs:
        if tensor_dependency(graph, op_input.name, ops, dtypes):
            return True

    # otherwise return false, it doesn't depend on any of the listed ops or dtypes :)
    return False
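A self-contained check of tensor_dependency on a tiny graph: a cast of an int32 placeholder depends on both the 'placeholder' op type and the 'int32' dtype, while an unrelated constant depends on neither:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    idx = tf.compat.v1.placeholder(tf.int32, shape=[None], name='idx')
    tf.cast(idx, tf.float32, name='as_float')
    tf.constant(1.0, name='const')

assert tensor_dependency(g, 'as_float:0', ops=['placeholder'], dtypes=[])
assert tensor_dependency(g, 'as_float:0', ops=[], dtypes=['int32'])
assert not tensor_dependency(g, 'const:0', ops=['placeholder'], dtypes=['int32'])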
Example #17
def _parse_graph_nodes(graph_def: GraphDef) -> defaultdict:
    """Parse GraphDef
  Fetch input and output tensor names for reconstructing the
  graph in a uTensor Context object

  Argument
  ========
  - graph_def <tf.GraphDef>: a GraphDef object

  Return
  ======
  - graph_nodes <defaultdict>: a dict with key as operation name and
    value as a defaultdict with keys 'input_tensor' and 'output_tensor'
    which maps to a set of input/output tensor names respectively

  Note
  ====
  - though the output tensor names are irrelevant for TensorFlow, they
    are necessary for uTensor
  """
    graph = Graph()
    with graph.as_default():  # pylint: disable=E1129
        import_graph_def(graph_def, name="")
    graph_info = defaultdict(lambda: {"output_content": {}})
    with Session(graph=graph):
        for node in graph_def.node:
            op = graph.get_operation_by_name(node.name)
            op_info = graph_info[node.name]
            op_info["input_tensor"] = [(t.name, t.dtype, _parse_shape(t.shape))
                                       for t in op.inputs]
            op_info["output_tensor"] = [
                (t.name, t.dtype, _parse_shape(t.shape)) for t in op.outputs
            ]
            op_info["op_type"] = node.op
            if node.op in ["Const"]:
                for out_tensor, _, _ in op_info["output_tensor"]:
                    tensor = graph.get_tensor_by_name(out_tensor)
                    op_info["output_content"][tensor.name] = tensor.eval()
    return graph_info
Example #18
    def from_dict(graph: tf.Graph, dictionary: dict) -> 'TrainSpec':
        return TrainSpec(loss=graph.get_tensor_by_name(dictionary['loss']),
                         train_op=graph.get_operation_by_name(
                             dictionary['train_op']),
                         create_viz_op=False)
Example #19
    def from_dict(graph: tf.Graph, dictionary: dict) -> 'PredictSpec':
        return PredictSpec(
            output={
                key: graph.get_tensor_by_name(tensor)
                for key, tensor in dictionary.items()
            })
Example #20
    def attach(self, tf_graph: tf.Graph) -> 'MatrixInitializer':
        return MatrixInitializer(
            self.__param_name, tf_graph.get_tensor_by_name(self.__indices),
            tf_graph.get_tensor_by_name(self.__values),
            tf_graph.get_tensor_by_name(self.__update_op))
Example #21
def getTensor(graph: tf.Graph, tensor_name: str) -> tf.Tensor:
    t = graph.get_tensor_by_name(tensor_name + ":0")
    return t
Example #22
    count_file = sys.argv[4]
except IndexError:
    count_file = None

#create new session
graph = Graph()
sess = Session(graph=graph)

#restore model
saved_model.loader.load(sess, [saved_model.tag_constants.SERVING], model_dir)

#get restored model ops and tensors
model_info = joblib.load(osp.join(model_dir, 'model_info.pkl'))
model = dict()
model.update(
    {k: graph.get_tensor_by_name(v)
     for k, v in model_info['inputs'].items()})
model.update(
    {k: graph.get_tensor_by_name(v)
     for k, v in model_info['outputs'].items()})

#inputs
inp = model["obs"]

#outputs
pi = model["pi"]
val = model["val"]
logp_pi = model["logp_pi"]
out = [pi, val, logp_pi]

if count_file:
Example #23
def predict_images(detection_graph: tf.Graph, image_path: str, output_path: str, output_csv_path: str,
                   threshold: float = 0.3, save_csv: bool = True) -> Tuple[np.ndarray]:
    """Predict detection on image

    Args:
        detection_graph (tf.Graph): Graph of model to detect
        image_path (str): path or glob pattern of the input image(s)
        output_path (str): output folder to write detected images to
        output_csv_path (str): output folder to write csv of detections to
        threshold (float, optional): detection threshold. Defaults to 0.3.
        save_csv (bool, optional): whether csv files of detection should be saved. Defaults to True.

    Returns:
        Tuple[np.ndarray]: tuple of np arrays (all_boxes, all_scores, all_classes, all_num_detections)
    """
    data = pd.DataFrame(columns=[
                        'filename', 'width', 'height', 'class', 'score', 'xmin', 'ymin', 'xmax', 'ymax'])
    all_boxes, all_scores, all_classes, all_num_detections = [], [], [], []
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            for img in sorted(glob.glob(image_path)):
                image_np, orig_w, orig_h = image_load_encode(img)

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)

                #print('Image expanded: ', image_np.shape, image_np_expanded.shape)
                image_tensor = detection_graph.get_tensor_by_name(
                    'image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the level of confidence for each of the objects.
                # The score is shown on the result image, together with the class label.
                scores = detection_graph.get_tensor_by_name(
                    'detection_scores:0')
                classes = detection_graph.get_tensor_by_name(
                    'detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name(
                    'num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                #print('Boxes: ', boxes, 'scores', scores, 'classes', classes, 'num dets', num_detections)

                if save_csv:
                    all_boxes.append(boxes)
                    all_scores.append(scores)
                    all_classes.append(classes)
                    all_num_detections.append(num_detections)

                boxes = boxes[0] * np.array([[512, 512, 512, 512]])
                scores = scores[0]
                classes = classes[0]
                num_detections = int(num_detections[0])

                # convert boxes to xmin, ymin, xmax, ymax. Currently it is ymin, xmin, ymax, xmax
                boxes = boxes[:, [1, 0, 3, 2]]

                # keep only the detections whose score is greater than or equal to the threshold
                thresh_indices = np.where(scores >= threshold)[0]
                boxes = boxes[thresh_indices]
                scores = scores[thresh_indices]
                classes = classes[thresh_indices]

                boxes, scores = postprocess(boxes, scores, theta=args.theta)

                # Visualization of the results of a detection, but only if output_path is provided
                if output_path is not None:
                    image_np = draw_boxes(
                        image_np, boxes, scores, disable_thresh=True)
                    orig_name = img.split('/')[-1].split('\\')[-1]
                    img_output_path = os.path.join(output_path, orig_name)
                    cv2.imwrite(img_output_path, image_np)

                # always saving data to dataframe
                if save_csv:
                    _ = write_to_df(data, img, orig_w, orig_h, output_csv_path,
                                    'spine', boxes, scores, disable_thresh=True)

                print('[INFO] Finished detection of image '+img+'.')

    return all_boxes, all_scores, all_classes, all_num_detections
Example #24
def get_variables_from_tf(graph: Graph, tf_graph: tf.Graph,
                          session: tf.Session) -> Dict[str, np.ndarray]:
    return session.run({
        name: tf_graph.get_tensor_by_name(name)
        for name, _ in graph.variables.items()
    })
Example #25
def _get_tensors_by_name(graph: tf.Graph, names: List[str]) -> List[tf.Tensor]:
    return [graph.get_tensor_by_name(f'{name}:0') for name in names]
Example #26
    def get_read_training(cls, graph: tf.Graph) -> tf.Tensor:
        return graph.get_tensor_by_name('{}:0'.format(_Labels.READ_TRAINING_LABEL))
Example #27
    def get_global_step(cls, graph: tf.Graph) -> tf.Tensor:
        return graph.get_tensor_by_name('{}:0'.format(_Labels.GLOBAL_STEP_LABEL))
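These lookups rely on an op having been created earlier in the graph under a well-known label. A sketch of that convention, with a hypothetical label value standing in for _Labels.READ_TRAINING_LABEL:

import tensorflow as tf

READ_TRAINING_LABEL = 'read_training'  # hypothetical label value

g = tf.Graph()
with g.as_default():
    # The graph-building code would create this op under the label name.
    tf.compat.v1.placeholder_with_default(False, shape=[], name=READ_TRAINING_LABEL)

read_training = g.get_tensor_by_name('{}:0'.format(READ_TRAINING_LABEL))
print(read_training.op.type)  # PlaceholderWithDefault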