Example #1
def get_frozen_graph(graph_file):
    """Read Frozen Graph file from disk."""
    with tf.gfile.FastGFile(graph_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def
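
A minimal usage sketch for the helper above (assumes TF 1.x or tensorflow.compat.v1 imported as tf; the file path and tensor names are hypothetical):

import tensorflow.compat.v1 as tf

graph_def = get_frozen_graph('frozen_graph.pb')  # hypothetical path
graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')
with tf.Session(graph=graph) as sess:
    # 'input:0'/'output:0' stand in for the real tensor names in this graph.
    result = sess.run('output:0', feed_dict={'input:0': [[1.0, 2.0]]})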
Example #2
capture_id = 0

capture = cv.VideoCapture(capture_id)

print("capture")
print(capture)

if capture is None or not capture.isOpened():
    print("No video capture found for id=" + str(capture_id))
    quit()

print("Using tensorflow version " + tf.__version__)

# Read the graph. ('dir' is assumed to hold the path to the frozen .pb file
# elsewhere in the script; note that it shadows the built-in dir().)
with tf.io.gfile.GFile(dir, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    # Restore session
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

    # Read and preprocess an image.
    keepAlive = True
    while (keepAlive):

        ret, img = capture.read()
        #img = vs.read()
        rows = img.shape[0]
        cols = img.shape[1]
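        # The snippet cuts off here. A hedged sketch of the loop body that
        # typically follows for a TF Object Detection API frozen graph (the
        # tensor names below are that API's conventional ones, assumed here):
        inp = cv.resize(img, (300, 300))
        inp = inp[:, :, [2, 1, 0]]  # BGR -> RGB
        out = sess.run(
            [sess.graph.get_tensor_by_name('num_detections:0'),
             sess.graph.get_tensor_by_name('detection_scores:0'),
             sess.graph.get_tensor_by_name('detection_boxes:0')],
            feed_dict={'image_tensor:0': inp.reshape(1, 300, 300, 3)})
        for i in range(int(out[0][0])):
            if float(out[1][0][i]) > 0.5:
                box = out[2][0][i]  # [ymin, xmin, ymax, xmax], normalized
                cv.rectangle(img,
                             (int(box[1] * cols), int(box[0] * rows)),
                             (int(box[3] * cols), int(box[2] * rows)),
                             (125, 255, 51), thickness=2)
        cv.imshow('detections', img)
        if cv.waitKey(1) & 0xFF == ord('q'):
            keepAlive = False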
Example #3
def main():  # pylint: disable=R0914
    """
    Before running this script, please check that the following files
    exist in the same directory:
        classification.jpg
        folder 'calibration' containing 32 images
    :return: None
    """
    model_file = os.path.join(PATH, 'model/resnet_v1_50.pb')
    load_graph(model_file)
    graph = tf.get_default_graph()

    input_tensor = graph.get_tensor_by_name('input:0')
    output_tensor = graph.get_tensor_by_name('Reshape_1:0')

    image_path = os.path.join(PATH, 'data/classification.jpg')
    image_test = Image.open(image_path).resize([SIDE, SIDE])
    image_test = np.array(image_test).astype(np.float32) / 128 - 1
    image_test = image_test.reshape([1, SIDE, SIDE, 3])

    print('inference with original pb ********************')
    with tf.Session() as session:
        origin_prediction = session.run(output_tensor,
                                        feed_dict={input_tensor: image_test})

    config_file = os.path.join(OUTPUTS, 'config.json')
    record_file = os.path.join(OUTPUTS, 'record.txt')
    amct.create_quant_config(config_file,
                             graph,
                             batch_num=1,
                             config_defination='./src/nuq_conf/nuq_quant.cfg')
    amct.quantize_model(graph, config_file, record_file)

    calibration_path = os.path.join(PATH, 'data/calibration')
    batch = load_image(calibration_path)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        session.run(output_tensor, feed_dict={input_tensor: batch})

    amct.save_model(model_file, ['Reshape_1'], record_file,
                    os.path.join(OUTPUTS, 'resnet-50_v1'))

    # reload and test the quantized model for 'Fakequant'.
    model_file = os.path.join(OUTPUTS, 'resnet-50_v1_quantized.pb')
    with tf.io.gfile.GFile(model_file, mode='rb') as model:
        graph_def_reload = tf.GraphDef()
        graph_def_reload.ParseFromString(model.read())

    graph_reload = tf.Graph()
    with graph_reload.as_default():
        tf.import_graph_def(graph_def_reload, name='')

    print('inference with quantized pb====================')
    with tf.Session(graph=graph_reload) as session:
        fakequant_prediction = session.run('Reshape_1:0',
                                           feed_dict={'input:0': image_test})

    print('Original Model Prediction:\n',
          '\tcategory index: %d\n' % origin_prediction.argmax(),
          '\tcategory prob: %.3f\n' % round(origin_prediction.max(), 3),
          end='')
    print('Quantized Model Prediction:\n',
          '\tcategory index: %d\n' % fakequant_prediction.argmax(),
          '\tcategory prob: %.3f\n' % round(fakequant_prediction.max(), 3),
          end='')
Example #4
    def load_frozen_model(self):
        with self.detection_graph.as_default():
            with open('mobilenet/frozen_inference_graph.pb', 'rb') as f:
                gd = tf.GraphDef.FromString(f.read())
            tf.import_graph_def(gd, name='')
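
For context, a minimal host class that a method like this presupposes (a sketch; the class name and constructor are assumptions, only self.detection_graph is taken from the snippet):

class FrozenModelDetector:
    def __init__(self):
        self.detection_graph = tf.Graph()

    def load_frozen_model(self):
        with self.detection_graph.as_default():
            with open('mobilenet/frozen_inference_graph.pb', 'rb') as f:
                gd = tf.GraphDef.FromString(f.read())
            tf.import_graph_def(gd, name='')

detector = FrozenModelDetector()
detector.load_frozen_model()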
Example #5
def model_modify(modelname, destmodel, strsign, signweightindex, infofile):
    try:
        original_model = PbModel(modelname)
        inputnamesnodes = original_model.input_nodes
        outputnamesnodes = original_model.output_nodes
    except Exception as e:
        with open(infofile, 'a') as f:
            f.write((modelname[modelname.find("assets\\") +
                               7:]).replace('\\', '/') + '\n')
            print((modelname[modelname.find("assets\\") +
                             7:]).replace('\\', '/') + '\n')
            f.write('------\n')
        print("error:")
        print("fail to get model input output names")
        print(e)
        return -1
    outputnames = []
    for o in outputnamesnodes:
        outputnames.append(o.name)
    print(outputnames)

    try:
        with tf.Graph().as_default() as g_combined:
            with tf.Session(graph=g_combined) as sess:
                graph_def = load_def(modelname)
                tf.import_graph_def(graph_def, name='')
                new_model = tf.GraphDef()
                filters = []
                shapes = []
                for n in sess.graph_def.node:
                    if n.op == "Conv2D":
                        filters.append(n.input[1])
                        ft = sess.graph.get_tensor_by_name(n.input[1] + ':0')
                        shape = ft.shape.as_list()
                        shapes.append(shape)

                # some filters are identity, not const
                for n in sess.graph_def.node:
                    if n.name in filters:
                        if n.op == "Identity":
                            index = filters.index(n.name)
                            filters[index] = n.input[0]
                            ft = sess.graph.get_tensor_by_name(n.input[0] +
                                                               ':0')
                            shape = ft.shape.as_list()
                            shapes[index] = shape

                signinputindex = signweightindex[0]
                print(filters[signinputindex])
                fname = filters[signinputindex]
                ft = sess.graph.get_tensor_by_name(fname + ':0')
                print(ft)
                shape = ft.shape.as_list()
                weight = sess.run(ft)
                i = tf.NodeDef()  # bare "input" Placeholder template (prepared but not used below)
                i.name = "input"
                i.op = "Placeholder"
                i.attr['dtype'].CopyFrom(
                    tf.AttrValue(type=tf.float32.as_datatype_enum))
                iwithdefault = tf.NodeDef()
                iwithdefault.op = "PlaceholderWithDefault"
                iwithdefault.attr['dtype'].CopyFrom(
                    tf.AttrValue(type=tf.float32.as_datatype_enum))

                # degrade conv op
                for n in sess.graph_def.node:
                    if n.name in filters:
                        index = filters.index(n.name)
                        nw = new_model.node.add(
                        )  # change name of original weights
                        nw.CopyFrom(n)
                        nw.name = "w_" + str(index)
                        ninput = new_model.node.add(
                        )  # add an input, type is float32
                        fshape = shapes[index]
                        ninput.CopyFrom(iwithdefault)
                        ninput.name = "inputwithdefault_" + str(index)
                        ninput.attr['shape'].CopyFrom(
                            tf.AttrValue(
                                shape=tensor_shape_pb2.TensorShapeProto(dim=[
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[0]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[1]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[2]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[3])
                                ])))
                        default = new_model.node.add()
                        default.op = 'Const'
                        default.name = 'default_' + str(index)
                        default.attr['dtype'].CopyFrom(
                            tf.AttrValue(type=tf.float32.as_datatype_enum))
                        default.attr['value'].CopyFrom(
                            tf.AttrValue(tensor=tf.make_tensor_proto(
                                sess.run(tf.zeros(fshape)), tf.float32,
                                fshape)))
                        default.attr['_output_shapes'].CopyFrom(
                            tf.AttrValue(list=tf.AttrValue.ListValue(shape=[
                                tensor_shape_pb2.TensorShapeProto(dim=[
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[0]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[1]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[2]),
                                    tensor_shape_pb2.TensorShapeProto.Dim(
                                        size=fshape[3])
                                ])
                            ])))
                        ninput.input.extend(['default_' + str(index)])
                        add = new_model.node.add(
                        )  # add an Add op, name it with original filter name
                        add.name = n.name
                        add.op = "Add"
                        add.attr["T"].CopyFrom(
                            tf.AttrValue(type=tf.float32.as_datatype_enum))  #
                        add.input.extend(["w_" + str(index)])
                        add.input.extend(["inputwithdefault_" + str(index)])
                        if n.name == fname:  # change the weights of selected conv
                            f = sess.graph.get_tensor_by_name(n.name + ':0')
                            size = 1
                            for s in fshape:
                                size = size * s
                            fweights = sess.run(f)
                            cmd = ['java', 'random', strsign, str(size)]
                            # Windows-specific: shell=True with a list and CRLF splitting
                            outputinfo = subprocess.check_output(
                                cmd, shell=True, stderr=subprocess.STDOUT
                            ).decode().strip().split('\r\n')
                            content = list(map(stripfloat, outputinfo))
                            tmp = tf.constant(content,
                                              dtype=tf.float32,
                                              shape=fshape)
                            addres = tf.add(tmp, fweights)
                            nw.attr["value"].CopyFrom(
                                tf.AttrValue(tensor=tf.make_tensor_proto(
                                    sess.run(addres), tf.float32, fshape)))
                    elif n.op == "Conv2D":  #degrade
                        nn = new_model.node.add()
                        nn.op = 'Conv2D'
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["T"].CopyFrom(n.attr["T"])
                        nn.attr["use_cudnn_on_gpu"].CopyFrom(
                            n.attr["use_cudnn_on_gpu"])
                        nn.attr["strides"].CopyFrom(n.attr["strides"])
                        nn.attr["padding"].CopyFrom(n.attr["padding"])
                    elif n.op == "ResizeBilinear":  #degrade
                        nn = new_model.node.add()
                        nn.op = "ResizeBilinear"
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["T"].CopyFrom(n.attr["T"])
                        nn.attr["align_corners"].CopyFrom(
                            n.attr["align_corners"])
                    elif n.op == "Conv2DBackpropInput":  #degrade
                        nn = new_model.node.add()
                        nn.op = "Conv2DBackpropInput"
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["data_format"].CopyFrom(n.attr["data_format"])
                        nn.attr["dilations"].CopyFrom(n.attr["dilations"])
                        nn.attr["padding"].CopyFrom(n.attr["padding"])
                        nn.attr["strides"].CopyFrom(n.attr["strides"])
                        nn.attr["T"].CopyFrom(n.attr["T"])
                        nn.attr["use_cudnn_on_gpu"].CopyFrom(
                            n.attr["use_cudnn_on_gpu"])
                    elif n.op == "DepthwiseConv2dNative":  #degrade
                        nn = new_model.node.add()
                        nn.op = "DepthwiseConv2dNative"
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["data_format"].CopyFrom(n.attr["data_format"])
                        nn.attr["padding"].CopyFrom(n.attr["padding"])
                        nn.attr["strides"].CopyFrom(n.attr["strides"])
                        nn.attr["T"].CopyFrom(n.attr["T"])
                    elif n.op == "Cast":  #degrade
                        nn = new_model.node.add()
                        nn.op = "Cast"
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["DstT"].CopyFrom(n.attr["DstT"])
                        nn.attr["SrcT"].CopyFrom(n.attr["SrcT"])
                    elif n.op == "Equal":  #degrade
                        nn = new_model.node.add()
                        nn.op = "Equal"
                        nn.name = n.name
                        for input_name in n.input:
                            nn.input.extend([input_name])
                        nn.attr["T"].CopyFrom(n.attr["T"])
                    else:
                        nn = new_model.node.add()
                        nn.CopyFrom(n)
                g_combined_def = graph_util.convert_variables_to_constants(
                    sess, new_model, outputnames)
                with tf.gfile.GFile(destmodel, "wb") as model_f:
                    model_f.write(g_combined_def.SerializeToString())

    except Exception as e:
        with open(infofile, 'a') as f:
            f.write((modelname[modelname.find("assets\\") +
                               7:]).replace('\\', '/') + '\n')
            print((modelname[modelname.find("assets\\") +
                             7:]).replace('\\', '/') + '\n')
            f.write('------\n')
        print("error:")
        print("fail to modify model")
        print(e)
        return -1

    cmd = ['java', 'MD5', destmodel]
    outputinfo = subprocess.check_output(
        cmd, shell=True, stderr=subprocess.STDOUT).decode().split('\r\n')
    print(outputinfo)
    with open(infofile, 'a') as f:
        f.write((modelname[modelname.find("assets\\") +
                           7:]).replace('\\', '/') + '\n')
        f.write(outputinfo[0] + '\n')
        f.write(strsign + '\n')
        f.write(str(signinputindex) + '\n')
        for shape in shapes:
            strshape = []
            for i in shape:
                strshape.append(str(i))
            f.write(','.join(strshape) + '\n')
        f.write('------\n')
    return 0
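
The core trick in model_modify is replacing each convolution filter Const with Add(w, PlaceholderWithDefault(zeros)), so the weights can be perturbed at feed time without re-freezing the graph. A standalone minimal sketch of that pattern (TF 1.x / tensorflow.compat.v1; all names here are hypothetical):

import numpy as np
import tensorflow.compat.v1 as tf

g = tf.Graph()
with g.as_default():
    w = tf.constant(np.ones((3, 3, 1, 8), np.float32), name='w_0')
    # Defaults to zeros, but callers may feed a delta of the same shape.
    delta = tf.placeholder_with_default(tf.zeros_like(w), w.shape,
                                        name='inputwithdefault_0')
    filt = tf.add(w, delta, name='conv_filter')  # stands in for the original Const

with tf.Session(graph=g) as sess:
    print(sess.run(filt).sum())  # default path: unchanged weights (72.0)
    print(sess.run(filt, feed_dict={
        delta: np.full((3, 3, 1, 8), 0.5, np.float32)}).sum())  # perturbed (108.0)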
Example #6
def live_feed():
    MODEL_NAME = 'inference_graph'

    # Grab path to current working directory
    CWD_PATH = os.getcwd()

    # Path to frozen detection graph .pb file, which contains the model that is used
    # for object detection.
    PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME,
                                'frozen_inference_graph.pb')

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'training', 'labelmap.pbtxt')

    # Number of classes the object detector can identify
    NUM_CLASSES = 3

    ## Load the label map.
    # Label maps map indices to category names, so that when our convolution
    # network predicts `5`, we know that this corresponds to `king`.
    # Here we use internal utility functions, but anything that returns a
    # dictionary mapping integers to appropriate string labels would be fine
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Load the Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    # Define input and output tensors (i.e. data) for the object detection classifier

    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')

    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Initialize webcam feed
    video = cv2.VideoCapture(0)
    ret = video.set(3, 1280)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
    ret = video.set(4, 720)   # 4 == cv2.CAP_PROP_FRAME_HEIGHT

    while (True):

        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3],
        # i.e. a batch containing a single image
        ret, frame = video.read()
        frame_expanded = np.expand_dims(frame, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes,
         num) = sess.run([
             detection_boxes, detection_scores, detection_classes,
             num_detections
         ],
                         feed_dict={image_tensor: frame_expanded})

        # Draw the results of the detection (aka 'visualize the results')
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.60)

        # All the results have been drawn on the frame; write it out and yield
        # it as one MJPEG part (a context manager so the file handle is closed).
        # cv2.imshow('Object detector', frame)
        cv2.imwrite('demo.jpg', frame)
        with open('demo.jpg', 'rb') as jpeg:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg.read() + b'\r\n')
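
live_feed is a generator of multipart JPEG chunks, so it is presumably meant to back an MJPEG streaming endpoint; a hedged Flask sketch (the route name is an assumption):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Each chunk yielded by live_feed() becomes one frame of the MJPEG stream.
    return Response(live_feed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')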
Example #7
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

NUM_CLASSES = 90

urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf1.GraphDef()
    tf.gfile = tf.io.gfile  # alias for TF2, where tf.gfile was removed
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

with detection_graph.as_default():
    with tf1.Session(graph=detection_graph) as sess:
        cap = cv2.VideoCapture(0)
        urlsent = False
Example #8
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = [],
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
            'extension.\n'
            'It means that the model is not frozen.\n'
            'To load non frozen model to Model Optimizer run:'
            '\n\n1. For "*.ckpt" file:'
            '\n- if inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(
                graph_def, graph_file_name,
                is_binary), variables_values, 'tf', None
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # we are sure that checkpoint is an existing file or directory due to cli_parser configuration
            return graph_def, variables_values, 'tf', None
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # Since version 2.2 TF can fail with an internal error while loading a graph from a .meta file.
            # It happens because some operations may have an `_output_shapes` attribute inconsistent with the
            # GraphDef-calculated value. To avoid this problem we must delete `_output_shapes` attributes from operations
            for node in input_meta_graph_def.graph_def.node:
                if '_output_shapes' in node.attr:
                    del node.attr['_output_shapes']
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values, 'tf', None
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup("tf")
                # enable eager execution temporarily while TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()

                try:
                    # Code to extract a Keras model.
                    # tf.keras.models.load_model throws TypeError, KeyError or IndexError
                    # for the TF 1.x SavedModel format in case TF 1.x is installed
                    imported = tf.keras.models.load_model(model_dir,
                                                          compile=False)
                except:
                    imported = tf.saved_model.load(model_dir, saved_model_tags)  # pylint: disable=E1120

                # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed
                concrete_func = imported.signatures[
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # aggressive inlining is needed to freeze the embedding table of a Keras Embedding operation;
                # a model with an Embedding operation cannot be properly converted to IR without this parameter
                if "tensorflow" in env_setup and env_setup[
                        "tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func,
                        lower_control_flow=False,
                        aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func, lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()

                input_names = []
                if hasattr(imported, 'inputs'):
                    # Extract tensor names order from Keras model
                    input_names = [tensor.name for tensor in imported.inputs]

                # After model freezing, output tensor names change and receive a "Func/PartitionedCall" prefix,
                # so output_names from saved_model cannot be used. Tensor names from the frozen graph are used
                # here instead: TF adds indexed Identity nodes to each output during freezing, so this indexing
                # is used for order alignment.
                output_names = [tensor.name for tensor in frozen_func.outputs]

                inputs_outputs_order = (input_names, output_names)

                return graph_def, variables_values, 'tf2', inputs_outputs_order
            except:
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [
                    tf_v1.saved_model.tag_constants.SERVING
                ]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(
                        sess, tags, model_dir)
                    outputs = get_output_node_names_list(
                        meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(
                        sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values, 'tf', None
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
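
The SavedModel branch above hinges on convert_variables_to_constants_v2; a minimal standalone sketch of just that freezing step (TF 2.x; the model path is hypothetical):

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import \
    convert_variables_to_constants_v2

imported = tf.saved_model.load('path/to/saved_model')  # hypothetical path
concrete_func = imported.signatures[
    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
frozen_func = convert_variables_to_constants_v2(concrete_func,
                                                lower_control_flow=False)
graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
print(len(graph_def.node), 'nodes in the frozen GraphDef')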
Example #9
freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                          input_binary, checkpoint_path, output_node_names,
                          restore_op_name, filename_tensor_name,
                          output_frozen_graph_name, clear_devices, "")

# Removing the parts of the graph that are only needed during training.
input_graph_def = tf.GraphDef()

with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
    data = f.read()
    input_graph_def.ParseFromString(data)

output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        ["I"],  # an array of the input node(s)
Example #10
def helmet_detector():
    # Name of the directory containing the object detection module we're using
    MODEL_NAME = 'inference_graph'
    IMAGE_NAME = 'validate'

    # Grab path to current working directory
    CWD_PATH = os.getcwd()

    # Path to frozen detection graph .pb file, which contains the model that is used
    # for object detection.
    PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'helmet_frozen_inference_graph.pb')

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'training', 'helmet-detection.pbtxt')

    # Path to image
    PATH_TO_IMAGE = os.path.join(CWD_PATH, IMAGE_NAME)

    # Number of classes the object detector can identify
    NUM_CLASSES = 2

    # Load the label map.
    # Label maps map indices to category names
    # Here we use internal utility functions, but anything that returns a
    # dictionary mapping integers to appropriate string labels would be fine
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Load the Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    # Define input and output tensors (i.e. data) for the object detection classifier

    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Load image using OpenCV and
    # expand image dimensions to have shape: [1, None, None, 3],
    # i.e. a batch containing a single image
    PATH_TO_TEST_IMAGES_DIR = os.path.join(CWD_PATH, IMAGE_NAME)
    TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'validate_image{}.jpg'.format(i)) for i in range(0, 10)]

    for image_path in TEST_IMAGE_PATHS:
        image = cv2.imread(image_path)
        #image = cv2.resize(image, (720, 480))
        image_expanded = np.expand_dims(image, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: image_expanded})

        # Draw the results of the detection (aka 'visualize the results')
        img = image
        vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=4,
            min_score_thresh=0.90)

        # All the results have been drawn on image. Now display the image.
        cv2.imshow('Riders detection', image)
        cv2.waitKey(0)

        coordinates = coord.return_coordinates(img,
                                               np.squeeze(boxes),
                                               np.squeeze(classes).astype(np.int32),
                                               np.squeeze(scores),
                                               category_index,
                                               use_normalized_coordinates=True,
                                               line_thickness=1,
                                               min_score_thresh=0.90)

        image_classes_list = []
        for index, value in enumerate(classes[0]):
            if scores[0, index] >= 0.90:
                image_classes_list.append(category_index.get(value)['name'])

        i = 0
        cropped_img_lists = []
        no_helmet_list = []
        for coordinate in coordinates:
            (y1, y2, x1, x2, acc) = coordinate
            height = y2 - y1
            width = x2 - x1
            crop = img[y1:y1 + height, x1:x1 + width]
            cropped_img_lists.append(crop)
            if image_classes_list[i] == 'No Helmet':
                no_helmet_list.append(cropped_img_lists[i])
            i = i + 1

        if len(no_helmet_list) == 0:
            print("Riders are wearing helmets")
        else:
            for helmet in no_helmet_list:
                print("Detecting licence plate...")
                plate.plate_recog(helmet)

        '''if len(no_helmet_list) == 1:
            print("Rider is without helmet")
            print("Detecting licence plate...")
            #plate.plate_recognition(no_helmet_list[0], image_list)
            plate.plate_recog(no_helmet_list[0])
        elif len(no_helmet_list) > 1:
            print("Riders are without helmet")
            print("Detecting licence plate...")
            
            im_h_resize = hconcat_resize_min(no_helmet_list)
            #plate.plate_recognition(im_h_resize, image_list)
            plate.plate_recog(im_h_resize)
        else:
            print("Riders are with helmet")'''

        # Press any key to close the image
        cv2.waitKey(0)

        # Clean up
        cv2.destroyAllWindows()
Example #11
def run_inference_on_image(image_path):
    # Path to the image file to run inference on
    # image_path = './kia3.jpg'
    # image_path = './model_360.png'

    # Path to the labels file to read
    labels_txt_file_path = './output_labels.txt'
    answer = None

    # If no image file exists at the given path, log a fatal error.
    if not tf.gfile.Exists(image_path):
        tf.logging.fatal('Image file for inference does not exist: %s', image_path)
        return answer

    # Read the image file.
    image_data = tf.gfile.FastGFile(image_path, 'rb').read()
    # Create the graph.
    graph_pb_file_path = './output_graph.pb'
    with tf.gfile.FastGFile(graph_pb_file_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Open a session and run the graph.
    print(1)
    with tf.Session() as sess:
        # Get the final softmax output layer.
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        # Feed the image as input and get back the softmax predictions.
        predictions = sess.run(softmax_tensor,
                               feed_dict={'DecodeJpeg/contents:0': image_data})
        # Remove unnecessary dimensions.
        predictions = np.squeeze(predictions)
        print(2)
        # Get the indices of the 5 predictions with the highest probabilities (top 5).
        # e.g. [0 3 2 4 1]
        top_k = predictions.argsort()[-5:][::-1]

        # Read the answer labels from output_labels.txt into a list.
        with open(labels_txt_file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        labels = [str(w).replace("\n", "") for w in lines]

        # Print the inference results starting from the highest-probability
        # indices (the loop below keeps only the top 3).
        print("Top-5 inference results:")
        top_3 = []
        percent = []

        idx = 0
        for node_id in top_k:
            idx += 1
            label_name = labels[node_id]
            probability = predictions[node_id]
            print('%s (probability = %.5f)' % (label_name, probability))
            top_3.append(label_name)
            percent.append(probability)
            if idx == 3:
                break

        # Print the Top-1 inference result (highest probability).
        print("\nTop-1 inference result:")
        answer = labels[top_k[0]]
        probability = predictions[top_k[0]]
        print('%s (probability = %.5f)' % (answer, probability))

    return top_3, percent
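
Assumed usage of the function above ('./kia3.jpg' is the path already mentioned in the commented-out lines):

if __name__ == '__main__':
    top_3, percent = run_inference_on_image('./kia3.jpg')
    for label, prob in zip(top_3, percent):
        print('%s: %.5f' % (label, prob))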
Example #12
def main(path_to_model, graph_pb_type="tf"):
    if graph_pb_type != "tf":
        raise KeyError("The type {} is not supported".format(graph_pb_type))
    list_of_links = []
    # Used to store input nodes that feed the graph and are non-dummy nodes.
    node_set = set()

    # Simultaneously creating nx graph to get absolute node positions to space
    # them appropriately.
    nxGraph = nx.Graph()
    dummy_nodes_counter = 0

    with tf.gfile.GFile(path_to_model, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    not_in_graph_node_to_dummy_map = {}
    with tf.Session(graph=tf.Graph()) as _:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()

        [graph_inputs, graph_outputs] = analyze_inputs_outputs(g)
        graph_inputs = [x.name for x in graph_inputs]
        graph_outputs = [x.name for x in graph_outputs]

        for node in g.as_graph_def().node:
            op = g.get_operation_by_name(node.name)

            for input_node_obj in op.inputs:
                [name, port] = get_node_name_and_port(input_node_obj.name)
                try:
                    g.get_operation_by_name(name)
                    source = name
                except Exception:
                    if name not in not_in_graph_node_to_dummy_map.keys():
                        source = "dummy" + str(dummy_nodes_counter)
                        not_in_graph_node_to_dummy_map[name] = source
                        dummy_nodes_counter += 1
                    else:
                        # do not update counter if node name is cached
                        source = not_in_graph_node_to_dummy_map[name]

                target = node.name  # op.name and node.name should be same ideally

                linkname = source + ":" + str(port)

                node_set.add(target)
                node_set.add(source)
                list_of_links.append((source, target, linkname))

            for ctrl_input_node_obj in op.control_inputs:
                try:
                    g.get_operation_by_name(ctrl_input_node_obj.name)
                except Exception:
                    print(
                        "Could not find control input {0} in the graph".format(
                            ctrl_input_node_obj.name))
                node_set.add(ctrl_input_node_obj.name)
                list_of_links.append(
                    (ctrl_input_node_obj.name, node.name, "ctrl_input"))

            for output_node_obj in op.outputs:
                [output_edge_name,
                 port] = get_node_name_and_port(output_node_obj.name)
                try:
                    g.get_operation_by_name(output_edge_name)
                except Exception:
                    source = node.name
                    target = "Dummy" + dummy_nodes_counter
                    dummy_nodes_counter += 1
                    linkname = source + ":" + str(port)

                    list_of_links.append((source, target, linkname))
                    node_set.add(source)
                    node_set.add(target)

        # list provides a better layout than set
        list_of_nodes = list(node_set)

        for node in list_of_nodes:
            nxGraph.add_node(node)

        for link in list_of_links:
            nxGraph.add_edge(link[0], link[1])

        positions_dict = nx.spectral_layout(nxGraph)
        # get position from spectral layout which is faster than others.

        graph = {}
        graph["nodes"] = []
        graph["links"] = []
        graph["directories"] = ["Graph"]
        graph["workload"] = []
        graph["op_type"] = []
        graph["all_opu_type_names"] = []

        opu_type_set = set(["Dummy"])

        for index, node in enumerate(g.as_graph_def().node):
            group = -1
            hover_text = ""
            error_value = 0
            opu_type_name = g.get_operation_by_name(node.name).type
            opu_type_set.add(opu_type_name)

            graph["nodes"].append({
                "name": node.name,
                "node_info": opu_type_name,
                "group": group + 1,
                "hover_text": hover_text,
                "error": error_value
            })
            # Take position from the layout algorithm.
            graph["nodes"][index]["x"] = positions_dict[node.name][0]
            graph["nodes"][index]["y"] = positions_dict[node.name][1]

        graph["all_opu_type_names"] = list(opu_type_set)

        for link in list_of_links:
            port = int(get_node_name_and_port(
                link[2])[-1]) if not link[2].startswith("ctrl_input") else -1
            hover_text = ""
            if not link[0].startswith("Dummy") and port != -1:
                related_tensor = g.get_operation_by_name(link[0]).outputs[port]

                hover_text = "Shape:" + extract_shape(
                    related_tensor) + "<br> Dtype:" + related_tensor.dtype.name
            graph["links"].append({
                "source": link[0],
                "target": link[1],
                "linkname": link[0] + ":" + str(port),
                "hover_info": hover_text,
                "edge_hist_data": [],
                "port": port
            })

        graph["input_nodes"] = []
        for node in graph_inputs:
            graph["input_nodes"].append({"name": node})
        return graph
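
The returned dict is plain data meant for a visualizer front-end, so persisting it is straightforward (the model path and output file name are assumptions; default=float converts the numpy scalars produced by the layout step):

import json

graph = main('path/to/model.pb')  # hypothetical model path
with open('graph.json', 'w') as f:
    json.dump(graph, f, indent=2, default=float)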
Example #13
def create_graph():
    # Create a graph from the saved graph_def.pb.
    with tf.gfile.FastGFile(modelFullPath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
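
Typical usage, assuming modelFullPath points at an Inception-style retrained graph with a 'final_result' softmax (the tensor name is an assumption, as in Example #11):

create_graph()
with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    print(softmax_tensor.shape)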
Example #14
from tensorflow.python.platform import gfile
from tensorflow import keras
import tensorflow.compat.v1 as tf
import tensorflow

mnist = keras.datasets.mnist
# global_var=tf.global_variables_initializer()
sess = tf.Session()
# f=tf.gfile.GFile('mymodel.pb', 'rb')
# graph_def=tf.GraphDef()
# graph_def.ParseFromString(f.read())
# sess.graph.as_default()
# tf.import_graph_def(graph_def, name='acc/Mean')
with gfile.FastGFile('mymodel.pb', 'rb') as f:
    print(type(f))
    graph_def = tf.GraphDef()
    tmpread = f.read()
    graph_def.ParseFromString(tmpread)
    # print(tmpread)
    sess.graph.as_default()
    x = tf.import_graph_def(graph_def, return_elements=['acc/Mean'], name='')

# global_var=tf.global_variables_initializer()
# print(type(global_var))
# sess.run(global_var)
# sess.run(tf.global_variables_initializer())

# AttributeError: 'NoneType' object has no attribute 'run'
(mnist_x, mnist_y), (_, _) = mnist.load_data()
mnist_x = mnist_x.reshape(mnist_x.shape[0], 784)
mnist_y = keras.utils.to_categorical(mnist_y, 10)
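
# A hedged sketch of evaluating the imported op on the batch prepared above:
# 'acc/Mean' was requested without ':0', so import_graph_def returned an
# Operation; the placeholder names 'x:0' and 'y:0' are assumptions about
# what mymodel.pb actually calls its inputs.
acc_tensor = x[0].outputs[0]
acc = sess.run(acc_tensor,
               feed_dict={'x:0': mnist_x[:256], 'y:0': mnist_y[:256]})
print('accuracy:', acc)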
Example #15
    def __init__(self):

        # Set up camera constants
        self.IM_WIDTH = 1280
        self.IM_HEIGHT = 720

        #### Initialize TensorFlow model ####

        # Grab path to current working directory
        CWD_PATH = os.getcwd()

        # Path to frozen detection graph .pb file, which contains the model that is used
        # for object detection.
        self.PATH_TO_CKPT = os.path.join(CWD_PATH, 'model', 'saved_model.pb')
        print("Path to model: {}".format(self.PATH_TO_CKPT))

        # Path to label map file
        self.PATH_TO_LABELS = os.path.join(CWD_PATH, 'model')
        print("Path to labels: {}".format(self.PATH_TO_LABELS))

        # Number of classes the object detector can identify
        self.NUM_CLASSES = 90

        ## Load the label map.
        # Label maps map indices to category names, so that when the convolution
        # network predicts `5`, we know that this corresponds to `airplane`.
        # Here we use internal utility functions, but anything that returns a
        # dictionary mapping integers to appropriate string labels would be fine
        self.label_map = label_map_util.load_labelmap(
            '/Users/leonsick/Desktop/Developer/Dojo/Image-Capturing-Program/acceleration/model/label_map.pbtxt'
        )
        self.categories = label_map_util.convert_label_map_to_categories(
            self.label_map,
            max_num_classes=self.NUM_CLASSES,
            use_display_name=True)
        self.category_index = label_map_util.create_category_index(
            self.categories)
        print(self.categories)
        # Load the Tensorflow model into memory.

        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(
                    '/Users/leonsick/Desktop/Developer/Dojo/Image-Capturing-Program/acceleration/model/frozen_inference_graph.pb',
                    'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

            self.sess = tf.Session(graph=self.detection_graph)

        #self.detection_graph = tf.Graph()
        #self.sess = tf.Session(graph=self.detection_graph)
        #tf.saved_model.loader.load(self.sess, [tf.saved_model.tag_constants.SERVING],
        #'/Users/leonsick/Desktop/Developer/Dojo/Image-Capturing-Program/acceleration/model')

        # Define input and output tensors (i.e. data) for the object detection classifier

        # Input tensor is the image
        self.image_tensor = self.detection_graph.get_tensor_by_name(
            'image_tensor:0')

        # Output tensors are the detection boxes, scores, and classes
        # Each box represents a part of the image where a particular object was detected
        self.detection_boxes = self.detection_graph.get_tensor_by_name(
            'detection_boxes:0')

        # Each score represents level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name(
            'detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name(
            'detection_classes:0')

        # Number of objects detected
        self.num_detections = self.detection_graph.get_tensor_by_name(
            'num_detections:0')

        #### Initialize other parameters ####

        # Initialize frame rate calculation
        self.frame_rate_calc = 1
        self.freq = cv2.getTickFrequency()
        self.font = cv2.FONT_HERSHEY_SIMPLEX
Example #16
def get_inception_score_coco(images, splits=10):
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):

        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' %
                             (filename, float(count * block_size) /
                              float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(
            os.path.join(MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
        print([n.name
               for n in tf.get_default_graph().as_graph_def().node][:10])
        # exit(0)
    # Works with an arbitrary minibatch size.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o.set_shape(tf.TensorShape(new_shape))
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
        softmax = tf.nn.softmax(logits)
    assert (type(images) == list)
    assert (type(images[0]) == np.ndarray)
    assert (len(images[0].shape) == 3)
    assert (np.max(images[0]) > 10)
    assert (np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 1
    with tf.Session() as sess:
        preds = []
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in range(n_batches):
            sys.stdout.write(".")
            sys.stdout.flush()
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            pred = sess.run(softmax, feed_dict={"ExpandDims:0": inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        for i in range(splits):
            part = preds[(i * preds.shape[0] //
                          splits):((i + 1) * preds.shape[0] // splits), :]
            kl = part * (np.log(part) -
                         np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        print('mean:', "%.2f" % np.mean(scores), 'std:',
              "%.2f" % np.std(scores))
Example #17
def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpoint: str = "",
                      model_dir: str = "", saved_model_tags: list = [], meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$', graph_file_name)):
        print('[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
              'extension.\n'
              'It means that the model is not frozen.\n'
              'To load non frozen model to Model Optimizer run:'
              '\n\n1. For "*.ckpt" file:'
              '\n- if inference graph is in binary format'
              '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
              '\n- if inference graph is in text format'
              '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
              '--input_checkpoint "path/to/*.ckpt"'
              '\n\n2. For "*.meta" file:'
              '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(graph_def, graph_file_name, is_binary), variables_values
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name, is_binary)
            outputs = get_output_node_names_list(graph_def, user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(graph_def=graph_def, checkpoint_dir=checkpoint,
                                                                 output_node_names=outputs)
            # we are sure that checkpoint is an existing file or directory due to cli_parser configuration
            return graph_def, variables_values
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(input_meta_graph_def.graph_def, user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def,
                                                                            outputs)
                return graph_def, variables_values
        if model_dir:
            # saved model directory
            try:
                # code to extract GraphDef for TF 2.0 SavedModel format
                # tf.saved_model.load function throws TypeError for TF 1.x SavedModel format in case TF 1.x installed
                imported = tf.saved_model.load(model_dir, saved_model_tags) # pylint: disable=E1120
                # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed
                concrete_func = imported.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                frozen_func = convert_variables_to_constants_v2(concrete_func, lower_control_flow=False) # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution to dump a graph for tensorboard
                tf_v1.disable_eager_execution()                
                return graph_def, variables_values
            except (TypeError, KeyError):
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [tf_v1.saved_model.tag_constants.SERVING]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(sess, tags, model_dir)
                    outputs = get_output_node_names_list(meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values
            except Exception as e:
                raise FrameworkError('SavedModel format load failure: {}', e) from e
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
Example #18
    def __init__(self, pb_path, params_path):
        """
            Loads the model from a file.
            Arguments:
                pb_path - Path to the serialized .pb file
                params_path - Path to the .pkl meta-parameter file
        """

        # Import Tensorflow for the first time.
        tf_guard()
        tf = get_tf()
        from tensorflow.python.platform import gfile
        from tensorflow.python.client import device_lib

        # Start a new graph. This will not necessarily protect us against the
        # new graph having tensors of the same name. Use IsolatedInferrer for
        # this.
        with tf.Graph().as_default():
            
            # Unpack the network parameters
            with open(params_path, 'rb') as f:
                try: 
                    self.params = pickle.load(f)
                except UnicodeDecodeError:
                    # Python2-pickled file
                    self.params = pickle.load(f, encoding="latin1")

            # Load the frozen graph
            with gfile.FastGFile(pb_path,'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())

            # Query the available GPUs
            local_device_protos = device_lib.list_local_devices()
            gpuNames = [x.name for x in local_device_protos if x.device_type == 'GPU']
            if len(gpuNames) < 1:
                raise OSError("Failed to find any GPUs!")

            # Initialize on each gpu
            xList = []
            probList = []
            for gpuName in gpuNames:
                with tf.device(gpuName):
                    # note: str.strip removes a set of characters, not a prefix; split is safer
                    device_name, device_num = gpuName.split(':')[-2:]
                    with tf.name_scope(device_name + device_num) as scope:

                        # Initialize the graph, extract the input and output nodes
                        X, prob = tf.import_graph_def(
                            graph_def,
                            name='',
                            return_elements=['X:0', 'prob:0']
                        )

                xList.append(X)
                probList.append(prob)
            self.xList = xList

            # Create a tensor for the full (batch) output
            self.prob = tf.concat(probList, axis=0)

            # Disable Tensorflow auto-tuning, which is ridiculously slow
            os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'

            # Configure Tensorflow to only use as much GPU memory as it actually needs
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True

            # Spawn a session
            self.session = tf.Session(config=config)
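A usage sketch for the class above; the class name MultiGpuModel is an assumption, as is the input shape. The batch is split into one chunk per GPU and fed through the per-device input tensors, while self.prob concatenates the per-device outputs back into a single batch:

import numpy as np

model = MultiGpuModel('network.pb', 'params.pkl')  # hypothetical paths

batch = np.random.rand(8, 224, 224, 3).astype(np.float32)  # assumed input shape
chunks = np.array_split(batch, len(model.xList))

# one chunk per GPU; model.prob is the concatenation of the per-device outputs
probs = model.session.run(model.prob, feed_dict=dict(zip(model.xList, chunks)))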
Example #19
def ml_process(request):
    # Name of the directory containing the object detection module we're using
    MODEL_NAME = 'inference_graph'
    IMAGE_NAME = request.session['name']

    # Grab path to current working directory
    CWD_PATH = os.getcwd()

    # Path to frozen detection graph .pb file, which contains the model that is used
    # for object detection.
    PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME,
                                'frozen_inference_graph.pb')

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'training', 'labelmap.pbtxt')

    # Path to image
    PATH_TO_IMAGE = os.path.join(CWD_PATH, 'media', IMAGE_NAME)

    # Path to write
    PATH_TO_WRITE = os.path.join(CWD_PATH,
                                 'apps/webframe/static/webframe/images',
                                 "ml" + IMAGE_NAME)
    print("Writing result image to: " + PATH_TO_WRITE)

    # Number of classes the object detector can identify
    NUM_CLASSES = 3

    # Load the label map.
    # Label maps map indices to category names, so that when our convolution
    # network predicts `5`, we know that this corresponds to `king`.
    # Here we use internal utility functions, but anything that returns a
    # dictionary mapping integers to appropriate string labels would be fine
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Load the Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    # Define input and output tensors (i.e. data) for the object detection classifier

    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')

    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Load image using OpenCV and
    # expand image dimensions to have shape: [1, None, None, 3]
    # i.e. a single-column array, where each item in the column has the pixel RGB value
    image = cv2.imread(PATH_TO_IMAGE)
    image_expanded = np.expand_dims(image, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_expanded})

    # Draw the results of the detection (i.e. visualize the results)

    vis_util.visualize_boxes_and_labels_on_image_array(
        image,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.60)

    # All the results have been drawn on image. Now display the image.
    # cv2.imshow('Object detector', image)
    cv2.imwrite(PATH_TO_WRITE, image)

    return redirect('/ml_image')
Example #20
def load_graph(graph_filename):
    with tf.gfile.GFile(graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    return graph_def
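load_graph returns a bare GraphDef; a typical consumption sketch imports it into a fresh graph and runs a session against it. The file path, tensor names, and input shape below are placeholders:

import numpy as np
import tensorflow as tf

graph_def = load_graph('frozen_model.pb')  # hypothetical path

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

data = np.zeros((1, 224, 224, 3), dtype=np.float32)  # dummy input, shape assumed
with tf.Session(graph=graph) as sess:
    out = sess.run('output:0', feed_dict={'input:0': data})  # node names assumed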
Example #21
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = None,
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load the model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value of the --input_model command line parameter ends with the ".ckpt" or '
            '".meta" extension.\n'
            'This means that the model is not frozen.\n'
            'To load a non-frozen model into the Model Optimizer, run:'
            '\n\n1. For a "*.ckpt" file:'
            '\n- if the inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if the inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For a "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(graph_def, graph_file_name,
                                          is_binary), variables_values
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # we are sure that checkpoint is an existing file or directory due to the cli_parser configuration
            return graph_def, variables_values
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values
        if model_dir:
            # saved model directory
            tags = saved_model_tags if saved_model_tags is not None else [
                tf_v1.saved_model.tag_constants.SERVING
            ]
            with tf_v1.Session() as sess:
                meta_graph_def = tf_v1.saved_model.loader.load(
                    sess, tags, model_dir)
                outputs = get_output_node_names_list(
                    meta_graph_def.graph_def, user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, meta_graph_def.graph_def, outputs)
                return graph_def, variables_values
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
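Invocation sketch for the three supported configurations; all paths are placeholders:

# frozen protobuf only
graph_def, variables = load_tf_graph_def(graph_file_name='frozen.pb', is_binary=True)

# inference graph in text format plus a checkpoint file
graph_def, variables = load_tf_graph_def(graph_file_name='graph.pbtxt', is_binary=False,
                                         checkpoint='model.ckpt')

# TF 1.x SavedModel directory
graph_def, variables = load_tf_graph_def(model_dir='saved_model_dir')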
Example #22
    def load_model(self, model, input_map=None):

        with gfile.FastGFile(model, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, input_map=input_map, name='')
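The input_map argument is the interesting part of this example: it rewires named tensors in the frozen graph onto new tensors at import time. A minimal sketch, assuming the graph has an input tensor named 'input:0' and an input shape of 160x160x3 (both assumptions):

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.FastGFile('model.pb', 'rb') as f:  # hypothetical path
    graph_def.ParseFromString(f.read())

# a fresh placeholder replaces the graph's original 'input:0' tensor at import time
new_input = tf.placeholder(tf.float32, shape=(None, 160, 160, 3), name='new_input')
tf.import_graph_def(graph_def, input_map={'input:0': new_input}, name='')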
Example #23
def load_def(pb_path):
    graph_def = tf.GraphDef()
    with open(pb_path, "rb") as f:
        graph_def.ParseFromString(f.read())
        return graph_def
COUNT = 0
IM_W = 1280
IM_H = 720
SCORE_THRESH = 0.5  # - lower threshold of prediction score
MAX_BBX = 5  # - max. number of bounding boxes to be drawn
MODEL = 'pdp_v2'
PATH_TO_CKPT = MODEL + '/frozen_inference_graph.pb'

# ob-det.pbtxt contains label for 'person'.
label_path = os.path.join('training', 'ob-det.pbtxt')

# - Load model
detection_graph = TF.Graph()
with detection_graph.as_default():
    od_graph_def = TF.GraphDef()
    with TF.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        TF.import_graph_def(od_graph_def, name='')
print('[info]: Loading TensorFlow model')

# When our model predicts the value '1', then we know that this is a 'person'.
label_map = label_map_util.load_labelmap(label_path)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=1, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# - Start pedestrian detection session
with detection_graph.as_default():
    with TF.Session(graph=detection_graph) as sess:
        pass  # the per-frame detection loop is truncated in the original snippet; see the sketch below
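A typical body for the truncated session block above, sketched under the standard TF Object Detection API tensor names and an assumed OpenCV capture loop:

with detection_graph.as_default():
    with TF.Session(graph=detection_graph) as sess:
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes_t = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores_t = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_t = detection_graph.get_tensor_by_name('detection_classes:0')
        num_t = detection_graph.get_tensor_by_name('num_detections:0')

        cap = cv2.VideoCapture(0)  # assumed video source
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            boxes, scores, classes, num = sess.run(
                [boxes_t, scores_t, classes_t, num_t],
                feed_dict={image_tensor: np.expand_dims(frame, axis=0)})
            # SCORE_THRESH and MAX_BBX defined above would filter and draw boxes here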
def init_graph(model_name=FLAGS.model_name):
    with open(model_name, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
def detect_with_MTCNN(origin_images_dir, out_dir, pb_path, mode="no_depth"):
    print("MTCNN detect")

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    minsize = 20  # minimum face size in pixels
    threshold = [0.5, 0.6, 0.6]  # per-stage thresholds for the three cascade stages
    factor = 0.709  # image pyramid scale factor

    with tf.Graph().as_default():
        graph_def = tf.GraphDef()
        graph_file = pb_path
        with open(graph_file, "rb") as f:
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name="")

        sess = tf.Session()
        with sess.as_default():
            tf.global_variables_initializer().run()
            pnet, rnet, onet = create_mtcnn_pb(sess)

    # find files
    import glob

    if mode == "depth":
        files = glob.glob(osp.join(origin_images_dir, "*.jpg"))
        files.extend(glob.glob(osp.join(origin_images_dir, "*.JPG")))
        dep_files = glob.glob(osp.join(origin_images_dir, "*.png"))
        dep_files.extend(glob.glob(osp.join(origin_images_dir, "*.PNG")))
        files.sort()
        dep_files.sort()
    else:
        files = glob.glob(osp.join(origin_images_dir, "*.jpg"))
        files.extend(glob.glob(osp.join(origin_images_dir, "*.png")))
        files.extend(glob.glob(osp.join(origin_images_dir, "*.JPG")))
        files.extend(glob.glob(osp.join(origin_images_dir, "*.PNG")))
        files.sort()
    print("=========================")
    # print("img:", files)
    # print("Dep:", dep_files)

    # detect face bbox
    count = 0
    names_list = []
    dep_name_list = []
    for index in range(0, len(files)):
        img = cv2.imread(files[index])
        bounding_boxes, points = detect_face(
            img, minsize, pnet, rnet, onet, threshold, factor
        )  # bounding_boxes.shape: (n, 5) points.shape: (10, n)
        if len(bounding_boxes) == 1:
            points = np.transpose(points)
            batch_imgs = img
            batch_bboxes = bounding_boxes
            batch_points = points
            batch_names = files[index].split("/")[-1]
            names_list.append(batch_names)

            scio.savemat(
                os.path.join(out_dir, batch_names[:-4] + ".mat"),
                {
                    "batch_bboxes": batch_bboxes.astype(np.float64),
                    "batch_points": batch_points.astype(np.float64),
                },
            )
            if mode == "depth":
                dep_name_list.append(dep_files[index].split("/")[-1])

        elif len(bounding_boxes) > 1:
            print("too much face to detect by MTCNN, only select first person")
            points = np.transpose(points[:, 0:1])
            batch_imgs = img
            batch_bboxes = bounding_boxes[0:1, :]
            batch_points = points
            batch_names = files[index].split("/")[-1]
            names_list.append(batch_names)

            scio.savemat(
                os.path.join(out_dir, batch_names[:-4] + ".mat"),
                {
                    "batch_bboxes": batch_bboxes.astype(np.float64),
                    "batch_points": batch_points.astype(np.float64),
                },
            )
            if mode == "depth":
                dep_name_list.append(dep_files[index].split("/")[-1])

        else:
            print("no face to detect by MTCNN, please input single person photo")
            pass
            # raise Exception("no face or much face to detect, please input single person photo")

        count = count + 1
        if (count % 100 == 0) or (count == len(files)):
            print("MTCNN has processed: " + str(count) + " / " + str(len(files)))
    sess.close()

    if mode == "depth":
        return names_list, dep_name_list
    else:
        return names_list
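Usage sketch for detect_with_MTCNN; the directory and model paths are placeholders:

names = detect_with_MTCNN('photos', 'mtcnn_out', 'mtcnn.pb')
# with aligned depth maps (*.png) alongside the RGB images (*.jpg):
names, depth_names = detect_with_MTCNN('rgbd_photos', 'mtcnn_out', 'mtcnn.pb', mode='depth')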
Example #27
def load_graph(model_name):
    """Load graph"""
    with tf.io.gfile.GFile(model_name, mode='rb') as model:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(model.read())
        tf.import_graph_def(graph_def, name='')
Example #28
    def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
        self.target_size = target_size

        # load graph
        logger.info('loading graph from %s (default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

        # for op in self.graph.get_operations():
        #     print(op.name)
        # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
        #     print(ts)

        self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        self.upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(self.tensor_output[:, :, :, :19], self.upsample_size,
                                                      align_corners=False, name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(self.tensor_output[:, :, :, 19:], self.upsample_size,
                                                     align_corners=False, name='upsample_pafmat')
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
        gaussian_heatMat = smoother.get_output()

        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3), pooling_type='MAX', padding='SAME')
        self.tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
                                     tf.zeros_like(gaussian_heatMat))

        self.heatMat = self.pafMat = None

        # warm-up
        self.persistent_sess.run(tf.variables_initializer(
            [v for v in tf.global_variables() if
             v.name.split(':')[0] in [x.decode('utf-8') for x in
                                      self.persistent_sess.run(tf.report_uninitialized_variables())]
             ])
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1], target_size[0]]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
            }
        )
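The peak extraction above (tensor_peaks) uses a classic trick for non-maximum suppression on heatmaps: a pixel is a local maximum exactly when it equals its own 3x3 max-pooled neighborhood. A minimal standalone sketch of the same idea, with the heatmap shape assumed:

import numpy as np
import tensorflow as tf

heat = tf.placeholder(tf.float32, shape=(1, 46, 46, 19))  # assumed heatmap shape
pooled = tf.nn.pool(heat, window_shape=(3, 3), pooling_type='MAX', padding='SAME')
# keep only pixels equal to their neighborhood maximum; zero out the rest
peaks = tf.where(tf.equal(heat, pooled), heat, tf.zeros_like(heat))

with tf.Session() as sess:
    out = sess.run(peaks, feed_dict={heat: np.random.rand(1, 46, 46, 19).astype(np.float32)})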
Example #29
def main(args=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MkDir(FLAGS.output_dir)

    with tf.Session() as sess:
        if FLAGS.input_model.rsplit('.', 1)[-1] == 'ckpt':
            style_img_ph = tf.placeholder(tf.float32,
                                          shape=[None, 256, 256, 3],
                                          name='style_input')
            content_img_ph = tf.placeholder(tf.float32,
                                            shape=[None, 256, 256, 3],
                                            name='content_input')
            # import meta_graph
            meta_data_path = FLAGS.input_model + '.meta'
            saver = tf.train.import_meta_graph(meta_data_path,
                                               clear_devices=True)

            sess.run(tf.global_variables_initializer())
            saver.restore(sess, FLAGS.input_model)
            graph_def = sess.graph.as_graph_def()

            replace_style = 'style_image_processing/ResizeBilinear_2'
            replace_content = 'batch_processing/batch'
            for node in graph_def.node:
                for idx, input_name in enumerate(node.input):
                    # replace the style and content input nodes with the new placeholders
                    if replace_content == input_name:
                        node.input[idx] = 'content_input'
                    if replace_style == input_name:
                        node.input[idx] = 'style_input'

            if FLAGS.tune:
                _parse_ckpt_bn_input(graph_def)
            output_name = 'transformer/expand/conv3/conv/Sigmoid'
            frozen_graph = tf.graph_util.convert_variables_to_constants(
                sess, graph_def, [output_name])
        # use frozen pb instead
        elif FLAGS.input_model.rsplit('.', 1)[-1] == 'pb':
            with open(FLAGS.input_model, 'rb') as f:
                frozen_graph = tf.GraphDef()
                frozen_graph.ParseFromString(f.read())
        else:
            print("unsupported model format")
            exit(-1)

        if FLAGS.tune:
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(frozen_graph, name='')
                quantizer = Quantization(FLAGS.config)
                quantized_model = quantizer(graph, eval_func=eval_func)

                # save the frozen model for deployment
                with tf.io.gfile.GFile(FLAGS.output_model, "wb") as f:
                    f.write(quantized_model.as_graph_def().SerializeToString())

                frozen_graph = quantized_model.as_graph_def()

    # validate the quantized model here
    with tf.Graph().as_default(), tf.Session() as sess:
        if FLAGS.tune:
            # create dataloader using default style_transfer dataset
            # generate stylized images
            dataset = DATASETS('tensorflow')['style_transfer']( \
                FLAGS.content_images_paths.strip(),
                FLAGS.style_images_paths.strip(),
                crop_ratio=0.2,
                resize_shape=(256, 256))
        else:
            dataset = DATASETS('tensorflow')['dummy']( \
                shape=[(200, 256, 256, 3), (200, 256, 256, 3)], label=True)
        dataloader = DataLoader('tensorflow', \
            dataset=dataset, batch_size=FLAGS.batch_size)
        tf.import_graph_def(frozen_graph, name='')
        style_transfer(sess, dataloader, FLAGS.precision)
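The node-rewiring loop above is a generic piece of GraphDef surgery; a standalone sketch of the same pattern as a reusable helper (the helper name is an assumption):

def remap_inputs(graph_def, mapping):
    """Point every node input listed in `mapping` at a replacement tensor name."""
    for node in graph_def.node:
        for idx, input_name in enumerate(node.input):
            if input_name in mapping:
                node.input[idx] = mapping[input_name]
    return graph_def

# hypothetical usage mirroring the replacement done in main() above
remap_inputs(frozen_graph, {'batch_processing/batch': 'content_input',
                            'style_image_processing/ResizeBilinear_2': 'style_input'})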
def lambda_handler(event, context):
    #Get the key from the lambda event that triggered the function
    imgkey = event.get("key1")

    #Get the s3 bucket name that is storing the trained model
    bucket_name = "S3_BUCKET_NAME"

    #Path in s3 to frozen detection graph. This is the actual model that is used for the object detection.
    PATH_TO_CKPT_obj = s3.Object(bucket_name, "PATH_TO_S3_WITH_MODEL_GRAPH")
    serialized_graph = PATH_TO_CKPT_obj.get()['Body'].read()

    #Get the proper labels for object detection
    categories = [{
        'id': 1,
        'name': 'ladder'
    }, {
        'id': 2,
        'name': 'rotating_parts'
    }, {
        'id': 3,
        'name': 'moving_equipment'
    }, {
        'id': 4,
        'name': 'chemicals'
    }, {
        'id': 5,
        'name': 'extension_cords'
    }, {
        'id': 6,
        'name': 'hot_surfaces'
    }]
    category_index = label_map_util.create_category_index(categories)

    #Create a cursor context manager for running SQL queries
    with conn.cursor() as cur:
        #Get the unclassified image data attached to the imageID that was sent via the mobile app
        cur.execute("SELECT uimg FROM image_unclass WHERE uimgID = %s", imgkey)
        unclass_img_hexdata = cur.fetchone()
        unclass_img_hexdata = unclass_img_hexdata[0]

        #Get the userid attached to the unclassified image
        cur.execute("SELECT user_id FROM user_image_unclass WHERE uimgID = %s",
                    imgkey)
        userid = cur.fetchone()
        userid = userid[0]

        conn.commit()
    conn.commit()

    #############################
    #### End of my code edit ####
    #############################

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            ##################################
            #### Beginning of my code edit ####
            ##################################

            #Convert the binary data retrieved from the database to a readable format and open the file with Pillow
            unclass_img_byte_stream = io.BytesIO(unclass_img_hexdata)
            image = Image.open(unclass_img_byte_stream)
            image.thumbnail((1280, 720))

            #############################
            #### End of my code edit ####
            #############################

            # The array-based representation of the image will be used later to
            # draw the boxes and labels on the result image.
            image_np = load_image_into_numpy_array(image)

            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=5)

            ##################################
            #### Beginning of my code edit ####
            ##################################

            #Convert numpy array back into an image
            new_img = Image.fromarray(image_np)
            new_img.thumbnail((1280, 720))

            #Convert the image to the required binary format for saving to the database
            classified_img_bytes = io.BytesIO()
            new_img.save(classified_img_bytes, format='jpeg')
            classified_img_hex_data = classified_img_bytes.getvalue()

            #Get the classes of the detected objects in the image so they can be stored in the database with the image
            classes_list = [
                category_index.get(value)
                for index, value in enumerate(classes[0])
                if scores[0, index] > 0.5
            ]
            value_of_classes_list = [
                class_dict['name'] for class_dict in classes_list
            ]

            if not value_of_classes_list:
                value_of_classes = 'None'
            else:
                value_of_classes_list = list(
                    dict.fromkeys(value_of_classes_list))
                value_of_classes = ', '.join(value_of_classes_list)

            #Create a cursor context manager for running SQL queries
            with conn.cursor() as cur:
                #Insert classified image and corresponding attributes into database
                cur.execute(
                    "INSERT INTO image (imgID, img, imgDate, hazards, imgTime) VALUES (%s,%s,%s,%s,%s)",
                    (imgkey, classified_img_hex_data,
                     datetime.datetime.now().date(), value_of_classes,
                     datetime.datetime.now().strftime("%H:%M:%S")))
                conn.commit()

                #Insert userid and imgid associated with classified image into database
                cur.execute(
                    "INSERT INTO user_image (user_id, imgID) VALUES (%s,%s)",
                    (userid, imgkey))
                conn.commit()
            conn.commit()
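A hypothetical local invocation of the handler; the event shape mirrors the "key1" lookup at the top of the function, and the id value is a placeholder:

if __name__ == '__main__':
    # "key1" carries the uimgID of the uploaded, not-yet-classified image
    lambda_handler({"key1": 42}, None)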