Example #1
    def run(self):
        models = {}
        for f in os.listdir(self.options.model_dir):
            path = os.path.join(self.options.model_dir, f)
            if os.path.isfile(path) and path.endswith(self.model_extension):
                model_name = ''.join(f.rsplit(self.model_extension, 1))  # remove the extension

                # The models dict is keyed by the extension-less name, so check
                # for duplicates against that, not the raw filename.
                if model_name in models:
                    print('Warning: duplicate model name of "' + model_name + '", ignoring second model.')
                    continue

                # Replace the model extension with '.csv' to locate the matching
                # labels file (reverse replace, in case the extension string
                # also appears earlier in the name).
                labels_file = '.csv'.join(f.rsplit(self.model_extension, 1))
                labels_path = os.path.join(self.options.model_dir, labels_file)

                if not os.path.isfile(labels_path):
                    labels_path = None
                models[model_name] = Resnet50Model(path, labels_path)

        # Tensorflow prints out a bunch of stuff, so print out useful data here after a space.
        print('')
        print('Running on port: ' + str(self.options.port))
        print('Loaded models:')
        for model_name in models:
            if models[model_name].labels is not None:
                labels_str = 'yes'
            else:
                labels_str = 'no'
            print('    ' + model_name + ' (loaded labels: ' + labels_str + ')')

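        # Requests arrive on the global REQUEST_QUEUE; list requests are
        # answered here, while compute requests are handed to run_model(),
        # which is expected to enqueue its own response.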
        while True:
            request = REQUEST_QUEUE.get()

            if isinstance(request, network_compute_bridge_pb2.ListAvailableModelsRequest):
                out_proto = network_compute_bridge_pb2.ListAvailableModelsResponse()
                for model_name in models:
                    out_proto.available_models.append(model_name)
                    # Uncomment to also report each model's available labels:
                    #if models[model_name].labels is not None:
                    #    labels_msg = out_proto.labels.add()
                    #    labels_msg.model_name = model_name
                    #    for n in models[model_name].labels:
                    #        labels_msg.available_labels.append(models[model_name].labels[n])
                RESPONSE_QUEUE.put(out_proto)
                continue
            else:
                out_proto = network_compute_bridge_pb2.NetworkComputeResponse()

            # Find the model
            if request.input_data.model_name not in models:
                print('Cannot find model "' + request.input_data.model_name + '" in loaded models.')
                RESPONSE_QUEUE.put(out_proto)
                continue

            # Got a request, run the model.
            self.run_model(request, models[request.input_data.model_name])
Example #2
def process_thread(args, request_queue, response_queue):
    # Load the model(s)
    models = {}
    for model in args.model:
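        # Each args.model entry is assumed to be a (model_path, labels_path) pair.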
        this_model = TensorFlowObjectDetectionModel(model[0], model[1])
        models[this_model.name] = this_model

    print('')
    print('Service ' + args.name + ' running on port: ' + str(args.port))

    print('Loaded models:')
    for model_name in models:
        print('    ' + model_name)

    while True:
        request = request_queue.get()

        if isinstance(request, network_compute_bridge_pb2.ListAvailableModelsRequest):
            out_proto = network_compute_bridge_pb2.ListAvailableModelsResponse()
            for model_name in models:
                out_proto.available_models.append(model_name)
            response_queue.put(out_proto)
            continue
        else:
            out_proto = network_compute_bridge_pb2.NetworkComputeResponse()

        # Find the model
        if request.input_data.model_name not in models:
            err_str = 'Cannot find model "' + request.input_data.model_name + '" in loaded models.'
            print(err_str)

            # Set the error in the header.
            out_proto.header.error.code = header_pb2.CommonError.CODE_INVALID_REQUEST
            out_proto.header.error.message = err_str
            response_queue.put(out_proto)
            continue

        model = models[request.input_data.model_name]

        # Unpack the incoming image.
        if request.input_data.image.format == image_pb2.Image.FORMAT_RAW:
            pil_image = Image.open(io.BytesIO(request.input_data.image.data))
            # cv2 and the model below operate on numpy arrays, not PIL images.
            image = np.asarray(pil_image)
            if request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_GREYSCALE_U8:
                # If the input image is grayscale, convert it to RGB.
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

            elif request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGB_U8:
                # Already an RGB image; nothing more to do.
                pass

            else:
                print('Error: image input in unsupported pixel format: ',
                      request.input_data.image.pixel_format)
                response_queue.put(out_proto)
                continue

        elif request.input_data.image.format == image_pb2.Image.FORMAT_JPEG:
            dtype = np.uint8
            jpg = np.frombuffer(request.input_data.image.data, dtype=dtype)
            image = cv2.imdecode(jpg, -1)

            if len(image.shape) < 3:
                # If the input image is grayscale, convert it to RGB.
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            else:
                # cv2.imdecode returns BGR; convert to RGB so both input
                # branches hand the model the same channel order.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # numpy shape is (rows, cols, channels), i.e. (height, width, ...).
        image_height = image.shape[0]
        image_width = image.shape[1]

        detections = model.predict(image)

        num_objects = 0

        # All outputs are batches of tensors.
        # Convert to numpy arrays, and take index [0] to remove the batch dimension.
        # We're only interested in the first num_detections.
        num_detections = int(detections.pop('num_detections'))
        detections = {
            key: value[0, :num_detections].numpy()
            for key, value in detections.items()
        }

        boxes = detections['detection_boxes']
        # Detection classes come back as floats; cast so they match the
        # integer keys in the category index.
        classes = detections['detection_classes'].astype(np.int64)
        scores = detections['detection_scores']

        for i in range(boxes.shape[0]):
            if scores[i] < request.input_data.min_confidence:
                continue

            box = tuple(boxes[i].tolist())

            # Boxes come in as normalized [ymin, xmin, ymax, xmax] coordinates.
            # Convert to pixel values.
            box = [
                box[0] * image_height, box[1] * image_width,
                box[2] * image_height, box[3] * image_width
            ]

            score = scores[i]

            if classes[i] in model.category_index:
                label = model.category_index[classes[i]]['name']
            else:
                label = 'N/A'

            num_objects += 1

            print('Found object with label: "' + label + '" and score: ' +
                  str(score))

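            # box is [ymin, xmin, ymax, xmax] in pixels; build the four polygon
            # corners clockwise from the top-left.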
            point1 = np.array([box[1], box[0]])
            point2 = np.array([box[3], box[0]])
            point3 = np.array([box[3], box[2]])
            point4 = np.array([box[1], box[2]])

            # Add data to the output proto.
            out_obj = out_proto.object_in_image.add()
            out_obj.name = "obj" + str(num_objects) + "_label_" + label

            vertex1 = out_obj.image_properties.coordinates.vertexes.add()
            vertex1.x = point1[0]
            vertex1.y = point1[1]

            vertex2 = out_obj.image_properties.coordinates.vertexes.add()
            vertex2.x = point2[0]
            vertex2.y = point2[1]

            vertex3 = out_obj.image_properties.coordinates.vertexes.add()
            vertex3.x = point3[0]
            vertex3.y = point3[1]

            vertex4 = out_obj.image_properties.coordinates.vertexes.add()
            vertex4.x = point4[0]
            vertex4.y = point4[1]

            # Pack the confidence value.
            confidence = wrappers_pb2.FloatValue(value=score)
            out_obj.additional_properties.Pack(confidence)
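            # additional_properties is a google.protobuf.Any; a client reads
            # the confidence back by unpacking it into a FloatValue.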

            if not args.no_debug:
                polygon = np.array([point1, point2, point3, point4], np.int32)
                polygon = polygon.reshape((-1, 1, 2))
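                # cv2.polylines expects point arrays shaped (N, 1, 2).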
                cv2.polylines(image, [polygon], True, (0, 255, 0), 2)

                caption = "{}: {:.3f}".format(label, score)
                left_x = min(point1[0], point2[0], point3[0], point4[0])
                top_y = min(point1[1], point2[1], point3[1], point4[1])
                cv2.putText(image, caption, (int(left_x), int(top_y)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        print('Found ' + str(num_objects) + ' object(s)')

        if not args.no_debug:
            debug_image_filename = 'network_compute_server_output.jpg'
            cv2.imwrite(debug_image_filename, image)
            print('Wrote debug image output to: "' + debug_image_filename +
                  '"')

        response_queue.put(out_proto)
Example #3
    def run_model(self, request, this_model):

        # Define the out proto
        out_proto = network_compute_bridge_pb2.NetworkComputeResponse()

        if request.input_data.image.format == image_pb2.Image.FORMAT_RAW:
            pil_image = Image.open(io.BytesIO(request.input_data.image.data))
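            # Rotating by 0 degrees is a no-op, but scipy's ndimage.rotate
            # returns a numpy array, which is what cv2 expects below.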
            pil_image = ndimage.rotate(pil_image, 0)
            if request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_GREYSCALE_U8:
                image = cv2.cvtColor(pil_image, cv2.COLOR_GRAY2RGB)  # Converted to RGB for Tensorflow
            elif request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGB_U8:
                # Already in the correct format
                image = pil_image
            else:
                print('Error: image input in unsupported pixel format: ', request.input_data.image.pixel_format)
                RESPONSE_QUEUE.put(out_proto)
                return
        elif request.input_data.image.format == image_pb2.Image.FORMAT_JPEG:
            dtype = np.uint8
            jpg = np.frombuffer(request.input_data.image.data, dtype=dtype)
            image = cv2.imdecode(jpg, -1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            if len(image.shape) < 3:
                # Single channel image, convert to RGB.
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)


        print('')
        print('Starting model eval...')
        # Run the model on the image
        keras_image = preprocess_image(image)
        (keras_image, scale) = resize_image(keras_image)
        keras_image = np.expand_dims(keras_image, axis=0)
        start = timer()
        boxes, scores, classes = this_model.predict(keras_image)
        end = timer()
        print("Model eval took " + str(end - start) + ' seconds')

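        # resize_image scaled the input, so divide to map the predicted boxes
        # back to the original image's pixel coordinates.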
        boxes /= scale

        # Package detections into the output proto
        num_objects = 0
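        # Model outputs are batched; take index [0] for the single input image.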
        boxes = boxes[0]
        scores = scores[0]
        classes = classes[0]

        # Only use the detection w/ the highest confidence for Sensor pointing features
        max_conf = max(scores)

        print(f"Max conf was {max_conf}")
        
        for i in range(len(boxes)):
            # Skip anything that is not the fire extinguisher class (id 7).
            if classes[i] != 7:
                continue

            label = this_model.labels[classes[i]]
            box = boxes[i]
            score = scores[i]

            # Keep only the single highest-confidence detection.
            if score < max_conf:
                continue

            num_objects += 1

            print('Found object with label: "' + label + '" and score: ' + str(score))

            print(f"Box is {box}")

            point1 = np.array([box[0], box[1]])
            point2 = np.array([box[0], box[3]])
            point3 = np.array([box[2], box[3]])
            point4 = np.array([box[2], box[1]])

            # Add data to the output proto.
            out_obj = out_proto.object_in_image.add()
            out_obj.name = label

            vertex1 = out_obj.image_properties.coordinates.vertexes.add()
            vertex1.x = point1[0]
            vertex1.y = point1[1]

            vertex2 = out_obj.image_properties.coordinates.vertexes.add()
            vertex2.x = point2[0]
            vertex2.y = point2[1]

            vertex3 = out_obj.image_properties.coordinates.vertexes.add()
            vertex3.x = point3[0]
            vertex3.y = point3[1]

            vertex4 = out_obj.image_properties.coordinates.vertexes.add()
            vertex4.x = point4[0]
            vertex4.y = point4[1]

            # Pack the confidence value.
            confidence = wrappers_pb2.FloatValue(value=score)
            out_obj.additional_properties.Pack(confidence)

            if not self.options.no_debug:

                polygon = np.array([point1, point2, point3, point4], np.int32)
                polygon = polygon.reshape((-1, 1, 2))
                cv2.polylines(image, [polygon], True, (0, 255, 0), 2)

                caption = "{}: {:.3f}".format(label, score)
                left_x = min(point1[0], point2[0], point3[0], point4[0])
                top_y = min(point1[1], point2[1], point3[1], point4[1])
                cv2.putText(image, caption, (int(left_x), int(top_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)

        print('Found ' + str(num_objects) + ' object(s)')

        if not self.options.no_debug:
            debug_image_filename = 'retinanet_server_output.jpg'
            cv2.imwrite(debug_image_filename, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
            print('Wrote debug image output to: "' + debug_image_filename + '"')

        # Pack all the outputs up and send them back.
        self.out_queue.put(out_proto)
Example #4
def process_images(options, model_extension):
    """Starts Tensorflow and detects objects in the incoming images.
    """
    models = {}
    for f in os.listdir(options.model_dir):
        path = os.path.join(options.model_dir, f)
        if os.path.isfile(path) and path.endswith(model_extension):
            model_name = ''.join(f.rsplit(model_extension, 1))  # remove the extension

            # The models dict is keyed by the extension-less name, so check
            # for duplicates against that, not the raw filename.
            if model_name in models:
                print('Warning: duplicate model name of "' + model_name +
                      '", ignoring second model.')
                continue

            # Replace the model extension with '.csv' to locate the matching
            # labels file (reverse replace, in case the extension string also
            # appears earlier in the name).
            labels_file = '.csv'.join(f.rsplit(model_extension, 1))
            labels_path = os.path.join(options.model_dir, labels_file)

            if not os.path.isfile(labels_path):
                labels_path = None
            models[model_name] = TensorflowModel(path, labels_path)

    # Tensorflow prints out a bunch of stuff, so print out useful data here after a space.
    print('')
    print('Running on port: ' + str(options.port))
    print('Loaded models:')
    for model_name in models:
        if models[model_name].labels is not None:
            labels_str = 'yes'
        else:
            labels_str = 'no'
        print('    ' + model_name + ' (loaded labels: ' + labels_str + ')')

    while True:
        request = REQUEST_QUEUE.get()

        if isinstance(request, network_compute_bridge_pb2.ListAvailableModelsRequest):
            out_proto = network_compute_bridge_pb2.ListAvailableModelsResponse()
            for model_name in models:
                out_proto.available_models.append(model_name)
            RESPONSE_QUEUE.put(out_proto)
            continue
        else:
            out_proto = network_compute_bridge_pb2.NetworkComputeResponse()

        # Find the model
        if request.input_data.model_name not in models:
            err_str = 'Cannot find model "' + request.input_data.model_name + '" in loaded models.'
            print(err_str)

            # Set the error in the header.
            out_proto.header.error.code = header_pb2.CommonError.CODE_INVALID_REQUEST
            out_proto.header.error.message = err_str
            RESPONSE_QUEUE.put(out_proto)
            continue

        model = models[request.input_data.model_name]

        if request.input_data.image.format == image_pb2.Image.FORMAT_RAW:
            pil_image = Image.open(io.BytesIO(request.input_data.image.data))
            pil_image = ndimage.rotate(pil_image, 0)  # no-op rotate; also converts the PIL image to a numpy array
            if request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_GREYSCALE_U8:
                image = cv2.cvtColor(
                    pil_image,
                    cv2.COLOR_GRAY2RGB)  # Converted to RGB for Tensorflow
            elif request.input_data.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGB_U8:
                # Already in the correct format
                image = pil_image
            else:
                err_str = ('Error: image input in unsupported pixel format: ' +
                           str(request.input_data.image.pixel_format))
                print(err_str)

                # Set the error in the header.
                out_proto.header.error.code = header_pb2.CommonError.CODE_INVALID_REQUEST
                out_proto.header.error.message = err_str
                RESPONSE_QUEUE.put(out_proto)
                continue
        elif request.input_data.image.format == image_pb2.Image.FORMAT_JPEG:
            dtype = np.uint8
            jpg = np.frombuffer(request.input_data.image.data, dtype=dtype)
            image = cv2.imdecode(jpg, -1)

            if len(image.shape) < 3:
                # Single channel image, convert to RGB.
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            else:
                # cv2.imdecode returns BGR; convert to RGB to match the RAW
                # branch above ("Converted to RGB for Tensorflow").
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

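        # model.predict() is assumed to return detections sorted by descending
        # score, with boxes as [ymin, xmin, ymax, xmax] in pixel coordinates.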
        boxes, scores, classes, _ = model.predict(image)

        num_objects = 0

        for i in range(len(boxes)):

            label = str(classes[i])
            box = boxes[i]
            score = scores[i]

            if score < request.input_data.min_confidence:
                # scores are sorted so we can break
                break

            num_objects += 1

            print('Found object with label: "' + label + '" and score: ' +
                  str(score))

            point1 = np.array([box[1], box[0]])
            point2 = np.array([box[3], box[0]])
            point3 = np.array([box[3], box[2]])
            point4 = np.array([box[1], box[2]])

            # Add data to the output proto.
            out_obj = out_proto.object_in_image.add()
            out_obj.name = "obj" + str(num_objects) + "_label_" + label

            vertex1 = out_obj.image_properties.coordinates.vertexes.add()
            vertex1.x = point1[0]
            vertex1.y = point1[1]

            vertex2 = out_obj.image_properties.coordinates.vertexes.add()
            vertex2.x = point2[0]
            vertex2.y = point2[1]

            vertex3 = out_obj.image_properties.coordinates.vertexes.add()
            vertex3.x = point3[0]
            vertex3.y = point3[1]

            vertex4 = out_obj.image_properties.coordinates.vertexes.add()
            vertex4.x = point4[0]
            vertex4.y = point4[1]

            # Pack the confidence value.
            confidence = wrappers_pb2.FloatValue(value=score)
            out_obj.additional_properties.Pack(confidence)

            if not options.no_debug:

                polygon = np.array([point1, point2, point3, point4], np.int32)
                polygon = polygon.reshape((-1, 1, 2))
                cv2.polylines(image, [polygon], True, (0, 255, 0), 2)

                caption = "{}: {:.3f}".format(label, score)
                left_x = min(point1[0], point2[0], point3[0], point4[0])
                top_y = min(point1[1], point2[1], point3[1], point4[1])
                cv2.putText(image, caption, (int(left_x), int(top_y)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        print('Found ' + str(num_objects) + ' object(s)')

        if not options.no_debug:
            debug_image_filename = 'tensorflow_server_output.jpg'
            cv2.imwrite(debug_image_filename, image)
            print('Wrote debug image output to: "' + debug_image_filename +
                  '"')

        RESPONSE_QUEUE.put(out_proto)