Example #1
 def show_inference(self, model, image_path):
     # the array based representation of the image will be used later in order to prepare the
     # result image with boxes and labels on it.
     image_np = np.array(Image.open(image_path))
     # Actual detection.
     output_dict = self.run_inference_for_single_image(model, image_np)
     # print(output_dict)
     # Visualization of the results of a detection.
     vis_util.visualize_boxes_and_labels_on_image_array(
         image_np,
         output_dict['detection_boxes'],
         output_dict['detection_classes'],
         output_dict['detection_scores'],
         self.category_index,
         instance_masks=output_dict.get('detection_masks_reframed', None),
         use_normalized_coordinates=True,
         line_thickness=8)
     scores = output_dict['detection_scores']
     classes = output_dict['detection_classes']
     # Count detected persons (COCO class id 1) with confidence above 0.5.
     final_score = np.atleast_1d(np.squeeze(scores))
     count = 0
     for i in range(len(final_score)):
         if final_score[i] > 0.5 and classes[i] == 1:
             count += 1
     print(count)
     self.count_person.append(count)
     display(Image.fromarray(image_np))
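
The counting loop can also be written as a single vectorized NumPy expression; a minimal equivalent sketch, assuming final_score and classes are 1-D arrays of the same length:

     # Count of class-1 ("person") detections scoring above 0.5.
     count = int(np.sum((final_score > 0.5) & (classes == 1)))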
Example #2
    def get_classification(self, img):
        # Bounding Box Detection.
        try:
            with self.detection_graph.as_default():
                category_index = self.labelify()
                # Expand dimension since the model expects image to have shape [1, None, None, 3].
                img_expanded = np.expand_dims(img, axis=0)
                (boxes, scores, classes, num) = self.sess.run(
                    [self.d_boxes, self.d_scores, self.d_classes, self.num_d],
                    feed_dict={self.image_tensor: img_expanded})

                vis_util.visualize_boxes_and_labels_on_image_array(
                    img,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)

                return img
        except Exception as e:
            print("Exception during classification", e)
        return
Example #3
    def predict_fun(self):
        image = Image.open(
            os.path.join(self.folder_path, self.files[self.n_curr]))
        image_np = self.load_image_into_array(image)

        output_dict = self.run_inference_for_single_image(image_np)
        print(self.filter_class)
        if self.filter_class is not None:
            output_dict['detection_boxes'] = np.squeeze(
                output_dict['detection_boxes'])
            output_dict['detection_classes'] = np.squeeze(
                output_dict['detection_classes'])
            output_dict['detection_scores'] = np.squeeze(
                output_dict['detection_scores'])
            # Map the UI filter name to its COCO class id; unknown names
            # yield an empty index set.
            class_ids = {'Person': 1, 'Dog': 18, 'Cat': 17,
                         'Bottle': 44, 'Chair': 62}
            class_id = class_ids.get(self.filter_class)
            indices = np.argwhere(output_dict['detection_classes'] == class_id)
            print(indices)
            output_dict['detection_boxes'] = np.squeeze(
                output_dict['detection_boxes'][indices])
            output_dict['detection_classes'] = np.squeeze(
                output_dict['detection_classes'][indices])
            output_dict['detection_scores'] = np.squeeze(
                output_dict['detection_scores'][indices])

        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            self.category_index[0],
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            min_score_thresh=self.threshold,
            line_thickness=8)
        fig = plt.figure(figsize=(7, 5), frameon=False)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(image_np, aspect='auto')
        fig.savefig("output.png", bbox_inches='tight', dpi=fig.dpi)
        self.pixmap = QPixmap('output.png')
        self.labelImage.setPixmap(self.pixmap)
        print("done")
Example #4
def show_inference(model, image_np):
    #image_np = np.array(Image.open(image_path))
    output_dict = run_inference_for_single_image(model, image_np)
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
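
Note: cv2.imshow only refreshes its window while OpenCV's event loop is pumped, so a call like the one below normally follows it (compare Example #19, which pairs imshow with waitKey):

    cv2.waitKey(1)  # give the HighGUI event loop a chance to draw the frame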
Example #5
def show_inference(model, image_path):
    image_np = np.array(Image.open(image_path))
    output_dict = run_inference_for_single_image(model, image_np)
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    plt.imshow(image_np)
    plt.axis('off')
    plt.savefig("./ProcessedImages/Processed_" +
                str(image_path).split("/")[-1] + ".jpg")
Example #6
def test_detection(tfrecords_filename, tfrecords_num, detect_fn):
    image_tensors = tensors_from_tfrecord(tfrecords_filename,
                                          tfrecords_num,
                                          dtype=tf.uint8)

    for image_tensor in image_tensors:
        image_np = image_tensor.numpy()

        # The model expects a batch of images, so add an axis with `tf.newaxis`.
        input_tensor = tf.expand_dims(image_tensor, 0)

        detections = detect_fn(input_tensor)

        # All outputs are batched tensors.
        # Convert to numpy arrays, and take index [0] to remove the batch dimension.
        # We're only interested in the first num_detections.
        num_detections = int(detections.pop('num_detections'))

        detections = {
            key: value[0, :num_detections].numpy()
            for key, value in detections.items()
        }
        detections['num_detections'] = num_detections

        # detection_classes should be ints.
        detections['detection_classes'] = (
            detections['detection_classes'].astype(np.int64))

        # Keep the uint8 image; the visualization utils expect an 8-bit array.
        image_np_with_detections = image_np.copy()

        visualization_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            detections['detection_boxes'],
            detections['detection_classes'],
            detections['detection_scores'],
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=100,
            min_score_thresh=.3,
            agnostic_mode=False)

        plt.figure(figsize=(8, 8))
        plt.imshow(image_np_with_detections)

    plt.show()
Example #7
def gen():

    myrtmp_addr = "rtmp://localhost/live/indycar live=1"
    cap = cv2.VideoCapture(myrtmp_addr)

    while True:
        ret, frame = cap.read()
        if not ret:
            print('Input source error!')
            break

        image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)

        # Actual detection.
        output_dict = sess.run(tensor_dict,
                               feed_dict={image_tensor: image_np_expanded})

        # all outputs are float32 numpy arrays, so convert types as appropriate
        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict['detection_classes'][
            0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            output_dict['detection_masks'] = output_dict['detection_masks'][0]

        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)

        frame = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        ret, jpeg = cv2.imencode('.jpg', frame)
        frame = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
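
gen() yields an endless multipart JPEG stream. A minimal sketch of serving it, assuming Flask (the original does not show the surrounding app; the boundary name matches the b'--frame' marker above):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')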
Example #8
def predict(file, filename):
    PATH_TO_IMAGE = file
    NUM_CLASSES = 4
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.Session(graph=detection_graph)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    image = cv2.imread(PATH_TO_IMAGE)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_expanded = np.expand_dims(image_rgb, axis=0)

    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_expanded})
    vis_util.visualize_boxes_and_labels_on_image_array(
        image,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.60)
    cv2.imwrite('uploads/' + filename + 'result.jpg', image)
    return 0
Example #9
def visualize_detections(image_np, detections, category_index):
    image_np_with_detections = image_np.copy()
    #labels_path = download_labels('mscoco_label_map.pbtxt')
    #category_index = label_map_util.create_category_index_from_labelmap(labels_path, use_display_name=True)

    boxes = np.asarray(detections["detection_boxes"][0])
    classes = np.asarray(detections["detection_classes"][0]).astype(np.int64)
    scores = np.asarray(detections["detection_scores"][0])
    mask = np.asarray(detections["detection_masks_reframed"])

    # Visualizing the results
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        boxes,
        classes,
        scores,
        category_index,
        instance_masks=mask,
        use_normalized_coordinates=True,
        line_thickness=3)

    return image_np_with_detections
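
The 'detection_masks_reframed' entry is not a raw model output; in the standard TF Object Detection API tutorials it is derived from the box-local masks before visualization. A sketch of that step, assuming utils_ops is object_detection.utils.ops and detections still holds batched tensors:

    # Reframe box-relative masks to absolute image coordinates, then binarize.
    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
        detections['detection_masks'][0], detections['detection_boxes'][0],
        image_np.shape[0], image_np.shape[1])
    detections['detection_masks_reframed'] = tf.cast(
        detection_masks_reframed > 0.5, tf.uint8).numpy()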
Example #10
def print_image(image_path, IMAGE_SIZE, detection_graph, category_index):
  image = Image.open(image_path)
  # the array based representation of the image will be used later in order to prepare the
  # result image with boxes and labels on it.
  image_np = load_image_into_numpy_array(image)
  # Actual detection.
  output_dict = run_inference_for_single_image(image_np, detection_graph)
  # Visualization of the results of a detection.
  vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      output_dict['detection_boxes'],
      output_dict['detection_classes'],
      output_dict['detection_scores'],
      category_index,
      instance_masks=output_dict.get('detection_masks'),
      use_normalized_coordinates=True,
      line_thickness=8)
  # IMAGE_SIZE == tuple | Size, in inches, of the output images.
  plt.figure(figsize=IMAGE_SIZE)
  plt.imshow(image_np)
Example #11
def test_object_detection():
    # Create the label/category maps for drawing the bbox objects.
    label_map = label_map_util.load_labelmap(config.PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=config.NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Load the model.
    model = odl.load_model()

    # Get test images.
    test_image = get_test_images()[TEST_IMAGE]

    # Feed the model and the first images to the inference runner.
    output_dict = odl.run_inference_for_single_image(test_image, model)

    # Log the detections.
    objs = set()
    scores = output_dict['detection_scores']
    classes = output_dict['detection_classes']
    for i in range(len(scores)):
        score = scores[i]
        c = classes[i]
        if score >= 0.2:
            objs.add(category_index[c]['name'])
    print("Objects detected: %s" % str(objs))

    # Save the output.
    vis_util.visualize_boxes_and_labels_on_image_array(
        test_image,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.4)
    vis_util.save_image_array_as_png(test_image, 'test_screens/test_image.png')
Example #12
def show_inference(model, image_path):
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = np.array(PIL.Image.open(image_path))
    # image_np = np.array(PIL.Image.open("C:/Users/dell/Desktop/images.jpg"))
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=5)

    # display(Image.fromarray(image_np))  # Replaced this with the code below.
    img = PIL.Image.fromarray(image_np, "RGB")
    img.show()
    print("esra")
Example #13
def print_image(image_path, IMAGE_SIZE, detection_graph, category_index):
    '''DOCSTRING
       Displays the image using matplotlib with bounding boxes and the class in
       the image.

       #todo: come back and resize the detection class displayed on the image
       --------------
       INPUTS:
       image_path: the path to the image you want to load
       IMAGE_SIZE: the desired output size
       detection_graph: the detection graph of the model you want to use to
                        predict on the image
       category_index: the category_index that will be used to go from class id
                       to class name
       --------------
       Returns:
       None, but it does print a pretty image
    '''
    image = Image.open(image_path)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8)
    # IMAGE_SIZE == tuple | Size, in inches, of the output images.
    plt.figure(figsize=IMAGE_SIZE)
    plt.imshow(image_np)
Example #14
def detect_objects(image_np, sess, detection_graph, categories_to_detect=[]):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes,
     num_detections) = sess.run([boxes, scores, classes, num_detections],
                                feed_dict={image_tensor: image_np_expanded})

    # For each requested category id, record the highest confidence among
    # detections of that class.
    detected = dict()
    det_classes = np.squeeze(classes)
    det_scores = np.squeeze(scores)
    for cat in categories_to_detect:
        detected[cat] = 0.0
        for j in range(len(det_scores)):
            if det_classes[j] == cat:
                detected[cat] = max(detected[cat], det_scores[j])

    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)

    return image_np, detected
Example #15
def detect_object():
    data = request.data.decode("utf-8")
    imgdata = base64.b64decode(data)
    print(len(imgdata))

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            image = Image.open(io.BytesIO(imgdata))
            image_np = load_image_into_numpy_array(image)
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            # Encode the annotated image as a base64 PNG and return it.
            im = Image.fromarray(image_np)
            imbyte = io.BytesIO()
            im.save(imbyte, format='PNG')
            return base64.b64encode(imbyte.getvalue())
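
A client-side sketch for this endpoint (the route URL is an assumption; the original shows only the handler body): POST the raw base64 of an image and decode the base64 PNG that comes back.

import base64
import requests

with open('test.jpg', 'rb') as f:
    payload = base64.b64encode(f.read())
resp = requests.post('http://localhost:5000/detect_object', data=payload)
with open('annotated.png', 'wb') as f:
    f.write(base64.b64decode(resp.content))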
Example #16
    class_dict[entry.id] = {'name': entry.name}

raw_dataset = list(tf.data.TFRecordDataset(config.TRAIN_RECORD))
random.shuffle(raw_dataset)

fig, ax = plt.subplots(2, 5, figsize=(15, 5))

for i, raw_record in enumerate(raw_dataset[:10]):

    example = decoder.decode(raw_record)

    image = example['image'].numpy()
    boxes = example['groundtruth_boxes'].numpy()
    classes = example['groundtruth_classes'].numpy()

    scores = np.ones(boxes.shape[0])

    visualization_utils.visualize_boxes_and_labels_on_image_array(
        image,
        boxes,
        classes,
        scores,
        class_dict,
        max_boxes_to_draw=None,
        use_normalized_coordinates=True)

    ax.flat[i].axis('off')
    ax.flat[i].imshow(image)

fig.tight_layout()
Example #17
    def get_localization(self, image, visual=False):
        """Determines the locations of the cars in the image
        Args:
            image: camera image

        Returns:
            list of bounding boxes: coordinates [y_up, x_left, y_down, x_right]

        """
        category_index = {
            1: {
                'id': 1,
                'name': u'person'
            },
            2: {
                'id': 2,
                'name': u'bicycle'
            },
            3: {
                'id': 3,
                'name': u'car'
            },
            4: {
                'id': 4,
                'name': u'motorcycle'
            },
            5: {
                'id': 5,
                'name': u'airplane'
            },
            6: {
                'id': 6,
                'name': u'bus'
            },
            7: {
                'id': 7,
                'name': u'train'
            },
            8: {
                'id': 8,
                'name': u'truck'
            },
            9: {
                'id': 9,
                'name': u'boat'
            },
            10: {
                'id': 10,
                'name': u'traffic light'
            },
            11: {
                'id': 11,
                'name': u'fire hydrant'
            },
            13: {
                'id': 13,
                'name': u'stop sign'
            },
            14: {
                'id': 14,
                'name': u'parking meter'
            }
        }

        with self.detection_graph.as_default():
            image_expanded = np.expand_dims(image, axis=0)
            (boxes, scores, classes, num_detections) = self.sess.run(
                [self.boxes, self.scores, self.classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})

            if visual:
                vis_utils.visualize_boxes_and_labels_on_image_array(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    min_score_thresh=.4,
                    line_thickness=3)

                plt.figure(figsize=(9, 6))
                plt.imshow(image)
                plt.show()

            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes)
            scores = np.squeeze(scores)

            cls = classes.tolist()

            # The ID for car in COCO data set is 3
            idx_vec = [
                i for i, v in enumerate(cls)
                if ((v == 3) and (scores[i] > 0.3))
            ]

            if len(idx_vec) == 0:
                print('no detection!')
                self.car_boxes = []
            else:
                tmp_car_boxes = []
                for idx in idx_vec:
                    dim = image.shape[0:2]
                    box = self.box_normal_to_pixel(boxes[idx], dim)
                    box_h = box[2] - box[0]
                    box_w = box[3] - box[1]
                    ratio = box_h / (box_w + 0.01)

                    if ((ratio < 0.8) and (box_h > 20) and (box_w > 20)):
                        tmp_car_boxes.append(box)
                        print(box, ', confidence: ', scores[idx], 'ratio:',
                              ratio)

                    else:
                        print('wrong ratio or wrong size, ', box,
                              ', confidence: ', scores[idx], 'ratio:', ratio)

                self.car_boxes = tmp_car_boxes

        return self.car_boxes
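
box_normal_to_pixel is not shown in this example; a plausible sketch, assuming boxes arrive as normalized [y_min, x_min, y_max, x_max] and dim is the image's (height, width):

    def box_normal_to_pixel(self, box, dim):
        # Scale normalized box coordinates to pixel coordinates.
        height, width = dim
        return np.array([int(box[0] * height), int(box[1] * width),
                         int(box[2] * height), int(box[3] * width)])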
Example #18
            # Extract detection scores
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            # Extract detection classes
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            # Extract number of detections
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)

            out.write(image_np)

            # Display output
            cv2.imshow('object detection', image_np)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

# Clean up
cap.release()
out.release()
Example #19
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.compat.v1.get_default_graph(
                ).get_tensor_by_name(tensor_name)

        while True:

            ret, image_np = cap.read()
            # Actual detection.
            output_dict = run_inference_for_single_image(
                image_np, detection_graph)
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks'),
                use_normalized_coordinates=True,
                line_thickness=8)
            cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                break

Example #20
 def Detect(self):
     # Get frame, frame is given in 3d array of RGB values
     ret1, frame1 = self.video1.read()
     if not ret1: return None # Exit with empty values if the frame could not be read
     frame1 = imutils.rotate(frame1, angle=180)
     ret2, frame2 = self.video2.read()
     if not ret2: return None # Exit with empty values if the frame could not be read
     frame2 = imutils.rotate(frame2, angle=90)
     
     # Map colors on image for openCV
     frame_rgb1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
     frame_expanded1 = np.expand_dims(frame_rgb1, axis=0)
     frame_rgb2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
     frame_expanded2 = np.expand_dims(frame_rgb2, axis=0)
 
     # This is the part with the actual detection
     (boxes1, scores1, classes1, num1) = self.sess.run(
         [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
         feed_dict={self.image_tensor: frame_expanded1})
     (boxes2, scores2, classes2, num2) = self.sess.run(
         [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
         feed_dict={self.image_tensor: frame_expanded2})
     
     # Define predicted class from the highest confidence guess of the image
     predicted_class1 = self.category_index[int(classes1[0][0])]['name']
     try:
         predicted_class2 = self.category_index[int(classes2[0][0])]['name']
     except KeyError:
         print("Error: ", classes2[0][0], "when should be in: ", self.category_index)
         raise KeyError
     
     results = {"video1": {"boxes": boxes1[0][0].tolist(), "scores": scores1[0][0], "classes": predicted_class1},
                "video2": {"boxes": boxes2[0][0].tolist(), "scores": scores2[0][0], "classes": predicted_class2},
                "hand_val": predicted_class1}
     #print(category_index)
 
     # Draw the bounding box and prediction
     vis_util.visualize_boxes_and_labels_on_image_array(
         frame1,
         np.squeeze(boxes1),
         np.squeeze(classes1).astype(np.int32),
         np.squeeze(scores1),
         self.category_index,
         use_normalized_coordinates=True,
         line_thickness=8,
         min_score_thresh=0.60)
     
     # Draw the bounding box and prediction
     vis_util.visualize_boxes_and_labels_on_image_array(
         frame2,
         np.squeeze(boxes2),
         np.squeeze(classes2).astype(np.int32),
         np.squeeze(scores2),
         self.category_index,
         use_normalized_coordinates=True,
         line_thickness=8,
         min_score_thresh=0.60)
     
     print("going to coors")
     #results["coors"] = self.cclass.Filter(self.cclass.GetCoors(boxes1[0][0], boxes2[0][0]))
     results["coors"] = self.cclass.GetCoors(boxes1[0][0], boxes2[0][0])
     print("done")
     print(results)
     
     #time.sleep(DELAY)
     
     if self.Verbose:
         text1 = "Top View Values: {} , {} , {}".format(results["coors"][0], results["coors"][1], results["hand_val"])
         text2 = "Side View Values: {}".format(results["coors"][2])
         frame1 = cv2.putText(frame1, text1, org, font,  
                fontScale, color, thickness, cv2.LINE_AA) 
         frame2 = cv2.putText(frame2, text2, org, font,  
                fontScale, color, thickness, cv2.LINE_AA) 
         # cv2.imshow("Obj Detect 1: {} , {} , {} , {}".format(results["coors"][0], results["coors"][1], results["coors"][2], results["hand_val"]), frame1)
         # cv2.imshow("Obj Detect 2: {} , {} , {} , {}".format(results["coors"][0], results["coors"][1], results["coors"][2], results["hand_val"]), frame2)
         cv2.imshow("Obj Detect 1", frame1)
         cv2.imshow("Obj Detect 2", frame2)
         if cv2.waitKey(1) == ord('q'):
             return None
     
     return results
         
Example #21
    def browse_mul_image_file(self, _):
        self.selected_image_file = tkFileDialog.askopenfilename(
            initialdir="~/Downloads",
            title="Select file",
            filetypes=(("jpeg files", "*.jpg"), ("all files", "*.*")))
        if self.selected_image_file:
            self.mul_image_file_entry.delete(0, END)
            self.mul_image_file_entry.insert(0, self.selected_image_file)

    #def object_detect_show(self):

        CWD_PATH = os.getcwd()

        #IMAGE_NAME = '000001.jpg'

        PATH_TO_CKPT = os.path.join(CWD_PATH, 'models', 'research',
                                    'object_detection', 'inference_graph',
                                    'frozen_inference_graph.pb')
        # Path to label map file
        PATH_TO_LABELS = os.path.join(CWD_PATH, 'models', 'research',
                                      'object_detection', 'training',
                                      'labelmap.pbtxt')

        # Path to image
        PATH_TO_IMAGE = self.selected_image_file

        NUM_CLASSES = 2

        label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        # Load the Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

            sess = tf.Session(graph=detection_graph)

        # Define input and output tensors (i.e. data) for the object detection classifier

        # Input tensor is the image
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

        # Output tensors are the detection boxes, scores, and classes
        # Each box represents a part of the image where a particular object was detected
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')

        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')

        # Number of objects detected
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        # Load image using OpenCV and
        # expand image dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        image = cv2.imread(PATH_TO_IMAGE)
        image_expanded = np.expand_dims(image, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes,
         num) = sess.run([
             detection_boxes, detection_scores, detection_classes,
             num_detections
         ],
                         feed_dict={image_tensor: image_expanded})

        # Draw the results of the detection (aka 'visualize the results')

        image_detected = vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.60)
        #image = Image.open(image)
        '''self.tk_image = ImageTk.PhotoImage(image)
        self.main_panel.config(
            width=max(image.width(), 256), height=max(image.width(), 256))
        self.main_panel.create_image(0, 0, image=self.tk_image, anchor=NW)'''

        cv2.imwrite(os.path.join(CWD_PATH, 'waka.jpg'), image)

        image = Image.open(os.path.join(CWD_PATH, 'waka.jpg'))
        self.tk_image = ImageTk.PhotoImage(image)
        self.main_panel.config(width=max(self.tk_image.width(), 256),
                               height=max(self.tk_image.height(), 256))
        self.main_panel.create_image(0, 0, image=self.tk_image, anchor=NW)