Example #1
0
    def segmenting_images(self):
        """Watch the upload directory forever and segment each image found.

        Loads the PigFace Mask R-CNN model once, then loops: every image
        file in ``project_config.image_upload_dir_path`` is segmented,
        the annotated result is written to
        ``project_config.output_dir_path`` under the same filename, and
        the upload is deleted. Never returns.
        """
        self.log.info('Loading model...')
        segment_image = custom_segmentation()
        segment_image.inferConfig(num_classes=1, class_names=["PigFace"], detection_threshold=0.95)
        segment_image.load_model("../model/mask_rcnn_model.036-0.139239.h5")

        dir_path = project_config.image_upload_dir_path
        output_path = project_config.output_dir_path
        # Tuple argument to endswith() + lower() covers .jpg/.JPG/.png/.PNG
        # (and mixed-case variants the old four-way check missed).
        valid_exts = ('.jpg', '.png')
        while True:
            self.log.info('Ready to upload directory', datetime.now())
            files = [x for x in os.listdir(dir_path) if x.lower().endswith(valid_exts)]

            for image_full_file_name in files:
                image_file_name = os.path.basename(image_full_file_name)
                self.log.info('Image filename: ', image_file_name)
                start_time = datetime.now()
                try:
                    segment_image.segmentImage(
                        image_file_name,
                        os.path.join(dir_path, image_file_name),
                        show_bboxes=True,
                        output_image_name=os.path.join(output_path, image_file_name))
                except Exception as exc:
                    # Log and keep going: one bad image must not kill the
                    # watch loop (the old bare ``except:`` also swallowed
                    # KeyboardInterrupt/SystemExit — this does not).
                    self.log.info('Segmentation failed for: ', image_file_name, exc)
                finally:
                    # Always remove the upload (matches the original's
                    # unconditional remove) so a failing image is not
                    # retried forever.
                    os.remove(os.path.join(dir_path, image_full_file_name))
                # total_seconds() is correct for any duration; the old
                # ``.microseconds / 1000`` dropped whole seconds entirely.
                elapsed_ms = (datetime.now() - start_time).total_seconds() * 1000
                self.log.info('Elapsed time for Segmentation: ', '%.2f ms' % elapsed_ms)
            time.sleep(0.2)
Example #2
0
def annotateSingle(input_path, output_path):
  """
  Annotates a single image.

  Picks DARK_MODEL or LIGHT_MODEL based on the image's average darkest-
  channel value, then runs instance segmentation and writes the result.

  Args:
    input_path: The raw image file
    output_path: The file where the annotated image should be stored

  Raises:
    FileNotFoundError: if input_path cannot be read as an image.
  """
  segment_image = custom_segmentation()
  segment_image.inferConfig(num_classes= 1, class_names= ["BG", ""])
  img = cv2.imread(input_path)
  if img is None:
    # cv2.imread returns None instead of raising; fail with a clear error
    # rather than an opaque AttributeError on img.shape.
    raise FileNotFoundError("Could not read image: %s" % input_path)
  # Per-pixel minimum across the color channels — a vectorized one-liner
  # replacing the original O(h*w) Python double loop with identical values.
  grayscale_matrix = img.min(axis=2)
  # Darker-than-mid-gray images get the dark-trained model.
  model = DARK_MODEL if np.mean(grayscale_matrix) < 255 / 2 else LIGHT_MODEL

  segment_image.load_model(model)
  segment_image.segmentImage(input_path, show_bboxes=False, output_image_name=output_path,
  extract_segmented_objects= False, save_extracted_objects=False)
 def __init__(self,
              resolution=(720, 800),
              model_path=os.path.join(p().SEG_MODELS, 'D.h5'),
              intrinsics='1280_720_color'):
     """Set up the segmentation model and camera parameters.

     Args:
         resolution: (height, width) pair stored as ``crop_resolution``;
             presumably the size frames are cropped to — confirm with callers.
         model_path: Path to the Mask R-CNN weights file; defaults to
             ``D.h5`` under the project's segmentation-models directory
             (``p().SEG_MODELS`` — project path provider, defined elsewhere).
         intrinsics: Key passed to ``proj.makeIntrinsics``; looks like a
             camera-intrinsics preset name — verify against ``proj``.
     """
     # One class besides background: "mh5" (BG is implicit class 0).
     self.master = custom_segmentation()
     self.master.inferConfig(num_classes=1, class_names=["BG", "mh5"])
     self.master.load_model(model_path)
     self.crop_resolution = resolution
     self.intrinsics = proj.makeIntrinsics(intrinsics)
Example #4
0
def annotateFolder(input_path, output_path):
  """
  Annotates an entire folder of worm images and stores it in a new folder.
  Uses LIGHT_MODEL and DARK_MODEL based on the average shade of each image.

  Unreadable or non-image files in the folder are skipped with a warning
  instead of crashing the whole run.

  Args:
    input_path: The folder location storing the images relative to the python file
    output_path: The folder location to store the annotated images
  """

  all_image = os.listdir(input_path)
  # makedirs(exist_ok=True): re-running the annotation no longer fails
  # just because the output folder already exists (os.mkdir raised).
  os.makedirs(output_path, exist_ok=True)
  segment_light = custom_segmentation()
  segment_light.inferConfig(num_classes= 1, class_names= ["BG", ""])
  segment_light.load_model(LIGHT_MODEL)

  segment_dark = custom_segmentation()
  segment_dark.inferConfig(num_classes= 1, class_names= ["BG", ""])
  segment_dark.load_model(DARK_MODEL)

  for item in all_image:
    img = cv2.imread(input_path + "/" + item)
    if img is None:
      # cv2.imread returns None for unreadable/non-image entries.
      print("Skipping unreadable file: " + item)
      continue
    # Vectorized per-pixel channel minimum — same values as the original
    # O(h*w) Python double loop, computed in one C-level pass.
    grayscale_matrix = img.min(axis=2)
    # Brighter-than-mid-gray images use the light-trained model.
    if np.mean(grayscale_matrix) > 255/2:
      print("Light")
      segmenter = segment_light
    else:
      print("Dark")
      segmenter = segment_dark
    segmenter.segmentImage(input_path+"/"+item, show_bboxes=False,
                           output_image_name=output_path+"/Annotated_"+item,
                           extract_segmented_objects= False, save_extracted_objects=False)
Example #5
0
import pixellib
from pixellib.instance import custom_segmentation

# Segment a recorded video with a 7-class (plus background) robot-link model.
test_video = custom_segmentation()

# Class 0 is the implicit background; the remaining seven are robot links.
robot_link_classes = [
    "BG", "base_link", "link_s", "link_l", "link_u",
    "link_r", "link_b"
]
test_video.inferConfig(num_classes=7, class_names=robot_link_classes)

test_video.load_model("models/segmentation/multi/A.h5")

# Annotate every frame (with bounding boxes) and write the result video.
test_video.process_video("data/set10/og_vid.avi",
                         show_bboxes=True,
                         output_video_name="output/multiseg_test.avi",
                         frames_per_second=15)
Example #6
0
import pixellib
from pixellib.instance import custom_segmentation
import cv2

# Live segmentation from the default webcam (device index 0).
capture = cv2.VideoCapture(0)

segment_camera = custom_segmentation()
# Two classes besides the implicit background class.
segment_camera.inferConfig(num_classes=2,
                           class_names=["BG", "butterfly", "squirrel"])
segment_camera.load_model("Nature_model_resnet101.h5")
try:
    # Show each annotated frame in a window named "frame" and also
    # record the stream to output_video.mp4.
    segment_camera.process_camera(capture,
                                  frames_per_second=10,
                                  output_video_name="output_video.mp4",
                                  show_frames=True,
                                  frame_name="frame")
finally:
    # Release the camera handle even if processing is interrupted —
    # the original script leaked the VideoCapture device.
    capture.release()
Example #7
0
# In this lesson we learn how to use our model to detect several objects
# in an image. Moreover, we will extract each object into a separate image.

import pixellib
import cv2
import numpy as np
from pixellib.instance import custom_segmentation

# Three fruit classes besides the implicit background class.
segment_image = custom_segmentation()
segment_image.inferConfig(network_backbone="resnet101",
                          num_classes=3,
                          class_names=["BG", "Banana", "Apple", "Tomato"])
segment_image.load_model("c:/models/eval/mask_rcnn_model.032-0.200773.h5")

# segmentImage returns (results dict, annotated image). With
# extract_segmented_objects=True the dict carries the cropped objects;
# save_extracted_objects=True also writes them to disk.
segmask, output = segment_image.segmentImage(
    "C:/GitHub/Object-Detection/Pixellib/moreThanOneApple.jpg",
    show_bboxes=True,
    extract_segmented_objects=True,
    save_extracted_objects=True,
    output_image_name=
    "C:/GitHub/Object-Detection/Pixellib/moreThanOneAppleOut.jpg")

# Let's inspect the raw results:
#print(segmask)

# We need this section: the extracted object crops.

res = segmask["extracted_objects"]
# Example follow-up: convert the first crop to a uint8 image array.
#firstImage = res[0]
#firstImageArr = np.uint8(firstImage)