Example #1
0
    def _step(self):
        """Compute a SMQTK descriptor for the image on the 'image' port.

        Pushes the descriptor (a plain list of floats) to the 'vector'
        port, then advances the base process.  In test mode a constant
        fake 4096-element descriptor is pushed instead of running smqtk.
        """
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')

        # NOTE(review): `apply_descriptor_test_mode` is a free name, not a
        # self attribute -- presumably a module-level flag; verify.
        if not apply_descriptor_test_mode:
            # Get image from container
            in_img = in_img_c.image()

            # convert generic image to PIL image
            pil_image = get_pil_image(in_img)

            # get image in acceptable format
            # TBD use in-memory transfer instead of a temp file on disk
            pil_image.save("file.png")
            test_data = DataFileElement("file.png")

            result = self.generator.compute_descriptor(test_data, self.factory)
            desc_list = result.vector().tolist()
        else:
            # Create fake descriptor in test mode
            desc_list = 4096 * [0.223]

        # push list to output port (single push point for both branches)
        self.push_to_port_using_trait('vector', desc_list)

        self._base_step()
Example #2
0
    def _step(self):
        """Compute a SMQTK descriptor for the image on the 'image' port.

        Pushes the descriptor (a plain list of floats) to the 'vector'
        port and then advances the base process.  In test mode a constant
        fake 4096-element descriptor is pushed instead of running smqtk.
        """
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')

        # NOTE(review): `apply_descriptor_test_mode` is a free
        # (module-level?) name, not a self attribute -- verify where it
        # is defined.
        if not apply_descriptor_test_mode:
            # Get image from container
            in_img = in_img_c.image()

            # convert generic image to PIL image
            pil_image = get_pil_image(in_img)

            # get image in acceptable format
            # TBD use in-memory transfer instead of a temp file
            pil_image.save("file.png")
            test_data = DataFileElement("file.png")

            result = self.generator.compute_descriptor(test_data, self.factory)
            desc_list = result.vector().tolist()
        else:
            # Create fake descriptor in test mode
            desc_list = 4096 * [0.223]

        # push list to output port (single push point for both branches)
        self.push_to_port_using_trait('vector', desc_list)

        self._base_step()
Example #3
0
    def _step(self):
        """Draw demo shapes on the input image and push the result.

        Reads an image container from the 'image' port, scribbles two
        diagonal lines and a square on it, and pushes the modified image
        to the 'out_image' port before advancing the base process.
        """
        print("[DEBUG] ----- start step")

        # Pull the image container off the input port and unwrap it.
        image_container = self.grab_input_using_trait('image')
        pil_img = get_pil_image(image_container.image())

        # Scribble on the image to prove we can modify it.
        offset = 37
        import PIL.ImageDraw
        pen = PIL.ImageDraw.Draw(pil_img)
        width, height = pil_img.size
        pen.line((0, 0, width, height), fill=128, width=5)
        pen.line((0, height, width, 0), fill=32768, width=5)
        # Axis-aligned square: coordinates are (x0, y0, x1, y1).
        pen.rectangle([offset, offset, offset + 100, offset + 100],
                      outline=125)
        del pen

        # Re-wrap the modified image and push it downstream.
        self.push_to_port_using_trait('out_image',
                                      ImageContainer(from_pil(pil_img)))

        self._base_step()
Example #4
0
    def _step(self):
        """Detect faces in the input image with an OpenCV Haar cascade.

        Pushes a DetectedObjectSet (one "face" entry per detection, with
        confidence 1.0) to the 'detected_object_set' port, then advances
        the base process.
        """
        print("[DEBUG] ----- start step")

        # Load the cascade once and cache it on the instance; re-reading
        # the XML from disk on every frame is pure overhead.
        if not hasattr(self, '_face_cascade'):
            self._face_cascade = cv2.CascadeClassifier(
                'haarcascade_frontalface_alt.xml')

        # grab image container from port using traits
        frame_c = self.grab_input_using_trait('image')
        # Get image from container and convert to a numpy matrix.
        pil_image = get_pil_image(frame_c.image())
        frame = np.array(pil_image)

        detected_set = DetectedObjectSet()
        # PIL arrays are RGB-ordered, so use COLOR_RGB2GRAY.  The original
        # COLOR_BGR2GRAY applied BGR channel weights to RGB data, skewing
        # the grayscale conversion.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        gray_frame = cv2.equalizeHist(gray_frame)
        faces = self._face_cascade.detectMultiScale(gray_frame, 1.3, 5)
        for (x, y, w, h) in faces:
            # detectMultiScale returns (x, y, width, height) rectangles.
            bbox = BoundingBox(x, y, x + w, y + h)
            dot = DetectedObjectType("face", 1.0)
            detected_set.add(DetectedObject(bbox, 1.0, dot))

        # push object to output port
        self.push_to_port_using_trait('detected_object_set', detected_set)

        self._base_step()
Example #5
0
    def _step(self):
        """Run the TensorFlow detector on one input frame.

        Reads an image from the 'image' port, optionally normalizes it,
        pushes the (3-channel) prepared image to 'image_norm', runs
        detection, and pushes score-thresholded results to
        'detected_object_set' before advancing the base process.
        """
        print("[DEBUG] ----- start step")
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait("image")

        imageHeight = in_img_c.height()
        imageWidth = in_img_c.width()

        if (self.normImageType):
            print("Normalize image")

            in_img = in_img_c.image().asarray().astype("uint16")

            # NOTE(review): sibling detect() variants pass the image array
            # to get_scaling_values as well -- confirm this two-argument
            # call matches the intended signature for this class.
            bottom, top = self.get_scaling_values(self.normImageType,
                                                  imageHeight)
            in_img = self.lin_normalize_image(in_img, bottom, top)

            # Replicate the single channel three times to mimic RGB input.
            in_img = np.tile(in_img, (1, 1, 3))
        else:
            in_img = np.array(get_pil_image(in_img_c.image()).convert("RGB"))

        # Expose the prepared frame for downstream consumers/debugging.
        self.push_to_port_using_trait("image_norm",
                                      ImageContainer(Image(in_img)))

        startTime = time.time()
        boxes, scores, classes = self.generate_detection(
            self.detection_graph, in_img)
        elapsed = time.time() - startTime
        print("Done running detector in {}".format(
            humanfriendly.format_timespan(elapsed)))

        goodBoxes = []
        detections = DetectedObjectSet()

        for i in range(0, len(scores)):
            if (scores[i] >= self.confidenceThresh):
                bbox = boxes[i]
                goodBoxes.append(bbox)

                # Detector boxes are [top, left, bottom, right] in
                # relative (0-1) coordinates; scale them to pixels.
                topRel = bbox[0]
                leftRel = bbox[1]
                bottomRel = bbox[2]
                rightRel = bbox[3]

                xmin = leftRel * imageWidth
                ymin = topRel * imageHeight
                xmax = rightRel * imageWidth
                ymax = bottomRel * imageHeight

                # No DetectedObjectType attached here (unlike the detect()
                # variants below, which label detections with a category).
                obj = DetectedObject(BoundingBox(xmin, ymin, xmax, ymax),
                                     scores[i])
                detections.add(obj)

        print("Detected {}".format(len(goodBoxes)))

        self.push_to_port_using_trait("detected_object_set", detections)

        self._base_step()
Example #6
0
    def detect(self, in_img_c):
        """Run the detector on one image container and return detections.

        Returns a DetectedObjectSet holding every detection whose score
        is at least ``self.confidence_thresh``, with boxes scaled to
        pixel coordinates and typed as ``self.category_name``.
        """
        image_height = in_img_c.height()
        image_width = in_img_c.width()

        # Either normalize the raw image, or just force it to RGB.
        if self.norm_image_type and self.norm_image_type != "none":
            print("Normalizing input image")

            in_img = in_img_c.image().asarray().astype("uint16")
            bottom, top = self.get_scaling_values(self.norm_image_type, in_img,
                                                  image_height)
            in_img = self.lin_normalize_image(in_img, bottom, top)
            # Stack the single channel three times to mimic RGB input.
            in_img = np.tile(in_img, (1, 1, 3))
        else:
            in_img = np.array(get_pil_image(in_img_c.image()).convert("RGB"))

        start_time = time.time()
        boxes, scores, classes = self.generate_detection(
            self.detection_graph, in_img)
        elapsed = time.time() - start_time
        print("Done running detector in {}".format(
            humanfriendly.format_timespan(elapsed)))

        good_boxes = []
        detections = DetectedObjectSet()

        # Boxes arrive as [top, left, bottom, right] relative (0-1) coords.
        for i, score in enumerate(scores):
            if score < self.confidence_thresh:
                continue

            bbox = boxes[i]
            good_boxes.append(bbox)

            top_rel = bbox[0]
            left_rel = bbox[1]
            bottom_rel = bbox[2]
            right_rel = bbox[3]

            pixel_box = BoundingBox(left_rel * image_width,
                                    top_rel * image_height,
                                    right_rel * image_width,
                                    bottom_rel * image_height)
            dot = DetectedObjectType(self.category_name, score)
            detections.add(DetectedObject(pixel_box, score, dot))

        print("Detected {}".format(len(good_boxes)))
        return detections
Example #7
0
  def detect( self, in_img_c ):
    """Run the detector on one image container and return detections.

    Normalizes the input image when self.norm_image_type is set (and not
    "none"), runs self.generate_detection, and returns a
    DetectedObjectSet of all detections scoring at least
    self.confidence_thresh, with boxes scaled to pixel coordinates and
    typed as self.category_name.
    """
    # NOTE(review): `tf` is not referenced in this body; presumably the
    # import must happen before generate_detection runs -- confirm.
    import tensorflow as tf
    import humanfriendly

    image_height = in_img_c.height()
    image_width = in_img_c.width()

    if (self.norm_image_type and self.norm_image_type != "none"):
      print("Normalizing input image")

      in_img = in_img_c.image().asarray().astype("uint16")

      bottom, top = self.get_scaling_values(self.norm_image_type, in_img, image_height)
      in_img = self.lin_normalize_image(in_img, bottom, top)

      # Replicate the single channel so the detector sees 3 channels.
      in_img = np.tile(in_img, (1, 1, 3))
    else:
      in_img = np.array(get_pil_image(in_img_c.image()).convert("RGB"))

    start_time = time.time()
    boxes, scores, classes = self.generate_detection(self.detection_graph, in_img)
    elapsed = time.time() - start_time
    print("Done running detector in {}".format(humanfriendly.format_timespan(elapsed)))

    good_boxes = []
    detections = DetectedObjectSet()

    # Boxes are [top, left, bottom, right] in relative (0-1) coordinates.
    for i in range(0, len(scores)):
      if (scores[i] >= self.confidence_thresh):
        bbox = boxes[i]
        good_boxes.append(bbox)

        top_rel = bbox[0]
        left_rel = bbox[1]
        bottom_rel = bbox[2]
        right_rel = bbox[3]

        xmin = left_rel * image_width
        ymin = top_rel * image_height
        xmax = right_rel * image_width
        ymax = bottom_rel * image_height

        dot = DetectedObjectType(self.category_name, scores[i])
        obj = DetectedObject(BoundingBox(xmin, ymin, xmax, ymax), scores[i], dot)
        detections.add(obj)

    print("Detected {}".format(len(good_boxes)))
    return detections
Example #8
0
    def _step( self ):
        """Pass the input image through; replace it with a random solid color
        when the incoming object track set is empty."""
        # Pull the inputs off their ports.
        in_img_c = self.grab_input_using_trait( 'image' )
        tracks = self.grab_input_using_trait( 'object_track_set' )

        # Unwrap to a PIL RGB image (just for show).
        out_img = get_pil_image( in_img_c.image() ).convert( 'RGB' )

        if not tracks.tracks():
            # No tracks: fill the frame with a single random color.
            # NOTE(review): `pil_image` is a free name here -- presumably
            # the PIL.Image module under an alias; verify the module-level
            # import.
            fill = ( randint( 0, 255 ), randint( 0, 255 ), randint( 0, 255 ) )
            out_img = pil_image.new( mode='RGB', size=out_img.size, color=fill )

        # Push the (possibly replaced) image to the output port.
        self.push_to_port_using_trait( 'image', ImageContainer( from_pil( out_img ) ) )

        self._base_step()