Example #1
    def find_line_and_confidence(self):
        top_line = self.find_line()

        rect = self.calculate_bottom_bounding_box(top_line)
        if usb_is_connected:
            self.snapshot.draw_rectangle(rect, color=(255, 255, 255))
        self.logger.debug("rect=", rect)
        if rect[1] < (self.snapshot.height() >> 1):
            sensor.flush()
        bottom_line = self.find_line(roi=rect)
        if bottom_line:
            if usb_is_connected:
                self.snapshot.draw_line(bottom_line.line(), color=(0, 0, 255))
            if top_line:
                confidence = 1 - abs(bottom_line.x1() - top_line.x2()) / (
                    self.snapshot.width() << 1)
            else:
                confidence = 1 - bottom_line.x1() / (
                    self.snapshot.width() << 1)
            self.logger.debug("confidence=", confidence)
        else:
            confidence = 0

        self.logger.trace("returing line=", top_line, ", confidence=",
                          confidence)
        return (top_line, confidence)
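
The confidence value above is pure horizontal alignment: the distance between the bottom segment's start (bottom_line.x1()) and the top segment's end (top_line.x2()), scaled by twice the frame width, so any on-screen offset yields a value between 0.5 and 1.0. A minimal standalone sketch of that arithmetic (the function name and sample numbers are illustrative, not part of the original class):

def line_confidence(top_x2, bottom_x1, img_width):
    # 1 - |offset| / (2 * width); the offset can never exceed the frame width,
    # so the result stays in [0.5, 1.0]
    return 1 - abs(bottom_x1 - top_x2) / (img_width * 2)

# e.g. a 160-pixel-wide frame with a 20-pixel offset: 1 - 20/320 = 0.9375
print(line_confidence(100, 120, 160))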
Example #2
    def find_line(self, **keyword_parameters):
        global top_area_theshold, bottom_area_theshold
        self.logger.trace("entering find_line...")
        if self.threshold is None:
            raise RuntimeError("Set threshold before trying to find a line")

        self.logger.debug("calling get_regression... threshold=",
                          self.threshold)
        if 'roi' in keyword_parameters:
            roi = keyword_parameters['roi']
            #roi_stats = self.snapshot.get_statistics(roi = roi)
            #self.logger.debug("roi_stats=", roi_stats)
            area_theshold = bottom_area_theshold
        else:
            roi = self.line_roi
            area_theshold = top_area_theshold

        self.line = self.snapshot.get_regression(
            self.threshold,
            area_threshold=area_theshold,
            pixels_threshold=_PIXELS_THRESHOLD,
            robust=True,
            roi=roi)

        if ('roi' not in keyword_parameters) and usb_is_connected and self.line:
            self.logger.trace("line found")
            self.snapshot.draw_line(self.line.line(), color=self.line_color)
            sensor.flush()

        self.logger.trace("returing line=", self.line)
        return self.line
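
find_line() above is a thin wrapper around img.get_regression(). For reference, a self-contained sketch of the same call pattern on an OpenMV board; the grayscale threshold, resolution, and area/pixel limits here are assumptions to tune, not values taken from the class:

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

GRAYSCALE_THRESHOLD = [(0, 64)]  # assumed: a dark line on a light floor

img = sensor.snapshot()
line = img.get_regression(GRAYSCALE_THRESHOLD,
                          area_threshold=100,
                          pixels_threshold=100,
                          robust=True)
if line:
    img.draw_line(line.line(), color=127)
    sensor.flush()  # push the annotated frame buffer to the IDE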
Example #3
    def take(self):
        self.logger.trace("entering take...")
        try:
            self.watchdog.feed()
        except AttributeError:
            pass  # no watchdog to feed

        # after taking a picture: img.binary(COLOR_THRESHOLDS if COLOR_LINE_FOLLOWING else GRAYSCALE_THRESHOLDS)
        self.logger.trace("taking snapshot...")
        self.snapshot = sensor.snapshot()

        #if (pyb.elapsed_millis(self.glare_check_millis) > MILLIS_BETWEEN_GLARE_CHECK):
        #self._check_glare()
        #self.glare_check_millis = pyb.millis()

        if usb_is_connected:
            self.snapshot.draw_rectangle(self.line_roi, color=(150, 150, 150))

        if self.SHOW_BINARY_VIEW:
            if self.threshold is None:
                raise RuntimeError(
                    "Set threshold before trying to use binary view")
            self.logger.trace("calling binary...")
            self.snapshot.binary(self.threshold)
        elif self.use_hist:
            self.logger.trace("calling histeq...")
            self.snapshot.histeq()
        #elif self.CHROMINVAR:
        #trace("calling chrominvar...")
        #self.snapshot.chrominvar()

        sensor.flush()
        self.logger.trace("returing snapshot...")
        return self.snapshot
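
take() optionally post-processes the frame before flushing it to the IDE: img.binary() reduces it to the two-level view used for threshold debugging, while img.histeq() stretches contrast via histogram equalization. A minimal sketch of those two paths outside the class (the threshold value is an assumption):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

SHOW_BINARY_VIEW = True
GRAYSCALE_THRESHOLD = [(0, 64)]  # assumed threshold for the binary view

img = sensor.snapshot()
if SHOW_BINARY_VIEW:
    img.binary(GRAYSCALE_THRESHOLD)  # two-level view of the thresholded pixels
else:
    img.histeq()                     # histogram equalization for extra contrast
sensor.flush()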
Example #4
def wait_for_april_tag():
    global state
    state = READY_FOR_INPUT
    while state == READY_FOR_INPUT:
        set_pw_colors(time.ticks())

        # if not taking snapshots, delay
        #pyb.udelay(led_timer.period())

        clock.tick() # for fps()
        img = sensor.snapshot()

        for tag in img.find_apriltags():
            state = GOT_INPUT
            sensor.flush()
        #print(clock.fps())
    return tag.id()
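
wait_for_april_tag() reads the loop variable tag after the while loop has finished, which only works because state changes exclusively inside the for loop. A more direct sketch of the same idea, returning as soon as a tag is seen (standalone; the sensor setup is an assumption, and the set_pw_colors/state bookkeeping is omitted):

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

def wait_for_april_tag_simple():
    while True:
        clock.tick()            # keeps clock.fps() meaningful
        img = sensor.snapshot()
        tags = img.find_apriltags()
        if tags:
            sensor.flush()      # show the frame that contained the tag
            return tags[0].id()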
Example #5
File: 20-drawing.py  Project: openmv/openmv
def unittest(data_path, temp_path):
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    img.set_pixel(img.width() // 2 + 50, 120, 255)
    img.set_pixel(img.width() // 2 - 50, 120, 255)
    img.draw_line([img.width() // 2 - 50, 50, img.width() // 2 + 50, 50])
    img.draw_rectangle([img.width() // 2 - 25, img.height() // 2 - 25, 50, 50])
    img.draw_circle(img.width() // 2, img.height() // 2, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(img.width() // 2, img.height() // 2)
    sensor.flush()
    img.difference(data_path + "/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
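
The unit test draws known primitives, subtracts a stored reference frame with img.difference(), and passes only if every pixel of the result is zero. The final assertion boils down to this pattern (a sketch; the helper name and path argument are placeholders):

def images_match(img, reference_path):
    img.difference(reference_path)   # in-place absolute difference against the reference
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)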
Example #6
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.4)

    # Draw faces
    for face in objects:
        print(face)
        #img.draw_rectangle(face)
        (x, y, r) = circle_from_rect(face)
        img.draw_circle(x, y, r)
        # Now find eyes within each face.
        # Note: Use a higher threshold here (more detections) and lower scale (to find small objects)
        eyes = img.find_features(eyes_cascade,
                                 threshold=0.5,
                                 scale_factor=1.2,
                                 roi=face)
        for e in eyes:
            print("found eyes")
            print(e)
            #(x, y, r) = circle_from_rect(e)
            #img.draw_circle(x, y, r)
            img.draw_rectangle(e)
            red_led.on()
            notFound = False

sensor.flush()
time.sleep(5000)

# Print FPS.
# Note: Actual FPS is higher, streaming the FB makes it slower.
#print(clock.fps())
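
The last example assumes face_cascade, eyes_cascade, red_led, circle_from_rect, and notFound are defined earlier in the script. A sketch of the setup it implies, using the Haar cascades built into the OpenMV firmware (stage counts and the helper are illustrative assumptions):

import sensor, image
from pyb import LED

sensor.reset()
sensor.set_contrast(1)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.HQVGA)
sensor.skip_frames(time=2000)

face_cascade = image.HaarCascade("frontalface", stages=25)
eyes_cascade = image.HaarCascade("eye", stages=24)
red_led = LED(1)   # LED(1) is the red LED on OpenMV boards
notFound = True

def circle_from_rect(rect):
    # assumed helper: inscribe a circle in a detection rectangle
    x, y, w, h = rect
    return (x + w // 2, y + h // 2, min(w, h) // 2)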