Example #1
        def get_mapped_centroid(b):
            # By default the readout window is set to the whole sensor pixel array with x/y==0.
            # The resolution you see is produced by taking pixels from the readout window on
            # the camera. The x/y location is relative to the sensor center.
            x, y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

            # The camera driver will scale whatever resolution you pass to the maximum
            # width/height that fits on the sensor while keeping the aspect ratio.
            ratio = min(w / float(sensor.width()), h / float(sensor.height()))

            # Reference cx() to the center of the viewport and then scale to the readout.
            mapped_cx = (b.cx() - (sensor.width() / 2.0)) * ratio
            # Since we are keeping the aspect ratio there might be an offset in x.
            mapped_cx += (w - (sensor.width() * ratio)) / 2.0
            # Add in our displacement from the sensor center
            mapped_cx += x + (sensor_w / 2.0)

            # Reference cy() to the center of the viewport and then scale to the readout.
            mapped_cy = (b.cy() - (sensor.height() / 2.0)) * ratio
            # Since we are keeping the aspect ratio there might be an offset in y.
            mapped_cy += (h - (sensor.height() * ratio)) / 2.0
            # Add in our displacement from the sensor center
            mapped_cy += y + (sensor_h / 2.0)

            return (mapped_cx, mapped_cy) # X/Y location on the sensor array.
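The helper above reads two globals, sensor_w and sensor_h, holding the full sensor readout size. A minimal usage sketch, assuming they are captured once at startup and using a placeholder blob threshold:

import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

# Capture the full readout size once; get_mapped_centroid() relies on these globals.
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

img = sensor.snapshot()
for b in img.find_blobs([(30, 100, 15, 127, 15, 127)], pixels_threshold=100):
    print(get_mapped_centroid(b))  # centroid in sensor-array coordinates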
Example #2
    def get_angle(blob) -> float:
        """Get horizontal relative angle (in degrees) of the blob.

        Args:
            blob: a blob object (you can get one with ball_blob())

        Returns:
            The angle in degrees, between -35.4 and +35.4.
        """
        rel_angle = Camera.HFOV * (blob.cxf() -
                                   sensor.width() / 2) / sensor.width()
        return rel_angle
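A hypothetical call, assuming Camera.HFOV is the horizontal field of view in degrees (70.8 here, hence the ±35.4 range) and ball_blob() returns a blob or None:

blob = ball_blob()
if blob:
    angle = get_angle(blob)  # 0.0 when the ball is dead center
    print("ball at %+.1f degrees" % angle)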
Example #3
 def reset_sensor():
     sensor.reset()
     sensor.set_pixformat(
         sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
     sensor.set_framesize(FRAME_SIZE)
     sensor.set_vflip(True)
     sensor.set_hmirror(True)
     sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)),
                           int(sensor.height() * (1.0 - FRAME_REGION)),
                           int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)),
                           int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE))
     sensor.skip_frames(time=200)
     if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)
     if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)
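The windowing call crops to a bottom band: x runs from width/2 * (1 - FRAME_WIDE) to width/2 * (1 + FRAME_WIDE), and the band covers the bottom FRAME_REGION of the frame minus BOTTOM_PX_TO_REMOVE rows. A sketch of the module constants this snippet assumes (values illustrative only):

# Illustrative values only -- the real project defines these elsewhere.
COLOR_LINE_FOLLOWING = True
FRAME_SIZE = sensor.QQVGA    # 160x120
FRAME_WIDE = 1.0             # 1.0 = keep the full width, <1.0 narrows the crop
FRAME_REGION = 0.8           # look at the bottom 80% of the frame
BOTTOM_PX_TO_REMOVE = 0      # rows to drop from the very bottom (e.g. car body)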
Example #4
        def center_on_blob(b, res):
            mapped_cx, mapped_cy = get_mapped_centroid(b)

            # Switch to the res (if res was unchanged this does nothing).
            sensor.set_framesize(res)

            # Construct readout window. x/y are offsets from the center.
            x = int(mapped_cx - (sensor_w / 2.0))
            y = int(mapped_cy - (sensor_h / 2.0))
            w = sensor.width()
            h = sensor.height()

            # Focus on the centroid.
            sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (x, y, w, h))

            # See if we are hitting the edge.
            new_x, new_y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

            # You can use these error values to drive servos to move the camera if you want.
            x_error = x - new_x
            y_error = y - new_y

            if x_error < 0: print("-X Limit Reached ", end="")
            if x_error > 0: print("+X Limit Reached ", end="")
            if y_error < 0: print("-Y Limit Reached ", end="")
            if y_error > 0: print("+Y Limit Reached ", end="")
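A minimal sketch of how this pairs with get_mapped_centroid() in a search loop; the resolutions and threshold are assumptions, and example #29 below shows the fuller tracking loop:

SEARCHING_RESOLUTION = sensor.QVGA   # assumed constants
TRACKING_RESOLUTION = sensor.QQVGA

sensor.set_framesize(SEARCHING_RESOLUTION)
img = sensor.snapshot()
blobs = img.find_blobs([(30, 100, 15, 127, 15, 127)], pixels_threshold=100)
if blobs:
    b = max(blobs, key=lambda x: x.density())
    center_on_blob(b, TRACKING_RESOLUTION)  # zoom the readout window onto it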
Example #5
File: find_line.py Project: wuyou33/ETAfly
def find_line(picture, ctrl, uart):

    img = picture.binary([RED_THRESHOLD])  # binarize the image
    line = img.get_regression(
        [(100, 100, 0, 0, 0, 0)],
        robust=True)  # fit a line through the set pixels; robust=True uses Theil-Sen rather than least squares
    if line:
        img.draw_line(line.line(), color=127)  # draw the fitted line
        rho_err = int(abs(line.rho()) - sensor.width() / 2)  # distance of the line from the image center

        # convert theta (0~180) into a signed angle about vertical
        theta_err = line.theta()  # 0~180
        if line.theta() > 90:  # left of vertical is negative
            angle_err = line.theta() - 180
        else:
            angle_err = line.theta()  # right of vertical is positive

        line_info.rho_err = pack('h', rho_err)  # packed as a signed short
        line_info.rho_err = bytearray(line_info.rho_err)
        line_info.angle_err = pack('h', angle_err)  # packed as a signed short
        line_info.angle_err = bytearray(line_info.angle_err)
        uart.write(pack_line_data(line_info, ctrl, 1))  # line detected: flag is 1
        LEDG.on()
    else:
        uart.write(pack_line_data(line_info, ctrl, 0))  # no line detected: flag is 0
        LEDG.off()
Example #6
def barcode_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((sensor.width(), sensor.height()//8))
    codes = sensor.snapshot().find_barcodes()
    if not codes: return bytes() # No detections.
    return max(codes, key = lambda c: c.w() * c.h()).payload().encode()
Example #7
def all_barcode_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((sensor.width(), sensor.height()//8))
    codes = sensor.snapshot().find_barcodes()
    if not codes: return bytes() # No detections.
    return str(codes).encode()
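Both callbacks take a data argument and return bytes, which matches the slave-callback convention of OpenMV's rpc library; a sketch of how they might be registered (the USB VCP transport here is an assumption):

import rpc

interface = rpc.rpc_usb_vcp_slave()  # any rpc slave transport works the same way
interface.register_callback(barcode_detection)
interface.register_callback(all_barcode_detection)
interface.loop()  # serve remote calls forever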
Example #8
    def calculate_bottom_bounding_box(self, top_line):
        if (top_line):
            self.logger.debug("top_line=", top_line)
            theta2 = theta(top_line)
            #self.logger.debug("theta=", theta2)
            self.logger.debug("x1 + x2=", top_line.x1() + top_line.x2())

            half_width = self.snapshot.width() >> 1
            if (top_line.x1() + top_line.x2() > sensor.width()
                    and top_line.x1() > top_line.x2() and abs(theta2) > 45):
                x = top_line.x1()
            else:
                x = top_line.x2()
            #x = min(top_line.x1() - half_width, top_line.x2() - half_width) + half_width
            #x = top_line.x2() if abs(theta2) < 45 else top_line.x1()
            y = max(top_line.y1(), top_line.y2())
            height = min(self.snapshot.height() >> 2,
                         self.snapshot.height() - y)
            width = max(
                abs(int(math.tan(math.radians(theta2)) * height)) * 2, 20)
            x = x - (width >> 1)
            if (x < 0):
                width += x
                x = 0

            self.logger.debug("x=", x, " y=", y, " width=", width, " height=",
                              height)
            return (x, y, width, height)

        return (0, (self.snapshot.height() >> 1) + self.line_roi[1],
                self.snapshot.width(),
                (self.snapshot.height() >> 1) - self.line_roi[1])
Example #9
def raw_image_snapshot(data):
    pixformat, framesize = struct.unpack("<II", data)
    sensor.set_pixformat(pixformat)
    sensor.set_framesize(framesize)
    img = sensor.snapshot()
    return struct.pack("<IIII", sensor.width(), sensor.height(),
                       sensor.get_pixformat(), img.size())
Example #10
def getClosestToCenter(object_):
    if object_:
        dists = [
            math.sqrt((obj.cx() - sensor.width() / 2)**2 +
                      (obj.cy() - sensor.height() / 2)**2) for obj in object_
        ]
        closest = object_[dists.index(min(dists))]
        return (closest.cx(), closest.cy())
    return 0
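A short usage sketch (the color threshold is a placeholder):

img = sensor.snapshot()
blobs = img.find_blobs([(30, 100, 15, 127, 15, 127)], pixels_threshold=50)
center = getClosestToCenter(blobs)  # (cx, cy) of the nearest blob, or 0
if center:
    print("closest blob at", center)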
Example #11
def getSteerValues(lines_, bothV, bothX):
    new = False
    if lines_[0] and lines_[1]:
        new = True
        leftV = lines_[0].theta()+90 if lines_[0].theta() < 90 else abs(lines_[0].theta()-90)
        rightV = lines_[1].theta()+90 if lines_[1].theta() < 90 else abs(lines_[1].theta()-90)
        bothV = (leftV+rightV) / 2
        bothX = (lines_[0].x2() + lines_[1].x2())/2 - sensor.width()/2
    return bothV, bothX, new
Example #12
def clamp_roi(roi):
    """
    Build four small clamp ROIs centered on the edges of the given ROI,
    clamped to the sensor frame.
    """
    wid = 10
    ht = 10
    x = int(roi[0] + (roi[2] - wid) / 2)
    y = int(roi[1] + (roi[3] - ht) / 2)
    clp_roi = [[roi[0], y, wid, ht], [x, roi[1], wid, ht],
               [roi[0] + roi[2] - wid, y, wid, ht],
               [x, roi[1] + roi[3] - ht, wid, ht]]
    for r in clp_roi:
        if r[0] < 0: r[0] = 0
        if r[1] < 0: r[1] = 0
        if r[0] >= sensor.width(): r[0] = sensor.width() - 1
        if r[1] >= sensor.height(): r[1] = sensor.height() - 1
        if r[0] + r[2] > sensor.width(): r[2] = sensor.width() - r[0]
        if r[1] + r[3] > sensor.height(): r[3] = sensor.height() - r[1]
    print(clp_roi)
    return clp_roi
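For a concrete feel: a 100x100 ROI at (10, 20) produces 10x10 clamp windows centered on its left, top, right, and bottom edges.

# Hypothetical input; clamp_roi() clamps against sensor.width()/height().
for r in clamp_roi((10, 20, 100, 100)):
    print(r)  # [10, 65, 10, 10], [55, 20, 10, 10], [100, 65, 10, 10], [55, 110, 10, 10]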
Example #13
    def distance_to(blob) -> float:
        """Calculate real distance between camera and object.

        Args:
            blob: a blob object (you can get one with ball_blob())

        Returns:
            The distance in millimeters.
        """
        obj_width_on_sensor = (Camera.REAL_SIZE * blob.h() /
                               2) / sensor.width()
        distance = (Camera.BALL_DIAMETER *
                    Camera.FOCAL_LENGTH) / obj_width_on_sensor
        return distance
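A worked example with hypothetical constants: REAL_SIZE = 4.8 (mm of sensor width), FOCAL_LENGTH = 2.8 mm, BALL_DIAMETER = 65 mm, at QVGA (sensor.width() = 320) with a blob 40 px tall:

obj_width_on_sensor = (4.8 * 40 / 2) / 320  # = 0.3 mm on the sensor
distance = (65 * 2.8) / 0.3                 # ~ 606.7 mm from the camera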
Example #14
def initFrameBuffer():
    # Take from the main frame buffer's RAM to allocate a second frame buffer.
    # There's a lot more RAM in the frame buffer than in the MicroPython heap.
    # However, after doing this you have a lot less RAM for some algorithms...
    # So, be aware that it's a lot easier to run out of RAM now. However,
    # frame differencing doesn't use a lot of the extra space in the frame buffer.
    # But, things like AprilTags do and won't work if you do this...
    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), IMG_TYPE)

    print("About to save background image...")
    sensor.skip_frames(time=2000)  # Give the user time to get ready.
    extra_fb.replace(sensor.snapshot())
    print("Saved background image - Now frame differencing!")
    return extra_fb
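A minimal sketch of the differencing loop this buffer enables, assuming IMG_TYPE is sensor.GRAYSCALE (the threshold below is a placeholder):

extra_fb = initFrameBuffer()
while True:
    img = sensor.snapshot()
    img.difference(extra_fb)  # absolute difference against the saved background
    for b in img.find_blobs([(25, 255)], pixels_threshold=40):
        img.draw_rectangle(b.rect())  # highlight regions that changed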
Example #15
def uarm_setup(grayscale=False, resolution=2):
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE if grayscale else sensor.RGB565)
    # image resolution affects the calibration data
    size_lookup = [
        sensor.QQQQVGA, sensor.QQQVGA, sensor.QQVGA, sensor.QVGA, sensor.VGA
    ]
    sensor.set_framesize(size_lookup[resolution])
    # both rotate & mirror the image, so its coordinates match the uArm's
    sensor.set_hmirror(True)
    # chop off the extra black spaces after rotation
    sensor.set_windowing((int((sensor.width() - sensor.height()) / 2), 0,
                          sensor.height(), sensor.height()))
    # any other sensor configurations go here...
    # finally, skip some frames after configuring
    sensor.skip_frames()
Example #16
 def __init__(self):
     self.logger = Logger()
     self.logger.set_level(DEBUG_LOG_LEVEL)
     self.logger.info("initializing Camera")
     self.reset_sensor()
     self.SHOW_BINARY_VIEW = False
     #self.CHROMINVAR = True
     self.line_color = (0, 255, 0)
     # [(1, 80, -25, 28, -58, -10)]
     self.threshold = None  # self.old_threshold = [(44, 87, -27, 23, -62, -15)] #Threshold.BLUE
     self.use_hist = True
     self.line_roi = (0, round(sensor.height() / 15), sensor.width(),
                      round(sensor.height() / 2))
     #self.blobs_roi = (0, 0, sensor.width(), round(sensor.height() / 3))
     self.snapshot = None
     #self.glare_check_millis = MILLIS_BETWEEN_GLARE_CHECK
     self.adjuster = 16384
Example #17
def getSteerValues(lines_, bothV_, bothX_):  #, servo_, motor_):
    new = False
    if lines_[0] and lines_[1]:
        new = True
        leftV = lines_[0].theta() + 90 if lines_[0].theta() < 90 else abs(
            lines_[0].theta() - 90)
        rightV = lines_[1].theta() + 90 if lines_[1].theta() < 90 else abs(
            lines_[1].theta() - 90)
        bothV_ = (leftV + rightV) / 2
        bothX_ = (lines_[0].x2() + lines_[1].x2()) / 2 - sensor.width() / 2
        # servo_ = 1023/180*(bothV_+bothX_)
        # motor_ = 512-abs(servo_-512)

        # servo_ = max(min(servo_, 1023), 0)
        # motor_ = max(min(motor_, 1023), 0)

    return bothV_, bothX_, new  #, servo_, motor_
Example #18
 def init(self,
          gain_db=0,
          shutter_us=500000,
          framesize=sensor.WQXGA2,
          force_reset=True,
          flip=False):
     if self.simulate:
         self.shutter = shutter_us
         self.gain = gain_db
         self.snap_started = False
         return
     if force_reset or self.has_error or self.gain != gain_db or self.shutter != shutter_us or self.framesize != framesize or self.flip != flip:
         sensor.reset()
         sensor.set_pixformat(self.pixfmt)
         sensor.set_framesize(framesize)
         if flip:  # upside down camera
             sensor.set_vflip(True)
             sensor.set_hmirror(True)
         self.flip = flip
         self.framesize = framesize
         if shutter_us < 0:
             sensor.set_auto_exposure(True)
         else:
             if shutter_us > 500000:
                 sensor.__write_reg(0x3037, 0x08)  # slow down PLL
                 if shutter_us > 1000000:
                     pyb.delay(100)
                     sensor.__write_reg(0x3037, 0x18)  # slow down PLL
                     if shutter_us > 1500000:
                         pyb.delay(100)
                         sensor.__write_reg(0x3036, 80)  # slow down PLL
                         # warning: doesn't work well, might crash
                 pyb.delay(200)
             sensor.set_auto_exposure(False, shutter_us)
         self.shutter = shutter_us
         if gain_db < 0:
             sensor.set_auto_gain(True)
         else:
             sensor.set_auto_gain(False, gain_db)
         self.gain = gain_db
         self.wait_init = 2
         self.width = sensor.width()
         self.height = sensor.height()
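A hypothetical call, assuming this method lives on a camera wrapper class whose constructor sets pixfmt and simulate; negative gain_db/shutter_us select full auto:

cam.init(gain_db=-1, shutter_us=-1)        # auto gain + auto exposure
cam.init(gain_db=0, shutter_us=1000000)    # 1 s manual exposure (PLL slowed above)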
Example #19
 def render(self, timer, img=None, scale=None):
     """ Renders an image to the LCD shield """
     img = img or self.img
     scale = scale or self.scale
     if self.draw_stats:
         img = img.copy((0, 0, sensor.width(), sensor.height()), scale,
                        scale).to_rgb565(copy=True)
         self.draw_fps(img)
         self.draw_exposure(img)
     if self.draw_line_stats:
         self.draw_line_stat(img)
     if self.draw_lap_times:
         self.draw_laps(img)
     if self.draw_timer:
         self.draw_time(img)
     if self.draw_lines:
         for iterations, line in self._known_lines:
             scaled_line = ulab.array(line.line()) * scale
             img.draw_line(int(scaled_line[0]),
                           int(scaled_line[1]),
                           int(scaled_line[2]),
                           int(scaled_line[3]),
                           color=self.line_draw_color)
     lcd.display(img)
Example #20
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run out of RAM now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
Example #21
# SOH-(X)-GS-(Y)-EOT
#

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(
    sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run out of RAM now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                 sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while (True):
    img = sensor.snapshot()  # Take a picture and return the image.
    pixelX = []
    pixelY = []

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(
                img,
                roi=(x, y, BLOCK_W, BLOCK_H),
                template_roi=(x, y, BLOCK_W, BLOCK_H))

def typeof(variate):
    # Classify a value by its container type.
    type = None
    if isinstance(variate, list):
        type = "list"
    elif isinstance(variate, tuple):
        type = "tuple"
    elif isinstance(variate, dict):
        type = "dict"
    elif isinstance(variate, set):
        type = "set"
    return type

Example #22
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X32) # Set frame size to 64x32... (or 64x64)...
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

Delta_x = 0
Delta_y = 0

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)

    # Offset results are noisy without filtering so we drop some accuracy.
    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
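    # Plausible continuation (not in the original snippet): gate on the match
    # response -- below ~0.1 the measured translation is mostly noise -- and
    # integrate the per-frame deltas into the running totals declared above.
    if displacement.response() > 0.1:
        Delta_x += sub_pixel_x
        Delta_y += sub_pixel_y
    print("total: x %0.2f y %0.2f fps %0.2f" % (Delta_x, Delta_y, clock.fps()))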
Example #23
                 dual_buff=True)  #OV7740  Loop Time :91ms, run fps:10.98901
    #sensor.reset(freq=20000000)
else:
    sensor.reset()  # OV2640 Reset and initialize the sensor. It will
    # run automatically, call sensor.run(0) to stop
#sensor.shutdown(enable)
gc.collect()
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.VGA)
#sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
#sensor.set_auto_whitebal(True)  #OV2640
sensor.set_hmirror(1)  #for unit V
sensor.set_vflip(1)  #for unit V

img_w = sensor.width()
img_h = sensor.height()
sensor_ID = sensor.get_id()
print("image sensor is " + str(sensor_ID) + ", with size " + str(img_w) +
      " x " + str(img_h))
sensor.run(1)
sensor.skip_frames(time=1000)  # Wait for settings to take effect.
#sensor.skip_frames(30)             # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
Zeit_end = 600 * 1000
condition = True
Zeit_Anfang = time.ticks_ms()
run_cnt = 0
loop_time = 1
fps = 0
#BtnA saves img to "train"; BtnB toggles QR-scan (may run out of memory @ VGA!)
Example #24
interface = rpc.rpc_spi_master(cs_pin="P3", freq=10000000, clk_polarity=1, clk_phase=0)

# initialize the pin that will control the remote cam synchronisation
pin4 = Pin('P4', Pin.OUT_PP, Pin.PULL_NONE)
pin4.value(0)

# here we always choose the QVGA format (320x240) inside a VGA image
img_width = 320
img_height = 240
sensor.reset()
sensor_format = sensor.GRAYSCALE # Grayscale is enough to find a chessboard
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
if img_width != sensor.width() or img_height != sensor.height():
    sensor.set_windowing((int((sensor.width() - img_width) / 2),
                          int((sensor.height() - img_height) / 2),
                          img_width, img_height))
sensor.skip_frames(time = 2000)

# get the current exposure and gains and send them to the remote cam so that the
# 2 cams have the same image settings
sensor.snapshot()
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
print("exposure is " + str(exposure_us))
rgb_gain_db = sensor.get_rgb_gain_db()
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)

result = interface.call("sensor_config", struct.pack("<fIfff", gain_db, exposure_us,
Example #25
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run out of RAM now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
    # Put in a z_rotation value below and you should see the r output be equal to that.
    if(0):
        expected_rotation = 20.0
        extra_fb.rotation_corr(z_rotation=(-expected_rotation))

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
    # Put in a zoom value below and you should see the z output be equal to that.
    if(0):
Example #26
##green_led_on = True
##else:
##green_led_on = False
#pyb.LED(2).toggle()

#tim = Timer(4, freq=1000)      # create a timer object using timer 4 - trigger at 1kHz
#tim.callback(tick)          # set the callback to our tick function

# Camera Control Code
sensor.reset()
sensor.set_pixformat(
    sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
sensor.set_framesize(FRAME_SIZE)
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)),
                      int(sensor.height() * (1.0 - FRAME_REGION)),
                      int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)),
                      int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE))
sensor.skip_frames(time=200)
if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)
if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)
clock = time.clock()

old_time = pyb.millis()

throttle_old_result = None
throttle_i_output = 0
throttle_output = THROTTLE_OFFSET

steering_old_result = None
steering_i_output = 0
steering_output = STEERING_OFFSET
Example #27
File: flood_fill.py Project: openmv/openmv
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum allowed difference between
    # the initial pixel and any filled pixels. It's important to
    # set this such that flood fill doesn't fill the whole image.

    # floating_threshold controls the maximum allowed difference
    # between any two pixels. This can easily fill the whole image
    # with even a very low threshold.

    # flood_fill will fill pixels that pass both thresholds.

    # You can invert what gets filled with "invert" and clear
    # everything but the filled area with "clear_background".

    x = sensor.width() // 2
    y = sensor.height() // 2
    img = sensor.snapshot().flood_fill(x, y, \
        seed_threshold=0.05, floating_thresholds=0.05, \
        color=(255, 0, 0), invert=False, clear_background=False)

    print(clock.fps())
Example #28
from math import sqrt

def distToCell(blob):
    # distance from the blob's top-left corner to the bottom-center of the frame
    dist = sqrt(((blob.x() - sensor.width() / 2) *
                 (blob.x() - sensor.width() / 2)) +
                ((blob.y() - sensor.height()) * (blob.y() - sensor.height())))
    return dist
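A short usage sketch picking the blob nearest the frame's bottom center (the color threshold is a placeholder):

img = sensor.snapshot()
blobs = img.find_blobs([(30, 100, 15, 127, 15, 127)], pixels_threshold=50)
if blobs:
    nearest = min(blobs, key=distToCell)
    img.draw_cross(nearest.cx(), nearest.cy())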
Example #29
            # Find the blob in the lower resolution image.
            blobs = img.find_blobs(TRACKING_THRESHOLDS,
                                   area_threshold=TRACKING_AREA_THRESHOLD,
                                   pixels_threshold=TRACKING_PIXEL_THRESHOLD)

            # If we lose the blob then we need to find a new one.
            if not len(blobs):
                # Reset resolution.
                sensor.set_framesize(SEARCHING_RESOLUTION)
                sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h))
                break

            # Narrow down the blob list and highlight the blob.
            most_dense_blob = max(blobs, key = lambda x: x.density())
            img.draw_rectangle(most_dense_blob.rect())

            print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob))

            x_diff = most_dense_blob.cx() - (sensor.width() / 2.0)
            y_diff = most_dense_blob.cy() - (sensor.height() / 2.0)

            w_threshold = (sensor.width() / 2.0) * TRACKING_EDGE_TOLERANCE
            h_threshold = (sensor.height() / 2.0) * TRACKING_EDGE_TOLERANCE

            # Re-center on the blob if it starts going out of view (costs FPS).
            if abs(x_diff) > w_threshold or abs(y_diff) > h_threshold:
                center_on_blob(most_dense_blob, TRACKING_RESOLUTION)

    print(clock.fps())
Example #30
# to find non-infinite lines.

import time
import image
import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
# sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale is faster
sensor.set_framesize(sensor.QQVGA)  # QVGA: 320x240, QQVGA: 160x120
sensor.skip_frames(time=2000)

enable_lens_corr = True  # turn on for straighter lines...

# for getting screen resolution
screen_width = sensor.width()
screen_height = sensor.height()

# white and black threshold
THRESHOLD = 14

# for reducing noise in the image
EROSION_SIZE = 2

# for infinite lines
horizontal_range = 30
vertical_range = 30
theta = 0


def find_lines(img):
Example #31
def find_max(blobs):
    max_size = 0
    max_blob = None  # stays None if blobs is empty
    for blob in blobs:
        if max_size < blob[2] * blob[3]:
            max_blob = blob
            max_size = blob[2] * blob[3]
    return max_blob


##################################################
# main
##################################################
# region to learn thresholds from
rect_width = 50
rect_height = 50
r = [(sensor.width() // 2) - (rect_width // 2),
     (sensor.height() // 2) - (rect_height // 2), rect_width, rect_height]

# preparation
for i in range(60):
    img = sensor.snapshot()
    img.draw_rectangle(r)
    lcd.display(img)
    lcd.draw_string(0, 0, "Learning thresholds in %d ..." % (60 - i))

# learn the thresholds
threshold = [0, 0, 0, 0, 0, 0]
for i in range(60):
    img = sensor.snapshot()
    hist = img.get_histogram(roi=r)
    lo = hist.get_percentile(0.05)
Example #32
# pin used to sync the 2 cams
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)

# setting the SPI communication as a slave
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)

# here we always choose the QVGA format (320x240) inside a VGA image
img_width = 320
img_height = 240
sensor.reset()
sensor_format = sensor.GRAYSCALE
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
if img_width != sensor.width() or img_height != sensor.height():
    sensor.set_windowing(
        (int((sensor.width() - img_width) / 2),
         int((sensor.height() - img_height) / 2), img_width, img_height))
sensor.skip_frames(time=2000)
sensor.snapshot()


################################################################
# Call Backs
################################################################
def sensor_config(data):
    global processing
    gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db = struct.unpack(
        "<fIfff", data)
    sensor.set_auto_gain(False, gain_db)
Example #33
        else:
            cidx = index - 1
            area = int(sortedCells[cidx].w() * sortedCells[cidx].h())
            #print ()
            #print ("y: " + )
            #print ("area: " + area)
            #print ("index: " + index)
            can.send_advanced_track_data(sortedCells[cidx].x(),
                                         sortedCells[cidx].y(), area, 0, 11, 0,
                                         index)

    if len(sortedCells) != 0:
        img.draw_rectangle(sortedCells[0].x(),
                           sortedCells[0].y(),
                           sortedCells[0].w(),
                           sortedCells[0].h(),
                           color=(255, 0, 0))
        pyb.LED(1).on()
        pyb.LED(3).off()
    else:
        pyb.LED(1).off()
        pyb.LED(3).on()

    if can.get_frame_counter() % 50 == 0:
        can.send_config_data()
        can.send_camera_status(sensor.width(), sensor.height())

    pyb.delay(5)
    #print("HB %d" % can.get_frame_counter())
    #can.check_mode();