Example 1
        def get_mapped_centroid(b):
            # By default the readout window is set to the whole sensor pixel array with x/y==0.
            # The resolution you see is produced by taking pixels from the readout window on
            # the camera. The x/y location is relative to the sensor center.
            x, y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

            # The camera driver will scale whatever resolution you pass to the maximum
            # width/height that fits on the sensor while keeping the aspect ratio.
            ratio = min(w / float(sensor.width()), h / float(sensor.height()))

            # Reference cx() to the center of the viewport and then scale to the readout.
            mapped_cx = (b.cx() - (sensor.width() / 2.0)) * ratio
            # Since we are keeping the aspect ratio there might be an offset in x.
            mapped_cx += (w - (sensor.width() * ratio)) / 2.0
            # Add in our displacement from the sensor center
            mapped_cx += x + (sensor_w / 2.0)

            # Reference cy() to the center of the viewport and then scale to the readout.
            mapped_cy = (b.cy() - (sensor.height() / 2.0)) * ratio
            # Since we are keeping the aspect ratio there might be an offset in y.
            mapped_cy += (h - (sensor.height() * ratio)) / 2.0
            # Add in our displacement from the sensor center
            mapped_cy += y + (sensor_h / 2.0)

            return (mapped_cx, mapped_cy) # X/Y location on the sensor array.
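
The helper above reads module-level sensor_w/sensor_h globals that the excerpt does not show. A minimal setup sketch of how they would be obtained, assuming a sensor that supports the readout-window ioctls (e.g. the OV5640):

import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=1000)

# Right after reset the readout window spans the whole pixel array, so its
# width/height are the sensor_w/sensor_h globals used by get_mapped_centroid().
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)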
def raw_image_snapshot(data):
    pixformat, framesize = struct.unpack("<II", data)
    sensor.set_pixformat(pixformat)
    sensor.set_framesize(framesize)
    img = sensor.snapshot()
    return struct.pack("<IIII", sensor.width(), sensor.height(),
                       sensor.get_pixformat(), img.size())
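
raw_image_snapshot() has the shape of an OpenMV rpc callback: it receives packed bytes and returns packed bytes. A sketch of how it might be exposed on the slave side, assuming the OpenMV rpc library; the USB VCP transport is an illustrative choice:

import rpc

interface = rpc.rpc_usb_vcp_slave()
interface.register_callback(raw_image_snapshot)
interface.loop()  # serve remote calls forever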
def uarm_setup(grayscale=False, resolution=2):
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE if grayscale else sensor.RGB565)
    # image resolution affects the calibration data
    size_lookup = [
        sensor.QQQQVGA, sensor.QQQVGA, sensor.QQVGA, sensor.QVGA, sensor.VGA
    ]
    sensor.set_framesize(size_lookup[resolution])
    # both rotate & mirror the image, so its coordinates match the uArm's
    sensor.set_hmirror(True)
    # chop off the extra black spaces after rotation
    sensor.set_windowing((int((sensor.width() - sensor.height()) / 2), 0,
                          sensor.height(), sensor.height()))
    # any other sensor configurations go here...
    # finally, skip some frames after configuring
    sensor.skip_frames()
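
A usage sketch: per the lookup table above, resolution index 3 selects sensor.QVGA.

uarm_setup(grayscale=True, resolution=3)  # grayscale QVGA
print(sensor.width(), sensor.height())    # square window after the crop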
Example 4
    def draw_laps(self, img=None):
        img = img or self.img
        if len(self.lap_timestamps) > 1 and utime.ticks_diff(
                utime.ticks_add(
                    self.lap_timestamps[-1], self.lap_notification_timeout),
                utime.ticks_ms()) >= 0:
            lap_time = utime.ticks_diff(self.lap_timestamps[-1],
                                        self.lap_timestamps[-2])
            hours, remainder = divmod(lap_time, 3600000)
            minutes, remainder = divmod(remainder, 60000)
            seconds, milliseconds = divmod(remainder, 1000)
            lap_delta = '{:03}'.format(milliseconds)

            if seconds:
                lap_delta = '{:02}.'.format(int(seconds)) + lap_delta
                if minutes:
                    lap_delta = '{:02}:'.format(int(minutes)) + lap_delta
                    if hours:
                        lap_delta = '{:02}:'.format(int(hours)) + lap_delta
                else:
                    lap_delta = lap_delta + 's'
            else:
                lap_delta = lap_delta + 'ms'

            lap_delta = 'Lap: ' + lap_delta

            img.draw_string(10,
                            int(sensor.height() / 2),
                            lap_delta,
                            string_vflip=self.flip_text,
                            string_hmirror=self.mirror_text,
                            string_rotation=self.rotate_text)
            return lap_time
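
The guard above uses MicroPython's wraparound-safe tick arithmetic: utime.ticks_add() builds a deadline and utime.ticks_diff(deadline, now) stays non-negative until the deadline passes. The same pattern in isolation (the 5000 ms window is an arbitrary example):

import utime

deadline = utime.ticks_add(utime.ticks_ms(), 5000)
while utime.ticks_diff(deadline, utime.ticks_ms()) >= 0:
    pass  # still inside the window, correct even across tick wraparound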
Example 5
def barcode_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((sensor.width(), sensor.height()//8))
    codes = sensor.snapshot().find_barcodes()
    if not codes: return bytes() # No detections.
    return max(codes, key = lambda c: c.w() * c.h()).payload().encode()
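
On the other end of the link a master would invoke this handler by name. A sketch assuming the OpenMV rpc library, again with an illustrative USB VCP transport:

import rpc

interface = rpc.rpc_usb_vcp_master()

result = interface.call("barcode_detection")
if result is not None and len(result):  # empty bytes means no detections
    print("barcode payload:", bytes(result).decode())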
Example 6
def all_barcode_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((sensor.width(), sensor.height()//8))
    codes = sensor.snapshot().find_barcodes()
    if not codes: return bytes() # No detections.
    return str(codes).encode()
Example 7
        def center_on_blob(b, res):
            mapped_cx, mapped_cy = get_mapped_centroid(b)

            # Switch to the res (if res was unchanged this does nothing).
            sensor.set_framesize(res)

            # Construct readout window. x/y are offsets from the center.
            x = int(mapped_cx - (sensor_w / 2.0))
            y = int(mapped_cy - (sensor_h / 2.0))
            w = sensor.width()
            h = sensor.height()

            # Focus on the centroid.
            sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (x, y, w, h))

            # See if we are hitting the edge.
            new_x, new_y, w, h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)

            # You can use these error values to drive servos to move the camera if you want.
            x_error = x - new_x
            y_error = y - new_y

            if x_error < 0: print("-X Limit Reached ", end="")
            if x_error > 0: print("+X Limit Reached ", end="")
            if y_error < 0: print("-Y Limit Reached ", end="")
            if y_error > 0: print("+Y Limit Reached ", end="")
Example 8
 def __init__(self):
     self.logger = Logger()
     self.logger.set_level(DEBUG_LOG_LEVEL)
     self.logger.info("initializing Camera")
     self.reset_sensor()
     self.SHOW_BINARY_VIEW = False
     #self.CHROMINVAR = True
     self.line_color = (0, 255, 0)
     # [(1, 80, -25, 28, -58, -10)]
     self.threshold = None  # self.old_threshold = [(44, 87, -27, 23, -62, -15)] #Threshold.BLUE
     self.use_hist = True
     self.line_roi = (0, round(sensor.height() / 15), sensor.width(),
                      round(sensor.height() / 2))
     #self.blobs_roi = (0, 0, sensor.width(), round(sensor.height() / 3))
     self.snapshot = None
     #self.glare_check_millis = MILLIS_BETWEEN_GLARE_CHECK
     self.adjuster = 16384
Example 9
def getClosestToCenter(object_):
    if object_:
        dists = [
            math.sqrt((obj.cx() - sensor.width() / 2)**2 +
                      (obj.cy() - sensor.height() / 2)**2) for obj in object_
        ]
        closest = object_[dists.index(min(dists))]
        return (closest.cx(), closest.cy())
    return 0
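
A usage sketch; the color threshold below is a placeholder:

import sensor

img = sensor.snapshot()
blobs = img.find_blobs([(30, 100, 15, 127, 15, 127)], pixels_threshold=100)
center = getClosestToCenter(blobs)
if center:
    print("closest blob centroid:", center)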
Example 10
 def draw_exposure(self, img=None, scale=None):
     """ Draws camera exposure lines on an image """
     img = img or self.img
     scale = scale or self.scale
     img.draw_string(0,
                     int(sensor.height() * scale - 10),
                     "{:03d}el".format(self.get_exposure_lines()),
                     string_vflip=self.flip_text,
                     string_hmirror=self.mirror_text,
                     string_rotation=self.rotate_text)
     return img
Example 11
 def draw_time(self, img=None):
     img = img or self.img
     if self.lap_timestamps:
         img.draw_string(10,
                         int(sensor.height() / 2 + 10),
                         '{:d}ms'.format(
                             utime.ticks_diff(utime.ticks_ms(),
                                              self.lap_timestamps[-1])),
                         string_vflip=self.flip_text,
                         string_hmirror=self.mirror_text,
                         string_rotation=self.rotate_text)
def clamp_roi(roi):
    """
    find four clamps roi position
    """
    wid = 10
    ht = 10
    x = int(roi[0] + (roi[2] - wid) / 2)
    y = int(roi[1] + (roi[3] - ht) / 2)
    clp_roi = [[roi[0], y, wid, ht], [x, roi[1], wid, ht],
               [roi[0] + roi[2] - wid, y, wid, ht],
               [x, roi[1] + roi[3] - ht, wid, ht]]
    for r in clp_roi:
        if r[0] < 0: r[0] = 0
        if r[1] < 0: r[1] = 0
        if r[0] >= sensor.width(): r[0] = sensor.width() - 1
        if r[1] >= sensor.height(): r[1] = sensor.height() - 1
        if r[0] + r[2] > sensor.width(): r[2] = sensor.width() - r[0]
        if r[1] + r[3] > sensor.height(): r[3] = sensor.height() - r[1]
    print(clp_roi)
    return clp_roi
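
For instance, the four clamp ROIs can be drawn directly onto a snapshot; the target ROI here is hypothetical:

import sensor

img = sensor.snapshot()
roi = (40, 30, 120, 90)  # hypothetical target region
for r in clamp_roi(roi):
    img.draw_rectangle(r[0], r[1], r[2], r[3], color=127)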
Example 13
def initFrameBuffer():
    # Take from the main frame buffer's RAM to allocate a second frame buffer.
    # There's a lot more RAM in the frame buffer than in the MicroPython heap.
    # However, after doing this you have a lot less RAM for some algorithms...
    # So, be aware that it's a lot easier to run into out-of-RAM issues now. However,
    # frame differencing doesn't use a lot of the extra space in the frame buffer.
    # But, things like AprilTags do and won't work if you do this...
    extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), IMG_TYPE)

    print("About to save background image...")
    sensor.skip_frames(time=2000)  # Give the user time to get ready.
    extra_fb.replace(sensor.snapshot())
    print("Saved background image - Now frame differencing!")
    return extra_fb
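
Once the background frame is stored, differencing is one call per snapshot. A minimal sketch, assuming IMG_TYPE matches the configured pixel format:

extra_fb = initFrameBuffer()
while True:
    img = sensor.snapshot()
    img.difference(extra_fb)  # absolute difference against the background
    print(img.get_statistics().l_mean())  # rough measure of how much changed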
Example 14
 def init(self,
          gain_db=0,
          shutter_us=500000,
          framesize=sensor.WQXGA2,
          force_reset=True,
          flip=False):
     if self.simulate:
         self.shutter = shutter_us
         self.gain = gain_db
         self.snap_started = False
         return
      if (force_reset or self.has_error or self.gain != gain_db
              or self.shutter != shutter_us or self.framesize != framesize
              or self.flip != flip):
         sensor.reset()
         sensor.set_pixformat(self.pixfmt)
         sensor.set_framesize(framesize)
         if flip:  # upside down camera
             sensor.set_vflip(True)
             sensor.set_hmirror(True)
         self.flip = flip
         self.framesize = framesize
         if shutter_us < 0:
             sensor.set_auto_exposure(True)
         else:
             if shutter_us > 500000:
                 sensor.__write_reg(0x3037, 0x08)  # slow down PLL
                 if shutter_us > 1000000:
                     pyb.delay(100)
                     sensor.__write_reg(0x3037, 0x18)  # slow down PLL
                     if shutter_us > 1500000:
                         pyb.delay(100)
                         sensor.__write_reg(0x3036, 80)  # slow down PLL
                         # warning: doesn't work well, might crash
                 pyb.delay(200)
             sensor.set_auto_exposure(False, shutter_us)
         self.shutter = shutter_us
         if gain_db < 0:
             sensor.set_auto_gain(True)
         else:
             sensor.set_auto_gain(False, gain_db)
         self.gain = gain_db
         self.wait_init = 2
         self.width = sensor.width()
         self.height = sensor.height()
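
Following the sign convention above (a negative value selects the auto mode), illustrative calls on a hypothetical instance cam:

cam.init(gain_db=-1, shutter_us=-1)      # auto gain and auto exposure
cam.init(gain_db=0, shutter_us=1200000)  # fixed 1.2 s exposure (PLL slowed)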
Example 15
 def render(self, timer, img=None, scale=None):
     """ Renders an image to the LCD shield """
     img = img or self.img
     scale = scale or self.scale
     if self.draw_stats:
         img = img.copy((0, 0, sensor.width(), sensor.height()), scale,
                        scale).to_rgb565(copy=True)
         self.draw_fps(img)
         self.draw_exposure(img)
     if self.draw_line_stats:
         self.draw_line_stat(img)
     if self.draw_lap_times:
         self.draw_laps(img)
     if self.draw_timer:
         self.draw_time(img)
     if self.draw_lines:
         for iterations, line in self._known_lines:
             scaled_line = ulab.array(line.line()) * scale
             img.draw_line(int(scaled_line[0]),
                           int(scaled_line[1]),
                           int(scaled_line[2]),
                           int(scaled_line[3]),
                           color=self.line_draw_color)
     lcd.display(img)
        type = "list"
    elif isinstance(variate,tuple):
        type = "tuple"
    elif isinstance(variate,dict):
        type = "dict"
    elif isinstance(variate,set):
        type = "set"
    return type
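The elif chain above begins mid-function. A self-contained sketch of the same helper; the name typeof, the initial list branch, and the fallback value are assumptions:

def typeof(variate):  # hypothetical reconstruction of the fragment above
    kind = "unknown"
    if isinstance(variate, list):
        kind = "list"
    elif isinstance(variate, tuple):
        kind = "tuple"
    elif isinstance(variate, dict):
        kind = "dict"
    elif isinstance(variate, set):
        kind = "set"
    return kind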

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X32) # Set frame size to 64x32... (or 64x64)...
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

Delta_x = 0
Delta_y = 0

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)

    # Offset results are noisy without filtering so we drop some accuracy.
    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
Example 17
    #sensor.reset(freq=20000000)
else:
    sensor.reset()  # OV2640 Reset and initialize the sensor. It will
    # run automatically, call sensor.run(0) to stop
#sensor.shutdown(enable)
gc.collect()
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.VGA)
#sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
#sensor.set_auto_whitebal(True)  #OV2640
sensor.set_hmirror(1)  #for unit V
sensor.set_vflip(1)  #for unit V

img_w = sensor.width()
img_h = sensor.height()
sensor_ID = sensor.get_id()
print("image sensor is " + str(sensor_ID) + ", with size " + str(img_w) +
      " x " + str(img_h))
sensor.run(1)
sensor.skip_frames(time=1000)  # Wait for settings to take effect.
#sensor.skip_frames(30)             # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.
Zeit_end = 600 * 1000
condition = True
Zeit_Anfang = time.ticks_ms()
run_cnt = 0
loop_time = 1
fps = 0
#BtnA to save img to "train"; BtnB to toggle QR-scan, may run out of memory @ VGA!
while (condition):
interface = rpc.rpc_spi_master(cs_pin="P3", freq=10000000, clk_polarity=1, clk_phase=0)

# initialize the pin that will control the remote cam synchronisation
pin4 = Pin('P4', Pin.OUT_PP, Pin.PULL_NONE)
pin4.value(0)

# here we always choose the QVGA format (320x240) inside a VGA image
img_width = 320
img_height = 240
sensor.reset()
sensor_format = sensor.GRAYSCALE # Grayscale is enough to find a chessboard
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
if img_width != sensor.width() or img_height != sensor.height():
    sensor.set_windowing((int((sensor.width()-img_width)/2),int((sensor.height()-img_height)/2),img_width,img_height))
sensor.skip_frames(time = 2000)

# get the current exposure and gains and send them to the remote cam so that
# the 2 cams have the same image settings
sensor.snapshot()
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
print("exposure is " + str(exposure_us))
rgb_gain_db = sensor.get_rgb_gain_db()
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)

result = interface.call("sensor_config", struct.pack("<fIfff", gain_db, exposure_us,
                                                     rgb_gain_db[0], rgb_gain_db[1], rgb_gain_db[2]))
Example 19
def find_max(blobs):  # assumed signature; the original fragment begins mid-function
    max_blob = None
    max_size = 0
    for blob in blobs:
        if max_size < blob[2] * blob[3]:
            max_blob = blob
            max_size = blob[2] * blob[3]
    return max_blob


##################################################
# main
##################################################
# Region used for threshold learning
rect_width = 50
rect_height = 50
r = [(sensor.width() // 2) - (rect_width // 2),
     (sensor.height() // 2) - (rect_height // 2), rect_width, rect_height]

# Preparation
for i in range(60):
    img = sensor.snapshot()
    img.draw_rectangle(r)
    lcd.display(img)
    lcd.draw_string(0, 0, "Learning thresholds in %d ..." % (60 - i))

# Learn the thresholds
threshold = [0, 0, 0, 0, 0, 0]
for i in range(60):
    img = sensor.snapshot()
    hist = img.get_histogram(roi=r)
    lo = hist.get_percentile(0.05)
    hi = hist.get_percentile(0.95)
#small_img.to_grayscale()
#small_img.to_bitmap()

big_img = image.Image(128, 128, sensor.RGB565)
big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32, hint=hint)
#big_img.to_grayscale()
#big_img.to_bitmap()

alpha_div = 1
alpha_value = 0
alpha_step = 2

x_bounce = sensor.width() // 2
x_bounce_toggle = 1

y_bounce = sensor.height() // 2
y_bounce_toggle = 1

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()
    #img.to_grayscale()
    #img.to_bitmap()
    img.draw_image(big_img,
                   x_bounce,
                   y_bounce,
                   rgb_channel=-1,
                   alpha=alpha_value // alpha_div,
                   hint=hint | image.CENTER)
Example 21
            # Find the blob in the lower resolution image.
            blobs = img.find_blobs(TRACKING_THRESHOLDS,
                                   area_threshold=TRACKING_AREA_THRESHOLD,
                                   pixels_threshold=TRACKING_PIXEL_THRESHOLD)

            # If we lose the blob then we need to find a new one.
            if not len(blobs):
                # Reset resolution.
                sensor.set_framesize(SEARCHING_RESOLUTION)
                sensor.ioctl(sensor.IOCTL_SET_READOUT_WINDOW, (sensor_w, sensor_h))
                break

            # Narrow down the blob list and highlight the blob.
            most_dense_blob = max(blobs, key = lambda x: x.density())
            img.draw_rectangle(most_dense_blob.rect())

            print(clock.fps(), "BLOB cx:%d, cy:%d" % get_mapped_centroid(most_dense_blob))

            x_diff = most_dense_blob.cx() - (sensor.width() / 2.0)
            y_diff = most_dense_blob.cy() - (sensor.height() / 2.0)

            w_threshold = (sensor.width() / 2.0) * TRACKING_EDGE_TOLERANCE
            h_threshold = (sensor.height() / 2.0) * TRACKING_EDGE_TOLERANCE

            # Re-center on the blob if it starts going out of view (costs FPS).
            if abs(x_diff) > w_threshold or abs(y_diff) > h_threshold:
                center_on_blob(most_dense_blob, TRACKING_RESOLUTION)

    print(clock.fps())
Example 22
from math import sqrt  # import needed by this fragment


def distToCell(blob):
    # Euclidean distance from the blob to the bottom-center of the image.
    dist = sqrt(((blob.x() - sensor.width() / 2) *
                 (blob.x() - sensor.width() / 2)) +
                ((blob.y() - sensor.height()) * (blob.y() - sensor.height())))
    return dist
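
Because distToCell() returns a scalar, it can serve directly as a min() key. A usage sketch with a placeholder grayscale threshold:

blobs = img.find_blobs([(0, 60)], pixels_threshold=50)
if blobs:
    nearest = min(blobs, key=distToCell)
    print("nearest blob at", nearest.cx(), nearest.cy())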
Example 23
file = open("camId.txt")
cam = int(file.readline())
file.close()
sensor.reset()
sensor.set_pixformat(fmt)
sensor.set_framesize(res)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
clock = time.clock()
startOfPacket = {
    "cam": cam,
    "time": pyb.elapsed_millis(0),
    "fmt": fmt,
    "height": sensor.height(),
    "width": sensor.width()
}
endOfPacket = {"end": 0}

# Capture the color thresholds for whatever was in the center of the image.
r = [(320 // 2) - (50 // 2), (240 // 2) - (50 // 2), 50,
     50]  # 50x50 center of QVGA.

#print("Auto algorithms done. Hold the object you want to track in front of the camera in the 	box.")
#print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!")

led1.on()
for i in range(60):
    img = sensor.snapshot()
    img.draw_rectangle(r)
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
    # Put in a z_rotation value below and you should see the r output be equal to that.
    if(0):
        expected_rotation = 20.0
        extra_fb.rotation_corr(z_rotation=(-expected_rotation))

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see that it works.
    # Put in a zoom value below and you should see the z output be equal to that.
    if(0):
Example 25
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # or RGB565...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while (True):
    clock.tick()

    # seed_threshold controls the maximum allowed difference between
    # the initial pixel and any filled pixels. It's important to
    # set this such that flood fill doesn't fill the whole image.

    # floating_threshold controls the maximum allowed difference
    # between any two pixels. This can easily fill the whole image
    # with even a very low threshold.

    # flood_fill will fill pixels that pass both thresholds.

    # You can invert what gets filled with "invert" and clear
    # everything but the filled area with "clear_background".

    x = sensor.width() // 2
    y = sensor.height() // 2
    img = sensor.snapshot().flood_fill(x, y, \
        seed_threshold=0.05, floating_threshold=0.05, \
        color=(255, 0, 0), invert=False, clear_background=False)

    print(clock.fps())
# SOH-(X)-GS-(Y)-EOT
#

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(
    sensor.B64X64)  # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                 sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while (True):
    img = sensor.snapshot()  # Take a picture and return the image.
    pixelX = []
    pixelY = []

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(
                img,
                roi=(x, y, BLOCK_W, BLOCK_H),
                template_roi=(x, y, BLOCK_W, BLOCK_H))
Example 27
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum allowed difference between
    # the initial pixel and any filled pixels. It's important to
    # set this such that flood fill doesn't fill the whole image.

    # floating_threshold controls the maximum allowed difference
    # between any two pixels. This can easily fill the whole image
    # with even a very low threshold.

    # flood_fill will fill pixels that pass both thresholds.

    # You can invert what gets filled with "invert" and clear
    # everything but the filled area with "clear_background".

    x = sensor.width() // 2
    y = sensor.height() // 2
    img = sensor.snapshot().flood_fill(x, y, \
        seed_threshold=0.05, floating_threshold=0.05, \
        color=(255, 0, 0), invert=False, clear_background=False)

    print(clock.fps())
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
Example 29
##green_led_on = True
##else:
##green_led_on = False
#pyb.LED(2).toggle()

#tim = Timer(4, freq=1000)      # create a timer object using timer 4 - trigger at 1Hz
#tim.callback(tick)          # set the callback to our tick function

# Camera Control Code
sensor.reset()
sensor.set_pixformat(
    sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
sensor.set_framesize(FRAME_SIZE)
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * (1.0 - FRAME_REGION)), \
                     int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE))
sensor.skip_frames(time=200)
if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)
if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)
clock = time.clock()

old_time = pyb.millis()

throttle_old_result = None
throttle_i_output = 0
throttle_output = THROTTLE_OFFSET

steering_old_result = None
steering_i_output = 0
steering_output = STEERING_OFFSET
Example 30
import time
import image
import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # grayscale is faster
# sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale is faster
sensor.set_framesize(sensor.QQVGA)  # QVGA: 320x240, QQVGA: 160x120
sensor.skip_frames(time=2000)

enable_lens_corr = True  # turn on for straighter lines...

# for getting screen resolution
screen_width = sensor.width()
screen_height = sensor.height()

# white and black threshold
THRESHOLD = 14

# for reducing noise in the image
EROSION_SIZE = 2

# for infinite lines
horizontal_range = 30
vertical_range = 30
theta = 0


def find_lines(img):
    grayscale_img = img.to_grayscale(copy=True)
Example 31
throttle_i_output = 0
throttle_output = 0

steering_old_result = None
steering_i_output = 0
steering_output = 90

while True:
    clock.tick()
    img = sensor.snapshot() if COLOR_LINE_FOLLOWING else sensor.snapshot().histeq()
    if BINARY_VIEW: img = img.binary(COLOR_THRESHOLDS if COLOR_LINE_FOLLOWING else GRAYSCALE_THRESHOLDS)
    if DO_NOTHING: continue

    line = img.get_regression(([(20, 100, -128, 127, -128, 127)] if BINARY_VIEW else COLOR_THRESHOLDS) \
           if COLOR_LINE_FOLLOWING else ([(255, 255)] if BINARY_VIEW else GRAYSCALE_THRESHOLDS), \
           robust = True, roi = (0, sensor.height() // 4, sensor.width(), sensor.height()))
    print_string = ""

    if line and (line.magnitude() >= MAG_THRESHOLD):
        img.draw_line(line.line(), color = (127, 127, 127) if COLOR_LINE_FOLLOWING else 127)

        new_time = pyb.millis()
        delta_time = new_time - old_time
        old_time = new_time

        #
        # Figure out steering and do steering PID
        #

        steering_new_result = figure_out_my_steering(line, img)
        steering_delta_result = (steering_new_result - steering_old_result) if (steering_old_result is not None) else 0
Example 32
# pin used to sync the 2 cams
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)

# setting the SPI communication as a slave
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)

# here we always choose the QVGA format (320x240) inside a VGA image
img_width = 320
img_height = 240
sensor.reset()
sensor_format = sensor.GRAYSCALE
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
if img_width != sensor.width() or img_height != sensor.height():
    sensor.set_windowing(
        (int((sensor.width() - img_width) / 2),
         int((sensor.height() - img_height) / 2), img_width, img_height))
sensor.skip_frames(time=2000)
sensor.snapshot()


################################################################
# Call Backs
################################################################
def sensor_config(data):
    global processing
    gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db = struct.unpack(
        "<fIfff", data)
    sensor.set_auto_gain(False, gain_db)
Example 33
        else:
            cidx = index - 1
            area = int(sortedCells[cidx].w() * sortedCells[cidx].h())
            #print ()
            #print ("y: " + )
            #print ("area: " + area)
            #print ("index: " + index)
            can.send_advanced_track_data(sortedCells[cidx].x(),
                                         sortedCells[cidx].y(), area, 0, 11, 0,
                                         index)

    if len(sortedCells) != 0:
        img.draw_rectangle(sortedCells[0].x(),
                           sortedCells[0].y(),
                           sortedCells[0].w(),
                           sortedCells[0].h(),
                           color=(255, 0, 0))
        pyb.LED(1).on()
        pyb.LED(3).off()
    else:
        pyb.LED(1).off()
        pyb.LED(3).on()

    if can.get_frame_counter() % 50 == 0:
        can.send_config_data()
        can.send_camera_status(sensor.width(), sensor.height())

    pyb.delay(5)
    #print("HB %d" % can.get_frame_counter())
    #can.check_mode();