Example #1
# Detect frontal faces with a Haar cascade and return the largest face's bounding box
# packed as four little-endian uint16s (x, y, w, h); empty bytes means no detection.
def face_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
    if not faces: return bytes() # No detections.
    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
    out_face = max(faces, key = lambda f: f[2] * f[3])
    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])
Example #2
# Find color blobs matching the LAB thresholds passed in by the caller and return the
# centroid of the densest blob as two little-endian uint16s.
def color_detection(data):
    sensor.set_pixformat(sensor.RGB565)  # LAB color thresholds need an RGB565 image
    sensor.set_framesize(sensor.QVGA)
    thresholds = struct.unpack("<bbbbbb", data)
    blobs = sensor.snapshot().find_blobs([thresholds],
                                         pixels_threshold=500,
                                         area_threshold=500,
                                         merge=True,
                                         margin=20)
    if not blobs: return bytes() # No detections.
    for b in blobs:
        sensor.get_fb().draw_rectangle(b.rect(), color = (255, 0, 0))
        sensor.get_fb().draw_cross(b.cx(), b.cy(), color = (0, 255, 0))
    out_blob = max(blobs, key = lambda b: b.density())
    return struct.pack("<HH", out_blob.cx(), out_blob.cy())
Example #3
# Raw frame-buffer reader: called with no arguments it schedules a callback that streams
# the whole frame buffer; otherwise it returns the requested (offset, size) slice of it.
def raw_image_read(data):
    if not len(data):
        interface.schedule_callback(raw_image_read_cb)
        return bytes()
    else:
        offset, size = struct.unpack("<II", data)
        return memoryview(sensor.get_fb().bytearray())[offset:offset+size]
Example #4
def all_apriltag_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    tags = sensor.snapshot().find_apriltags()
    if not tags: return bytes() # No detections.
    draw_detections(sensor.get_fb(), tags)
    return str(tags).encode()
Example #5
def all_datamatrix_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    codes = sensor.snapshot().find_datamatrices()
    if not codes: return bytes() # No detections.
    draw_detections(sensor.get_fb(), codes)
    return str(codes).encode()
Example #6
def datamatrix_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    codes = sensor.snapshot().find_datamatrices()
    if not codes: return bytes() # No detections.
    draw_detections(sensor.get_fb(), codes)
    return max(codes, key = lambda c: c.w() * c.h()).payload().encode()
Example #7
# Return the largest AprilTag's centre, id and rotation (in degrees) packed as four
# little-endian uint16s; empty bytes means no detection.
def apriltag_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)
    tags = sensor.snapshot().find_apriltags()
    if not tags: return bytes() # No detections.
    draw_detections(sensor.get_fb(), tags)
    output_tag = max(tags, key = lambda t: t.w() * t.h())
    return struct.pack("<HHHH", output_tag.cx(), output_tag.cy(), output_tag.id(),
                       int(math.degrees(output_tag.rotation())))
Example #8
# Callback scheduled by raw_image_read(): stream the frame buffer to the host, then
# clear the module-level 'processing' flag.
def raw_image_read_cb():
    global processing
    interface.put_bytes(sensor.get_fb().bytearray(), 5000)  # timeout
    processing = False
Example #9
def raw_image_read_cb():
    interface.put_bytes(sensor.get_fb().bytearray(), 5000) # timeout
Example #10
def loop(control, thermal, screen, menu, input_handler, camera_slave, **kwargs):

    led_green = LED(2)  # green LED
    led_green.off()

    usb = pyb.USB_VCP()
    running_from_ide = usb.isconnected()


    def camera_slave_sync_cancel_condition():
        if control.preview is not CameraPreview.VISIBLE or menu.state is not CameraState.PREVIEW:
            logger.info("Cancel camera slave sync")
            return True
        return False

    camera_slave.sync_cancel_condition = camera_slave_sync_cancel_condition

    state = None
    camera_preview = None
    camera_playback_img_name = ""
    previous_text = ""
    screen_refresh_needed = True
    state_ticks_ms = utime.ticks_ms()

    menu.process_action("middle")
    while True:
        changed_state = state is not menu.state
        state = menu.state

        changed_preview = camera_preview is not control.preview
        camera_preview = control.preview

        # -----------------------------------------------------------------------------------------
        # INITIALIZATIONS
        if changed_state:
            state_ticks_ms = utime.ticks_ms()
            logger.info("Processing state change to ", menu.state)
            if menu.state is CameraState.PREVIEW:
                control.fps_reset()
            if menu.state is CameraState.PLAYBACK or menu.state is CameraState.POSTVIEW:
                #control.set_normal_resolution()
                control.update_playback_img_name(go_to_last=True)
                camera_playback_img_name = ""

        if changed_preview:
            control.fps_reset()
            logger.info("Processing preview change to ", control.preview)

        # -----------------------------------------------------------------------------------------
        # RUN TIME

        if state is CameraState.PREVIEW:
            control.fps_tick()
            screen_refresh_needed = True
            text = "\n" + menu.generate_text()

            if camera_preview in (CameraPreview.THERMAL, CameraPreview.THERMAL_ANALYSIS,
                                  CameraPreview.THERMAL_GREY, CameraPreview.MIX):

                thermal.get_spotmeter_values()
                def map_g_to_temp(g):
                    return ((g * (thermal.temperature_max - thermal.temperature_min)) / 255.0) + thermal.temperature_min

                img = sensor.snapshot()
                img_touch_x = max(0, min(sensor.width() - 1, round(control.x * sensor.width() / screen.width)))
                img_touch_y = max(0, min(sensor.height() - 1, round(control.y * sensor.height() / screen.height)))
                pixel = "{:.2f}".format(map_g_to_temp(img.get_pixel(img_touch_x, img_touch_y)))

                if camera_preview is CameraPreview.THERMAL_GREY:
                    img.to_rgb565()
                elif camera_preview is CameraPreview.THERMAL or camera_preview is CameraPreview.MIX:
                    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it
                elif camera_preview is CameraPreview.THERMAL_ANALYSIS:
                    # Color tracking concept from lepton_object_temp_color_1.py in the OpenMV examples
                    # Color Tracking Thresholds (Grayscale Min, Grayscale Max)
                    threshold_list = [(200, 255)]

                    blob_stats = []
                    blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True)
                    # Collect stats into a list of tuples
                    for blob in blobs:
                        blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list,
                                                                                                roi=blob.rect()).mean())))
                    img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it
                    # Draw stuff on the colored image
                    for blob in blobs:
                        img.draw_rectangle(blob.rect())
                        img.draw_cross(blob.cx(), blob.cy())
                    for blob_stat in blob_stats:
                        img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False)

                qqvga2qvga(sensor.get_fb(), screen.screen_buff)

            if camera_preview is CameraPreview.VISIBLE or camera_preview is CameraPreview.MIX:
                input_handler.disable()
                sync_success = camera_slave.sync()
                input_handler.enable()
                if not sync_success:
                    logger.info("Failed sync")
                    screen.screen_buff.fill(c=(10,10,10))
                    screen.screen_buff.draw_string(screen.width//2, screen.height//2, "ERROR", c=(255,0,0))

                if camera_preview is CameraPreview.VISIBLE:
                    qvga2qvga(camera_slave.rx_buff, screen.screen_buff, 0, 1)
                    pixel = screen.screen_buff.get_pixel(control.x, control.y)
                elif camera_preview is CameraPreview.MIX:
                    qvga2qvga(camera_slave.rx_buff, screen.screen_buff, 0, 2)

            if menu.page != "ROOT" or control.always_pixel_pointer:
                screen.screen_buff.draw_rectangle(control.x-5, control.y-5, 10, 10, color=(255,0,255), thickness=1, fill=True)
                if menu.state is CameraState.PREVIEW:
                    text_y_offset = 50 if control.y < screen.height//2 else -50
                    screen.screen_buff.draw_string(control.x - 20, control.y + text_y_offset, "{}".format(pixel))

        if state is CameraState.PLAYBACK or state is CameraState.POSTVIEW:
            screen_refresh_needed = False

            if menu.page == "ANALYSIS":
                screen_refresh_needed = True

            text = menu.generate_text()
            if previous_text != text:
                screen_refresh_needed = True

            if not control.playback_img_name:
                logger.info("No image to be loaded")
                menu.back()
                continue
            elif control.playback_img_name != camera_playback_img_name or screen_refresh_needed:
                camera_playback_img_name = control.playback_img_name
                logger.info("Displaying image...", control.playback_img_name, " and text ", text)
                try:
                    img = image.Image(control.playback_img_name, copy_to_fb=True)
                    qvga2qvga(sensor.get_fb(), screen.screen_buff, 0, 1)

                    if control.to_save_fb_as_startup:
                        control.save_fb_as_startup(screen.screen_buff)
                    img.to_grayscale()
                    try:
                        file_name = control.playback_img_name.split("/")[-1].split(".")[0]
                        file_name = file_name.split("_")
                        thermal.temperature_min = float(file_name[1]) / 100
                        thermal.temperature_max = float(file_name[2]) / 100
                    except Exception as e:
                        print("Could not get min and max from name", e)
                    print("file_name", file_name, thermal.temperature_min, thermal.temperature_max)
                except OSError as e:
                    screen.screen_buff.draw_rectangle(0, 0, screen.width, screen.height, color=(30,30,30), fill=True)
                    screen.screen_buff.draw_string(round(screen.width/6), round(screen.height/2.3), "ERROR LOADING...", color=(255,0,0), scale=2.0)
                    logger.info("Error while loading ", control.playback_img_name, ". Try Again. Error", e)


                if menu.page == "ANALYSIS":

                    def map_g_to_temp(g):
                        return ((g * (thermal.temperature_max - thermal.temperature_min)) / 255.0) + thermal.temperature_min

                    img_touch_x = max(0, min(sensor.width() - 1, round(control.x * sensor.width() / screen.width)))
                    img_touch_y = max(0, min(sensor.height() - 1, round(control.y * sensor.height() / screen.height)))
                    pixel = "{:.2f}".format(map_g_to_temp(img.get_pixel(img_touch_x, img_touch_y)))

                    screen.screen_buff.draw_rectangle(control.x-5, control.y-5, 10, 10, color=(255,0,255), thickness=1, fill=True)
                    text_y_offset = 50 if control.y < screen.height//2 else -50
                    screen.screen_buff.draw_string(control.x - 20, control.y + text_y_offset, "{}".format(pixel))

                screen_refresh_needed = True


            if state is CameraState.POSTVIEW and utime.ticks_diff(utime.ticks_ms(), state_ticks_ms) > 2000:
                menu.back()

        ########################################################################
        # INPUT TASKS which are BIG
        if control.to_save_img:
            control.save_img(screen.screen_buff, thermal.temperature_min, thermal.temperature_max)
            menu.process_action("postview")
            led_green.on()
            utime.sleep_ms(100)
            led_green.off()


        ########################################################################
        # DISPLAY IN SCREEN
        if menu.state is CameraState.PLAYBACK and menu.page == "MENU":
            screen.screen_buff.draw_rectangle(0,0,screen.width//2,screen.height,color=(0,0,0), fill=True)

        if screen_refresh_needed:
            previous_text = text
            screen.screen_buff.draw_string(10, 10, text, color=(57, 255, 20), scale=2.1, mono_space=False)
            if state is CameraState.PLAYBACK:
                logger.info("Refresh needed")
            screen.write_to_screen(screen.screen_buff)
            screen_refresh_needed = False

        ########################################################################
        # OTHER FUNCTIONALITY

        if not running_from_ide and usb.isconnected():
            screen.screen_buff.draw_rectangle(0, screen.height//3, screen.width, screen.height//3, color=(10,10,10), fill=True)
            screen.screen_buff.draw_string(round(screen.width/5), round(screen.height/2.3), "USB DEBUGGING", color=(255,0,0), scale=2.0)
            screen.write_to_screen(screen.screen_buff)
            utime.sleep_ms(500)
            input_handler.disable()
            exit(0)
        if input_handler.pin.value() == 0 and input_handler.time_since_interrupt() > 100:
            input_handler.interrupt_callback(line="LOOP")

        gc.collect()
Example #11
# Fragment: assumes sensor and time are imported and that the sensor was reset and
# configured (pixformat/framesize) earlier in the script.
sensor.set_contrast(+2)
sensor.set_brightness(+3)
sensor.set_auto_gain(True)

sensor.skip_frames(time=2000)
clock = time.clock()

enable_lens_corr = False  # referenced below; set True to apply lens_corr(1.8)
min_degree = 0
max_degree = 179

while (True):
    clock.tick()

    img = sensor.snapshot()

    # get_fb() returns the live frame buffer, so line_img and cir_img alias the same
    # image as img; the gaussian()/binary() calls below therefore modify img as well.
    line_img = sensor.get_fb()
    cir_img = sensor.get_fb()

    cir_img.gaussian(2)
    cir_img.binary([(40, 100)])

    if enable_lens_corr: img.lens_corr(1.8)  # for 2.8mm lens...

    for c in cir_img.find_circles(threshold=4000,
                                  x_margin=10,
                                  y_margin=10,
                                  r_margin=60,
                                  r_min=2,
                                  r_max=20,
                                  r_step=4):
        img.draw_circle(c.x(), c.y(), c.r(), color=(255, 0, 0))
Example #12
# Use this script to gather face images for building a TensorFlow dataset. The script
# automatically zooms in on the largest face in the field of view, which you can then
# save using the Dataset Editor.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

clock = time.clock()

largest_face = None
largest_face_timeout = 0

while (True):
    clock.tick()

    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(
        image.HaarCascade("frontalface"))

    if faces:
        largest_face = max(faces, key=lambda f: f[2] * f[3])
        largest_face_timeout = 20

    if largest_face_timeout > 0:
        sensor.get_fb().crop(roi=largest_face)
        largest_face_timeout -= 1

    print(clock.fps())
Example #13
import VPOST as VP
import sensor

# Enable frame layer
disp = VP.display(ColorFormat=VP.YCBYCR)
disp.init()

sensor.reset()
sensor.set_pixformat(sensor.YUV422P, sensor.YUV422)
sensor.set_framesize(sensor.VGA, sensor.QVGA)

# Two capture pipes from the same sensor: a planar YUV422P frame buffer and a packed
# YUV422 frame buffer; the packed one is what gets rendered to the display below.
planar_img = sensor.get_fb(sensor.PIPE_PLANAR)
packet_img = sensor.get_fb(sensor.PIPE_PACKET)

sensor.skip_frames()

while True:
    sensor.snapshot(planar_image=planar_img, packet_image=packet_img)
    disp.render_image(packet_img)
Example #14
# Fragment: assumes sensor, lcd, uos and utime are imported and that the led_r, led_g,
# led_b and button_a pin objects were set up earlier in the script.
sensor.skip_frames(100)
sensor.run(1)

# Check whether the SD card is mounted at /sd
sd_flag = uos.getcwd() == "/sd"

clock = utime.clock()
img_cnt = 0

while True:
    # Transfer a frame to PC
    led_r.value(0)
    fbuffer = sensor.get_fb()  # reference to the frame buffer image
    img = sensor.snapshot()
    fbuffer = img              # redundant: snapshot() already returns the frame buffer image
    led_r.value(1)

    # Display img to LCD
    led_g.value(0)
    lcd.display(img)
    led_g.value(1)

    # Write img to SD
    led_b.value(0)
    if button_a.value() == 0:
        if sd_flag is True:
            file_name = "img_" + str(img_cnt) + ".jpg"
            img.save("/sd/" + file_name)  # assumed completion: the original fragment ends before the save
            img_cnt += 1
            lcd.draw_string(10, 10, file_name + " is taken!")
    led_b.value(1)
Example #15
        # Fragment: assumes array, usb, fps, clock, thresholds, buttColor, headColor and
        # ring are defined earlier in the script.
        blank = array.array('d', [0, 0, 0, 0, 0, 0])
        print(blank)
        usb.send(blank, timeout=round(1000 / fps))  # send a blank record to the host
        clock.tick()
        while (usb.isconnected()):
            #img.draw_circle(ring.x(), ring.y(), ring.r(), color = (255, 0, 0))
            # img.draw_cross(ring.x(), ring.y(), color = (255, 0, 0))
            gc.collect()
            img = sensor.snapshot().histeq(adaptive=True, clip_limit=2.5)  # keep the reference; img is drawn on below
            #img.morph(kernel_size, kernel) # Run the kernel on every pixel of the image.

            maxButt = 0
            maxHead = 0
            for blob in sensor.get_fb().find_blobs(thresholds,
                                                   pixels_threshold=6,
                                                   area_threshold=10):
                if blob.code() == buttColor and (blob.area()) > maxButt:  #butt
                    maxButt = blob.area()
                    butt = blob
                    #img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)
                if blob.code() == headColor and (blob.area()) > maxHead:  #head
                    maxHead = blob.area()
                    head = blob
                    #img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20)

            if maxButt * maxHead > 0:
                img.draw_string(butt.x() + 2, butt.y() + 2, "butt")
                img.draw_string(head.x() + 2, head.y() + 2, "head")

                headV = [head.cxf() - ring.x(), head.cyf() - ring.y()]