Beispiel #1
0
# Save_Image - By: ZZY - Thu May 3 2018 (translated)
# Grab frames from the camera, save a small crop to flash, then read it
# back and print its size (OpenMV / MicroPython).

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)  # let the sensor settle after configuration

clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Save a 10x10 crop at (5, 5); quality only applies to JPEG targets.
    cut = img.save("/t.pgm", roi=(5, 5, 10, 10), quality=50)
    time.sleep(100)  # NOTE(review): delay units (ms vs s) depend on the port - confirm

    temp = image.Image("/t.pgm")
    print(temp.size())
# Register the audio power-amplifier enable IO (translated comment)
if AUDIO_PA_EN_PIN:
    fm.register(AUDIO_PA_EN_PIN, fm.fpioa.GPIO1, force=True)
    audio_en = GPIO(GPIO.GPIO1, GPIO.OUT, value=1)  # drive high to enable the amplifier

# Register the I2S audio control IOs (translated comment)
fm.register(34, fm.fpioa.I2S0_OUT_D1, force=True)
fm.register(35, fm.fpioa.I2S0_SCLK, force=True)
fm.register(33, fm.fpioa.I2S0_WS, force=True)

wav_dev = I2S(I2S.DEVICE_0)

lcd.init()
#lcd.clear()
#lcd.rotation(1) # the image is 240*320 by default, so rotate 90 deg clockwise (translated)
lcd.display(image.Image("neu.jpg"))

#speak('0')
player = audio.Audio(path="/sd/service-yuyin/{}.wav".format(0))
wav_info = player.play_process(wav_dev)
wav_dev.channel_config(wav_dev.CHANNEL_1,
                       I2S.TRANSMITTER,
                       resolution=I2S.RESOLUTION_16_BIT,
                       cycles=I2S.SCLK_CYCLES_32,
                       align_mode=I2S.RIGHT_JUSTIFYING_MODE)
wav_dev.set_sample_rate(wav_info[1])
wav_dev.set_sample_rate(44100)  # NOTE(review): overrides the rate read from the wav header above - confirm intent
player.volume(100)
while True:
    ret = player.play()
    if ret == None:
def unittest(data_path, temp_path):
    """Regression check: the shapes test image must yield at least one circle."""
    import image

    test_img = image.Image("unittest/data/shapes.ppm", copy_to_fb=True)
    detected = test_img.find_circles(threshold=5000,
                                     x_margin=30,
                                     y_margin=30,
                                     r_margin=30)
    return len(detected) >= 1
Beispiel #4
0
from fpioa_manager import *

import KPU as kpu

lcd.init()
lcd.rotation(2)

# Put the AXP192 PMIC into its low-power sleep mode when present; boards
# without one simply skip this step.
try:
    from pmu import axp192
    pmu = axp192()
    pmu.enablePMICSleepMode(True)
except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    pass

# Show a splash screen if the file exists, otherwise report the missing file.
try:
    img = image.Image("/sd/startup.jpg")
    lcd.display(img)
except Exception:  # was a bare except
    lcd.draw_string(lcd.width() // 2 - 100,
                    lcd.height() // 2 - 4, "Error: Cannot find start.jpg",
                    lcd.WHITE, lcd.RED)

task = kpu.load("/sd/e7148e972b5aaf9d_mbnet10_quant.kmodel")

labels = ["1", "2"]  #You can check the numbers here to real names.

# Camera setup: 224x224 window matches the mobilenet model input size.
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.run(1)
# Exercise 8.13.7

# Write a function to remove all the red from an image:
# http://interactivepython.org/runestone/static/thinkcspy/_static/LutherBellPic.jpg
# For this and the following exercises, use the luther.jpg photo.

import image

img = image.Image('luther.jpg')
win = image.ImageWin(img.getWidth(), img.getHeight())
img.draw(win)
img.setDelay(1, 15)

# Zero the red channel of every pixel in place, animating the update.
height = img.getHeight()
width = img.getWidth()
for y in range(height):
    for x in range(width):
        pixel = img.getPixel(x, y)
        stripped = image.Pixel(0, pixel.getGreen(), pixel.getBlue())
        img.setPixel(x, y, stripped)

img.draw(win)
Beispiel #6
0
    def create(cls,
               repoPath,
               sdUUID,
               imgUUID,
               size,
               volFormat,
               preallocate,
               diskType,
               volUUID,
               desc,
               srcImgUUID,
               srcVolUUID,
               initialSize=None):
        """
        Create a new volume with given size or snapshot
            'size' - in sectors
            'volFormat' - volume format COW / RAW
            'preallocate' - Preallocate / Sparse
            'diskType' - enum (API.Image.DiskTypes)
            'srcImgUUID' - source image UUID
            'srcVolUUID' - source volume UUID
            'initialSize' - initial volume size in sectors,
                            in case of thin provisioning

        Returns volUUID on success. Pushes/replaces task recoveries so a
        failed creation can be rolled back.
        """
        dom = sdCache.produce(sdUUID)
        dom.validateCreateVolumeParams(volFormat,
                                       srcVolUUID,
                                       preallocate=preallocate)

        imgPath = image.Image(repoPath).create(sdUUID, imgUUID)

        volPath = os.path.join(imgPath, volUUID)
        volParent = None
        volType = sc.type2name(sc.LEAF_VOL)

        # Get the specific class name and class module to be used in the
        # Recovery tasks.
        clsModule, clsName = cls._getModuleAndClass()

        # Resolve the parent volume when creating a snapshot; a snapshot
        # inherits its size from its parent (see below).
        try:
            if srcVolUUID != sc.BLANK_UUID:
                # When the srcImgUUID isn't specified we assume it's the same
                # as the imgUUID
                if srcImgUUID == sc.BLANK_UUID:
                    srcImgUUID = imgUUID

                volParent = cls(repoPath, sdUUID, srcImgUUID, srcVolUUID)

                if not volParent.isLegal():
                    raise se.createIllegalVolumeSnapshotError(
                        volParent.volUUID)

                if imgUUID != srcImgUUID:
                    # Cross-image snapshot: share the parent into the new
                    # image, then re-resolve it under the new image.
                    volParent.share(imgPath)
                    volParent = cls(repoPath, sdUUID, imgUUID, srcVolUUID)

                # Override the size with the size of the parent
                size = volParent.getSize()

        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCannotGetParent(
                "Couldn't get parent %s for volume %s: %s" %
                (srcVolUUID, volUUID, e))

        try:
            cls.log.info("Creating volume %s", volUUID)

            # Rollback sentinel to mark the start of the task
            vars.task.pushRecovery(
                task.Recovery(task.ROLLBACK_SENTINEL, clsModule, clsName,
                              "startCreateVolumeRollback",
                              [sdUUID, imgUUID, volUUID]))

            # Create volume rollback
            vars.task.pushRecovery(
                task.Recovery("Halfbaked volume rollback", clsModule, clsName,
                              "halfbakedVolumeRollback",
                              [sdUUID, volUUID, volPath]))

            # Specific volume creation (block, file, etc...)
            try:
                metaId = cls._create(dom,
                                     imgUUID,
                                     volUUID,
                                     size,
                                     volFormat,
                                     preallocate,
                                     volParent,
                                     srcImgUUID,
                                     srcVolUUID,
                                     volPath,
                                     initialSize=initialSize)
            except (se.VolumeAlreadyExists, se.CannotCreateLogicalVolume,
                    se.VolumeCreationError, se.InvalidParameterException) as e:
                cls.log.error("Failed to create volume %s: %s", volPath, e)
                # Nothing was created on disk, so drop the half-baked rollback.
                vars.task.popRecovery()
                raise
            # When the volume format is raw what the guest sees is the apparent
            # size of the file/device therefore if the requested size doesn't
            # match the apparent size (eg: physical extent granularity in LVM)
            # we need to update the size value so that the metadata reflects
            # the correct state.
            if volFormat == sc.RAW_FORMAT:
                apparentSize = int(
                    dom.getVSize(imgUUID, volUUID) / sc.BLOCK_SIZE)
                if apparentSize < size:
                    cls.log.error(
                        "The volume %s apparent size %s is smaller "
                        "than the requested size %s", volUUID, apparentSize,
                        size)
                    raise se.VolumeCreationError()
                if apparentSize > size:
                    cls.log.info(
                        "The requested size for volume %s doesn't "
                        "match the granularity on domain %s, "
                        "updating the volume size from %s to %s", volUUID,
                        sdUUID, size, apparentSize)
                    size = apparentSize

            vars.task.pushRecovery(
                task.Recovery("Create volume metadata rollback", clsModule,
                              clsName, "createVolumeMetadataRollback",
                              map(str, metaId)))

            cls.newMetadata(metaId, sdUUID, imgUUID, srcVolUUID, size,
                            sc.type2name(volFormat), sc.type2name(preallocate),
                            volType, diskType, desc, sc.LEGAL_VOL)

            if dom.hasVolumeLeases():
                cls.newVolumeLease(metaId, sdUUID, volUUID)

        except se.StorageException:
            cls.log.error("Unexpected error", exc_info=True)
            raise
        except Exception as e:
            cls.log.error("Unexpected error", exc_info=True)
            raise se.VolumeCreationError("Volume creation %s failed: %s" %
                                         (volUUID, e))

        # Remove the rollback for the halfbaked volume
        vars.task.replaceRecoveries(
            task.Recovery("Create volume rollback", clsModule, clsName,
                          "createVolumeRollback",
                          [repoPath, sdUUID, imgUUID, volUUID, imgPath]))

        return volUUID
Beispiel #7
0
# CorgiDude GPIO | From AiDude.io, aiiotshop.com/p/58
# Read the ADC and the AHT temperature/humidity sensor on INPUT2 and
# render the values on the LCD every 100 ms.
import image, lcd, time
from Dude import dude,PORT

lcd.init()
lcd.rotation(1)

dude.BeginADC(PORT.INPUT2)
dude.BeginAHT(PORT.INPUT2)

while(True):
    img = image.Image(size=(240,240))
    #read sensor
    adc = dude.AnalogRead(PORT.INPUT2,2)
    temp,humid = dude.ReadAHT(PORT.INPUT2)

    # Background red level tracks the ADC reading (assumes adc <= 3.5 - TODO confirm range)
    img.draw_rectangle(0,0,240,240, fill=True, color=(int(adc/3.5*255),0,0))
    img.draw_string(5,5, "T=%2.2f" % temp,color=(0,255,0),scale=3)
    img.draw_string(5,35, "H=%2.2f" % humid,color=(0,255,0),scale=3)
    img.draw_string(5,65, "ADC=%2.2f" % adc,color=(0,255,0),scale=3)

    '''
    if dude.DigitalRead(PORT.INPUT1,4) == 1:
        img.draw_rectangle(0,0,240,240, fill=True, color=(0,0,255))
    '''
    lcd.display(img)
    time.sleep(0.1)
Beispiel #8
0
# Face-detection setup: button IRQ, camera, and YOLO2 model state.
key_gpio.irq(set_key_state, GPIO.IRQ_RISING, GPIO.WAKEUP_NOT_SUPPORT)

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_hmirror(1)
sensor.set_vflip(1)
sensor.run(1)
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)  # anchor for face detect
dst_point = [(44, 59), (84, 59), (64, 82), (47, 105),
             (81, 105)]  # standard face key point position
a = kpu.init_yolo2(task_fd, 0.5, 0.3, 5, anchor)  # task_fd is loaded elsewhere in this file
img_lcd = image.Image()  # display buffer
img_face = image.Image(size=(128, 128))  # 128x128 face crop buffer
a = img_face.pix_to_ai()  # convert the buffer to the layout the KPU accepts
record_ftr = []
record_ftrs = []
names = [
    'Mr.1', 'Mr.2', 'Mr.3', 'Mr.4', 'Mr.5', 'Mr.6', 'Mr.7', 'Mr.8', 'Mr.9',
    'Mr.10'
]

# Presumably the minimum similarity score for a positive match - confirm against the rest of the file.
ACCURACY = 85

while (1):
    img = sensor.snapshot()
    clock.tick()  # NOTE(review): 'clock' is created elsewhere in this file
    code = kpu.run_yolo2(task_fd, img)
Beispiel #9
0
 def blank_draw():
     # Allocate a fresh canvas each frame while the UI is enabled.
     # NOTE(review): size=(ui.height, ui.weight) - 'weight' (rather than
     # 'width') and the (height, width) ordering look suspicious; confirm
     # against the ui object's attributes.
     if ui.enable:
         ui.canvas = image.Image(size=(ui.height, ui.weight)
                             )  # 10ms # 168.75kb (112kb)
Beispiel #10
0
def unittest(data_path, temp_path):
    """Regression check: exactly one known datamatrix must be decoded."""
    import image

    test_img = image.Image("unittest/data/datamatrix.pgm", copy_to_fb=True)
    found = test_img.find_datamatrices()
    expected = (34, 15, 90, 89, 'https://openmv.io/', 0.0, 18, 18, 18, 0)
    return len(found) == 1 and found[0][0:] == expected
Beispiel #11
0
def face_recog(pic_name, vi_ip):
    """Identify the face in *pic_name* by LBP-matching it against every saved
    face in /CamFaces, then send the best-matching name over TCP to the
    smartphone at *vi_ip*:8080.

    Saved files are expected to be named "<name>_<n>.pgm"; the per-name
    average LBP distance decides the winner (smaller distance = better match).
    """
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    # d0: LBP descriptor of the snapshot being identified.
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    pyb.LED(2).on()
    pyb.LED(3).on()
    # Flat alternating list: [name, distance, name, distance, ...]
    name_lbp_list = []
    uos.chdir("/CamFaces")
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = None
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                # Extra frame buffer used while computing the LBP descriptor.
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                # Strip "_<n>.pgm" to recover the person's name.
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:(und_loc)]
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    gc.collect()
    # Average the distances per person. Entries for one person are assumed
    # to be contiguous in name_lbp_list (directory listing order).
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                               name_lbp_list[i + 2]):
            # A new name starts at i+2: average the finished run [start:end).
            end = i + 2
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    # Handle the trailing run (the last person in the list).
    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    gc.collect()
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    # Collect just the averaged distances; the smallest one wins.
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    # name_avg holds [name, avg, name, avg, ...]; map the distance index back
    # to its name slot.
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    uos.remove("/snapshot-person.pgm")
    pyb.LED(2).off()
    pyb.LED(3).off()
    # Report the result to the companion phone over a raw TCP socket.
    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()
    gc.collect()
    return
Beispiel #12
0
#-------------------------------------------------------------------------------
# main body
#-------------------------------------------------------------------------------
# Match every template in the template directory against one image and pick
# the template with the highest normalized cross-correlation (NCC) score.

print('Listing all templates...')
tpl_dname = '../../results/template_trans/'
tpl_fnames = [f for f in listdir(tpl_dname) if isfile(join(tpl_dname, f))]
tpl_fnames.sort()

print('Creating a template object array...')
tpls = [template.Template(tpl_dname + f) for f in tpl_fnames]

print('Reading the image...')
img_fname = '../../outputs/4_flipped/EUD-visit2-r1.jpg'
img = image.Image(img_fname)

print('Performing template matching...')
rets = [img.find_match(t) for t in tpls]
nccs = [ret['ncc'] for ret in rets]
locs = [ret['loc'] for ret in rets]

max_score = max(nccs)
index = [i for i, s in enumerate(nccs) if s == max_score]
index = index[0]  # first winner if several templates tie
print('Max matching score (ncc) is %0.4f' % max_score)

print('Showing best matching...')
tpl = tpls[index]
ncc = nccs[index]
loc = locs[index]
Beispiel #13
0
# Face-recognition setup: camera + LCD + YOLO2 face detector, with recorded
# names and feature vectors optionally restored from files on flash.
lcd.init()  # initialize the LCD
lcd.rotation(2)
sensor.reset()  # initialize the camera sensor
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_hmirror(1)  # mirror the camera image
sensor.set_vflip(1)  # flip the camera image
sensor.run(1)  # enable the camera
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)  # anchors for the face-detection model
dst_point = [
    (44, 59), (84, 59), (64, 82), (47, 105), (81, 105)
]  # standard frontal-face 5 key points: left eye, right eye, nose, left/right mouth corner
a = kpu.init_yolo2(task_fd, 0.5, 0.3, 5, anchor)  # init the face-detection model
img_lcd = image.Image()  # display buffer
img_face = image.Image(size=(128, 128))  # 128x128 face crop buffer
a = img_face.pix_to_ai()  # convert the buffer to the layout the KPU accepts

record_ftr = []  # holds the current 196-dim feature vector
record_ftrs = []  # recorded face features; persisted via ftrs.txt below so they survive power-off
record_names = []  # recorded face names, index-aligned with record_ftrs
#names = ['Mr.1', 'Mr.2', 'Mr.3', 'Mr.4', 'Mr.5', 'Mr.6', 'Mr.7', 'Mr.8', 'Mr.9' , 'Mr.10'] # name labels matching the feature list above
# Restore previously recorded names/features from flash, if present.
if "names.txt" in os.listdir():
    with open("names.txt", 'r') as f:
        record_names = f.read().splitlines()
        print(record_names)

if "ftrs.txt" in os.listdir():
    with open("ftrs.txt", 'r') as f:
        record_ftrs = f.read().split('\n|||||\n')
Beispiel #14
0
import time, sensor, image
from image import SEARCH_EX, SEARCH_DS
from pyb import UART


# Template-matching setup: grayscale QQVGA frames will be compared against
# two left/right templates; results are reported over UART3.
sensor.reset()

sensor.set_contrast(1)
sensor.set_gainceiling(16)

sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

template1 = image.Image("LorR1left.pgm")
template2 = image.Image("LorR2right.pgm")#camera rotate 90 degrees clockwise


uart = UART(3, 9600)

clock = time.clock()


#Run template matching
while (True):
    clock.tick()

    inp = 1
    #inp = uart.read()

    if inp:
Beispiel #15
0
import sys
import random

# UART2 link to a host at 115200 8N1 with a 4 KB receive buffer.
from fpioa_manager import fm
from machine import UART
fm.register(35, fm.fpioa.UART2_TX, force=True)
fm.register(34, fm.fpioa.UART2_RX, force=True)

uart_Port = UART(UART.UART2, 115200, 8, 0, 0, timeout=1000, read_buf_len=4096)
data_packet = bytearray([0x00, 0x00, 0x00, 0x00, 0x00])  # 5-byte TX frame template

lcd.init()
lcd.rotation(2)  #Rotate the lcd 180deg

# Splash screen; fall back to an error banner if the file is missing.
try:
    img = image.Image("/flash/startup.jpg")
    lcd.display(img)
except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    lcd.draw_string(lcd.width() // 2 - 100,
                    lcd.height() // 2 - 4, "Error: Cannot find start.jpg",
                    lcd.WHITE, lcd.RED)

# Speaker wiring: driving SPK_SD high enables the on-board amplifier.
from Maix import I2S, GPIO
import audio
from Maix import GPIO
from fpioa_manager import *

fm.register(board_info.SPK_SD, fm.fpioa.GPIO0)
spk_sd = GPIO(GPIO.GPIO0, GPIO.OUT)
spk_sd.value(1)  #Enable the SPK output
Beispiel #16
0
 def bg_draw():
     """Composite the cached background image onto the UI canvas."""
     if ui.enable:
         if ui.bak is None:  # was '== None'; lazy-load the background only once
             ui.bak = image.Image(ui.bg_path)  # 90ms
         ui.canvas.draw_image(ui.bak, 0, 0)  # 20ms
Beispiel #17
0
'''
Experiment: LCD (translated)
Version: v1.0
Date: 2019.12
Author: 01Studio
Description: show text and an image on the LCD. The file 01Studio.bmp must
be copied to the board before running.
'''

import lcd, image, utime

lcd.init()  # initialize the LCD
lcd.clear(lcd.WHITE)  # clear the screen to white

# Show a text string
lcd.draw_string(110, 120, "Hello 01Studio!", lcd.BLACK, lcd.WHITE)  # draw text

utime.sleep(2)  # wait 2 seconds

# Show the image (copy 01Studio.bmp to the board first)
lcd.rotation(1)  # the image is 240x320 by default, so rotate 90 deg clockwise
lcd.display(image.Image("01Studio.bmp"))
# This example shows off how to draw images in the frame buffer with a custom generated color palette.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # or GRAYSCALE...
sensor.set_framesize(sensor.QQVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

# the color palette is actually an image, this allows you to use image ops to create palettes
# the image must have 256 entries i.e. 256x1, 64x4, 16x16 and have the format rgb565

# Initialise palette source colors into an image
palette_source_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255)]
palette_source_color_image = image.Image(len(palette_source_colors), 1,
                                         sensor.RGB565)
for i, color in enumerate(palette_source_colors):
    palette_source_color_image[i] = color  # index assignment writes pixel i

# Scale the image to palette width and smooth them
palette = image.Image(256, 1, sensor.RGB565)
palette.draw_image(palette_source_color_image,
                   0,
                   0,
                   x_scale=palette.width() /
                   palette_source_color_image.width())
# Box-blur the stretched palette so the source colors blend into gradients.
palette.mean(int(palette.width() / palette_source_color_image.width() / 2))

while (True):
    clock.tick()
Beispiel #19
0
def speaker(pin, melody, noteDurations):
    """Play one note on the buzzer attached to *pin*.

    melody        -- index into the frequency table below; 0 means a rest
    noteDurations -- note length, scaled by 1000 below (presumably seconds
                     to milliseconds - TODO confirm caller's units)
    """
    if melody == 0:
        # A rest: stop the PWM output and just wait for the note duration.
        tim = Timer(Timer.TIMER2, Timer.CHANNEL0, mode=Timer.MODE_PWM)
        tim.stop()
        noteDuration = int(noteDurations * 1000)
        time.sleep_ms(noteDuration)
        return

    # Frequency table in Hz indexed by *melody* (octaves 3-7, then sharps/flats).
    listmelody = [
        0,
        131,
        147,
        165,
        175,
        196,
        220,
        247,  #3, 1 - 7
        262,
        294,
        330,
        349,
        392,
        440,
        494,  #4, 8 - 14
        523,
        587,
        659,
        698,
        784,
        880,
        988,  #5, 15 - 21
        139,  # C#3, 22
        156,  # Eb3, 23
        185,  # F#3, 24
        208,  # G#3, 25
        233,  # Bb3, 26
        277,  # C#4, 27
        311,  # Eb4, 28
        370,  # F#4, 29
        415,  # G#4, 30
        466,  # Bb4, 31
        555,  # C#5, 32
        622,  # Eb5, 33
        740,  # F#5, 34
        831,  # G#5, 35
        932,  # Bb5, 36
        1047,
        1175,
        1319,
        1397,
        1568,
        1760,
        1976,  #6, 37 - 43
        2093,
        2349,
        2637,
        2794,
        3136,
        3520,
        3951,  #7, 44 - 50
        1109,  # C#6, 52
        1245,  # Eb6, 53
        1480,  # F#6, 54
        1661,  # G#6, 55
        1865,  # Bb6, 56
        2217,  # C#7, 57
        2489,  # Eb6, 58
        2960,  # F#6, 59
        3322,  # G#6, 60
        3729  # Bb6, 61
    ]

    listnoteDurations = noteDurations
    # to calculate the note duration, take one second
    # divided by the note type.
    #e.g. quarter note = 1000 / 4, eighth note = 1000/8, etc.
    noteDuration = int(listnoteDurations * 1000)
    # Show the "playing" screen while the tone sounds, then restore the logo.
    lcd.display(image.Image('music.jpg'))
    #lcd.draw_string(5, 15, 'Note is playing', lcd.RED, lcd.WHITE)
    tone(pin, listmelody[melody], noteDuration)
    lcd.display(image.Image('logo.jpg'))
Beispiel #20
0
# Copy image to framebuffer.
#
# This example shows how to load and copy an image to framebuffer for testing.

import sensor, image

# Still need to init sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)

# Set sensor pixel format
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)  # matches the grayscale PGM loaded below

# Load image
img = image.Image("/image.pgm")

# Copy image to framebuffer
img.copy_to_fb()

# Update drawing
sensor.snapshot()
Beispiel #21
0
        buff = (data[i * 2] << 8 | data[i * 2 + 1])
        if buff > 32768:
            buff -= 65536

        if i < 3:
            MPU6886_DATA[i] = buff * 8.0 / 32768.0
        elif i == 3:
            MPU6886_DATA[i] = buff / 326.8 + 25.0
        else:
            MPU6886_DATA[i] = buff * 1000.0 / 32768.0


MPU6886_init()

# Init graph: a bordered plot frame with a horizontal midline at y=65.
graph = image.Image()
graph.draw_rectangle(20, 15, 200, 100, (150, 150, 150), 1, False)  # plot frame
graph.draw_line(10, 65, 230, 65, (100, 100, 100))  # midline of the frame


def write_graph(graph,
                data,
                graph_x_origin=20,
                graph_y_origin=115,
                x_offset=0,
                y_offset=0,
                color=(30, 111, 150)):
    """Plot *data* onto *graph* as one pixel per sample.

    Sample index maps to x (right of the origin plus x_offset); sample value
    maps to y (above the origin plus y_offset, since screen y grows down).
    """
    base_x = graph_x_origin + x_offset
    base_y = graph_y_origin - y_offset
    for idx, value in enumerate(data):
        graph.set_pixel(base_x + idx, base_y - value, color)
Beispiel #22
0
import network, socket, time, utime, sensor, image, lcd, os
from machine import UART
from Maix import GPIO
from maix_motor import Maix_motor
from fpioa_manager import fm
import ujson

# Show setup instructions for the companion app on the LCD, wrapped by hand
# at 28 characters per line.
lcd.display(image.Image('logo.jpg'))
msg = 'Open Code&Robots APP, enter your WiFi credentials and scan the resulting QR code with MARK camera'
num_rows = len(msg) // 28
for i in range(num_rows + 3):
    lcd.draw_string(5, i * 15, msg[i * 28:i * 28 + 28], lcd.RED, lcd.WHITE)
time.sleep(2)

########## config ################
WIFI_SSID = 0  # placeholder; presumably filled from the scanned QR code - confirm against later code
WIFI_PASSWD = 0
server_ip = 0
server_port = 3456
pan_angle = 90  # NOTE(review): presumably initial servo angles in degrees - confirm
tilt_angle = 90
bullet = 90

# Pin wiring for the WiFi module over SPI1 (Maixduino-style board).
fm.register(25, fm.fpioa.GPIOHS25)  #cs
fm.register(8, fm.fpioa.GPIOHS8)  #rst
fm.register(9, fm.fpioa.GPIOHS9)  #rdy
print("Use hardware SPI for other maixduino")
fm.register(28, fm.fpioa.SPI1_D0, force=True)  #mosi
fm.register(26, fm.fpioa.SPI1_D1, force=True)  #miso
fm.register(27, fm.fpioa.SPI1_SCLK, force=True)  #sclk
##################################
Beispiel #23
0
# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)

# Load template.
# Template should be a small (eg. 32x32 pixels) grayscale image.
pin = image.Image("/template_pin.pgm")
hole = image.Image("/template_hole.pgm")

clock = time.clock()
# State flag: 1 = a hole was seen, 2 = a pin was seen, 0 = unknown (translated comment)
voir = 0

# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
Beispiel #24
0
            oldpixel1 = oldimage.getPixel(col, row)
            oldpixel2 = oldimage.getPixel(col + 1, row)
            oldpixel3 = oldimage.getPixel(col, row + 1)
            oldpixel4 = oldimage.getPixel(col + 1, row + 1)

            newred = int((oldpixel1.getRed() + oldpixel2.getRed() +
                          oldpixel3.getRed() + oldpixel4.getRed()) / 4)
            newgreen = int((oldpixel1.getGreen() + oldpixel2.getGreen() +
                            oldpixel3.getGreen() + oldpixel4.getGreen()) / 4)
            newblue = int((oldpixel1.getBlue() + oldpixel2.getBlue() +
                           oldpixel3.getBlue() + oldpixel4.getBlue()) / 4)

            newpixel = image.Pixel(newred, newgreen, newblue)
            oldimage.setPixel(col, row, newpixel)
            oldimage.setPixel(col + 1, row, newpixel)
            oldimage.setPixel(col, row + 1, newpixel)
            oldimage.setPixel(col + 1, row + 1, newpixel)

    print("here")
    return oldimage


# Driver: smooth tiger.jpg with the smooth() function defined earlier in
# this file and display the result in a window.
img = image.Image("tiger.jpg")
smoothed = smooth(img)
print(smoothed)
win = image.ImageWin(smoothed.getWidth(), smoothed.getHeight())

smoothed.draw(win)
win.exitonclick()
# img.setDelay(1,15)   # setDelay(0) turns off animation
Beispiel #25
0
# Trash-classification setup: camera, push button, touchscreen, and KPU model.
sensor.run(1)

# Init button
fpioa = FPIOA()
fpioa.set_function(board_info.PIN15, fm.fpioa.GPIOHS0)
pin = fpioa.get_Pin_num(fm.fpioa.GPIOHS0)
pin = GPIO(GPIO.GPIOHS0, GPIO.IN)  # rebinds 'pin' as the GPIO input object

# Init touchscreen
i2c = I2C(I2C.I2C0, freq=400000, scl=30, sda=31)
ts.init(i2c)

# Load model
task = kpu.load(kmodel_path)

img = image.Image()
img_icon = image.Image()

# ===== Configure trash classification specific parameters =====

# Class label vector (German labels: cardboard, glass, metal, paper,
# plastic bottle, plastic, residual waste)
# Last entry is the label for values below the threshold -> unknown
class_labels = [
    'Pappe', 'Glas', 'Metall', 'Papier', 'Kunststoffflasche', 'Kunststoff',
    'Restmuell'
]

# ===== Main program loop =====
first_cycle = True
pause = False
Beispiel #26
0
            img = None
            pmin = 999999
            num = 0

            def min(pmin, a, s):
                # Running-minimum helper: returns the smaller of pmin and a,
                # remembering the matching subject index in the global 'num'.
                # NOTE(review): shadows the built-in min() inside this scope.
                global num
                if a < pmin:
                    pmin = a
                    num = s
                return pmin

            for s in range(1, NUM_SUBJECTS + 1):
                dist = 0

                for i in range(2, NUM_SUBJECTS_IMGS + 1):
                    img = image.Image("face/s%d/%d.pgm" % (s, i))
                    d1 = img.find_lbp((0, 0, img.width(),
                                       img.height()))  #d1为第s文件夹中的第i张图片的lbp特征
                    dist += image.match_descriptor(
                        d0, d1)  #计算d0 d1即样本图像与被检测人脸的特征差异度。
                print("Average dist for subject %d: %d" %
                      (s, dist / NUM_SUBJECTS_IMGS))
                pmin = min(pmin, dist / NUM_SUBJECTS_IMGS,
                           s)  #特征差异度越小,被检测人脸与此样本更相似更匹配。
                print(pmin)
                i = 0
            s = 0
            if pmin > 100000:
                print(0)

            else:
Beispiel #27
0
import image

img = image.Image("luther.jpg")
# Bug fix: was image.EmptyImage(im.getWidth(), ...) -- 'im' is undefined
# and raised a NameError.
newimg = image.EmptyImage(img.getWidth(), img.getHeight())
win = image.ImageWin()

# Build the red-free copy in newimg -- the image that is drawn below.
# (The original wrote the pixels back into img, leaving newimg blank.)
for column in range(img.getWidth()):
    for row in range(img.getHeight()):
        p = img.getPixel(column, row)
        newpixel = image.Pixel(0, p.getGreen(), p.getBlue())
        newimg.setPixel(column, row, newpixel)
newimg.draw(win)
win.exitonclick()
# Image-scaling demo setup: choose scaling hints and build a 4x4 RGB test image.
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

hint = image.BICUBIC  # image.BILINEAR image.BICUBIC

# RGB channel extraction is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.EXTRACT_RGB_CHANNEL_FIRST

# Color table application is done after scaling normally, this
# may produce false colors. Set this flag to do it before.
#
hint |= 0  # image.APPLY_COLOR_PALETTE_FIRST

# Fill the 4x4 source image pixel-by-pixel with test colors.
small_img = image.Image(4, 4, sensor.RGB565)
small_img.set_pixel(0, 0, (0, 0, 127))
small_img.set_pixel(1, 0, (47, 255, 199))
small_img.set_pixel(2, 0, (0, 188, 255))
small_img.set_pixel(3, 0, (0, 0, 127))
small_img.set_pixel(0, 1, (0, 176, 255))
small_img.set_pixel(1, 1, (222, 0, 0))
small_img.set_pixel(2, 1, (50, 255, 195))
small_img.set_pixel(3, 1, (86, 255, 160))
small_img.set_pixel(0, 2, (255, 211, 0))
small_img.set_pixel(1, 2, (83, 255, 163))
small_img.set_pixel(2, 2, (255, 211, 0))
small_img.set_pixel(3, 2, (0, 80, 255))
small_img.set_pixel(0, 3, (255, 118, 0))
small_img.set_pixel(1, 3, (127, 0, 0))
small_img.set_pixel(2, 3, (0, 144, 255))
Beispiel #29
0
# Audio spectrum display: record from the I2S microphone, run an FFT on the
# samples, and draw an amplitude histogram on the LCD.
from Maix import FFT
from fpioa_manager import *
import image
import lcd
lcd.init()
fm.register(8, fm.fpioa.GPIO0)
wifi_en = GPIO(GPIO.GPIO0, GPIO.OUT)
wifi_en.value(0)  # hold the WiFi enable line low
fm.register(20, fm.fpioa.I2S0_IN_D0)
fm.register(19, fm.fpioa.I2S0_WS)
fm.register(18, fm.fpioa.I2S0_SCLK)
rx = I2S(I2S.DEVICE_0)
rx.channel_config(rx.CHANNEL_0, rx.RECEIVER, align_mode=I2S.STANDARD_MODE)
sample_rate = 2048
rx.set_sample_rate(sample_rate)
img = image.Image()
sample_points = 512
FFT_points = 256
lcd_width = 320
lcd_height = 240
#hist_num = FFT_points #changeable
hist_num = 32  #changeable
if hist_num > 320:
    hist_num = 320  # cap at one histogram bar per LCD column
hist_width = int(320 / hist_num)  #changeable
x_shift = 0
while True:
    audio = rx.record(sample_points)
    FFT_res = FFT.run(audio.to_bytes(), FFT_points)
    FFT_amp = FFT.amplitude(FFT_res)
    img = img.clear()
Beispiel #30
0
# Signal Detect RED/GREEN and Arrow Detection.
# Using Template maching.
# Template Matching Example - Normalized Cross Correlation (NCC)
#
import sys
import time, sensor, image
import utime
from pyb import Pin, Timer, LED
from image import SEARCH_EX, SEARCH_DS

# template match
# Arrow templates: left (tL), right (tR), up (tU), 32x32 grayscale.
#tL = image.Image("/LA_64_64.pgm")
#tR = image.Image("/RA_64_64.pgm")
tL = image.Image("/LA_32_32_1.pgm")
tR = image.Image("/RA_32_32_1.pgm")
tU = image.Image("/UA_32_32.pgm")
#template = image.Image("/H.pgm")

# Color
# PWM OUT
tim = Timer(4, freq=1000)  # Frequency in Hz
tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=5)
# LAB color thresholds used to detect the signal colors.
thresholds = [
    (30, 100, 15, 127, 15, 127),  # generic_red_thresholds
    (30, 100, -64, -8, -32, 32)
]  # generic_green_thresholds
loopTime = 800
clock = time.clock()

#timled = Timer(2)
#timled.init(freq=2)         # trigger at 2Hz