Example #1
import pytest
from picamera import Color  # imports assumed from the original test module


def test_color_new():
    assert Color(0, 0, 0) == (0.0, 0.0, 0.0)
    assert Color(1, 1, 1) == (1.0, 1.0, 1.0)
    assert Color(255, 255, 255) == (1.0, 1.0, 1.0)
    with pytest.raises(ValueError):
        Color()
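
A quick sketch of the normalisation these assertions rely on (a minimal example, assuming the picamera package is installed):

from picamera import Color

# Channel values above 1 are read as 0-255 bytes, values from 0 to 1 as floats,
# so both forms below construct pure white; a Color also compares equal to a plain RGB tuple.
print(Color(255, 255, 255) == Color(1.0, 1.0, 1.0))  # True
print(Color(255, 255, 255) == (1.0, 1.0, 1.0))       # True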
Example #2
def captureOption():
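    # Assumes globals from the surrounding script: camera (a PiCamera),
    # typew() for typewriter-style printing, ClearTerminal(), and Start().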
    #Clear Terminal
    ClearTerminal()
    #Capture Option
    typew(
        "\nOptions: \t\tValue\n1)Add text\t\t{}\n2)Brightness\t\t{}\n3)Contrast\t\t{}\n4)Text size\t\t{}\n5)Text Color\n6)Text Background Color\n7)Image Effect\t\t{}\n8)Exposure Mode\t\t{}\n9)Reset\n10)Back\n\nSelect: "
        .format(camera.annotate_text, camera.brightness, camera.contrast,
                camera.annotate_text_size, camera.image_effect,
                camera.exposure_mode))
    choose = input()
    #Text
    if choose == "1":
        typew("Text: ")
        text = input()
        camera.annotate_text = text
        captureOption()
    #Brightness
    elif choose == "2":
        typew("\nBrightness({}): ".format(camera.brightness))
        brightness = int(input())
        if brightness >= 0 and brightness <= 100:
            camera.brightness = brightness
        else:
            typew("\nPlease input within 0-100\n")
        captureOption()
    #Contrast
    elif choose == "3":
        typew("\nContrast({}): ".format(camera.contrast))
        contrast = int(input())
        if contrast >= 0 and contrast <= 100:
            camera.contrast = contrast
        else:
            typew("\nPlease input within 0-100\n")
        captureOption()
    #Text size
    elif choose == "4":
        typew("\nText size({}): ".format(camera.annotate_text_size))
        size = int(input())
        if size >= 6 and size <= 160:
            camera.annotate_text_size = size
        else:
            typew("\nPlease input within 6-160\n")
        captureOption()
    #Text color
    elif choose == "5":
        typew("\nText color: ")
        colortype = input()
        colorselection = [
            "red", "green", "blue", "black", "white", "yellow", "purple",
            "pink", "orange", "brown", "magenta", "cyan", "grey"
        ]
        if colortype.lower() in colorselection:
            camera.annotate_foreground = Color(colortype.lower())
        else:
            typew("\nPlease type a valid color.\n")
        captureOption()
    #Text background color
    elif choose == "6":
        typew("\nText background color: ")
        colortype = input()
        colorselection = [
            "red", "green", "blue", "black", "white", "yellow", "purple",
            "pink", "orange", "brown", "magenta", "cyan", "grey"
        ]
        if colortype.lower() in colorselection:
            camera.annotate_background = Color(colortype.lower())
        else:
            typew("\nPlease type a valid color.\n")
        captureOption()
    #Image Effect
    elif choose == "7":
        typew(
            "\nEffect: none, negative, solarize, sketch, denoise, emboss, oilpaint, hatch, gpen, pastel, watercolor, film, blur, saturation, colorswap, washedout, posterise, colorpoint, colorbalance, cartoon, deinterlace1, and deinterlace2\n"
        )
        typew("\nImage Effect({}): ".format(camera.image_effect))
        effect = input()
        EffectType = [
            "none", "negative", "solarize", "sketch", "denoise", "emboss",
            "oilpaint", "hatch", "gpen", "pastel", "watercolor", "film",
            "blur", "saturation", "colorswap", "washedout", "posterise",
            "colorpoint", "colorbalance", "cartoon", "deinterlace1",
            "deinterlace2"
        ]
        if effect.lower() in EffectType:
            camera.image_effect = effect.lower()
        else:
            typew("\nPlease type a valid effect.\n")
        captureOption()
    #Exposure Mode
    elif choose == "8":
        typew(
            "\nExposure Type: off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach, verylong, fixedfps, antishake, and fireworks\n"
        )
        typew("Exposure Mode({}): ".format(camera.exposure_mode))
        mode = input()
        modeType = [
            "off", "auto", "night", "nightpreview", "backlight", "spotlight",
            "sports", "snow", "beach", "verylong", "fixedfps", "antishake",
            "fireworks"
        ]
        if mode.lower() in modeType:
            camera.exposure_mode = mode.lower()
        else:
            typew("\nPlease type a valid exposure mode.\n")
        captureOption()
    #Reset
    elif choose == "9":
        camera.annotate_text = ""
        camera.contrast = 0
        camera.brightness = 50
        camera.annotate_text_size = 32
        camera.annotate_foreground = Color("white")
        camera.annotate_background = None
        camera.image_effect = "none"
        camera.exposure_mode = "auto"
        typew("Done reset!!")
        captureOption()
    #Back
    elif choose == "10":
        Start()
    #Invalid option
    else:
        typew("Please type a valid selection")
        captureOption()
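
The menu above re-enters captureOption() recursively after every selection, so the call stack grows for as long as the menu is in use. A minimal sketch of the same structure as a loop (keeping the script's typew/ClearTerminal/Start helpers):

def captureOption():
    while True:
        ClearTerminal()
        typew("Select: ")
        choose = input()
        if choose == "10":  # Back
            return Start()
        # ...handle choices "1"-"9" exactly as above, then loop instead of recursing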
Example #3
#pi_cam
from picamera import PiCamera, Color
from time import sleep

camera = PiCamera()

#settings
camera.rotation = 180
camera.annotate_text = "Wanted for flying under the influence"
camera.annotate_text_size = 150
camera.annotate_foreground = Color('white')
camera.annotate_background = Color('red')
camera.image_effect = 'posterise'

camera.start_preview(alpha=200)
sleep(5)
camera.capture('/home/pi/Desktop/test1.jpg')
camera.stop_preview()
Example #4
from picamera import PiCamera, Color #get these objects in here
from time import sleep #and this one

camera = PiCamera() #make a camera object called 'camera'
num = 0 #this is a counter
num2 = 0 #this too
effectsList = ['sketch', 'posterise', 'gpen', 'cartoon', 'negative', 'solarize', 'pastel', 'none', 'denoise', 'emboss', 'oilpaint', 'hatch', 'watercolor', 'film', 'blur', 'saturation', 'colorswap', 'washedout', 'colorpoint', 'colorbalance', 'deinterlace1', 'deinterlace2']
#this long boi contains all the effects we're gonna rotate through

while(num < 22): #do this 22 times, once per effect (a for loop over effectsList would also work)
    camera.start_preview() #open the camera window
    camera.image_effect = effectsList[num] #set the effect to element 'num' of the effects list (first time is 'sketch', then 'posterise', etc.)
    camera.annotate_text_size = 55 #set text size
    camera.annotate_background = Color('Blue') #set background color
    camera.annotate_text = "This is effect " + effectsList[num] #write some text that says what effect we're using
    sleep(5) #pause for 5 seconds
    if(num2 < 5): #for the first 5 iterations,
        camera.capture('/home/pi/Desktop/' + effectsList[num] + '.jpg') #take a photo at this location with filename of effect type
        num2 = num2 + 1 #increment this counter
    num = num + 1 #increment this counter
camera.stop_preview() #close the camera window
Example #5
from picamera import PiCamera, Color
from time import sleep

camera = PiCamera()

#Image can be rotated by 90, 180, 270 degrees, or 0 to reset
camera.rotation = 180

#Preview only works if Pi is connected to a monitor
#An alpha level can be passed to start_preview to alter the transparency of the preview (0-255)
#camera.start_preview(alpha = 200)
camera.start_preview()
"""
The annotation text's background and foreground color can be
altered using the Color library from picamera
"""
camera.annotate_background = Color("blue")
camera.annotate_foreground = Color("yellow")
#Text can be added to an image using annotate_text
camera.annotate_text = "Hello world!"

#Sleep for at least 2 seconds to allow the sensor to set its light levels
sleep(5)

#The path passed to capture is where the picture is saved
camera.capture("/home/pi/Desktop/text.jpg")

camera.stop_preview()
Example #6
from picamera import PiCamera, Color
from time import sleep

demoCamera = PiCamera()

demoCamera.start_preview()  #open the camera preview
demoCamera.annotate_background = Color('white')
demoCamera.annotate_foreground = Color('red')
demoCamera.resolution = (480, 320)  #set the camera resolution
demoCamera.framerate = 60  #set the camera framerate
demoCamera.annotate_text = " SWS3009B - 2018"  #display a line of text on the image
sleep(5)  #sleep for 5 seconds
demoCamera.capture('/home/pi/Desktop/classPhoto.jpg')  #take a photo
demoCamera.stop_preview()  #close the camera preview
Example #7
# Fragment: assumed setup for the lines below
from picamera import PiCamera, Color
from datetime import datetime
from time import sleep

camera = PiCamera()

# min (64, 64), max (2592, 1944)
# camera.resolution = (2592, 1944)

# max 15
# camera.framerate = 15

# min 0, max 100
camera.brightness = 70

# min 0, max 100
camera.contrast = 50

# min 6, max 160
camera.annotate_text_size = 32
camera.annotate_background = Color('black')
camera.annotate_foreground = Color('white')

while True:
    curtime = datetime.now()
    outstring = curtime.strftime('%Y/%m/%d %H:%M:%S')
    filename = "/home/adrian/go/src/adrian/upstairs-" + curtime.strftime(
        '%Y%m%d-%H:%M:%S') + ".jpg"
    camera.annotate_text = outstring

    # camera.start_preview()
    # sleep(1)
    camera.capture(filename)
    # camera.stop_preview()
    sleep(120)
Example #8
from picamera import PiCamera, Color
from time import sleep
import datetime as dt
import numpy as np
import os

from flask import request
from flask import Flask, url_for

camera = PiCamera()
camera.rotation = 180
camera.annotate_text_size = 92
camera.annotate_foreground = Color(r=255, g=255, b=255)
camera.annotate_background = Color(r=0, g=0, b=0)


def take_picture(name='image.jpg'):
    #path='/home/pi/Desktop/'
    unique_id = ''.join([str(np.random.randint(10)) for i in range(20)])
    path = "static/" + unique_id
    camera.annotate_text = "Facebook Hackathon " + dt.datetime.now().strftime(
        '%Y-%m-%d %H:%M:%S')

    camera.start_preview()
    sleep(1)
    camera.capture(path + name)
    camera.stop_preview()

    return path + name
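
The Flask imports above suggest take_picture() is meant to back an HTTP endpoint; a minimal sketch of such a route (the route name and app wiring are assumptions, not part of the original):

app = Flask(__name__)

@app.route('/capture')
def capture():
    # take a photo and return the relative path of the saved image under static/
    return take_picture()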

Example #9
from picamera import PiCamera, Color
from time import sleep
import time
import datetime

camera = PiCamera()
# camera.resolution = (2592, 1944)
# camera.framerate = 15
# camera.rotation = 180
camera.annotate_text_size = 30
camera.annotate_foreground = Color('white')
camera.start_preview()
for i in range(2):
    sleep(2)
    camera.annotate_text = (time.strftime('%H:%M:%S', time.localtime()) +
                            " (" + datetime.date.today().isoformat() + ")")
    camera.capture('/home/pi/Desktop/image%s.jpg' % i)
camera.stop_preview()
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    cameraW = 640
    cameraH = 480

    frameTime = time.time() * 1000

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((targetHost, targetPort))

        with picamera.PiCamera(resolution=(cameraW, cameraH),
                               framerate=30) as camera:
            camera.start_preview(alpha=255)
            camera.annotate_foreground = Color('black')
            camera.annotate_background = Color('white')
            try:
                stream = io.BytesIO()
                for _ in camera.capture_continuous(stream,
                                                   format='jpeg',
                                                   use_video_port=True):
                    stream.seek(0)
                    image = Image.open(stream).convert('RGB').resize(
                        (width, height), Image.ANTIALIAS)
                    start_time = time.time()
                    results = classify_image(interpreter, image)
                    elapsed_ms = (time.time() - start_time) * 1000
                    stream.seek(0)
                    stream.truncate()

                    msg = ""
                    for i in range(10):
                        label = labels[results[1][i] + 1]
                        prob = clamp(0, results[2][i], 1)
                        top = clamp(0, results[0][i][0], 1)
                        left = clamp(0, results[0][i][1], 1)
                        bottom = clamp(0, results[0][i][2], 1)
                        right = clamp(0, results[0][i][3], 1)
                        msg += (
                            "{0:20} {1:3.1f}% {2:3.3f} {3:3.3f} {4:3.3f} {5:3.3f} {6: 5.1f}ms\n"
                            .format(label, prob * 100, top, left, bottom,
                                    right, elapsed_ms))
                    msg += (
                        "--------------------------------------------------\n")
                    sock.sendall(bytes(msg + "\n", "utf-8"))

                    #pdb.set_trace()

                    bestIdx = np.argmax(results[2])
                    label = labels[results[1][bestIdx] + 1]
                    prob = clamp(0, results[2][bestIdx], 1)
                    top = clamp(0, results[0][bestIdx][0], 1)
                    left = clamp(0, results[0][bestIdx][1], 1)
                    bottom = clamp(0, results[0][bestIdx][2], 1)
                    right = clamp(0, results[0][bestIdx][3], 1)
                    camera.annotate_text = '%s (%.1f%%)\n%.1fms' % (
                        label, prob * 100, elapsed_ms)
            finally:
                camera.stop_preview()
Example #11
def main():
    ##################################################
    # Initialise LCD
    ## LCD = LCD_1in8.LCD()
    LCD = ST7735()
    ## Lcd_ScanDir = LCD_1in8.SCAN_DIR_DFT
    ## LCD.LCD_Init(Lcd_ScanDir)
    LCD.begin()
    ## screenbuf = Image.new("RGB", (LCD.LCD_Dis_Column, LCD.LCD_Dis_Page), "WHITE")
    screenbuf = Image.new("RGB", (DISPLAY_WIDTH, DISPLAY_HEIGHT), "WHITE")
    draw = ImageDraw.Draw(screenbuf)
    draw.text((33, 22), 'Initialising...', fill="BLUE", font=FONT_LARGE)
    ## LCD.LCD_PageImage(screenbuf)
    LCD.display(screenbuf)

    ##################################################

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    cameraW = 640
    cameraH = 480

    frameTime = time.time() * 1000

    with picamera.PiCamera(resolution=(cameraW, cameraH),
                           framerate=30) as camera:
        camera.start_preview(alpha=255)
        camera.annotate_foreground = Color('black')
        camera.annotate_background = Color('white')
        try:
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (width, height), Image.ANTIALIAS)
                start_time = time.process_time()
                results = classify_image(interpreter, image)
                elapsed_ms = (time.process_time() - start_time) * 1000
                stream.seek(0)
                stream.truncate()

                # paste camera captured image into screen buffer
                screenbuf.paste(image.resize((DISPLAY_WIDTH, DISPLAY_HEIGHT)))

                availColours = [
                    'salmon', 'olive', 'orange', 'purple', 'aqua', 'darkgray',
                    'yellow', 'sienna', 'red', 'blue', 'green'
                ]
                usedColours = {}
                msg = ""
                for i in range(10):
                    label = labels[results[1][i] + 1]
                    prob = clamp(0, results[2][i], 1)
                    if prob >= 0.5:
                        if label in usedColours:
                            colour = usedColours[label]
                        elif availColours == []:
                            colour = 'salmon'
                        else:
                            colour = availColours.pop()
                            usedColours[label] = colour

                        top = clamp(0, results[0][i][0], 1)
                        left = clamp(0, results[0][i][1], 1)
                        bottom = clamp(0, results[0][i][2], 1)
                        right = clamp(0, results[0][i][3], 1)
                        # draw bounding box
                        draw.rectangle(
                            [(left * DISPLAY_WIDTH, top * DISPLAY_HEIGHT),
                             (right * DISPLAY_WIDTH, bottom * DISPLAY_HEIGHT)],
                            outline=colour)
                        desc = ("{0} {1:3.1f}%".format(label, prob * 100))
                        txtw, txth = FONT_SMALL.getsize(desc)
                        # draw label rectangle
                        draw.rectangle(
                            [(left * DISPLAY_WIDTH, top * DISPLAY_HEIGHT),
                             (left * DISPLAY_WIDTH + txtw,
                              top * DISPLAY_HEIGHT + txth)],
                            fill=colour)
                        # draw label
                        draw.text((left * DISPLAY_WIDTH, top * DISPLAY_HEIGHT),
                                  desc,
                                  fill="WHITE",
                                  font=FONT_SMALL)

                        # record info for log
                        msg += (
                            "{0:20} {1:3.1f}% {2:3.3f} {3:3.3f} {4:3.3f} {5:3.3f} {6: 5.1f}ms\n"
                            .format(label, prob * 100, top, left, bottom,
                                    right, elapsed_ms))

                elapsed_txt = "{0: 5.1f}ms".format(elapsed_ms)
                txtw, txth = FONT_SMALL.getsize(elapsed_txt)
                draw.text((DISPLAY_WIDTH - txtw, 0),
                          elapsed_txt,
                          fill="WHITE",
                          font=FONT_SMALL)

                LCD.display(screenbuf)
                msg += ("--------------------------------------------------\n")
                print(msg)

                #pdb.set_trace()

                bestIdx = np.argmax(results[2])
                label = labels[results[1][bestIdx] + 1]
                prob = clamp(0, results[2][bestIdx], 1)
                top = clamp(0, results[0][bestIdx][0], 1)
                left = clamp(0, results[0][bestIdx][1], 1)
                bottom = clamp(0, results[0][bestIdx][2], 1)
                right = clamp(0, results[0][bestIdx][3], 1)
                # camera.annotate_text = '%s (%.1f%%)\n%.1fms' % (label, prob*100, elapsed_ms)
        finally:
            camera.stop_preview()
            LCD.LCD_Clear()
Example #12
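# Fragment: assumes camera (a PiCamera), img (a PIL RGBA image) with
# draw = ImageDraw.Draw(img), plus ImageFont, Color, sleep, and
# "import datetime as dt" from the surrounding script.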
display_text = [
    'Pwr: {}'.format(0), 'Cad: {}'.format(0.0), 'Dist: {}'.format(0.0),
    'H/r: {}'.format(0)
]
text_height = 20
text_font = ImageFont.truetype(
    '/usr/share/fonts/truetype/freefont/FreeSans.ttf', text_height)
for i in range(len(display_text)):
    draw.text((10, 10 + text_height * i),
              display_text[i],
              font=text_font,
              fill='black')

# Add the image to the preview overlay
overlay = camera.add_overlay(img.tostring(), format='rgba', size=img.size)
overlay.layer = 3
overlay.fullscreen = True
"""
Text display for time. "annotate_text" is used instead of "draw.text" because by default,
(1) the text generated by "annotate_text" is centrally aligned in the middle of the screen,
and (2) the text is saved toghether with the recorded video while "draw.text" will not be saved.
"""
camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
camera.annotate_text_size = 26
camera.annotate_foreground = Color('black')

# Update the time every second
while True:
    sleep(1)
    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
Example #13
from picamera import PiCamera, Color
from datetime import datetime

camera = PiCamera()
# take a series of shots without delay
#for i in range(3):
filename = datetime.today()
#filename2="{0:%Y}-{0:%m}-{0:%d}".format(now)
#camera.capture( '/root/Pictures/Example1/{0:%Y}-{0:%m}-{0:%d}'.format(now).jpg)
print(filename)
camera.annotate_background = Color("teal")
camera.annotate_foreground = Color("lightpink")
camera.annotate_text = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
camera.annotate_text_size = 55
camera.capture('/root/Pictures/Example1/PhotoWithDataToday.jpg')
Example #14
from picamera import Color  # import assumed from the original test module


def test_color_new_rgb():
    assert Color(r=1, g=0, b=0) == (1.0, 0.0, 0.0)
    assert Color(r=255, g=255, b=255) == (1.0, 1.0, 1.0)
    assert Color(red=1, green=0, blue=0.0) == (1.0, 0.0, 0.0)
    assert Color(red=255, green=255, blue=255) == (1.0, 1.0, 1.0)
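
Both keyword spellings construct equal values, so either form works when setting annotation colours (a minimal sketch, assuming picamera is installed):

from picamera import Color

assert Color(r=255, g=0, b=0) == Color(red=255, green=0, blue=0)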
Example #15
# Fragment: the opening lines were cut off. By symmetry with the contrast
# loop below, this was presumably the tail of a brightness sweep:
camera.start_preview()
for i in range(100):
    camera.annotate_text = "Brightness: %s" % i
    camera.brightness = i
    sleep(0.1)
camera.stop_preview()

camera.start_preview()
for i in range(100):
    camera.annotate_text = "Contrast: %s" % i
    camera.contrast = i
    sleep(0.1)
camera.stop_preview()

camera.annotate_text_size = 86

from picamera import PiCamera, Color

camera.start_preview()
camera.annotate_background = Color('red')
camera.annotate_foreground = Color('green')
camera.annotate_text = "Dis Guy.... playing Fortnite"
sleep(5)
camera.stop_preview()

camera.start_preview()
camera.image_effect = 'colorswap'
sleep(5)
camera.capture('/home/pi/Desktop/colorswap.jpg')
camera.stop_preview()

camera.start_preview()
for effect in camera.IMAGE_EFFECTS:
    camera.image_effect = effect
    camera.annotate_text = "Effect: %s" % effect
    sleep(5)  # pause so each effect is visible; the snippet appears truncated here
camera.stop_preview()
from picamera import PiCamera, Color
from time import sleep

camera = PiCamera()

camera.resolution = (
    2592, 1944
)  # Maximum photo resolution is 2592 x 1944; the minimum allowed is 64 x 64.
camera.framerate = 15  # The framerate must be set to 15 to reach the maximum resolution.
camera.rotation = 0  # Rotate the image by 0, 90, 180 or 270 degrees
camera.start_preview(
)  # (alpha=200) can be passed inside the parentheses to make the preview transparent. It does not affect the video.
camera.exposure_mode = 'auto'  # Photo exposure. Defaults to "auto". Options (off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach, verylong, fixedfps, antishake, fireworks)
camera.awb_mode = 'auto'  # White balance. Defaults to "auto". Options (off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon)
camera.image_effect = 'none'  # Image effect. Defaults to "none". Options (none, negative, solarize, sketch, denoise, emboss, oilpaint, hatch, gpen, pastel, watercolor, film, blur, saturation, colorswap, washedout, posterise, colorpoint, colorbalance, cartoon, deinterlace1, deinterlace2)
camera.annotate_background = Color('white')  # Background colour of the text
camera.annotate_foreground = Color('black')  # Colour of the text itself
camera.annotate_text = "Escuela Las Cruces"  # Text stamped on the photo
camera.annotate_text_size = 100  # Ranges from 6 to 160. Defaults to 32.
camera.brightness = 50  # Photo brightness (defaults to 50). Ranges from 0 to 100.
camera.contrast = 50  # Photo contrast (defaults to 0). Ranges from 0 to 100.
for i in range(
        1
):  # The number in the parentheses is how many photos to take when the script runs
    sleep(5)
    camera.capture(
        '/home/pi/Desktop/image%s.jpg' % i
    )  # Path where the photos are saved; they are named "image" plus the capture-order number.
camera.stop_preview()

# HELPER LOOPS
Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    cameraW = 640
    cameraH = 480

    def clampW(x):
        return clamp(0, x, cameraW - 1)

    def clampH(x):
        return clamp(0, x, cameraH - 1)

    ov = np.zeros((cameraH, cameraW, 3), dtype=np.uint8)
    ov[:, :, :] = 0

    frameTime = time.time() * 1000
    overlayInterval = 100

    with picamera.PiCamera(resolution=(cameraW, cameraH),
                           framerate=30) as camera:
        camera.start_preview(alpha=255)
        camera.annotate_foreground = Color('black')
        camera.annotate_background = Color('white')
        overlay = camera.add_overlay(ov.tobytes(), layer=3, alpha=64)
        try:
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (width, height), Image.ANTIALIAS)
                start_time = time.time()
                results = classify_image(interpreter, image)
                elapsed_ms = (time.time() - start_time) * 1000
                stream.seek(0)
                stream.truncate()
                bestIdx = np.argmax(results[2])
                label = labels[results[1][bestIdx] + 1]
                prob = results[2][bestIdx]
                top = int(np.round(results[0][bestIdx][0] * cameraH))
                left = int(np.round(results[0][bestIdx][1] * cameraW))
                bottom = int(np.round(results[0][bestIdx][2] * cameraH))
                right = int(np.round(results[0][bestIdx][3] * cameraW))
                ov[:, :, :] = 0
                if top >= 0 and top < cameraH:
                    ov[top, clampW(left):clampW(right), :] = 0xff
                if bottom >= 0 and bottom < cameraH:
                    ov[bottom, clampW(left):clampW(right), :] = 0xff
                if left >= 0 and left < cameraW:
                    ov[clampH(top):clampH(bottom), left, :] = 0xff
                if right >= 0 and right < cameraW:
                    ov[clampH(top):clampH(bottom), right, :] = 0xff
                if time.time() * 1000 - frameTime > overlayInterval:
                    overlay.update(ov.tobytes())
                    frameTime = time.time() * 1000
                #pdb.set_trace()
                camera.annotate_text = '%s (%.1f%%)\n%.1fms' % (
                    label, prob * 100, elapsed_ms)
        finally:
            camera.remove_overlay(overlay)
            camera.stop_preview()
Example #18
def annotate_picture(camera, camera_config):
    camera.annotate_text_size = 14
    camera.annotate_foreground = Color('black')
    camera.annotate_background = Color('white')
    camera.annotate_text = json.dumps({k: v for k, v in camera_config.items() if 'preview' not in k}, indent=4)
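
A minimal usage sketch for the function above (the config dict contents and capture path are assumptions; the defining module would also need "import json"):

from picamera import PiCamera, Color

camera_config = {'rotation': 180, 'preview_alpha': 200, 'iso': 100}
with PiCamera() as camera:
    annotate_picture(camera, camera_config)  # stamps the non-preview settings as JSON
    camera.capture('/home/pi/annotated.jpg')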
Example #19
def main():
    logger.info('Pi-Cam daemon starting...')
    timeout = 1
    local_ip = '127.0.0.1'
    local_port = 1301
    buffer_size = 1024

    daemon_killer = GracefulKillDaemon()
    udp_server = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    udp_server.bind((local_ip, local_port))
    udp_server.setblocking(0)
    logger.info('UDP server up and listening on {}:{}'.format(local_ip, local_port))

    supported_commands = [
        r'start_preview\([\s]*\)',
        r'stop_preview\([\s]*\)',
        r'resolution[\s]*=[\s]*\(\d{,4}[\s]*\,[\s]*\d{,4}\)',
        r'annotate_text_size[\s]*=[\s]*\d{,3}',
        r'annotate_text[\s]*=[\s]*\"[\w\s\%\.\-:;!@#$%&*()+=<>/]*\"',
        r'brightness[\s]*=[\s]*\d{,3}',
        r'rotation[\s]*=[\s]*\d{,3}',
        r'framerate[\s]*=[\s]*\d{,3}',
        r'contrast[\s]*=[\s]*\d{,3}',
        r'iso[\s]*=[\s]*\d{,3}',
        r'capture\([\s]*\"[\w\%/\-\.]*\"[\s]*\)',
        r'start_recording\([\s]*\"[\w\%/\-\.]*\"[\s]*\)',
        r'stop_recording\([\s]*\)',
        r'annotate_background[\s]*=[\s]*Color\([\s]*\"[a-z]*\"[\s]*\)',
        r'annotate_background[\s]*=[\s]*None',
        r'annotate_foreground[\s]*=[\s]*Color\([\s]*\"[a-z]*\"[\s]*\)',
        r'image_effect[\s]*=[\s]*\"[\w]*\"[\s]*',
        # image_effect: none, negative, solarize, sketch, denoise, emboss, oilpaint, hatch, gpen, pastel, watercolor, film,
        # blur, saturation, colorswap, washedout, posterise, colorpoint, colorbalance, cartoon, deinterlace1, deinterlace2
        r'exposure_mode[\s]*=[\s]*\"[a-z]*\"[\s]*',
        # exposure_mode: off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach, verylong, fixedfps, antishake, fireworks
        r'awb_mode[\s]*=[\s]*\"[a-z]*\"[\s]*',
        # awb_mode: off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon
    ]
    #text_to_annotate = ' %H:%M:%S on %Y %B %d '
    text_to_annotate = ''
    recording = False

    with PiCamera() as camera:
        camera.rotation = 180
        camera.framerate = 20
        camera.resolution = (2592, 1440)
        camera.start_preview()
        camera.annotate_text_size = 40
        camera.annotate_background = Color('black')
        camera.annotate_foreground = Color('white')

        while not(daemon_killer.kill_now):
            camera.annotate_text = datetime.datetime.now().strftime(text_to_annotate)
            ready = select.select([udp_server], [], [], timeout)
            if ready[0]:
                server_response = 'OK'
                received = udp_server.recvfrom(buffer_size)
                message = received[0].decode().strip()
                address = received[1]
                logger.info('Message from Client {}: "{}"'.format(address, message))
                match = False
                for expression in supported_commands:               
                    if re.match(expression, message):
                        match = True
                        if (message.startswith('annotate_text ')) or (message.startswith('annotate_text=')):
                            message = message[message.find('=') + 1:].strip()
                            text_to_annotate = message.replace('"', '')
                            break
                        if message.startswith('stop_recording'):
                            if not(recording):
                                server_response = 'There is no recording in progress'
                                break
                        elif message.startswith('capture'):
                            message = message[message.find('(') + 1:].strip()
                            message = message.strip(')').strip()
                            message = ''.join(['capture(datetime.datetime.now().strftime(', message, '))'])
                        elif message.startswith('start_recording'):
                            message = message[message.find("(") + 1:].strip()
                            message = message.strip(')').strip()
                            message = message.strip('"').strip()
                            if (message.find('.h264') != (len(message) - 5)) and (message.find('.mjpeg') != (len(message) - 6)):
                                message = ''.join([message, '.h264'])
                            message = ''.join(['start_recording(datetime.datetime.now().strftime("', message, '"))'])
                        try:
                            exec(''.join(['camera.', message]))
                        except (ValueError, TypeError) as e:
                            server_response = '{}'.format(e)
                            logger.warning(server_response)
                        else:
                            if message.startswith('start_recording'):
                                recording = True
                        break
                if not(match):
                    server_response = 'Unsupported command'
                bytes_to_send = str.encode(server_response)    
                udp_server.sendto(bytes_to_send, address)
        camera.stop_preview()
    logger.info('Bye!')
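
A minimal sketch of a client for the daemon above, using the host, port, and command grammar from the code:

import socket

with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client:
    client.sendto(b'annotate_text = "hello"', ('127.0.0.1', 1301))
    reply, _ = client.recvfrom(1024)
    print(reply.decode())  # 'OK', or an error message from the daemon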
Example #20
def main():
    ##################################################
    # Initialise LCD
    ## LCD = LCD_1in8.LCD()
    LCD = ST7735()
    ## Lcd_ScanDir = LCD_1in8.SCAN_DIR_DFT
    ## LCD.LCD_Init(Lcd_ScanDir)
    LCD.begin()
    ## screenbuf = Image.new("RGB", (LCD.LCD_Dis_Column, LCD.LCD_Dis_Page), "WHITE")
    screenbuf = Image.new("RGB", (DISPLAY_WIDTH, DISPLAY_HEIGHT), "WHITE")
    draw = ImageDraw.Draw(screenbuf)
    draw.text((33, 22), 'LCD Demo', fill="BLUE", font=FONT_SMALL)
    ## LCD.LCD_PageImage(screenbuf)
    LCD.display(screenbuf)

    ##################################################

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    cameraW = 640
    cameraH = 480

    frameTime = time.time() * 1000

    with picamera.PiCamera(resolution=(cameraW, cameraH),
                           framerate=30) as camera:
        camera.start_preview(alpha=255)
        camera.annotate_foreground = Color('black')
        camera.annotate_background = Color('white')
        try:
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (width, height), Image.ANTIALIAS)
                start_time = time.time()
                results = classify_image(interpreter, image)
                elapsed_ms = (time.time() - start_time) * 1000
                stream.seek(0)
                stream.truncate()

                msg = ""
                for i in range(10):
                    label = labels[results[1][i] + 1]
                    prob = clamp(0, results[2][i], 1)
                    top = clamp(0, results[0][i][0], 1)
                    left = clamp(0, results[0][i][1], 1)
                    bottom = clamp(0, results[0][i][2], 1)
                    right = clamp(0, results[0][i][3], 1)
                    msg += (
                        "{0:20} {1:3.1f}% {2:3.3f} {3:3.3f} {4:3.3f} {5:3.3f} {6: 5.1f}ms\n"
                        .format(label, prob * 100, top, left, bottom, right,
                                elapsed_ms))
                draw.rectangle([(0, 0), (160, 128)], fill="WHITE")
                ## LCD.LCD_PageImage(screenbuf)
                screenbuf.paste(image.resize((DISPLAY_WIDTH, DISPLAY_HEIGHT)))
                # draw.rectangle([(0,0),(160,128)], outline = "RED")
                draw.text((0, 0), msg, fill="BLUE", font=FONT_SMALL)
                LCD.display(screenbuf)
                msg += ("--------------------------------------------------\n")
                print(msg)

                #pdb.set_trace()

                bestIdx = np.argmax(results[2])
                label = labels[results[1][bestIdx] + 1]
                prob = clamp(0, results[2][bestIdx], 1)
                top = clamp(0, results[0][bestIdx][0], 1)
                left = clamp(0, results[0][bestIdx][1], 1)
                bottom = clamp(0, results[0][bestIdx][2], 1)
                right = clamp(0, results[0][bestIdx][3], 1)
                camera.annotate_text = '%s (%.1f%%)\n%.1fms' % (
                    label, prob * 100, elapsed_ms)
        finally:
            camera.stop_preview()
            LCD.LCD_Clear()
Example #21
from picamera import PiCamera, Color
from time import sleep

# Modified from ch5listing5.py of Page 33 of TheMagPi 'Official Camera Guide' PDF

# NOTE: To make picamera preview show when viewing over VNC...
# Go to the VNC icon on the taskbar, click the Menu, Options > Troubleshooting
# and tick 'Enable Direct Capture Mode', and apply the changes

camera = PiCamera()
sleep(2)  # allow the camera to start up fully

camera.start_preview(fullscreen=False, window=(50, 150, 1024, 576))
for effect in camera.IMAGE_EFFECTS:
    for awbmode in camera.AWB_MODES:
        camera.awb_mode = awbmode
        camera.image_effect = effect
        camera.annotate_background = Color('black')
        camera.annotate_text = "Effect: {0}, AWB Mode: {1}".format(
            effect, awbmode)
        camera.capture('/home/pi/Camera/img_effect_{0}_awbmode_{1}.jpg'.format(
            effect, awbmode))
        sleep(2)

camera.stop_preview()
camera.close(
)  # to turn off the camera (and its status LED) when the script has completed
Example #22

def encoder1():
    # Read four GPIO pins as a single 4-bit value (pin 26 is the MSB, pin 6 the LSB)
    return 8 * GPIO.input(26) + 4 * GPIO.input(19) + 2 * GPIO.input(
        13) + GPIO.input(6)


# create an instance of PiCamera
camera = PiCamera()
# rotate the camera 180 degrees
camera.rotation = 180
##camera.framerate=15
# capture resolution (matches the preview window size below)
camera.resolution = (500, 1100)
camera.annotate_text_size = 160
camera.annotate_background = Color('green')
camera.annotate_foreground = Color('yellow')

camera.start_preview(alpha=255, fullscreen=False, window=(0, 0, 500, 1100))
sleep(3)
# the loops below use different filters or change different settings
for i in range(5):  # from selfie0 to selfie4
    sleep(3)
    camera.capture('/home/pi/pro/selfie%s.jpg' % i)  #1600x1200 1.2MB

# take a video
camera.start_recording('/home/pi/pro/firstvideo.h264')  # video
sleep(5)
camera.stop_recording()

##for i in range(100):  # change brightness and annotate
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height',
                        type=int,
                        required=True,
                        help='Input height.')
    parser.add_argument('--input_width',
                        type=int,
                        required=True,
                        help='Input width.')
    parser.add_argument('--input_layer',
                        required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer',
                        required=True,
                        help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.1,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='Keep at most top_k labels.')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help=
        'Enables camera preview in addition to printing result to terminal.')
    parser.add_argument('--show_fps',
                        action='store_true',
                        default=False,
                        help='Shows end to end FPS.')
    args = parser.parse_args()

    model = inference.ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        if args.preview:
            camera.start_preview()

        with inference.CameraInference(model) as camera_inference:
            for result in camera_inference.run(args.num_frames):
                processed_result = process(result, labels, args.output_layer,
                                           args.threshold, args.top_k)
                send_signal_to_servos(processed_result[0])
                message = get_message(processed_result, args.threshold,
                                      args.top_k)
                if args.show_fps:
                    message += '\nWith %.1f FPS.' % camera_inference.rate
                print(message)

                if args.preview:
                    camera.annotate_foreground = Color('black')
                    camera.annotate_background = Color('white')
                    # PiCamera text annotation only supports ascii.
                    camera.annotate_text = '\n %s' % message.encode(
                        'ascii', 'backslashreplace').decode('ascii')

        if args.preview:
            camera.stop_preview()
Example #24
def addTextNight(texto):
    global camera
    # camera.annotate_background = Color('white')
    camera.annotate_foreground = Color('white')
    camera.annotate_text_size = 50
    camera.annotate_text = texto
Example #25
from picamera import PiCamera, Color
from time import sleep

with PiCamera() as camera:
    camera.start_preview()
    camera.annotate_text_size = 50
    camera.annotate_background = Color('green')
    camera.annotate_foreground = Color('white')
    camera.annotate_text = " Hello World "
    sleep(5)
    camera.capture("picamera_02.jpg")
    camera.stop_preview()
Example #26
from gpiozero import RGBLED  # assumed import: RGBLED comes from gpiozero, not picamera
from picamera import Color

led = RGBLED(12, 20, 16)  # or whatever pins you're using
led.color = Color('#ffffff')  # supports HTML color specs
led.color = Color('violet')  # or CSS color names
led.color = Color(255, 127, 0)  # or 0-255 bytes
led.color = Color(0.0, 0.5, 0.0)  # or 0-1 floats
led.color = Color(hue=0, saturation=0.5, lightness=0.5)
Example #27
# Method excerpted from a class that wraps PiCamera in self.camera
def set_annotations(self):
    self.camera.annotate_background = Color('blue')
    self.camera.annotate_foreground = Color('yellow')
    self.camera.annotate_text = "Bongji Cam"
    self.camera.annotate_text_size = 50
Example #28
# Rotate the servo to signal ready
Servos.rotate(0)

# Play a sound to signal ready
soundfile = sounds_dirr + 'ready.mp3'
playSound(soundfile)

print("Press Button!!")

while True:
    # If the button is being pressed
    if Button.isPressed():
        print("Button is pressed")
        rawCapture = PiRGBArray(camera, size=(720, 480))
        camera.start_preview()
        camera.annotate_background = Color("blue")
        camera.start_recording(video_output_file)
        i = 0
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            print(i)
            # process one frame out of every 10
            if (i % 10 == 0):
                test_image = rawCapture.array
                # Process the image
                start_time1 = time.time()
                test_image2 = test_image

                test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
                test_image = cv2.resize(test_image, (128, 128))
Example #29
from picamera import PiCamera, Color
from time import sleep

camera = PiCamera()

camera.start_preview()
camera.annotate_text_size = 50
camera.annotate_background = Color('blue')
camera.annotate_foreground = Color('yellow')
camera.annotate_text = " Hello world "
sleep(5)
camera.capture('/home/pi/Desktop/text.jpg')
camera.stop_preview()
Example #30
def register():
    # Assumes imports from the surrounding script: os, time, cv2, pickle,
    # face_recognition, paths (from imutils), rmtree (from shutil),
    # and PiCamera / Color from picamera.
    folder_dir = "/home/pi/Face_Recognition_Door_Lock/dataset/Known/"
    # Create the folder if it does not exist
    if os.path.isdir(folder_dir) == False:
        os.mkdir(folder_dir)

    people_name = input("User name: ")
    if people_name.find('Unknown') > -1 or people_name.find('unknown') > -1:
        print("[Info]: The name 'unknown' cannot be used")
        print("[Info]: returning to the menu")
        pass

    elif people_name == "" or people_name.find(' ') > -1:
        print("[Info]: The name can't contain blanks")
        print("[Info]: returning to the menu")
        pass

    else:
        save_path = folder_dir + people_name + "/"
        print("저장할 폴더의 경로는 " + save_path + " 입니다.")

        # Create the folder if it does not exist
        if os.path.isdir(save_path) == False:
            os.mkdir(save_path)

        with PiCamera() as camera:
            camera.rotation = 90
            camera.annotate_text_size = 100
            camera.annotate_background = Color('red')
            camera.annotate_foreground = Color('yellow')

            camera.start_preview()
            camera.brightness = 50
            camera.exposure_mode = 'auto'

            for i in range(5, 0, -1):
                camera.annotate_text = str(i)
                time.sleep(1)
            camera.annotate_text = ""

            for i in range(10):
                time.sleep(0.5)
                camera.capture(save_path + people_name + '%s.jpg' % i)

            camera.annotate_text = "Finish!"
            time.sleep(2)
            camera.stop_preview()

            encodings_path = "/home/pi/Face_Recognition_Door_Lock/encoding/"
            if os.path.isdir(encodings_path) == False:
                os.mkdir(encodings_path)

            # list of the images in the dataset folder
            imagePaths = list(paths.list_images(save_path))

            # knownEncodings holds the face embedding vectors; knownNames holds the matching names
            knownEncodings = []
            knownNames = []

            # load every image found under the dataset path
            for (i, imagePath) in enumerate(imagePaths):
                print("[INFO] processing image {}/{}".format(
                    i + 1, len(imagePaths)))
                name = imagePath.split(os.path.sep)[-2]

                image = cv2.imread(imagePath)
                rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                # find the bounding boxes of the faces
                boxes = face_recognition.face_locations(rgb, model="hog")

                if not boxes:
                    print("[ERROR] Face not detected")
                    continue

                # extract the face embedding data for the boxed regions
                encodings = face_recognition.face_encodings(rgb, boxes)

                # loop over the encodings
                for encoding in encodings:
                    knownEncodings.append(encoding)
                    knownNames.append(name)

            if not knownEncodings:
                print("[Error] No faces could be detected in any of the images")

            else:
                # save the face embeddings and names to a pickle file
                print("[INFO] serializing encodings...")
                data = {"encodings": knownEncodings, "names": knownNames}

                with open(encodings_path + people_name + ".pkl", "wb") as f:
                    f.write(pickle.dumps(data))

            print("[Info] Delete images used for encoding")
            rmtree(save_path)