Example #1
#!/usr/bin/python3

# Mark Bradley
# 2019-12-08
# Simple buzzer script
# pin is the i/o pin the buzzer is connected to.
# duration is the time to sound the buzzer for, in seconds.

from gpiozero import Buzzer
from time import sleep

pin = 12  # GPIO pin to connect buzzer to.
duration = 5  # Time to sound buzzer in seconds

buzzer = Buzzer(pin)

buzzer.on()
sleep(duration)
buzzer.off()

print("All done!")
Example #2
import boto3
from flask import Flask, render_template, request
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from time import sleep
import json
from gpiozero import Button, Buzzer
import MySQLdb
import Adafruit_DHT
import threading
import datetime

dynamodb = boto3.resource('dynamodb')
drinkpreparationtime = dynamodb.Table('drinkpreparationtime')

button = Button(13, pull_up=False)
bz = Buzzer(26)
timestamp = ""
firealarm = False
currentorderid = ""

try:
    db = MySQLdb.connect("localhost", "ca2user", "password", "orders")
    curs = db.cursor()
    print("Successfully connected to database!")
except MySQLdb.Error:
    print("Error connecting to MySQL database")


# Custom MQTT message callback
def customCallback(client, userdata, message):
    bz.on()
Example #3
import argparse
import datetime
import time
import tkinter as tk
from typing import Union

import cv2
import imutils
import numpy as np
from imutils.video import VideoStream, FPS
from gpiozero import LED, Buzzer

blueLED = LED(17)
redLED = LED(27)
greenLED = LED(22)
buzzer = Buzzer(10)

# Construct the argument parse and parse the arguments
ap = argparse.ArgumentParser(
    description="Detect objects in a real time video stream")
ap.add_argument("-p",
                "--prototxt",
                required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m",
                "--model",
                required=True,
                help="path to Caffe pre-trained model")
ap.add_argument(
    "-c",
    "--confidence",
Example #4
 def __init__(self, buzzPin, quietMode=False):
     self.pin = buzzPin
     self.buzz = Buzzer(buzzPin)
     self.quietMode = quietMode
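
 # A minimal sketch (an assumption, not from the original class) of how the rest
 # of this wrapper might honour quietMode; the beep() method name and defaults
 # here are illustrative only.
 def beep(self, on_time=0.5, off_time=0.5, n=1):
     # Skip the physical buzzer entirely when quiet mode is enabled.
     if self.quietMode:
         return
     self.buzz.beep(on_time=on_time, off_time=off_time, n=n, background=False)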
Example #5
import os
import time
from slackclient import SlackClient
from gpiozero import Buzzer

## Define our config values

# What is our min dish count to alarm on?
min_dishes = 2

# Define areas we want to ignore
# First value is the x range, second is the y range
ignore_list = ["339-345,257-260"]

# Set the GPIO our buzzer is connected to
buzzer = Buzzer(21)

# Set how long we want to buzz
buzz_seconds = 180

# Set our timestamp
time_stamp = time.strftime("%Y%m%d%H%M%S")

# Set our circle detection variables
circle_sensitivity = 40  # Larger numbers increase false positives
min_rad = 30  # Tweak this if you're detecting circles that are too small
max_rad = 75  # Tweak if you're detecting circles that are too big (Ie: round sinks)

# Cropping the image allows us to only process the areas of the image
# that we actually care about. Set our crop values
crop_left = 0
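
# Illustration only -- an assumption about how the settings above could feed a
# circle detector; the original detection code is not shown in this snippet.
import cv2

def find_dishes(gray_image):
    # Hough circle transform driven by the sensitivity and radius limits above.
    return cv2.HoughCircles(gray_image, cv2.HOUGH_GRADIENT, dp=1.2, minDist=50,
                            param2=circle_sensitivity,
                            minRadius=min_rad, maxRadius=max_rad)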
Example #6
#!/usr/bin/env python
"""
Controls the SunFounder active buzzer module.

This program was written on a Raspberry Pi using the Geany IDE.
"""
from time import sleep
from gpiozero import Buzzer

buzzer = Buzzer(pin=17, active_high=True, initial_value=False)


def activate_buzzer():
    """
    Activates the buzzer
    """
    sleep_speed = 0.5

    buzzer.on()
    sleep(sleep_speed)
    buzzer.off()
    sleep(sleep_speed)


def stop():
    """
    Releases resources and exits.
    """
    print("\nStopping program.")
    buzzer.off()
    buzzer.close()
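
# A possible way to drive the functions above (an assumption, not part of the
# original script): beep repeatedly until interrupted, then clean up.
if __name__ == "__main__":
    try:
        while True:
            activate_buzzer()
    except KeyboardInterrupt:
        stop()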
Example #7
 def buzz_init(self, buzzPin):
     self.buzz = Buzzer(buzzPin)
Example #8
import threading
import RPi.GPIO as GPIO
import PLC
from time import sleep
from gpiozero import LED, Button
from signal import pause
from gpiozero import LightSensor, Buzzer

ldr = LightSensor(pin=4, queue_len=2, charge_time_limit=22, threshold=0.955)
led = LED(17)
buzzer = Buzzer(22)
button = Button(2)

p_exit = 0
def led_exit():
    global p_exit
    p_exit = 1
button.when_pressed = led_exit

lock = threading.Lock()


GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN)
GPIO.setup(27, GPIO.IN)




infrared_on = False
def infrared_detect():
Example #9
from gpiozero import Button, LED, Buzzer
from time import sleep

buzzer = Buzzer(15)
led_red = LED(25)
led_yellow = LED(8)
led_green = LED(7)
button = Button(21)

while True:
    button.wait_for_press()
    led_green.on()
    sleep(1)
    led_yellow.on()
    sleep(1)
    led_red.on()
    sleep(1)
    led_green.off()
    led_yellow.off()
    led_red.off()
Example #10
from gpiozero import Buzzer, DistanceSensor
from time import sleep

buzzer = Buzzer(12)
sensor = DistanceSensor(echo=24, trigger=23)

while True:
    dist = sensor.distance * 100
    # print(dist)
    if dist >= 20:
        # Out of range: make sure the buzzer is not left on.
        buzzer.off()
    elif dist >= 15:
        buzzer.beep(on_time=0.3, off_time=0.3, n=1, background=False)
    elif dist >= 10:
        buzzer.beep(on_time=0.3, off_time=0.3, n=2, background=False)
    else:  # dist < 10
        buzzer.on()
    sleep(0.5)
Example #11
from picamera import PiCamera
from gpiozero import DistanceSensor, LED, Buzzer
from time import sleep
from datetime import datetime

camera = PiCamera()
camera.rotation = 180

azul = LED(25)
som = Buzzer(24)

sensor = DistanceSensor(echo=18, trigger=23, max_distance=0.5)


def fotografar():
    azul.on()
    som.on()

    hora = datetime.now()
    camera.capture(str(hora) + ".jpg")
    print("Photo captured: " + str(hora))

    sleep(0.5)
    azul.off()
    som.off()


sleep(1)

while True:
    sensor.wait_for_in_range()
Example #12
from gpiozero import Buzzer
import time

buz = Buzzer(21)

buz.on()
time.sleep(3)
buz.off()
Example #13
# Pi-Stop Traffic lights
l1_red = LED(21)
l1_amber = LED(20)
l1_green = LED(16)
l2_red = LED(26)
l2_amber = LED(19)
l2_green = LED(13)
# Buttons
b1 = Button(14)
b2 = Button(15)
# Walk/don't walk indicator
m1 = RGBLED(11, 9, 10)
m2 = RGBLED(7, 8, 25)
# buzzer
bz = Buzzer(17)

time_since_last_red = 0


def stop_go_seq():  # Traffic lights change from red to green
    print("Going sequence")
    global time_since_last_red
    l1_amber.off()
    l1_green.off()
    l1_red.on()
    l2_amber.off()
    l2_green.off()
    l2_red.on()
    sleep(1)
    l1_amber.on()
Example #14
from os import system

system('clear')
print('Just a sec...')
import tweepy
from email.message import EmailMessage
import imghdr
import smtplib as email
from picamera import PiCamera
from gpiozero import Button, Buzzer
from time import sleep
import json
print('Done :)')
sleep(0.75)
system('clear')

button = Button(4)
buzz = Buzzer(3)
buzz.off()
cam = PiCamera()
TWITTER = 1
EMAIL = 2
BOTH = 3
CONFIG = json.loads(open('config.json').read())

def main():
    system('clear')
    
    onTwitter = ' '
    handle = ''
    burst = ' '
    postOn = ''
    while postOn not in ['1', '2', '3']:
Example #15
from gpiozero import Button, Buzzer
from time import sleep

#switch variable pin no 2
switch = Button(2)

#buzzer variable pin no 16
buzzer = Buzzer(16)

#buzzer function that plays the buzzer in a pattern
def buzzerBeep():
    buzzer.beep(0.05,0.05,5, False)
    sleep(0.3)
    buzzer.beep(0.3,0.3,2, False)
    sleep(0.3)
    buzzer.beep(0.05,0.05,5, False)

#call the "buzzerBeep" function when the switch is released
#switch.when_released = buzzerBeep
while True:
    if switch.is_pressed:
        buzzerBeep()
Example #16
class IntelligentDriverSystem():

    button_start = Button(2)
    button_reset = Button(3)
    buzzer = Buzzer(4)
    button_flag = 0
    collectError =0

    # args={'encodings':'encodings_test_v5.pickle','detection_method':'hog'}
    args={'encodings':'data/encoding_test.pickle','detection_method':'hog'}


    def __init__(self):
        from pygame import mixer
        mixer.init()
        mixer.music.load('begin.mp3')
        mixer.music.play()
        addIdentity=AddIdentity()
        lcd = Adafruit_CharLCD()
        lcd.clear()  
        Time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        lcd.message("Welcome! time is\n"+Time)

        self.buzzer.blink(0.1)
        time.sleep(3)    
        self.buzzer.off()

        # print("Enter the password")
        # button_begin_result = self.start_up()

        while True:
            print("Enter the password")
            button_begin_result = self.start_up()
            if button_begin_result==2:
                print("Enter the main program.")
                self.collectError = 0
                break
            elif button_begin_result==1:
                print("Enter the add program.")
                self.collectError = 0
                addIdentity.Add(lcd)
                break
            else:
                self.collectError += 1
                print("Error {:d} times. Please enter the password again.".format(self.collectError))
                errormessage="Error {:d} times.\nPlease try again.".format(self.collectError)

                lcd.clear()
                lcd.message(errormessage)

                if self.collectError >= 3:
                    self.buzzer.blink(0.1)
                    time.sleep((self.collectError-2)*5.0)
                    self.buzzer.off()


        while True:
            print("Press the white button to start the program.")
            if self.button_start.is_pressed:
            # button_begin_result = self.button_start()
            # print('button_begin_result',button_begin_result)
            # print('button_begin_result.type',type(button_begin_result))
            # if (button_begin_result==1) or (button_begin_result == 2):
                mixer.music.stop()
                self.buzzer.blink(0.5)
                time.sleep(1)
                self.buzzer.off()
                

                print("***************BEGIN****************")
                print("[INFO] STEP ONE - face recognition")

                lcd.clear()
                lcd.message("face recognition")

                # driverface_recognition(args={'encodings':'encodings_test_v3.pickle','output':'output/webcam_face_recognition_output.avi','display':1,'detection_method':'hog'})
                # driverface_recognition(args={'encodings':'encodings_test_v4.pickle','output': None,'display':1,'detection_method':'hog'})
                self.driverface_recognition(Adafruit_CharLCD = lcd)
                
                if self.button_flag:
                    
                    continue

                print("[INFO] waiting...  ")
                lcd.message("waiting...")

                time.sleep(1.0)  # delay

                lcd.clear()
                lcd.message("fatigue detection")

                print("[INFO] STEP TWO - fatigue detection")

                self.fatigue_detection(Adafruit_CharLCD = lcd)

                lcd.clear()

                print("***************END****************")

            #elif self.button_start() == 1:
            # elif button_begin_result== 1:
             
            #     print("enter addidentity")
            #     #addIdentity=AddIdentity()
            #     addIdentity.Add()
            else:
                
                print("[INFO] waiting...  ")
                lcd.clear()
                lcd.message("waiting...")
                time.sleep(1)
                if self.buzzer.is_active:
                   self.buzzer.off()

    def start_up(self):
        print('=========================')
        print('into button start')
        keypad1=KeyPad()
        namestrs1=keypad1.getStr()
        print("pending")
        if namestrs1=='88888888':
            print("enter 88888888")
            return 2
        elif namestrs1=='666':
            print("enter 666")
            return 1
        else:
            print("failed")
            return 0

    # def button_addIdentity(self):
    #     keypad2=KeyPad()
    #     namestrs2=keypad2.getStr()
    #     if namestrs2=='666':
    #         return True
    #     else:
    #         return False

    # def button_reset(self):
    #     keypad2=KeyPad()
    #     namestrs2=keypad2.getStr()
    #     if namestrs2=='D':
    #         return True
    #     else:
    #         return False

    # # Compute the mouth aspect ratio; euclidean(u, v, w=None) computes the Euclidean distance between two points
    # def mouthRatio(self,mouth):
    #     left=dis.euclidean(mouth[2],mouth[10])
    #     mid=dis.euclidean(mouth[3],mouth[9])
    #     right=dis.euclidean(mouth[4],mouth[8])
    #     horizontal=dis.euclidean(mouth[0],mouth[6])

    #     return 10.0*horizontal/(3.0*left+4.0*mid+3.0*right)

    # Compute the eye aspect ratio
    def eyesRatio(self,eye):
        left = dis.euclidean(eye[1], eye[5])
        right = dis.euclidean(eye[2], eye[4])
        horizontal = dis.euclidean(eye[0], eye[3])
        
        return 2.0*horizontal/(left+right)

    # Face recognition
    def driverface_recognition(self,Adafruit_CharLCD):
        # Pull in the global variable to check the button state
        # global button_flag
        # How long the driver has been present in front of the camera during recognition
        Judge = 0
        # Threshold for how long the driver must be present in front of the camera;
        # once reached, break out of the loop and move on, otherwise keep looping
        flag  = 3


        # load the known faces and embeddings
        print("[INFO] loading encodings...")
        data = pickle.loads(open(self.args["encodings"], "rb").read())
        #print(data)

        # initialize the video stream and pointer to output video file, then
        # allow the camera sensor to warm up
        print("[INFO] starting video stream...")
        cap = cv2.VideoCapture(0)

        # Adjust the camera frame rate; so far, the lower the frame rate, the easier it is to control
        cap.set(cv2.CAP_PROP_FPS,2)

        print("[INFO] initializing...")
        # writer = None
        time.sleep(2.0)

        # loop over frames from the video file stream
        while True:
            # grab the frame from the threaded video stream
            if self.button_reset.is_pressed:
            # if self.button_reset():

                Adafruit_CharLCD.clear()
                Adafruit_CharLCD.message("over!")
                time.sleep(1)
                self.button_flag = 1
                mixer.init()
                mixer.music.load('xiaoge.mp3')
                mixer.music.play()
                break
            else:
                self.button_flag = 0

            ret,frame = cap.read()
                
            # convert the input frame from BGR to RGB then resize it to have
            # a width of 750px (to speedup processing)

            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            rgb = imutils.resize(frame, width=320)
            r = frame.shape[1] / float(rgb.shape[1])
            #print("r: " + str(r))
            
            # detect the (x, y)-coordinates of the bounding boxes
            # corresponding to each face in the input frame, then compute
            # the facial embeddings for each face
            boxes = face_recognition.face_locations(rgb, model= self.args["detection_method"])
            encodings = face_recognition.face_encodings(rgb, boxes)
            names = []

            # loop over the facial embeddings
            for encoding in encodings:
                # attempt to match each face in the input image to our known
                # encodings
                matches = face_recognition.compare_faces(data["encodings"],
                          encoding , tolerance=0.5)
                name = "Unknown"

                # check to see if we have found a match
                if True in matches:
                    # find the indexes of all matched faces then initialize a
                    # dictionary to count the total number of times each face
                    # was matched
                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    # loop over the matched indexes and maintain a count for
                    # each recognized face
                    for i in matchedIdxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1

                    # determine the recognized face with the largest number
                    # of votes (note: in the event of an unlikely tie Python
                    # will select first entry in the dictionary)
                    name = max(counts, key=counts.get)
                
                # update the list of names
                names.append(name)

            # loop over the recognized faces
            # for ((top, right, bottom, left), name) in zip(boxes, names):
            #     # rescale the face coordinates
            #     top = int(top * r)
            #     right = int(right * r)
            #     bottom = int(bottom * r)
            #     left = int(left * r)

                # draw the predicted face name on the image
                # cv2.rectangle(frame, (left, top), (right, bottom),
                #     (0, 255, 0), 2)
                # y = top - 15 if top - 15 > 15 else top + 15
                # cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                #     0.75, (0, 255, 0), 2)

            # if the video writer is None *AND* we are supposed to write
            # the output video to disk initialize the writer
            # if writer is None and args["output"] is not None:
            #     fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            #     writer = cv2.VideoWriter(args["output"], fourcc, 20,
            #         (frame.shape[1], frame.shape[0]), True)

            # if the writer is not None, write the frame with recognized
            # faces to disk
            # if writer is not None:
            #     writer.write(frame)
            
            print("Detected", names)
            if(len(names) > 0):
                namestrs=""
                for namestr in names:
                    namestrs+=namestr+','            
                Adafruit_CharLCD.clear()
                # Adafruit_CharLCD.message(str(names[0])+str(Judge))
                Adafruit_CharLCD.message(namestrs)
                
            else:
                Adafruit_CharLCD.clear()
                Adafruit_CharLCD.message('NO RECOGNITION')
            #lcd.clear()
            #lcd.message(str(names[0]))
            # Decision: if a face is detected and keeps being detected, Judge keeps counting; otherwise Judge resets to zero
            if ("Unknown" not in names) & (len(names)>0):
                Judge = Judge+1
            else:
                Judge = 0

            # check to see if we are supposed to display the output frame to
            # the screen
            # if args["display"] > 0:
            #cv2.imshow("Frame", frame)

            #key = cv2.waitKey(1) & 0xFF
            #if key == ord("Q"):  exit()


            # Recognition decision: a known face breaks out of the loop; a stranger loops forever
            if (Judge > flag) & ("Unknown" not in names) & (len(names)>0):
                self.buzzer.blink(0.1)
                Adafruit_CharLCD.clear()
                Adafruit_CharLCD.message("hello!" + name)
                time.sleep(2)
                self.buzzer.off()
                break
            
            # Judge=Judge+1

            # Stop


        # do a bit of cleanup
        #cv2.destroyAllWindows()

        # vs.stop()

        # check to see if the video writer point needs to be released
        # if writer is not None:
        #     writer.release()

    # Fatigue detection
    def fatigue_detection(self,Adafruit_CharLCD):
        # Eye aspect ratio threshold: exceeding it means the ratio is above the collected average, i.e. the eyes are treated as "closed"
        eyesRatioLimit=0
        # Number of samples to collect
        collectCountInterval=40
        # Sample counter; collect collectCountInterval samples and then take the average
        collectCount=0
        # Running sum used for data collection
        collectSum=0
        # Whether detection has started
        startCheck=False
        # Count of "eyes closed" frames
        eyesCloseCount=0

        # Initialize dlib
        detector=dlib.get_frontal_face_detector()
        predictor=dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

        # Get the landmark indexes for each facial feature
        # Left and right eyes
        (left_Start,left_End)=face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (right_Start,right_End)=face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        # # Mouth
        # (leftMouth,rightMouth)=face_utils.FACIAL_LANDMARKS_IDXS['mouth']
        # # Jaw
        # (leftJaw,rightJaw)=face_utils.FACIAL_LANDMARKS_IDXS['jaw']
        # # Nose
        # (leftNose,rightNose)=face_utils.FACIAL_LANDMARKS_IDXS['nose']
        # # Left and right eyebrows
        # (left_leftEyebrow,left_rightEyebrow)=face_utils.FACIAL_LANDMARKS_IDXS['left_eyebrow']
        # (right_leftEyebrow,right_rightEyebrow)=face_utils.FACIAL_LANDMARKS_IDXS['right_eyebrow']

        # Start the video stream, then wait
        print("[INFO] starting video stream...")
        cap = cv2.VideoCapture(0)
        # Adjust the camera frame rate
        cap.set(cv2.CAP_PROP_FPS,2)
        # Adjust the camera resolution
        # ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH,240)
        # ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT,130)

        # Initialize
        print("[INFO] initializing...")
        time.sleep(2.0)

        # Detection loop
        while True:
            
            
            # Process each frame: set the width and convert to grayscale
            ret,frame = cap.read()
            #print('collect data')
            frame = imutils.resize(frame, width=320)
            # Convert to the GRAY color space
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Detect faces in the grayscale image
            faces = detector(img, 0)
            for k in faces:
                # Locate the facial landmarks in the face region and convert the coordinates to a numpy array
                shape = predictor(img, k)
                shape = face_utils.shape_to_np(shape)

                # Left and right eyes
                leftEye = shape[left_Start:left_End]
                rightEye = shape[right_Start:right_End]
                leftEyesVal = self.eyesRatio(leftEye)
                rightEyesVal = self.eyesRatio(rightEye)
                # Convex hulls
                leftEyeHull = cv2.convexHull(leftEye)
                rightEyeHull = cv2.convexHull(rightEye)
                # Draw the contours
                cv2.drawContours(img, [leftEyeHull], -1, (0, 0, 0), 1)
                cv2.drawContours(img, [rightEyeHull], -1, (0, 0, 0), 1)
                # Use the average of both eyes' aspect ratios as this frame's value
                eyeRatioVal = (leftEyesVal + rightEyesVal) / 2.0

                # # Mouth
                # mouth=shape[leftMouth:rightMouth]
                # mouthHull=cv2.convexHull(mouth)
                # cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)

                # # Nose
                # nose=shape[leftNose:rightNose]
                # noseHull=cv2.convexHull(nose)
                # cv2.drawContours(frame, [noseHull], -1, (0, 255, 0), 1)

                # # Jaw
                # jaw=shape[leftJaw:rightJaw]
                # jawHull=cv2.convexHull(jaw)
                # cv2.drawContours(frame, [jawHull], -1, (0, 255, 0), 1)

                # # Left eyebrow
                # leftEyebrow=shape[left_leftEyebrow:left_rightEyebrow]
                # leftEyebrowHull=cv2.convexHull(leftEyebrow)
                # cv2.drawContours(frame, [leftEyebrowHull], -1, (0, 255, 0), 1)

                # # Right eyebrow
                # rightEyebrow=shape[right_leftEyebrow:right_rightEyebrow]
                # rightEyebrowHull=cv2.convexHull(rightEyebrow)
                # cv2.drawContours(frame, [rightEyebrowHull], -1, (0, 255, 0), 1)

                if collectCount < collectCountInterval:
                    collectCount+=1
                    collectSum+=eyeRatioVal
                    # cv2.putText(img, "DATA COLLECTING", (10, 10),cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 0), 1)
                    print("[INFO] DATA COLLECTING...  ")

                    Adafruit_CharLCD.clear()
                    Adafruit_CharLCD.message("DATA COLLECTING")

                    startCheck=False
                else:
                    if not startCheck:
                        eyesRatioLimit=collectSum/(1.0*collectCountInterval)
                        # Adafruit_CharLCD.clear()
                        Adafruit_CharLCD.clear()
                        Adafruit_CharLCD.message("fatigue detection")

                        print('Average eye aspect ratio', eyesRatioLimit)
                        # eyesRatioLimit = round(eyesRatioLimit,2)
                        # AverageEyesRatio = str(eyesRatioLimit)
                    startCheck=True

                # Adafruit_CharLCD.clear()
                # Adafruit_CharLCD.message("fatigue detection")

                if startCheck:
                    # If the eye aspect ratio exceeds the threshold, count it; enough consecutive closed-eye frames means the driver is considered "asleep"
                    if eyeRatioVal > 1.11*eyesRatioLimit:
                        eyesCloseCount += 1
                        if eyesCloseCount >= 22:
                            # cv2.putText(frame, "SLEEP!!!", (580, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
                            # cv2.putText(img, "SLEEP!!!",   (60, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 0), 1)
                            # if eyesCloseCount%2 == 0 :

                            #Adafruit_CharLCD.clear()
                            #Adafruit_CharLCD.message(str(eyesCloseCount)+'/n sleep!')
                            #time.sleep(0.1)
                            #Adafruit_CharLCD.message("sleep!")
                            self.buzzer.blink(0.3)
                            #time.sleep(0.5)
                        elif eyesCloseCount >= 4:
                            # cv2.putText(img, "EXHAUSTED!", (60, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 0), 1)
                            # if eyesCloseCount%2 == 0 :                        
                            
                            #Adafruit_CharLCD.clear()
                            #Adafruit_CharLCD.message('close'+str(eyesCloseCount)+'/n exhausted!')
                            #time.sleep(0.1)
                          #  Adafruit_CharLCD.message('close'+"exhausted!")
                            self.buzzer.blink(0.1)
                            
                            #time.sleep(0.5)
                        elif eyesCloseCount >= 0:
                            # cv2.putText(img, "WIDE AWAKE", (60, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 0), 1)
                            #
                            #Adafruit_CharLCD.clear()
                            #Adafruit_CharLCD.message('close'+str(eyesCloseCount)+'/n Wake!')
                            #time.sleep(0.1)
                            print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                            #time.sleep(0.5)
                            print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))                            
                            
                            # Adafruit_CharLCD.message("Wake!")
                            #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                            #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                            self.buzzer.off()
                        # alarm_beep(eyesCloseCount)

                    else:  
                        eyesCloseCount = 0

                    print('Real-time eye aspect ratio: {:.2f}'.format(eyeRatioVal/eyesRatioLimit))
                    # eyeRatioVal = round(eyeRatioVal,2)
                    # Adafruit_CharLCD.clear()
                    #Adafruit_CharLCD.message('av:'+AverageEyesRatio+'\n now:'+str(eyeRatioVal))
                    
                    # Eye aspect ratio
                    # cv2.putText(img, "EYES_RATIO: {:.2f}".format(eyeRatioVal), (20, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 0), 1)
                    # Closed-eye count
                    # cv2.putText(img,"EYES_COLSE: {}".format(eyesCloseCount),(40,30),cv2.FONT_HERSHEY_SIMPLEX,0.2,(0,0,0),1)

                    # Yawning could be detected from the mouth aspect ratio, but that turned out not to be very useful
                    # cv2.putText(frame,"MOUTH_RATIO: {:.2f}".format(mouthRatio(mouth)),(500, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            #cv2.imshow("Frame", img)

            #key = cv2.waitKey(1) & 0xFF
            # Stop
            #if key == ord("S"):  break

            if self.button_reset.is_pressed :
            # if self.button_reset() :
                
                print('program is over')
                mixer.init()
                mixer.music.load('xiaoge.mp3')
                mixer.music.play()
                break
            

        cap.release()
Example #17
red_pin = 17
orange_pin = 16
green_pin = 21
button_pin = 2
buzzer_pin = 18

# GPIO pin
led_red = LED(red_pin)
led_orange = LED(orange_pin)
led_green = LED(green_pin)

# Button
button = Button(button_pin)

# Buzzer
buzzer = Buzzer(buzzer_pin)

while True:

    if button.is_pressed:
        
        print("On")

        # Buzzer on
        buzzer.on()

        # Orange on and off
        led_orange.on()
        sleep(0.5)
        led_orange.off()
        
Example #18
 def __init__(self, gpio: int):
     self.__bz__ = Buzzer(gpio)
     self.__bz__.off()
Example #19
# Check subscriptions from the command line:
# mosquitto_sub -v -h localhost -t iot/#
PORT = 5000

kp = Keypad()
PASSWORD = "******"
confirm = ""  # holds the entered code to compare against the password
t = None
b_press = False
counter = 0  # counts wrong attempts; a notification is raised after 3 password failures
timeout = 3  # used to call the reset function after 3 seconds
door = ""
mag_msg = ""

client = mqtt.Client()
bz = Buzzer(16)

GPIO.setwarnings(False)
SERVO = 24
pi = pigpio.pi()
pi.set_servo_pulsewidth(SERVO, 700)  # initial position: 0 degrees

button = Button(20)

camera_pin = 1
# cap = cv2.VideoCapture(camera_pin)
image_cj = []


def to_jpg(frame, quality=60):  # (frame to convert)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
Example #20
from unicodedata import normalize
from signal import pause
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# /Raspbian/Raspberry Pi configuration

# LED configuration and declaration
from gpiozero import LED
led1 = LED(16)
led2 = LED(12)
led3 = LED(7)
# /LED configuration and declaration

# Buzzer configuration and declaration
from gpiozero import Buzzer
buzzer = Buzzer(24)
# /Buzzer configuration and declaration

# LED and buzzer notification functions
def NotificaOK():
    led1.blink(on_time=0.25,off_time=0.1,n=2)
    buzzer.blink(on_time=0.25,off_time=0.1,n=2,background=False)

def NotificaLeitura():
    led2.blink(on_time=0.5,off_time=0.5,n=1)
    buzzer.blink(on_time=0.25,off_time=0.25,n=1,background=False)

def NotificaErro():
    led3.blink(on_time=0.5,off_time=0.1,n=2)
    buzzer.blink(on_time=0.5,off_time=0.1,n=2,background=False)
# /LED and buzzer notification functions
Example #21
 def set_pin(self, newPin):
     self.pin = newPin
     self.buzz = Buzzer(newPin)
Example #22
import board
import busio
from digitalio import DigitalInOut
from gpiozero import Buzzer
import time

buzzer = Buzzer(17)

from adafruit_pn532.i2c import PN532_I2C
# from adafruit_pn532.spi import PN532_SPI
# from adafruit_pn532.uart import PN532_UART

# I2C connection:
i2c = busio.I2C(board.SCL, board.SDA)

reset_pin = DigitalInOut(board.D6)
req_pin = DigitalInOut(board.D12)
pn532 = PN532_I2C(i2c, debug=False, reset=reset_pin, req=req_pin)

ic, ver, rev, support = pn532.firmware_version
print("Found PN532 with firmware version: {0}.{1}".format(ver, rev))

# Configure PN532 to communicate with MiFare cards
pn532.SAM_configuration()

print("Waiting for RFID/NFC card...")
while True:
    # Check if a card is available to read
    uid = pn532.read_passive_target(timeout=0.5)
    print(".", end="")
    # Try again if no card is available.
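    # Added sketch (an assumption, not part of the original snippet):
    # read_passive_target() returns None when no card is seen, so a typical
    # loop would skip in that case and beep once a UID comes back.
    if uid is None:
        continue
    print("\nFound card with UID:", [hex(i) for i in uid])
    buzzer.beep(on_time=0.2, off_time=0.2, n=2)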
Example #23
from gpiozero import LED, Buzzer, Button
from time import sleep
from signal import pause
import datetime
import sys

btn = Button(6)
buzz = Buzzer(17)
stopped = False


def setStop():
    global stopped
    print(stopped)
    stopped = True


def _2beep():

    global stopped

    btn.when_pressed = setStop

    for count in range(0, 6):
        if (stopped != True):
            print("button pressed")
            buzz.on()
            sleep(0.2)
            buzz.off()
            sleep(0.2)
            buzz.on()
            sleep(0.2)
Example #24
#!/usr/bin/env python3
########################################################################
# Filename    : Doorbell3.py
# Description : Make doorbell with buzzer and button
# Author      : Rosario Paolella
# modification: 2021/01/23
########################################################################
from gpiozero import Buzzer, Button
from signal import pause

print('Program is starting...')

buzzer = Buzzer(17)
button = Button(18)


def onButtonPressed():
    buzzer.on()
    print("Button is pressed, buzzer turned on >>>")


def onButtonReleased():
    buzzer.off()
    print("Button is released, buzzer turned off <<<")


button.when_pressed = onButtonPressed
button.when_released = onButtonReleased

pause()
Example #25
from gpiozero import Buzzer
from time import sleep

buzzer = Buzzer(27)

while True:
    buzzer.on()
    sleep(1)
    buzzer.off()
    sleep(1)

Example #26
    print(f'Could not connect to Raspberry Pi at {os.environ["REMOTEPI"]}')

camLED = LED(24)
servo = Servo(18, 1, pin_factory=remote_factory)
RGBLed = RGBLED(27, 23, 25)
screen = LCD()

systems = {
    'fan': "T",
    'servo': "T",
    'keypad': "T",
    'camera': "T",
    'sensor': "T",
}

buzzer = Buzzer(4)
remote_buzzer = Buzzer(17, pin_factory=remote_factory)
fan = DigitalOutputDevice(22, pin_factory=remote_factory)


def armSystem():
    # Arm all systems
    for system in systems:
        systems[system] = "T"

    servo.max()


def disarmSystem():
    # Disarm all systems
    for system in systems:
Example #27
import dht11
import smtplib
import time
from threading import Thread
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask import Flask, render_template, redirect
from RPi import GPIO
from gpiozero import LightSensor, Buzzer

app = Flask(__name__)

LASER_PIN = 10
ldr = LightSensor(12)
buzzer = Buzzer(4)

on = '#2FD22F'
off = '#D22F2F'

# Each status will change color based on if it's on or off.
# It also gives more visual info to the user.
security = off
temp_threshold = 81  # change this to whatever you want
temperature = off
warning = False
toaddr = input('Enter your email: ')


def security():
    global security, ldr, buzzer, on, off
Example #28
from gpiozero import Buzzer
from time import sleep

buz = Buzzer(26)

while True:
    buz.on()
    sleep(1)
    buz.off()
    sleep(1)
Example #29
"""
import argparse
import dbus
from time import sleep

from gpiozero import LED
from gpiozero import Buzzer

import iterate

# constants

led1 = LED(22)
led2 = LED(23)
led3 = LED(24)
buzz = Buzzer(5)

DBUS_SYS_BUS = dbus.SystemBus()

DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'

BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DEVICE_IFACE = 'org.bluez.Device1'
CHAR_IFACE = 'org.bluez.GattCharacteristic1'

BEEP_TIME = 0.25


class microbit:
Example #30
## otherwise the buzzer sounds.

## To set up PlayHAT, please visit https://github.com/4tronix/PlayHAT

import time
from neopixel import *
from gpiozero import Button, Buzzer
from signal import pause

btnG = Button(17)
btnR = Button(4)
btnB = Button(22)

btnY = Button(27)

beeper = Buzzer(23)

MyList = []
MyCheckList = []

# LED strip configuration:
LED_COUNT = 9  # Number of LED pixels.
LED_PIN = 18  # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000  # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5  # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255  # Set to 0 for darkest and 255 for brightest
LED_INVERT = False  # True to invert the signal (when using NPN transistor level shift)


def colorWipe(strip, color, wait_ms=50):
    """Wipe color across display a pixel at a time."""