Example no. 1
def capture_image():
    ret, image = cap.read()
    print("capturing video")
    currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%d-%m-%Y %H:%M:%S') 
    print(currentTime)
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(grayImage, 1.1, 5)  # scaleFactor 1.1, minNeighbors 5
    print(faces)
    #print(faces.shape)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)  # draw a green box around each face
    cv2.imshow('Image with faces',image)
    face_detect = len(faces)
    if face_detect >= 1:
        maskStatus = 'no'
        print("at least one face detected")
    else:
        print("no faces detected")
        maskStatus = 'yes'
    print("frame number is: " + str(frame))
    fileLoc = "/home/pi/Images/image{}.jpg".format(frame)
    #print(fileLoc)
    img_flip = cv2.flip(image, 1) # flip image horizontally on y axis before saving
    cv2.imwrite(fileLoc, img_flip) 
    storeFileFB.store_file(fileLoc)
    storeFileFB.push_db(fileLoc, currentTime, face_detect, maskStatus)
    cv2.destroyAllWindows()
    print("Number of faces detected: " + str(face_detect))
    return face_detect
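The function above relies on several module-level names (cap, face_cascade, frame and storeFileFB) that are defined elsewhere in the original script. A minimal setup sketch, assuming the Haar cascade bundled with the opencv-python wheel:

import datetime
import time

import cv2

import storeFileFB  # project helper that wraps the Firebase storage/database calls

cap = cv2.VideoCapture(0)  # first attached camera
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
frame = 1  # counter used to name the saved image files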
Example no. 2
def capture_image():
    ret, image = cap.read()  # ret flags whether a frame was actually read from the camera
    print("capturing video")
    currentTime = datetime.datetime.fromtimestamp(
        time.time()).strftime('%d-%m-%Y %H:%M:%S')
    print(currentTime)
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    faces = face_cascade.detectMultiScale(grayImage, 1.1, 6)  # detect faces
    maskStatus = 'No'  # initialize mask status to no mask

    if len(faces) == 0:  # if the faces array is empty, no faces were detected
        maskStatus = 'No'  # Pir activated, No face detected
        print("No faces detected")
        print("Mask Status: " + str(maskStatus))
    else:
        for (x, y, w, h) in faces:
            image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0),
                                  1)
            roi_grayImage = grayImage[y:y + h, x:x + w]
            roi_color = image[y:y + h, x:x + w]
            smile = smile_cascade.detectMultiScale(roi_grayImage, 1.7, 20)
            # print("print smile co-ords ")
            # print(smile.shape)
            if (len(smile)) == 0:  #  face detected but no smile detected
                maskStatus = 'Yes'  # Pir activated, face detected, no smile
                print("face detected but no smile")
                print("Mask Status: " + str(maskStatus))
            else:
                for (sx, sy, sw, sh) in smile:
                    cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh),
                                  (255, 0, 0), 1)
                    print("face and smile detected")
                    maskStatus = 'No'  # Pir activated, face & smile detected
                    print("Mask Status: " + str(maskStatus))

    cv2.imshow('Image with faces', image)
    face_detect = len(faces)
    print("frame number is: " + str(frame))
    fileLoc = "/home/pi/Images/image{}.jpg".format(frame)
    #print(fileLoc)
    img_flip = cv2.flip(image,
                        1)  # flip image horizontally on y axis before saving
    cv2.imwrite(fileLoc, img_flip)
    storeFileFB.store_file(fileLoc)
    storeFileFB.push_db(fileLoc, currentTime, face_detect, maskStatus)
    cv2.destroyAllWindows()
    print("Number of faces detected: " + str(face_detect))
    print("Mask Status being returned: " + str(maskStatus))
    # return face_detect
    return str(maskStatus)
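This variant additionally needs a smile cascade, and the comments suggest the capture is PIR-triggered. A hedged wiring sketch, assuming the bundled haarcascade_smile.xml and a gpiozero MotionSensor on GPIO 4 (both assumptions):

from gpiozero import MotionSensor
import cv2

smile_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_smile.xml')

pir = MotionSensor(4)            # assumed wiring: PIR output pin on GPIO 4
pir.when_motion = capture_image  # run the detection pipeline whenever motion is seen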
Example no. 3
def SNDEventHandler(sensor):  # callback fired by the sound sensor; gpiozero passes the device in as `sensor`
    print("Sound Detected!")  # print message to the console
    SNDdetected = 1  # flag that a sound was detected
    buzzer.play(Tone(note='A5'))  # tell the passive buzzer to play note A5
    if SNDdetected == 1:  # if a sound was detected
        camera.capture(fileLoc)  # take a photo and save it to the fileLoc path defined above
        storeFileFB.store_file(fileLoc)  # store the captured image in Firebase storage
        storeFileFB.push_db(fileLoc, currentTime)  # push the file name and time to the Firebase realtime database
    time.sleep(5)  # sleep for 5 seconds while the buzzer rings
    buzzer.stop()  # stop the buzzer
    conn = urlopen(baseURL + '&field1=%s' % (SNDdetected))  # hit the ThingSpeak URL with the new SNDdetected value, triggering the ThingTweet
    print(conn.read())  # print ThingSpeak's response to confirm the update
    conn.close()  # close the temporary connection
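The handler expects a buzzer, sound sensor, camera, save path and ThingSpeak URL at module level. A minimal sketch of that setup; the GPIO pins, save path and API key are placeholders, and the gpiozero classes are assumptions about the original wiring:

import time
import datetime
from urllib.request import urlopen

from gpiozero import TonalBuzzer, DigitalInputDevice
from gpiozero.tones import Tone
from picamera import PiCamera

import storeFileFB

buzzer = TonalBuzzer(17)        # assumed GPIO pin for the passive buzzer
sound = DigitalInputDevice(27)  # assumed GPIO pin for the sound sensor's digital output
camera = PiCamera()
fileLoc = '/home/pi/img/sound_event.jpg'  # hypothetical save location
currentTime = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")  # set once at startup in this sketch
baseURL = 'https://api.thingspeak.com/update?api_key=YOUR_WRITE_KEY'  # placeholder write key

sound.when_activated = SNDEventHandler  # gpiozero passes the device in as the `sensor` argument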
Example no. 4
def write_gp_pin_handler(pin, value):
    global frame  # persist the frame counter across calls (frame is assumed to start at 1 at module level)
    if value[0] == "1":
        print("Camera on")
        print(value)
        currentTime = datetime.datetime.now().strftime("%H:%M:%S")
        camera.rotation = 90
        camera.start_preview()
        sleep(3)
        fileLoc = f'/home/pi/labs/week9/blynk/img/frame{frame}.jpg'
        camera.capture(fileLoc)
        camera.stop_preview()
        print(f'frame taken at {currentTime}')  # print frame time to console
        storeFileFB.store_file(fileLoc)
        storeFileFB.push_db(fileLoc, currentTime)
        frame += 1
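The (pin, value) signature matches the virtual-pin write callbacks used by the blynklib package, which the /blynk/ folder in the save path suggests. A hedged registration sketch; the auth token and virtual pin V1 are placeholders:

import blynklib
import datetime
from time import sleep
from picamera import PiCamera

import storeFileFB

BLYNK_AUTH = 'YourAuthToken'        # placeholder token from the Blynk app
blynk = blynklib.Blynk(BLYNK_AUTH)
camera = PiCamera()
frame = 1                           # module-level frame counter used by the handler

# equivalent to placing @blynk.handle_event('write V1') above the function definition
blynk.handle_event('write V1')(write_gp_pin_handler)  # assumed virtual pin V1

while True:
    blynk.run()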
Example no. 5
def collision(stdDevX, stdDevY, stdDevZ, maxX, maxY, maxZ, x, y, z):
    # flag a collision when any axis exceeds its calibrated maximum by more than two standard deviations
    if (x > (maxX + (stdDevX * 2))) or (y > (maxY + (stdDevY * 2))) or (z > (maxZ + (stdDevZ * 2))):
        sense.clear(255, 0, 0)  # turn the Sense HAT LED matrix red
        time.sleep(2)
        print("Collision Detected")
        camera.stop_recording()
        convertFiles.updateVideos()
        botbox2.postTelegram()
        for file in glob.glob('*.mp4'):
            timeStamp = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            print("copying: ", file)
            shutil.copy(file, './backup')
            storeFileFB.push_db('./backup', timeStamp)
            storeFileFB.store_file(file)
        sense.clear()  # clear the LED matrix once every video has been copied and uploaded
        sys.exit("Closing blackbox...")
    else:
        print("No accident detected")
        return True
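collision() needs calibrated baseline statistics plus a live reading; the camera, Telegram and Firebase globals it uses are assumed to be set up elsewhere. A minimal driver sketch, assuming a Sense HAT and an arbitrary 50-sample calibration pass taken while the vehicle is at rest:

import statistics
import time

from sense_hat import SenseHat

sense = SenseHat()

# calibration: sample the accelerometer while the vehicle is at rest
samples = [sense.get_accelerometer_raw() for _ in range(50)]
xs = [s['x'] for s in samples]
ys = [s['y'] for s in samples]
zs = [s['z'] for s in samples]
stdDevX, stdDevY, stdDevZ = statistics.stdev(xs), statistics.stdev(ys), statistics.stdev(zs)
maxX, maxY, maxZ = max(xs), max(ys), max(zs)

# monitoring: keep feeding fresh readings to collision() until it exits the script
while True:
    reading = sense.get_accelerometer_raw()
    collision(stdDevX, stdDevY, stdDevZ, maxX, maxY, maxZ,
              reading['x'], reading['y'], reading['z'])
    time.sleep(0.1)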
Example no. 6
from time import sleep

from picamera import PiCamera
import datetime
import storeFileFB

camera = PiCamera()
camera.start_preview()
sleep(2)  # give the camera sensor a moment to adjust before capturing
frame = 1

fileLoc = f'/home/pi/week10/img/frame{frame}.jpg'  # set location of image file and current time
currentTime = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")

camera.capture(fileLoc)  # capture image and store in fileLoc
camera.stop_preview()  # close the preview window once the image has been taken
print(f'frame {frame} taken at {currentTime}')  # print frame number to console
storeFileFB.store_file(fileLoc)
storeFileFB.push_db(fileLoc, currentTime)
frame += 1
Example no. 7
def pet_detector(frame):

    # Use globals for the control variables so they retain their value after function exits
    global detected_inside, detected_outside
    global inside_counter, outside_counter
    global pause, pause_counter

    frame_expanded = np.expand_dims(frame, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})

    # Draw the results of the detection (aka 'visualize the results')
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.40)

    # Draw couch box
    cv2.rectangle(frame,TL_inside,BR_inside,(20,20,255),3)
    cv2.putText(frame,"Couch box",(TL_inside[0]+10,TL_inside[1]-10),font,1,(20,255,255),3,cv2.LINE_AA)
    
    # Check the class of the top detected object by looking at classes[0][0].
    # If the top detected object is a cat (17), dog (18) or teddy bear (88),
    # find its center coordinates by looking at the boxes[0][0] variable.
    # boxes[0][0] holds coordinates of detected objects as (ymin, xmin, ymax, xmax)
    if ((int(classes[0][0]) == 17) or (int(classes[0][0]) == 18) or (int(classes[0][0]) == 88)) and (pause == 0):
        x = int(((boxes[0][0][1]+boxes[0][0][3])/2)*IM_WIDTH)
        y = int(((boxes[0][0][0]+boxes[0][0][2])/2)*IM_HEIGHT)

        # Draw a circle at center of object
        cv2.circle(frame,(x,y), 5, (75,13,180), -1)

        # If object is in inside box, increment inside counter variable
        if ((x > TL_inside[0]) and (x < BR_inside[0]) and (y > TL_inside[1]) and (y < BR_inside[1])):
            inside_counter = inside_counter + 1


    # If the pet has been detected inside for at least one frame, set the detected_inside flag
    # and send a text to the phone.
    if inside_counter >= 1:
        detected_inside = True
        #camera.start_preview()
        pic = 1
        fileLoc = f'/home/pi/frame{pic}.jpg' # set location of image file and current time
        currentTime = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        camera.capture(fileLoc) # capture image and store in fileLoc
        print(f'frame {pic} taken at {currentTime}') # print frame number to console
        storeFileFB.store_file(fileLoc)
        storeFileFB.push_db(fileLoc, currentTime)
        pic += 1
        message = client.messages.create(
            body = 'Fugitive sighted... http://www.down-boy.glitch.me/',
            from_=twilio_number,
            to=my_number
            )
        inside_counter = 0
        outside_counter = 0
        # Pause pet detection by setting "pause" flag
        pause = 1


    # If pause flag is set, draw message on screen.
    if pause == 1:
        if detected_inside == True:
            cv2.putText(frame,'He is on the couch again!',(int(IM_WIDTH*.1),int(IM_HEIGHT*.5)),font,3,(0,0,0),7,cv2.LINE_AA)
            cv2.putText(frame,'He is on the couch again!',(int(IM_WIDTH*.1),int(IM_HEIGHT*.5)),font,3,(95,176,23),5,cv2.LINE_AA)


        # Increment pause counter until it reaches 30 (for a framerate of 1.5 FPS, this is about 20 seconds),
        # then unpause the application (set pause flag to 0).
        pause_counter = pause_counter + 1
        if pause_counter > 30:
            pause = 0
            pause_counter = 0
            detected_inside = False
            detected_outside = False

    # Draw counter info
    cv2.putText(frame,'Detection counter: ' + str(max(inside_counter,outside_counter)),(10,100),font,0.5,(255,255,0),1,cv2.LINE_AA)
    cv2.putText(frame,'Pause counter: ' + str(pause_counter),(10,150),font,0.5,(255,255,0),1,cv2.LINE_AA)

    return frame
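pet_detector() depends on the TensorFlow session, detection tensors, camera, Twilio client and Firebase helpers being initialised earlier in the script. A minimal driver-loop sketch, assuming frames come from an OpenCV VideoCapture source:

cap = cv2.VideoCapture(0)  # assumed camera source; the original also uses a PiCamera object for stills
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = pet_detector(frame)            # run detection and draw the overlays
    cv2.imshow('Pet detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()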
Example no. 8
q = 0

def writeData(dB):
    # Send the reading to ThingSpeak in the query string
    conn = urlopen(baseURL + '&field1=%s' % (dB))
    conn.close()

while True:
    # Read the sound level from the USB meter and convert the raw bytes to decibels
    ret = dev.ctrl_transfer(0xC0, 4, 0, 0, 200)
    dB = round((ret[0] + ((ret[1] & 3) * 256)) * 0.1 + 30, 2)
    writeData(dB)
    currentTime = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    time.sleep(1)
    msg = "{'dB':'" + str(dB) + "'}"
    print(msg)
    storeFileFB.push_db(dB, currentTime)

    # Keep a rolling window of the most recent readings and compute their average
    myArray.pop(0)
    myArray.append(dB)
    print(myArray)
    avg = sum(myArray) / len(myArray)

    # Every fifth pass through the loop, hand the average off to pythonArray.avgVol()
    if q > 4:
        pythonArray.avgVol(avg)
        q = 0
    else:
        q += 1

    print(q)
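The loop relies on a pyusb device handle, a ThingSpeak write URL and a rolling sample list that are created elsewhere. A minimal setup sketch; the USB vendor/product IDs and the API key are assumptions:

import datetime
import time
from urllib.request import urlopen

import usb.core

import storeFileFB
import pythonArray  # project-specific helper module

dev = usb.core.find(idVendor=0x16c0, idProduct=0x05dc)  # hypothetical IDs for the USB sound-level meter
baseURL = 'https://api.thingspeak.com/update?api_key=YOUR_WRITE_KEY'  # placeholder write key
myArray = [0] * 5  # rolling window of the five most recent dB readings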
Example no. 9
                                             COLORDEPTH - 1)])

    # scale and save
    image.resize(
        (NX * args.scale, NY * args.scale),
        Image.BICUBIC,
    ).save(args.output)
    # Reopen the image, add the highest-temperature text and save the new file
    img = Image.open('/home/pi/Assignment2/amg88xx_still.jpg')
    draw = ImageDraw.Draw(img)
    draw.text((10, 10),
              'Highest Temp = {0:0.2f} *C'.format(hot),
              font=fnt,
              fill=(0, 0, 0))
    currentTime = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    # after the text is added, pick the next unused frame number so earlier captures are not overwritten
    filename = "images/frame{}.jpg"
    counter = 0
    while os.path.isfile(filename.format(counter)):
        counter += 1
    filename = filename.format(counter)
    img.save(filename, quality=100, subsampling=0)
    # Creates and sends the email
    emailServer.text = f'Hi,\nThe attached image was taken today at {currentTime}.\nRemember, in these uncertain times please proceed with caution.\nYour visitor\'s temperature is {hot} *C.\nTo view the image click https://near-candied-munchkin.glitch.me'
    emailServer.send_mail('*****@*****.**', '*****@*****.**',
                          'Door Event', emailServer.text, filename)
    print(f'frame {counter} taken at {currentTime}')
    # Pushes file to firebase
    storeFileFB.store_file(filename)
    storeFileFB.push_db(filename, currentTime)
Example no. 10
            camera.stop_recording()
            print(f'latestToday taken at {currentTime}')  # print capture time to console

            #----------------------------------------------------------------------------
            # Convert the video from .h264 to MP4 format with a shell command
            #----------------------------------------------------------------------------
            command = "MP4Box -add " + videoLoc + " " + videoLocMp4
            call([command], shell=True)
            print("Video converted")

            #----------------------------------------------------------------------------
            # Store the MP4 video on Firebase storage, and push the name and time
            # of the file to the Firebase realtime DB
            #----------------------------------------------------------------------------
            videoURL = storeFileFB.store_file(videoLocMp4) + URL_token
            storeFileFB.push_db(videoLocMp4, currentTime, child.childInRoom,
                                adult.adultInRoom)
            print("files saved on firebase")

            #----------------------------------------------------------------------------
            # Remove both video formats from the local Pi storage
            #----------------------------------------------------------------------------
            commandDel = "rm" + " " + videoLocMp4 + " " + videoLoc
            call([commandDel], shell=True)
            print("Files deleted from local img folder")

            time.sleep(30)

        time.sleep(10)
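As a design note, the same conversion and clean-up can be done without building shell strings. A small alternative sketch using argument lists and os.remove, reusing the videoLoc and videoLocMp4 paths assumed above:

import os
import subprocess

subprocess.run(["MP4Box", "-add", videoLoc, videoLocMp4], check=True)  # convert the .h264 recording to .mp4
os.remove(videoLoc)     # delete the raw .h264 file
os.remove(videoLocMp4)  # delete the local .mp4 once it has been uploaded to Firebase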