Example #1
	def _process(self, output):
		#response to red cross output
		if output == "cross":
			DRAW._drawCross()
			self.state_label.configure(text="DEACTIVATED", fg="red")
			self.message_label.configure(text="", fg="red")
		#response to red right arrow output
		elif output == "right_arrow":
			DRAW._draw_rigth_arrow()
			self.state_label.configure(text="DEACTIVATED:")
			self.message_label.configure(text="> waiting for user input...")
		#response to left green arrow output
		elif output == "left_arrow":
			DRAW._draw_left_arrow()
			self.state_label.configure(text="ACTIVATED:")
			self.message_label.configure(text="> Waiting for user input...")
		#response to empty red circle output
		elif output == "empty_circle_red":
			self.state_label.configure(text="DEACTIVATED:")
			self.message_label.configure(text="Monitor will become active\n in {} seconds.".format(GUI.SLEEPTIME/1000))
			DRAW._draw_empty_circle()
			#use .after so the GUI does not freeze while waiting
			self.after(GUI.SLEEPTIME, DRAW._draw_full_circle)
			self.after(GUI.SLEEPTIME, self.change_to_activated)
		elif output == "full_circle_green":
			DRAW._draw_full_circle()
			self.state_label.configure(text="ACTIVATED")
			self.message_label.configure(text="")
			# print(self.fsm.state)
		elif output == "alarmed":
			SEND.sendEmail()
			SEND.sendTweet()
			CAM.take_pic()
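In the empty_circle_red branch the follow-up drawing is scheduled with Tkinter's after() instead of sleeping, so the event loop keeps running. A minimal standalone sketch of that pattern, with names that are illustrative rather than taken from the project:

import tkinter as tk

class Demo(tk.Tk):
    SLEEPTIME = 3000  # milliseconds, stand-in for GUI.SLEEPTIME

    def __init__(self):
        super().__init__()
        self.state_label = tk.Label(self, text="DEACTIVATED")
        self.state_label.pack()
        # schedule the state change without blocking the mainloop
        self.after(self.SLEEPTIME, self.change_to_activated)

    def change_to_activated(self):
        self.state_label.configure(text="ACTIVATED")

if __name__ == "__main__":
    Demo().mainloop()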
Example #2
def main_pipeline(port):
    """
    Runs main pipeline: motion -> take picture -> classify -> action if the input on pin_motion is raising high.
    :param port: pin of the motion sensor. In this case pin_motion. format: int
    :return:
    """
    global pap_flag

    logger("INFO: motion detected")

    if pap_flag:
        logger("WARNING: main_pipeline is already running")
        return

    pap_flag = 1
    img = take_pic()
    predicted_class = inference.predict(img, model)

    if predicted_class == 2:
        logger("INFO: pigeon detected")
        rotate_motor(rotation_time)
    elif predicted_class == 0:
        logger("INFO: human detected")
        time.sleep(180)
    elif predicted_class == 1:
        logger("INFO: no one detected")

    inference.save_image(img, predicted_class)
    pap_flag = 0

    return
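The docstring says the pipeline runs when pin_motion rises high. A minimal sketch of how main_pipeline could be registered as a rising-edge callback with RPi.GPIO, assuming the function above is in the same module; the pin number and bounce time are illustrative assumptions:

import time
import RPi.GPIO as GPIO

PIN_MOTION = 17  # hypothetical BCM pin for the PIR sensor

GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_MOTION, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

# RPi.GPIO passes the channel number to the callback, which matches main_pipeline(port)
GPIO.add_event_detect(PIN_MOTION, GPIO.RISING, callback=main_pipeline, bouncetime=300)

try:
    while True:
        time.sleep(1)  # idle; the callback runs on a background thread
finally:
    GPIO.cleanup()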
Example #3
File: main.py Project: mexidron/api-client
    def make_job(self,path):

        job_total_duration = 10 #math.ceil(float(self.cord_lenght)/config.DRONE_SPEED)
        job_current_duration = 0

        while (job_current_duration <= job_total_duration ):
            base_path = path + "/" + str(self.batea_id) + "_" + str(self.cord_id) + "_" + str(job_current_duration)
            camera.take_pic( base_path + ".jpeg")
            job_current_duration += self.period

            #save the photo's metadata as JSON
            current_altitude = job_current_duration*config.DRONE_SPEED
            json_response = { 'id_batea' : self.batea_id,
                          'id_cuerda' : 2,
                          'destino' : self.cord_destination,
                          'z' : current_altitude,
                          'image' :  base_path.split("/")[-1] + ".jpeg"  # match the filename actually written above
                          }
            with open(base_path + '.json', 'w') as outfile:
                # dump the dict directly; json.dumps() followed by json.dump() would double-encode it as a string
                json.dump(json_response, outfile)

            import time
            time.sleep(self.period)
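Example #3, like several of the snippets below, calls a local camera.take_pic(path) helper. A minimal sketch of what such a module might look like on a Raspberry Pi, assuming the picamera library; the real projects' implementations may differ:

# camera.py (hypothetical implementation)
import time
from picamera import PiCamera

_camera = PiCamera()

def take_pic(path="capture.jpg"):
    """Capture a single still image to the given path."""
    _camera.start_preview()
    time.sleep(2)  # give the sensor a moment to adjust exposure
    _camera.capture(path)
    _camera.stop_preview()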
Example #4
import cv2
import time

import config
import face
import camera

model = cv2.createEigenFaceRecognizer()
model.load(config.TRAINING_FILE)

print "Model loaded ..."

while True:
    filename = config.TEST_FILE

    camera.take_pic(filename)

    img = cv2.imread(filename)
    bw_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(bw_img)

    if result is None:
        print "No face detected ... :("
    else:
        x, y, w, h = result
        crop = face.resize(face.crop(bw_img, x, y, w, h))
        label, confidence = model.predict(crop)
        print "label ... '%s'" % label, "confidence ... '%s'" % confidence

    time.sleep(5)
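The model.load(config.TRAINING_FILE) call above assumes an Eigenface model was trained and saved beforehand. A minimal sketch of that training step with the same OpenCV 2.4 API; the face-loading helper and config.TRAINING_DIR are assumptions, not part of the project:

import cv2
import numpy as np

import config

# images: list of equally sized grayscale face crops, labels: matching integer IDs
images, labels = load_training_faces(config.TRAINING_DIR)  # hypothetical helper

model = cv2.createEigenFaceRecognizer()
model.train(images, np.array(labels))
model.save(config.TRAINING_FILE)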
Example #5
# imports needed by this excerpt
import argparse
import pickle
import time

import cv2
from imutils.video import VideoStream, FPS

import camera

z = 0
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--cascade",
                required=True,
                help="path to where the face cascade resides")
ap.add_argument("-e",
                "--encodings",
                required=True,
                help="path to serialized db of facial encodings")
args = vars(ap.parse_args())

# Taking the picture of the one in front of the door
print("[INFO] Take a picture...")
camera.take_pic()

# load the known faces and embeddings along with OpenCV's Haar
# cascade for face detection
print("[INFO] loading encodings + face detector...")
data = pickle.loads(open(args["encodings"], "rb").read())
detector = cv2.CascadeClassifier(args["cascade"])

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# start the FPS counter
fps = FPS().start()
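The excerpt stops after starting the FPS counter. A typical continuation would read frames from the stream and run the Haar cascade loaded above; the loop below is a hedged sketch of that step, not the project's actual code, and the detectMultiScale parameters are illustrative:

while True:
    # grab a frame from the Pi camera stream and convert it to grayscale
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect face bounding boxes with the Haar cascade
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
                                      minNeighbors=5, minSize=(30, 30))

    # ... match detected faces against the encodings loaded into `data` ...

    fps.update()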
Example #6
find_face(cascadePath, image_to_detect)
images,labels=get_images_and_labels(path,faceCascade)
lcd.clear()
lcd.message("Loading.....")
cv2.destroyAllWindows()
recognizer.train(images,np.array(labels))


while True:
    i = GPIO.input(23)
    if i == 1:
        lcd.clear()
        lcd.message("Stop,let me know you")
        time.sleep(2)
        take_pic(camera, image_to_detect)
        lcd.clear()
        lcd.message("finding....you")
        nbr_predicted, conf = recog(path1, faceCascade, recognizer)
        try:
            nbr_predicted = int(nbr_predicted)
            yes = names_to_num(nbr_predicted, conf)
            cv2.imshow("you are", facefind)
            cv2.waitKey(1000)
        except:
            yes = "face not found"
        print yes
        lcd.clear()
        lcd.message(yes)
        time.sleep(5)
    else:
Example #7
import cv2
import time

import config
import camera

pic_nr = 0

while True:
    img_name = config.CAPTURE_DIR + str(pic_nr) + ".jpg"
    camera.take_pic(img_name)
    pic_nr = pic_nr + 1
    time.sleep(60)