def action(self):
    """Scan a remote video frame for faces and choose the next state.

    Fetches one frame from the Pyro-published video feed, runs the Haar
    cascade over it, and transitions to RecognizeState when at least one
    face is found, otherwise back to WanderState.
    """
    tts.say("Scanning for humans")
    faceCascade = cv2.CascadeClassifier(constants.CLASSIFIER_FILE)

    # Configure serializers BEFORE the proxy makes its first remote call.
    # NOTE(review): pickle over Pyro is unsafe against untrusted peers —
    # acceptable only on a trusted LAN; confirm deployment assumptions.
    Pyro4.config.SERIALIZERS_ACCEPTED = set(
        ['pickle', 'json', 'marshal', 'serpent'])
    Pyro4.config.SERIALIZER = 'pickle'

    video = Pyro4.Proxy("PYRONAME:video.frame")
    try:
        frame = video.get_frame()
    finally:
        # FIX: always release the proxy connection, even when the remote
        # call raises (the original leaked it on any exception).
        video._pyroRelease()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=constants.SCALE_FACTOR,
        minNeighbors=constants.MIN_NEIGHBORS,
        minSize=(constants.MIN_WIDTH, constants.MIN_HEIGHT),
        flags=cv2.CASCADE_SCALE_IMAGE)

    # detectMultiScale returns an empty tuple when nothing is found and a
    # numpy array otherwise, so a non-tuple result means faces were seen.
    if not isinstance(faces, tuple) and faces.size > 0:
        tts.say("Face detected")
        self.next = recognizeState.RecognizeState(
            data=[frame, faces, self.data])
    else:
        self.next = wanderState.WanderState()
def action(self):
    """Ask the remote audio service whether the culprit acknowledges.

    Reads a transcript from the Pyro-published audio feed. "yes" advances
    to WaitState; anything else (including no transcript) re-enters
    RequestAckState after a short pause.
    """
    # Configure serializers before the proxy's first remote call.
    # NOTE(review): pickle over Pyro is unsafe against untrusted peers.
    Pyro4.config.SERIALIZERS_ACCEPTED = set(
        ['pickle', 'json', 'marshal', 'serpent'])
    Pyro4.config.SERIALIZER = 'pickle'
    audio = Pyro4.Proxy("PYRONAME:audio.text")

    # BUG FIX: the original initialized `test` (a typo) but later read
    # `text`, so a failed get_text() left `text` undefined and the final
    # comparison raised NameError. (The always-true `audio != None` guard
    # was also removed: Pyro4.Proxy never returns None.)
    text = ""
    try:
        text = audio.get_text()
        print(text)
    except (UnboundLocalError, NameError):
        print("No text received")

    if text == "yes":
        tts.say("Good")
        time.sleep(1)
        tts.say("Fix it or be destroyed")
        time.sleep(1)
        self.next = waitState.WaitState()
    else:
        time.sleep(1)
        self.next = requestAckState.RequestAckState()
def action(self):
    """Listen (up to three attempts) for a spoken "yes" acknowledgement.

    Each attempt records from the microphone and sends the audio to
    Google Speech Recognition. A recognized "yes" moves to WaitState;
    otherwise the state machine re-requests acknowledgement.
    """
    # BUG FIX: `text` must exist before the loop — if the very first
    # recognition attempt raised (both excepts only print), the
    # `if text == "yes"` checks crashed with NameError.
    text = ""
    for _attempt in range(3):
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            audio = recognizer.listen(source)
            try:
                text = recognizer.recognize_google(audio)
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print(
                    "Could not request results from Google Speech Recognition service; {0}"
                    .format(e))
        if text == "yes":
            break

    if text == "yes":
        tts.say("Good")
        time.sleep(1)
        tts.say("Fix it or be destroyed")
        time.sleep(1)
        self.next = waitState.WaitState()
    else:
        time.sleep(1)
        self.next = requestAckState.RequestAckState()
def action(self):
    """Scan the local camera for faces and choose the next state.

    Grabs a frame from camera 0, runs the Haar cascade over it, and
    transitions to RecognizeState when at least one face is found,
    otherwise back to WanderState.
    """
    tts.say("Scanning for humans")
    faceCascade = cv2.CascadeClassifier(constants.CLASSIFIER_FILE)

    video_capture = cv2.VideoCapture(0)
    try:
        # Throw away the first few frames, they can be blank
        for _ in range(5):
            video_capture.read()
        ret, frame = video_capture.read()
    finally:
        # FIX: release the camera even if a read raises (the original
        # only released on the success path).
        video_capture.release()

    # FIX: the original ignored `ret`; a failed grab left `frame` as None
    # and crashed cvtColor. Treat it as "no face seen".
    if not ret:
        self.next = wanderState.WanderState()
        return

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=constants.SCALE_FACTOR,
        minNeighbors=constants.MIN_NEIGHBORS,
        minSize=(constants.MIN_WIDTH, constants.MIN_HEIGHT),
        flags=cv2.CASCADE_SCALE_IMAGE)

    # Empty tuple means no detections; a numpy array means faces found.
    if not isinstance(faces, tuple) and faces.size > 0:
        tts.say("Face detected")
        self.next = recognizeState.RecognizeState(
            data=[frame, faces, self.data])
    else:
        self.next = wanderState.WanderState()
def action(self):
    """Block on a one-shot Flask server until a build-failure POST lands.

    `self.wait` is installed as the only route; it is expected to shut the
    server down and record the culprit, after which control returns here
    and we transition to WanderState carrying that payload.
    """
    tts.say("Watching for build failures")

    # Silence werkzeug's per-request access logging.
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    server = Flask(__name__)
    server.add_url_rule("/", view_func=self.wait, methods=['POST'])
    server.run()  # returns once self.wait stops the server

    self.next = wanderState.WanderState(data=self.post_body)
def action(self):
    """Compare the recognized person against the reported build culprit.

    Runs recognition once per detected face and transitions to
    RequestIdentityState on a match, otherwise back to WanderState.
    """
    tts.say("Identifying human")
    recognized = False
    frame, faces, post_body = self.data

    for (_x, _y, _w, _h) in faces:
        # NOTE(review): the bounding box is unpacked but unused —
        # recognition runs on the precomputed PCA data held on self,
        # not on a crop of this frame. Confirm that is intentional.
        person = vision.find_person(self.Y, self.pca, self.X_pca)
        print("Recognized person as '", person, "'. Culprit was '",
              post_body, "'.")
        if person == post_body:
            recognized = True

    if recognized:
        self.next = requestIdentityState.RequestIdentityState(
            data=[person, self.data])
    else:
        self.next = wanderState.WanderState()
def action(self):
    """Deliver the scripted accusation, then request acknowledgement."""
    for phrase in ("I know, I was just being polite",
                   "You have broken the build"):
        tts.say(phrase)
        time.sleep(1)
    self.next = requestAckState.RequestAckState()
print("Please enter a name") exit() NAME = sys.argv[1] faceCascade = cv2.CascadeClassifier(constants.CASC_PATH) video_capture = cv2.VideoCapture(0) if not os.path.exists(constants.TRAIN_DIR): os.makedirs(constants.TRAIN_DIR) if not os.path.exists(constants.TRAIN_DIR + '/' + NAME): os.makedirs(constants.TRAIN_DIR + '/' + NAME) tts.say("Hello, I am the build butler") tts.say("Welcome to the team") tts.say("To begin, please look at the camera with a neutral expression") tts.say( "Once your face is selected, slowly change your expression, lighting and tilt" ) i = 0 while True: # Capture frame-by-frame ret, frame = video_capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, scaleFactor=constants.SCALE_FACTOR,
def action(self):
    """Ask the identified person to confirm who they are."""
    person, _extra = self.data
    tts.say(f"Are you {person}")
    self.next = pyroConfirmIdentityState.PyroConfirmIdentityState(
        data=self.data)
def action(self):
    """Announce pathfinding, pause briefly, then hand off to detection.

    Transitions unconditionally to PyroDetectState, forwarding this
    state's payload unchanged.
    """
    tts.say("Initiated pathfinding")
    time.sleep(1)  # give the announcement time to finish before moving on
    self.next = pyroDetectState.PyroDetectState(data=self.data)
def wait(self):
    """Flask view for the build-failure webhook.

    Stops the embedded server (so the blocking `app.run()` in the wait
    state returns), announces the alert, records the culprit on
    `self.post_body`, and returns the HTTP response body.

    NOTE(review): the culprit is hard-coded to "chris" rather than parsed
    from the POST body — confirm this placeholder is intentional.
    """
    self.shutdown_server()
    tts.say("Received build failure alert")
    self.post_body = "chris"
    return "Alert received"