Code example #1
File: app.py Project: mxj5897/alfrd-msd
    def __init__(self, **kwargs):
        super(gestureWidget, self).__init__(**kwargs)

        self.play = False
        self.sensor = Sensors()
        self.pose = Poses()
        self.robot = Robot()
        self.faces = faceRecognition()
        self.classify = Classify()
        self.pose_model = self.pose.get_model()
        self.humans_in_environment = 0
        self.sensor_method = None
        self.aux_info = False
        self.skip = True
        self.face_locations = None
        self.face_names = None
        self.humans = []
        self.settings = SettingsPopUp()
        self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.VideoWriter = None

        # resets image
        if os.path.isfile(constants.IMAGE_PATH +
                          'foo.png') and os.path.isfile(constants.IMAGE_PATH +
                                                        'foo1.png'):
            os.remove(constants.IMAGE_PATH + "foo.png")
            shutil.copy(constants.IMAGE_PATH + 'foo1.png',
                        constants.IMAGE_PATH + 'foo.png')
Code example #2
File: app.py Project: mxj5897/alfrd-msd
    def __init__(self, **kwargs):
        super(AddGesturePopUp, self).__init__(**kwargs)
        self.pose = Poses()
        self.classify = Classify()
        self.sensor = Sensors()
        self.temp_queue = []
        self.Count = constants.COUNTDOWN
        self.sensor_method = self.sensor.get_method()
        self.pose_model = None

        if self.sensor_method is not None:
            Clock.schedule_interval(self.liveFeed, 0.1)
        else:
            message = MessagePopup(str("Not connected to sensor"))
            message.open()
Code example #3
File: app.py Project: mxj5897/alfrd-msd
    def __init__(self, **kwargs):
        super(SettingsPopUp, self).__init__(**kwargs)
        self.faces = faceRecognition()
        self.sensor = Sensors()
        self.robot = Robot()
        self.classify = Classify()
        self.gesture = None
        self.index = 0
        self.skip = True
        self.sensor_method = None
        self.faceCount = 0
Code example #4
File: app.py Project: mxj5897/alfrd-msd
    def __init__(self, **kwargs):
        super(AutoCaptureFacesPopup, self).__init__(**kwargs)
        self.faces = faceRecognition()
        self.sensor = Sensors()
        self.sensor_method = self.sensor.get_method()
        self.img_count = 0
        self.counter = 0
        self.interval = 8
        self.image = None
        self.instrDict = {
            0: "up",
            2: "up and to the right",
            4: "right",
            6: "down and to the right",
            8: "down",
            10: "down and to the left",
            12: "left",
            14: "up and to the left",
        }
        if self.sensor_method is not None:
            Clock.schedule_interval(self.liveFeed, 0.1)
        else:
            message = MessagePopup(str("Not connected to sensor"))
            message.open()
Code example #5
File: app.py Project: mxj5897/alfrd-msd
class gestureWidget(Widget):
    # Main App Widget
    def __init__(self, **kwargs):
        super(gestureWidget, self).__init__(**kwargs)

        self.play = False
        self.sensor = Sensors()
        self.pose = Poses()
        self.robot = Robot()
        self.faces = faceRecognition()
        self.classify = Classify()
        self.pose_model = self.pose.get_model()
        self.humans_in_environment = 0
        self.sensor_method = None
        self.aux_info = False
        self.skip = True
        self.face_locations = None
        self.face_names = None
        self.humans = []
        self.settings = SettingsPopUp()
        self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.VideoWriter = None

        # resets image
        if os.path.isfile(constants.IMAGE_PATH +
                          'foo.png') and os.path.isfile(constants.IMAGE_PATH +
                                                        'foo1.png'):
            os.remove(constants.IMAGE_PATH + "foo.png")
            shutil.copy(constants.IMAGE_PATH + 'foo1.png',
                        constants.IMAGE_PATH + 'foo.png')

    def update(self, sensor):
        # Main loop of the code
        image = self.sensor.get_sensor_information(self.sensor_method)
        # ret, image = self.cap.read()

        if image is not None:
            # Get pose / joint information
            points = self.pose.get_points(self.pose_model, image)

            # Get facial information
            if self.skip:
                self.face_locations, self.face_names = self.faces.identify_faces(
                    image)
            self.skip = not self.skip

            if points is not None and self.face_names is not None and self.face_locations is not None:
                im_height, im_width = image.shape[:2]

                # Assigns identities and skeletons to human object
                self.humans = self.pose.assign_face_to_pose(
                    points, self.humans, self.face_locations, self.face_names,
                    im_height, im_width)

                if self.humans is not None:

                    # Plot user identities and (optional) poses
                    image = self.pose.plot_faces(image, self.humans, im_height,
                                                 im_width)
                    if self.settings.ids.aux_info.text == 'Display Auxilary Info: True':
                        image = self.pose.plot_pose(image, self.humans,
                                                    im_height, im_width)

                    # Update each respective queue and classify gestures
                    image = cv2.putText(image, "The prediction is:",
                                        (im_width - 175, 30),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 0, 0), 1)
                    for i, human in enumerate(self.humans):
                        # if human.identity == "Unknown":
                        #      continue

                        human.classify.add_to_queue(human.current_pose)
                        human.prediction = human.classify.classify_gesture()
                        image = cv2.putText(image, human.prediction,
                                            (im_width - 175, 45 + i * 15),
                                            cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                            (0, 0, 0), 1)

                        # Make call to robot
                        self.robot.determine_robot_response(
                            human.identity, human.prediction)

            cv2.imwrite(constants.IMAGE_PATH + 'foo.png', image)
            self.ids.image_source.reload()

            # Write the frame when session recording is enabled; note that the
            # text compared here duplicates the aux-info toggle string used
            # above rather than a record-session label.
            if self.settings.ids.record_session.text == 'Display Auxilary Info: True':
                self.VideoWriter.write(image)

    def playPause(self):
        # Defines behavior for play / pause button

        if self.ids.status.text == "Stop":
            self.ids.status.text = "Play"
            self.ids.status.background_color = [1, 1, 1, 1]
            self.sensor.__del__()
            self.sensor_method = None
            self.VideoWriter.release()
            Clock.unschedule(self.update)
        else:
            if self.sensor_method is None:
                self.sensor_method = self.sensor.get_method()

            if self.sensor_method is not None:
                self.VideoWriter = cv2.VideoWriter('./tests/output.avi',
                                                   self.fourcc, 5.0,
                                                   (640, 480))
                self.cap = cv2.VideoCapture('test_vid.mp4')
                self.ids.status.text = "Stop"
                self.ids.status.background_color = [0, 1, 1, 1]
                self.skip = True
                Clock.schedule_interval(self.update, 0.1)
            else:
                # TODO: Write popup for error message
                errorBox = MessagePopup(str("Could not find available sensor"))
                errorBox.open()
                print("Could not find available sensor")

    def close(self):
        # close app
        App.get_running_app().stop()

    def setting(self):
        # Display settings popup window
        self.settings = SettingsPopUp()
        self.popup = Popup(title='Settings',
                           content=self.settings,
                           size_hint=(.6, .4))
        self.popup.open()

    def addGesturePopup(self):
        # display addGesture popup window
        addGesture = AddGesturePopUp()
        self.popup = Popup(title='Add Gesture',
                           content=addGesture,
                           size_hint=(.6, .6))
        self.popup.open()
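
A widget like gestureWidget above is normally mounted by a small Kivy App subclass. The following is a minimal, hypothetical launcher sketch: the GestureApp name, the import path, and the assumption that a matching .kv layout defines the ids the widget references (status, image_source) are not part of the original project.

# Minimal launcher sketch, assuming gestureWidget is importable from the
# project's app.py and that its .kv layout is on the Kivy search path.
from kivy.app import App

from app import gestureWidget  # hypothetical import path


class GestureApp(App):  # hypothetical name, not part of the original file
    def build(self):
        # The widget becomes the window root; its own playPause() takes care
        # of scheduling update() on the Kivy Clock.
        return gestureWidget()


if __name__ == '__main__':
    GestureApp().run()
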
Code example #6
File: app.py Project: mxj5897/alfrd-msd
class AutoCaptureFacesPopup(BoxLayout):

    # Automatically adds users to the set of recognized users
    def __init__(self, **kwargs):
        super(AutoCaptureFacesPopup, self).__init__(**kwargs)
        self.faces = faceRecognition()
        self.sensor = Sensors()
        self.sensor_method = self.sensor.get_method()
        self.img_count = 0
        self.counter = 0
        self.interval = 8
        self.image = None
        self.instrDict = {
            0: "up",
            2: "up and to the right",
            4: "right",
            6: "down and to the right",
            8: "down",
            10: "down and to the left",
            12: "left",
            14: "up and to the left",
        }
        if self.sensor_method is not None:
            Clock.schedule_interval(self.liveFeed, 0.1)
        else:
            message = MessagePopup(str("Not connected to sensor"))
            message.open()

    def liveFeed(self, btn):
        image = self.sensor.get_sensor_information(self.sensor_method)
        if image is not None:
            cv2.imwrite(constants.IMAGE_PATH + 'addUser.png', image)
            self.ids.addUser.reload()

    def start_recording(self):
        # Starts the recording process and adds a folder for the user
        if self.ids.start_recording.text == "Stop Recording":
            self.ids.start_recording.text = "Start Recording"
            self.ids.start_recording.background_color = [1, 1, 1, 1]
        else:
            # Set some button properties
            if self.ids.addUserLabel.text == "":  # requires user name
                message = MessagePopup(str("Please add user name"))
                message.open()
            else:
                # check if there is already a folder with user images
                if os.path.exists(constants.FACE_DATASET_PATH +
                                  self.ids.addUserLabel.text):
                    message = MessagePopup(
                        str("User with that name already exists. \n Please enter a different name"
                            ))
                    message.open()
                else:
                    self.ids.start_recording.text = "Stop Recording"
                    self.ids.start_recording.disabled = True
                    self.ids.start_recording.background_color = [0, 1, 1, 1]

                    # creates directory to store user photos
                    os.makedirs(constants.FACE_DATASET_PATH +
                                self.ids.addUserLabel.text)

                    if self.sensor_method is None:
                        self.sensor_method = self.sensor.get_method()

                    if self.sensor_method is not None:
                        Clock.unschedule(self.liveFeed)
                        Clock.schedule_interval(self.autoCaptureFace, 0.1)
                    else:
                        message = MessagePopup(str("Not connected to sensor"))
                        message.open()

    def autoCaptureFace(self, btn):
        # Captures and writes facial files to disk
        if self.img_count >= constants.AVG_IMG_NUM_PER_USER:
            self.img_count = 0
            self.ids.addUserIntr.text = "Done taking photos - creating user embeddings"
            self.ids.start_recording.background_color = [1, 1, 1, 1]
            self.faces.make_dataset_embeddings()
            message = MessagePopup(str("Done adding new users"))
            message.open()
            return False

        image = self.sensor.get_sensor_information(self.sensor_method)

        if image is not None:

            face_locations = self.faces.find_faces(image)
            face_names = ['Unknown'] * len(
                face_locations)  # required for draw_faces function
            disp = self.faces.draw_faces(image, face_locations, face_names)

            if self.counter == 0:
                self.ids.addUserIntr.text = "Tilt head " + self.instrDict[
                    self.img_count] + ": 3 "
            elif self.counter == self.interval:
                self.ids.addUserIntr.text = "Tilt head " + self.instrDict[
                    self.img_count] + ": 2 "
            elif self.counter == self.interval * 2:
                self.ids.addUserIntr.text = "Tilt head " + self.instrDict[
                    self.img_count] + ": 1 "
            elif self.counter == self.interval * 3:
                self.ids.addUserIntr.text = "Tilt head " + self.instrDict[
                    self.img_count]
            elif self.counter == self.interval * 3 + 3:
                cv2.imwrite(
                    constants.FACE_DATASET_PATH + self.ids.addUserLabel.text +
                    '/' + self.ids.addUserLabel.text + str(self.img_count) +
                    '.png', image)
                cv2.imwrite(
                    constants.FACE_DATASET_PATH + self.ids.addUserLabel.text +
                    '/' + self.ids.addUserLabel.text +
                    str(self.img_count + 1) + '.png', image)
                self.img_count += 2
                self.counter = -1

            cv2.imwrite(constants.IMAGE_PATH + 'addUser.png', disp)
            self.ids.addUser.reload()
            self.counter += 1
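
The prompt timing in autoCaptureFace is driven entirely by the counter/interval pair: the "3, 2, 1" countdown is shown at ticks 0, interval, and 2 * interval, the final instruction at 3 * interval, and two frames are written three ticks later before the counter resets. The standalone sketch below replays that schedule with plain prints so the cadence is easier to follow; the truncated instrDict and the print statements are illustrative stand-ins, not project code.

# Illustrative replay of the autoCaptureFace schedule (interval = 8, as above).
# Each loop iteration stands in for one 0.1 s Clock tick; no camera is used.
interval = 8
instrDict = {0: "up", 2: "up and to the right", 4: "right"}  # truncated for brevity

img_count = 0
counter = 0
while img_count < len(instrDict) * 2:
    if counter == 0:
        print("Tilt head %s: 3" % instrDict[img_count])
    elif counter == interval:
        print("Tilt head %s: 2" % instrDict[img_count])
    elif counter == interval * 2:
        print("Tilt head %s: 1" % instrDict[img_count])
    elif counter == interval * 3:
        print("Tilt head %s" % instrDict[img_count])
    elif counter == interval * 3 + 3:
        print("-> two frames saved as images %d and %d" % (img_count, img_count + 1))
        img_count += 2
        counter = -1  # restarts the cycle for the next head position
    counter += 1
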
Code example #7
File: app.py Project: mxj5897/alfrd-msd
class AddGesturePopUp(BoxLayout):
    # Adds gesture to gesture dictionary
    def __init__(self, **kwargs):
        super(AddGesturePopUp, self).__init__(**kwargs)
        self.pose = Poses()
        self.classify = Classify()
        self.sensor = Sensors()
        self.temp_queue = []
        self.Count = constants.COUNTDOWN
        self.sensor_method = self.sensor.get_method()
        self.pose_model = None

        if self.sensor_method is not None:
            Clock.schedule_interval(self.liveFeed, 0.1)
        else:
            message = MessagePopup(str("Not connected to sensor"))
            message.open()

    def liveFeed(self, btn):
        image = self.sensor.get_sensor_information(self.sensor_method)
        if image is not None:
            cv2.imwrite(constants.IMAGE_PATH + 'temp.png', image)
            self.ids.add_source.reload()

    def addGesture(self):
        # Determines save gesture button behavior
        # TODO: Sanitize inputs
        self.classify.add_to_dictionary([self.temp_queue],
                                        self.ids.gestureLabel.text)

    def set_count(self, btn):
        # Sets and displays the countdown
        font = cv2.FONT_HERSHEY_SIMPLEX
        if self.Count > 0:
            self.Count = self.Count - 1
            image = cv2.imread(constants.IMAGE_PATH + 'temp1.png')
            height, width = image.shape[:2]
            image = cv2.putText(image, str(self.Count),
                                (int(width / 2) - 30, int(height / 2)), font,
                                7, (22, 22, 205), 10, cv2.LINE_AA)
            cv2.imwrite(constants.IMAGE_PATH + 'temp.png', image)
            self.ids.add_source.reload()
        else:
            Clock.schedule_interval(self.update_recording, 0.1)
            self.Count = constants.COUNTDOWN
            return False

    def start_recording(self):
        # Determines start recording button behavior
        if self.ids.start_recording.text == "Stop Recording":
            self.ids.start_recording.text = "Start Recording"
            self.ids.start_recording.background_color = [1, 1, 1, 1]
            self.sensor.__del__()
            self.ids.add_source.reload()
            # Clock.unschedule(self.update_recording)
        else:
            self.temp_queue = []
            self.ids.start_recording.text = "Stop Recording"
            self.ids.start_recording.disabled = True
            self.ids.start_recording.background_color = [0, 1, 1, 1]
            self.pose_model = self.pose.get_model()

            if self.sensor_method is None:
                self.sensor_method = self.sensor.get_method()
            if self.sensor_method is not None:
                Clock.unschedule(self.liveFeed)
                Clock.schedule_interval(self.set_count, 1)
            else:
                message = MessagePopup(str("Not connected to the sensor"))
                message.open()
                self.ids.start_recording.disabled = False
                self.ids.start_recording.background_color = [1, 1, 1, 1]

    def update_recording(self, btn):
        # Main loop for the adding gesture
        # Determines key points and saves them to the temp queue
        if len(self.temp_queue) >= constants.QUEUE_MAX_SIZE:
            self.ids.start_recording.background_color = [1, 1, 1, 1]
            self.ids.start_recording.disabled = False
            Clock.schedule_interval(self.liveFeed, 0.1)
            return False

        image = self.sensor.get_sensor_information(self.sensor_method)

        if image is not None:
            im_height, im_width = image.shape[:2]
            points = self.pose.get_points(self.pose_model, image)

            # plot points for user feedback
            humans = self.pose.assign_face_to_pose(points, [], [], [],
                                                   im_height, im_width)
            image = self.pose.plot_pose(image, humans, im_height, im_width)

            if points is not None:
                self.temp_queue.append(points)

            cv2.imwrite(constants.IMAGE_PATH + 'temp.png', image)
            self.ids.add_source.reload()
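
set_count above stamps the remaining seconds onto the last captured frame before the recording loop starts. The snippet below reproduces just that overlay step on a synthetic frame so it can be run without the sensor or the saved temp1.png; the blank NumPy image and the output filenames are stand-ins, not part of the project.

# Standalone sketch of the countdown overlay drawn in set_count, assuming only
# OpenCV and NumPy; a white frame replaces the saved temp1.png snapshot.
import cv2
import numpy as np

frame = np.full((480, 640, 3), 255, dtype=np.uint8)  # stand-in for temp1.png
height, width = frame.shape[:2]

for remaining in (3, 2, 1):
    overlay = cv2.putText(frame.copy(), str(remaining),
                          (int(width / 2) - 30, int(height / 2)),
                          cv2.FONT_HERSHEY_SIMPLEX, 7, (22, 22, 205), 10,
                          cv2.LINE_AA)
    cv2.imwrite('countdown_%d.png' % remaining, overlay)  # illustrative filenames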