Code example #1
0
File: run.py — Project: jayhack/LeapReceiver
    def record_gesture (self):
        """Interactively record and save examples of a single, named gesture.

        Prompts on stdin for the gesture's name, then captures `max_examples`
        examples.  Each example (take) is started by the user pressing Enter
        and is saved via `self.gesture_recognizer.save_gesture` once
        `gesture_length` frames have been counted.  Blocks on user input.
        """

        num_examples_recorded = 0
        max_examples = 10  # total number of examples to capture

        ### Step 1: have them name the gesture ###
        print_message ("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message ("Now we will begin recording " + str(max_examples) + " examples of this gesture, " + str(gesture_name) + ". Press Enter when ready.")
        sys.stdin.readline ()

        # NOTE(review): this local shadows the method name `record_gesture`;
        # harmless here, but confusing to readers.
        record_gesture = Gesture (gesture_name)



        #--- initialize parameters ---
        # is_recording: True between the user's Enter press and the end of a take.
        # num_frames_recorded: frames counted so far in the current take.
        is_recording        = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            # Frames are fed into the gesture continuously; `is_recording`
            # only gates the frame counter below.
            frame = self.get_frame ()
            record_gesture.add_frame (frame)

            if record_gesture.is_full ():


                ### --- Notify of recording status --- ###
                # "." marks a counted (recording) frame, "x" an idle frame.
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                # A take is complete once gesture_length frames were counted;
                # save it and reset the per-take state.
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message ("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ###
                # Non-blocking poll of stdin (zero-timeout select): any
                # complete line typed by the user starts the next take.
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                  line = sys.stdin.readline()
                  if line:
                    print_message ("### Started Recording ###")
                    is_recording = True
Code example #2
0
    def record_gesture(self):

        num_examples_recorded = 0
        max_examples = 10

        ### Step 1: have them name the gesture ###
        print_message("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message("Now we will begin recording " + str(max_examples) +
                      " examples of this gesture, " + str(gesture_name) +
                      ". Press Enter when ready.")
        sys.stdin.readline()

        record_gesture = Gesture(gesture_name)

        #--- initialize parameters ---
        is_recording = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            frame = self.get_frame()
            record_gesture.add_frame(frame)

            if record_gesture.is_full():

                ### --- Notify of recording status --- ###
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ###
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if line:
                        print_message("### Started Recording ###")
                        is_recording = True
Code example #3
0
File: run.py — Project: jayhack/leap_synth
    def synth_main(self):

        ### Step 1: start the max patch ###
        self.max_interface.send_gesture('Start')

        ### Step 2: initialize local data ###
        print_message("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model()
        observed_gesture = Gesture()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame()
            observed_gesture.add_frame(frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            (palm_position,
             palm_orientation) = self.get_position_and_orientation(frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full():
                print 'x'

                classification_results = self.gesture_recognizer.classify_gesture(
                    observed_gesture)
                if classification_results:
                    prediction = classification_results[0]
                    prediction_prob = classification_results[1]
                    print_message("Prediction: " + str(prediction) +
                                  " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    observed_gesture.clear()

            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture(send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state(frame.hands[0])
Code example #4
0
File: run.py — Project: jayhack/leap_synth
    def synth_main (self):
        """Main loop: continuous gesture recognition feeding the Max interface.

        Loads the trained recognition model, then loops forever: each frame
        is appended to a rolling Gesture; once full, it is classified and any
        predicted gesture (plus the current hand state) is forwarded to Max.
        Never returns.
        """

        ### Step 1: start the max patch ###
        self.max_interface.send_gesture ('Start')

        ### Step 2: initialize local data ###
        print_message ("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model ()
        observed_gesture = Gesture ()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame ()
            observed_gesture.add_frame (frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            # NOTE(review): palm_position/palm_orientation are never read below —
            # presumably kept for side effects of the call or future use; confirm.
            (palm_position, palm_orientation) = self.get_position_and_orientation (frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full ():
                print 'x'

                # Falsy result means no prediction; otherwise the first two
                # elements are the predicted label and its probability.
                classification_results = self.gesture_recognizer.classify_gesture (observed_gesture)
                if classification_results:
                    prediction = classification_results [0]
                    prediction_prob = classification_results [1]
                    print_message("Prediction: " + str(prediction) + " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    observed_gesture.clear ()



            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture (send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state (frame.hands[0])
Code example #5
0
    def synth_main_discrete(self):
        """Discrete-mode loop: record one complete gesture per take, classify it.

        Loads the trained model, then repeats forever: run the countdown,
        capture frames until the gesture buffer is full, classify it, and
        wait for Enter before the next take.  Never returns.
        """
        self.gesture_recognizer.load_model()

        print_message("Recording Discrete events")
        while True:

            # Fresh, empty gesture buffer for each take.
            gesture = Gesture()

            # Start-of-take countdown, then capture until full.
            self.record_countdown()
            while not gesture.is_full():
                gesture.add_frame(self.get_frame())

            # Take complete — classify it.
            print_message("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture(gesture)

            print_message("enter to continue")
            sys.stdin.readline()
Code example #6
0
File: run.py — Project: jayhack/LeapReceiver
    def synth_main_discrete (self):
        """Discrete-mode main loop: record one full gesture at a time and classify it.

        Loads the trained model, then loops forever: run the recording
        countdown, capture frames until the Gesture buffer is full, classify
        the result, and wait for Enter before the next take.  Never returns.
        """

        self.gesture_recognizer.load_model ()

        print_message ("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            # Fresh (empty) gesture buffer for each take.
            observed_gesture = Gesture ()

            ### Step 2: start the recording ###
            self.record_countdown ()

            ### Step 3: fill it with frames ###
            while not observed_gesture.is_full ():
                frame = self.get_frame ()
                observed_gesture.add_frame (frame)

            ### Step 4: stop the recording and classify ###
            print_message ("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture (observed_gesture)

            # Blocking wait for Enter before starting the next take.
            print_message("enter to continue")
            sys.stdin.readline ()