示例#1
0
    def load_gestures_of_type(self, gesture_type):
        """Populate self.gestures[gesture_type] with a Gesture loaded from
        every file found in that type's gesture directory, reporting any
        gesture whose observation data is not yet full."""

        # Start from a fresh list for this type (discards prior loads).
        self.gestures[gesture_type] = []

        # Load one Gesture per file in the type's directory.
        source_dir = self.gesture_dirs[gesture_type]
        for entry in os.listdir(source_dir):
            filepath = os.path.join(source_dir, entry)

            loaded = Gesture(name=gesture_type,
                             observations_filepath=filepath)

            # Warn about (but still keep) incomplete gestures.
            if not loaded.is_full():
                print_error("Loading Gestures",
                            "Encountered a gesture that is not yet full")

            self.gestures[gesture_type].append(loaded)
示例#2
0
    def SaveGesture(self, filename, points):
        """Store *points* as a gesture named after *filename*: replace any
        same-named in-memory prototype, then write the gesture to *filename*
        as XML in the WUW01 recorder format.

        Returns True on success, False if building or writing the XML raised.
        """
        name = Gesture.ParseName(filename)
        # Drop an existing prototype with this name before appending the new one.
        counts = self.NumGestures
        for i in range(counts):
            if self.__gestures[i].Name == name:
                del self.__gestures[i]
                break
        newPrototype = Gesture(name, points)
        self.__gestures.append(newPrototype)

        p0 = points[0]
        pn = points[-1]

        success = True
        try:
            ges = ET.Element("Gesture")
            ges.attrib["Name"] = name
            ges.attrib["NumPts"] = "%d" % len(points)
            # NOTE(review): "Millseconds" is presumably a typo, but renaming
            # the attribute would change the file format consumers parse, so
            # it is kept as-is.
            ges.attrib["Millseconds"] = "%d" % (pn.T - p0.T)
            ges.attrib["AppName"] = "WUW01"
            ges.attrib["AppVer"] = "1.0.0.0"
            ges.attrib["Date"] = datetime.now().strftime("%A, %B %d, %Y")
            ges.attrib["TimeOfDay"] = datetime.now().strftime("%I:%M:%S %p")

            for p in points:
                q = ET.SubElement(ges, "Point")
                q.attrib["X"] = "%f" % p.X
                q.attrib["Y"] = "%f" % p.Y
                q.attrib["T"] = "%f" % p.T

            tree = ET.ElementTree(ges)
            # pretty_print / encoding kwargs imply ET is lxml.etree, not the stdlib.
            tree.write(filename, pretty_print = True, xml_declaration = True, encoding = "UTF-8")
        except Exception:
            success = False  # bug fix: was lowercase `false` (undefined name -> NameError)
        return success
示例#3
0
 def __init__(self, detector_window, classifier_window):
     """Store the two window sizes, load the pretrained detector and
     classifier models, and start with an empty gesture sequence."""
     self.classifier_window = classifier_window
     self.detector_window = detector_window
     self.classifier = Classifier()
     self.classifier.load()
     self.detector = Detector()
     self.detector.load()
     self.gesture_sequence = Gesture()
示例#4
0
    def record_gesture (self):

        num_examples_recorded = 0
        max_examples = 10

        ### Step 1: have them name the gesture ###
        print_message ("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message ("Now we will begin recording " + str(max_examples) + " examples of this gesture, " + str(gesture_name) + ". Press Enter when ready.")
        sys.stdin.readline ()

        record_gesture = Gesture (gesture_name)



        #--- initialize parameters ---
        is_recording        = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            frame = self.get_frame ()
            record_gesture.add_frame (frame)

            if record_gesture.is_full ():


                ### --- Notify of recording status --- ###
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message ("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ### 
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                  line = sys.stdin.readline()
                  if line:
                    print_message ("### Started Recording ###")
                    is_recording = True
示例#5
0
def InteralVarianceFinder(dataSet,
                          samplingRes,
                          DistanceMethod,
                          gestureRepCount=30,
                          trials=30):
    """Estimate within-class distance statistics for a gesture data set.

    Each trial shuffles every gesture set's repetitions, splits them into two
    halves, and applies DistanceMethod pairwise between the halves, tracking
    the total and maximum distances.  Returns a dict of findings.

    NOTE(review): "Interal" is presumably a typo for "Internal"; renaming
    would break callers, so the name is kept.
    """
    totalTrialCount = 0   # number of pairwise distance computations performed
    totalDistance = 0     # running sum of all computed distances
    maxDistance = 0       # largest single distance observed

    #Setting up
    allGestures = []
    # import all gestures into an Array of Arrays
    for gesSet in dataSet:
        allGestures.append([])
        for n in range(gestureRepCount):
            allGestures[len(allGestures) - 1].append(
                Gesture(GetPoints(gesSet[0][n]), gesSet.attrib["GroupName"],
                        samplingRes))

    # index permutation, reshuffled each trial to pick random halves
    randIndex = []
    for n in range(gestureRepCount):
        randIndex.append(n)

    set1 = []
    set2 = []

    # pre-size the two half-sets
    for n in range(int(gestureRepCount / 2)):
        set1.append(None)
        set2.append(None)

    for trial in range(trials):
        print("trial: " + str(trial))

        for gesSetId in range(len(allGestures)):
            # random list to choose the training samples randomly
            random.shuffle(randIndex)
            for repId in range(gestureRepCount):
                if repId < gestureRepCount / 2:
                    set1[repId] = (allGestures[gesSetId][randIndex[repId]])
                else:
                    set2[repId - int(gestureRepCount / 2)] = (
                        allGestures[gesSetId][randIndex[repId]])

        # NOTE(review): this loop sits OUTSIDE the gesSetId loop above, so each
        # trial only compares the halves of the LAST gesture set (earlier sets
        # are overwritten).  Compare DoTesting, which consumes its sets per
        # trial -- this looks like an indentation bug; confirm intent before
        # relying on these statistics.
        for gesId in range(len(set1)):
            distance = DistanceMethod(set1[gesId].Points, set2[gesId].Points,
                                      100)
            #print(matchName + " - " + checkSet[gesId].Name)
            totalTrialCount += 1
            totalDistance += distance
            maxDistance = max(maxDistance, distance)

    findings = {
        "samplingRes": samplingRes,
        "trialCount": trials,
        "gestureRepCount": gestureRepCount,
        "totalTrialCount": totalTrialCount,
        "totalDistance": totalDistance,
        "maxDistance": maxDistance
    }
    return findings
示例#6
0
    def record_gesture(self):
        """Prompt the user to name a gesture, then record max_examples
        examples of it, saving each completed example via the recognizer.

        Uses a non-blocking select() poll on stdin so the user can start a
        take by pressing Enter while frames stream in.
        """

        num_examples_recorded = 0
        max_examples = 10

        ### Step 1: have them name the gesture ###
        print_message("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message("Now we will begin recording " + str(max_examples) +
                      " examples of this gesture, " + str(gesture_name) +
                      ". Press Enter when ready.")
        sys.stdin.readline()

        # NOTE: local deliberately shadows the method name; it is the Gesture
        # buffer being filled, not a recursive call.
        record_gesture = Gesture(gesture_name)

        #--- initialize parameters ---
        is_recording = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            frame = self.get_frame()
            record_gesture.add_frame(frame)

            if record_gesture.is_full():

                ### --- Notify of recording status --- ###
                # '.' marks a recorded frame, 'x' marks idle polling.
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ###
                # Zero-timeout select: drain any pending Enter presses.
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if line:
                        print_message("### Started Recording ###")
                        is_recording = True
示例#7
0
 def med(self, game):
     """Start a medium-difficulty session: switch workspace with
     Ctrl+Alt+Right, then run the game loop on a fresh 480x480 Gesture."""
     gesture = Gesture(game, 480, 480, pnet=self.pnet)
     kb = Controller()
     with kb.pressed(Key.ctrl), kb.pressed(Key.alt):
         kb.press(Key.right)
         kb.release(Key.right)
     self.game_loop(gesture)
	def load_gestures_of_type (self, gesture_type):
		"""Load every gesture file of *gesture_type* from its directory into
		self.gestures[gesture_type], warning on gestures that are not full."""

		### Step 1: initialize list of gestures of this type ###
		self.gestures[gesture_type] = []

		### Step 2: get all filenames from the appropriate gesture_dir ###
		gesture_filenames = [os.path.join (self.gesture_dirs[gesture_type], g) for g in os.listdir (self.gesture_dirs[gesture_type])]

		### Step 3: for each filename, load in a gesture from it and add to list ###
		for gesture_filename in gesture_filenames:

			### --- create the gesture --- ###
			new_gesture = Gesture (name=gesture_type, observations_filepath=gesture_filename)

			### --- make sure it is full/clean --- ###
			# Incomplete gestures are reported but still appended below.
			if not new_gesture.is_full ():
				print_error ("Loading Gestures", "Encountered a gesture that is not yet full")

			### --- add to the list of gestures --- ###
			self.gestures[gesture_type].append (new_gesture)
示例#9
0
class Model(object):
    """Sliding-window gesture pipeline: a Detector gates a Classifier.

    Landmarks are pushed into a bounded gesture sequence; when the detector
    fires on the recent tail, the classifier labels the longer tail.
    """

    def __init__(self, detector_window, classifier_window):
        """Store window sizes, load both pretrained models, and start with an
        empty gesture sequence."""
        self.detector_window = detector_window
        self.classifier_window = classifier_window
        self.detector = Detector()
        self.detector.load()
        self.classifier = Classifier()
        self.classifier.load()
        self.gesture_sequence = Gesture()

    def __call__(self, hand_landmarks):
        """Feed one frame of landmarks; return a gesture label or None."""
        # Append the new frame, keeping at most classifier_window entries.
        self.gesture_sequence.push(hand_landmarks)
        if len(self.gesture_sequence) > self.classifier_window:
            self.gesture_sequence.drop_first()

        # Cheap detector pass on the short tail gates the classifier.
        detect_tail = self.gesture_sequence.data(-self.detector_window)
        if not self.detector.predict(detect_tail):
            return None

        classify_tail = self.gesture_sequence.data(-self.classifier_window)
        label = self.classifier.predict(classify_tail)
        return label if label != 'No gesture' else None
示例#10
0
    def ReadGesture(self, root):
        """Parse a <Gesture> XML element into a Gesture of PointR points.

        Returns None if *root* is not a Gesture element.
        """
        if root.tag != "Gesture":
            return None
        name = root.get("Name")

        points = []
        for p in root.findall("Point"):
            # float()/int() replace the long-deprecated string.atof/string.atoi
            # (removed in Python 3); behavior is identical for these inputs.
            qx = float(p.get("X"))
            qy = float(p.get("Y"))
            # NOTE(review): points are written with "%f" for T elsewhere in
            # this project, yet parsed as int here -- confirm the on-disk
            # T format before changing either side.
            qt = int(p.get("T"))
            points.append(PointR(qx, qy, qt))
        return Gesture(name, points)
示例#11
0
    def synth_main_discrete(self):
        """Discrete-event mode: loop forever, recording one full gesture at a
        time and classifying it, pausing for Enter between recordings."""

        self.gesture_recognizer.load_model()

        print_message("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            observed_gesture = Gesture()

            ### Step 2: start the recording ###
            self.record_countdown()

            ### Step 3: fill it with frames ###
            while not observed_gesture.is_full():
                frame = self.get_frame()
                observed_gesture.add_frame(frame)

            ### Step 4: stop the recording and classify ###
            print_message("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture(observed_gesture)

            # Block until the user confirms before starting the next recording.
            print_message("enter to continue")
            sys.stdin.readline()
示例#12
0
    def synth_main_discrete(self):
        """Discrete-event mode: repeatedly record one full gesture and
        classify it, waiting for Enter between recordings."""

        self.gesture_recognizer.load_model()

        print_message ("Recording Discrete events")
        while True:

            # Record frames into a fresh gesture until it is full.
            gesture = Gesture()
            self.record_countdown()
            while not gesture.is_full():
                gesture.add_frame(self.get_frame())

            # Classify the completed recording.
            print_message ("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture(gesture)

            # Block until the user confirms the next round.
            print_message("enter to continue")
            sys.stdin.readline()
示例#13
0
File: run.py  Project: jayhack/leap_synth
    def synth_main(self):
        """Continuous recognition mode: stream frames into a rolling gesture,
        classify it whenever full, and forward recognized gestures and hand
        state to the Max interface."""

        ### Step 1: start the max patch ###
        self.max_interface.send_gesture('Start')

        ### Step 2: initialize local data ###
        print_message("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model()
        observed_gesture = Gesture()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame()
            observed_gesture.add_frame(frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            (palm_position,
             palm_orientation) = self.get_position_and_orientation(frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full():
                # 'x' marks each classification attempt on a full buffer.
                print 'x'

                classification_results = self.gesture_recognizer.classify_gesture(
                    observed_gesture)
                if classification_results:
                    prediction = classification_results[0]
                    prediction_prob = classification_results[1]
                    print_message("Prediction: " + str(prediction) +
                                  " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    # Reset the buffer so the next gesture starts fresh.
                    observed_gesture.clear()

            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture(send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state(frame.hands[0])
示例#14
0
File: run.py  Project: jayhack/leap_synth
    def synth_main (self):
        
        ### Step 1: start the max patch ###
        self.max_interface.send_gesture ('Start')

        ### Step 2: initialize local data ###
        print_message ("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model ()
        observed_gesture = Gesture ()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame ()
            observed_gesture.add_frame (frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            (palm_position, palm_orientation) = self.get_position_and_orientation (frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full ():
                print 'x'

                classification_results = self.gesture_recognizer.classify_gesture (observed_gesture)
                if classification_results:
                    prediction = classification_results [0]
                    prediction_prob = classification_results [1]
                    print_message("Prediction: " + str(prediction) + " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    observed_gesture.clear ()



            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture (send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state (frame.hands[0])
示例#15
0
def DoTesting(dataSet,
              traSampleCount,
              samplingRes,
              TesterMethod,
              gestureRepCount=30,
              trials=10):
    """Run a recognition-accuracy benchmark over *dataSet*.

    Each trial randomly splits every gesture set's repetitions into
    *traSampleCount* training samples and the remainder as test samples,
    runs TesterMethod on every test sample against the training set, and
    accumulates match counts plus per-check and per-trial timings.

    Returns a dict of findings (counts, match totals, timing statistics).
    """
    totalTrialCount = 0
    totalMatchCount = 0

    oneTrialCount = 0
    minMatchCount = len(dataSet) * gestureRepCount
    maxMatchCount = 0

    totalCheckTime = 0
    # Bug fix: a running minimum must start at +inf; the old initial value of
    # 10 silently capped minCheckTime whenever every check took over 10s.
    minCheckTime = float("inf")
    maxCheckTime = 0

    totalExecTime = 0

    #Setting up
    allGestures = []
    # import all gestures into an Array of Arrays
    for gesSet in dataSet:
        allGestures.append([])
        for n in range(gestureRepCount):
            allGestures[len(allGestures) - 1].append(
                Gesture(GetPoints(gesSet[0][n]), gesSet.attrib["GroupName"],
                        samplingRes))

    # index permutation, reshuffled each trial for a random train/test split
    randIndex = []
    for n in range(gestureRepCount):
        randIndex.append(n)

    # training data
    trainingSet = []
    # data to check
    checkSet = []

    # pre-size the two pools (filled by index each trial)
    for gesSetId in range(len(allGestures)):
        for repId in range(gestureRepCount):
            if repId < traSampleCount:
                trainingSet.append(None)
            else:
                checkSet.append(None)
    #/Setting up

    for trial in range(trials):
        triStart = time.time()
        print("trial: " + str(trial))

        # Fill training/check pools with a fresh random split per gesture set.
        traCounter = 0
        checkCounter = 0
        for gesSetId in range(len(allGestures)):
            # random list to choose the training samples randomly
            random.shuffle(randIndex)
            for repId in range(gestureRepCount):
                if repId < traSampleCount:
                    trainingSet[traCounter] = (
                        allGestures[gesSetId][randIndex[repId]])
                    traCounter += 1
                else:
                    checkSet[checkCounter] = (
                        allGestures[gesSetId][randIndex[repId]])
                    checkCounter += 1

        oneTrialCount = len(checkSet)
        curMatchCount = 0
        for gesId in range(len(checkSet)):
            # Periodic progress line (roughly 5 updates per trial, max every 50).
            if (gesId % min(int((len(checkSet) / 5) + 1), 50) == 0):
                print("tra sampl #:" + str(traSampleCount) + " - re sampl #:" +
                      str(samplingRes) + " - trial id:" + str(trial) +
                      " - gesture id: " + str(gesId) + "/" +
                      str(len(checkSet)))
            start = time.time()
            matchName = TesterMethod(checkSet[gesId], trainingSet)
            end = time.time()
            checkTime = end - start
            totalCheckTime += checkTime
            minCheckTime = min(minCheckTime, checkTime)
            maxCheckTime = max(maxCheckTime, checkTime)
            # A match is counted when the recognizer names the true class.
            if (matchName == checkSet[gesId].Name):
                curMatchCount += 1

        totalTrialCount += oneTrialCount
        totalMatchCount += curMatchCount
        minMatchCount = min(minMatchCount, curMatchCount)
        maxMatchCount = max(maxMatchCount, curMatchCount)
        triEnd = time.time()
        trialTime = triEnd - triStart
        totalExecTime += trialTime
        print("Est Time Remaining: " + "{:.3f}".format(trialTime *
                                                       (trials - trial - 1)) +
              " - trial time: " + "{:.3f}".format(trialTime))
    findings = {
        "traSampleCount": traSampleCount,
        "samplingRes": samplingRes,
        "trialCount": trials,
        "gestureRepCount": gestureRepCount,
        "totalTrialCount": totalTrialCount,
        "oneTrialCount": oneTrialCount,
        "totalMatchCount": totalMatchCount,
        "maxMatchCount": maxMatchCount,
        "minMatchCount": minMatchCount,
        "totalCheckTime": totalCheckTime,
        "totalExecTime": totalExecTime,
        "minCheckTime": minCheckTime,
        "maxCheckTime": maxCheckTime
    }
    return findings
示例#16
0
import queue
import threading

import cv2
from Capture import Capture
from Client import Client
from Gesture import Gesture

# Shared queue the client thread pushes incoming server messages into.
que = queue.Queue()
# Gesture recognizer loaded from a pretrained model file
# (presumably Keras .h5 -- confirm against the Gesture class).
gesture_rec = Gesture("modelv13.h5")
camera = Capture()
# NOTE(review): assumes a game server listening locally on port 12345.
client = Client('127.0.0.1', 12345)
data = {'status':''}
# Start feeding server data into the queue.
client.get_data(que)
frame_count = 0
# Overlay/display flags and game state for the main loop below.
display_msg = False
display_time = False
display_info = False
time = 0          # NOTE(review): shadows the stdlib `time` module name
bullets = 0
rounds = 0
player_move = ''
opp_move = ''
msg = ""
stat_data = dict()

# Seed initial state from any message already received.
if not que.empty():
    data = que.get()

while True:
    try: