コード例 #1
0
def main():
    """Live sign-detection demo: loads a pickled model (argv[2]) and a
    FeatureExtractor calibration (argv[1]), then classifies video frames
    until spacebar (key code 32) or Ctrl-C.

    Fixes vs. original: the model file was opened in text mode and never
    closed; `with open(..., 'rb')` closes it deterministically and reads
    binary pickles correctly. Unused local `detected` removed.
    """
    # NOTE(review): pickle.load on an externally supplied file is unsafe
    # for untrusted input -- acceptable here only because argv[2] is a
    # locally produced model file.
    with open(sys.argv[2], 'rb') as modelfile:
        model = pickle.load(modelfile)

    f = fe.FeatureExtractor(sys.argv[1])
    f.setStartPoint()

    labels = model.labels

    while 1:
        try:
            imbgr = np.array(fe.get_video())
            imdepth = np.array(fe.get_depth())

            # Accumulate a timestamped (colour, depth) observation.
            f.addPoint(time.time(), imbgr, imdepth)

            # nan_to_num: feature vector may contain NaNs when markers
            # are not visible in the frame.
            obs = np.nan_to_num(f.getFeatures())

            prediction = model.detect(obs)
            if prediction is not None:
                cv2.putText(imbgr, labels[prediction], (5, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 5)
                print(labels[prediction])

            cv2.imshow("Demo", imbgr)

        except KeyboardInterrupt:
            break
        if cv.WaitKey(10) == 32:  # spacebar quits
            break
コード例 #2
0
ファイル: adative.py プロジェクト: jgblight/fydp
def main():
    """Adaptive colour-mask demo.

    Builds a reference mean from the first frame, then shows the
    smoothed foreground mask for each subsequent frame until Esc
    (key code 27) or Ctrl-C.
    """
    extractor = extract.FeatureExtractor(sys.argv[1])
    extractor.setStartPoint()

    # Reference statistics taken from a single initial frame.
    first_frame = np.array(extract.get_video())
    baseline = get_mean(first_frame, extractor)

    while True:
        try:
            frame = np.array(extract.get_video())
            mask = get_mask(frame, baseline)

            # Debug output: mask range before and after the uint8 cast.
            print(np.max(mask))
            print(np.min(mask))
            mask = mask.astype('uint8')
            print(np.max(mask))
            print(np.min(mask))

            mask = extractor.markers['right'].blobSmoothing(mask)
            cv2.imshow('Demo', mask)

        except KeyboardInterrupt:
            break
        if cv.WaitKey(10) == 27:  # Esc quits
            break
コード例 #3
0
ファイル: demo.py プロジェクト: jgblight/fydp
    # Fragment: the enclosing `def` starts before this excerpt and the body
    # continues past it; code left byte-identical, comments only.

    # Load a pickled classifier followed by its label list from argv[2].
    # NOTE(review): file handle is never closed in this fragment, and
    # pickle.load is unsafe on untrusted files.
    modelfile = open(sys.argv[2])

    model = pickle.load(modelfile)
    labels = pickle.load(modelfile)
    # Sliding window of recent per-frame predictions (capped at 10 below).
    running = deque([])

    # argv[1] holds two CSV rows: low and high colour thresholds.
    # reader.next() is Python 2 iterator syntax.
    with open(sys.argv[1]) as csvfile:
        reader = csv.reader(csvfile)
        low = [ float(x) for x in reader.next()]
        high = [ float(x) for x in reader.next()]

    green = fe.colourFilter(tuple(low),tuple(high))

    while 1:
        try:
            imbgr = fe.get_video()
            # Convex hull of the green-filtered region; empty when the
            # marker is not in view.
            hull = green.getColourHull(imbgr)
            if len(hull):
                features = fe.getFeatureVector(hull,['central'])

                # model.predict expects a batch; take the single result.
                imbgrclass = model.predict([features])
                imbgrclass = imbgrclass[0]
                
                # Smooth predictions over a 10-frame window: fill first,
                # then slide and take the median label.
                if len(running) < 10:
                    sign = imbgrclass
                    running.append(sign)
                else:
                    _ = running.popleft()
                    running.append(imbgrclass)
                    sign = median(running)
コード例 #4
0
ファイル: demo_symposium.py プロジェクト: jgblight/fydp
        # Fragment: the enclosing function and its `if` branch start before
        # this excerpt and the body is cut off below; code left
        # byte-identical, comments only.
        f = fe.FeatureExtractor("calibration.csv")
    else:
        f = fe.FeatureExtractor(sys.argv[1])

    # Fixed demo sequence of sign indices (random choice disabled below).
    # for i in range(10):
    for i, rand_sign in enumerate([1, 5, 8, 7, 9, 0, 2, 6]):
        try:
            # rand_sign = np.random.randint(0,len(labels))
            f.setStartPoint()
            detectedSign = 0
            countDown = 0

            # Loop until the detection has been held for 30 frames.
            while detectedSign < 30:
                try:

                    imbgr = np.array(fe.get_video())
                    # Keep an untouched copy before drawing the overlay.
                    img = np.copy(imbgr)

                    # First sign only: show a 3-2-1 countdown (30 frames
                    # per digit) on a black 480x640 canvas.
                    if i == 0 and countDown < 90:
                        imbgr = np.zeros((480, 640, 3))
                        if countDown < 30:
                            cv2.putText(imbgr, "3", (250, 250), cv2.FONT_HERSHEY_COMPLEX, 5, (255, 255, 255), 5)
                        elif countDown < 60:
                            cv2.putText(imbgr, "2", (250, 250), cv2.FONT_HERSHEY_COMPLEX, 5, (255, 255, 255), 5)
                        elif countDown < 90:
                            cv2.putText(imbgr, "1", (250, 250), cv2.FONT_HERSHEY_COMPLEX, 5, (255, 255, 255), 5)
                        cv2.imshow("Demo", imbgr)
                        countDown += 1

                    else:
                        if not detectedSign:
コード例 #5
0
ファイル: autocalibrate.py プロジェクト: jgblight/fydp
    def autocalibration(self, calibration_file, calibration_folder):
        """Iteratively tune green/red/blue colour thresholds against
        reference features, drawing live feedback, until all three are
        stable for 30 frames (or the user aborts with spacebar/Ctrl-C).

        Writes the six threshold rows to `calibration_file` and returns
        (glow, ghigh, blow, bhigh, rlow, rhigh).
        """
        green_ref, red_ref, blue_ref, cfg = self.get_features(calibration_folder)

        green_done = False
        blue_done = False
        red_done = False

        green_hits = 0
        blue_hits = 0
        red_hits = 0
        stable_frames = 0

        green_lo, green_hi = self.get_start(cfg, 'glow', 'ghigh')
        blue_lo, blue_hi = self.get_start(cfg, 'blow', 'bhigh')
        red_lo, red_hi = self.get_start(cfg, 'rlow', 'rhigh')

        while True:
            try:
                frame = np.array(fe.get_video())

                # One optimisation step per channel; each returns its
                # updated (done, hit-count, low, high) state.
                green_done, green_hits, green_lo, green_hi = self.optimize(
                    frame, green_done, green_hits, green_lo, green_hi,
                    green_ref, cfg, "glow", "ghigh", 0.05)
                red_done, red_hits, red_lo, red_hi = self.optimize(
                    frame, red_done, red_hits, red_lo, red_hi,
                    red_ref, cfg, "rlow", "rhigh", 0.05)
                blue_done, blue_hits, blue_lo, blue_hi = self.optimize(
                    frame, blue_done, blue_hits, blue_lo, blue_hi,
                    blue_ref, cfg, "blow", "bhigh", 0.05, blue=True)

                # Visual feedback: green and red hulls, two largest blue blobs.
                hull = fe.colourFilter(green_lo, green_hi).getColourHull(frame)
                if len(hull):
                    cv2.drawContours(frame, [hull], -1, (0, 255, 0), 2)

                hull = fe.colourFilter(red_lo, red_hi).getColourHull(frame)
                if len(hull):
                    cv2.drawContours(frame, [hull], -1, (0, 0, 255), 2)

                blue_cnts = fe.colourFilter(blue_lo, blue_hi).getColourContours(frame)
                if len(blue_cnts) >= 2:
                    by_area = sorted(blue_cnts, key=cv2.contourArea)
                    for blob in (by_area[-1], by_area[-2]):
                        cv2.drawContours(frame, [blob], -1, (255, 0, 0), 2)

                cv2.imshow("Demo", frame)

                # Count consecutive frames with all three channels converged.
                if green_done and red_done and blue_done:
                    stable_frames += 1

            except KeyboardInterrupt:
                break
            if cv.WaitKey(10) == 32:  # spacebar aborts
                break
            if stable_frames >= 30:
                break

        # Persist thresholds: green, blue, red -- low row then high row.
        with open(calibration_file, 'w') as csvfile:
            writer = csv.writer(csvfile)
            for row in (green_lo, green_hi, blue_lo, blue_hi, red_lo, red_hi):
                writer.writerow(row)

        return green_lo, green_hi, blue_lo, blue_hi, red_lo, red_hi
コード例 #6
0
    # Fragment: the enclosing `def` starts before this excerpt and the
    # trailing `if` body is cut off; code left byte-identical, comments only.

    # Histories of past marker positions (unused within this excerpt --
    # presumably consumed further down; verify against the full file).
    rightPast = deque([])
    leftPast = deque([])
    f = extract.FeatureExtractor(sys.argv[1])
    f.setStartPoint()


    #plt.ion()
    #fig = plt.figure()
    #plt.xlim([0,640])
    #plt.ylim([0,255])


    while 1:
        try:
            #cv.ShowImage('Depth', get_depth())
            imbgr = np.array(extract.get_video())
            imdepth = extract.get_depth()

            # Central moments + convex hull for each hand marker.
            greenmoments,greenhull = f.getCentralMoments(imbgr,'right')
            redmoments,redhull = f.getCentralMoments(imbgr,'left')

            cv2.drawContours(imbgr,[greenhull],-1,(0,255,0),2)
            cv2.drawContours(imbgr,[redhull],-1,(0,0,255),2)

            # nan_to_num: features may contain NaNs when a marker is lost.
            feature = np.nan_to_num(f.addPoint(time.time(),imbgr,imdepth))
            print feature

            # Indices 14-17 appear to be (x, y) centres for the two
            # markers -- TODO confirm against FeatureExtractor.addPoint.
            if feature.shape:
                cv2.circle(imbgr,(int(feature[14]),int(feature[15])),3,(0,0,255),4)
                cv2.circle(imbgr,(int(feature[16]),int(feature[17])),3,(0,255,0),4)
                #plt.scatter(feature[14],480 - feature[15],c='r')