Example #1
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    cps = CountsPerSec().start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        # frame = putIterationsPerSec(frame, cps.countsPerSec())
        # prediction, frame = detect_object(
        #     yolo,
        #     frame,
        #     postfix=FLAGS.postfix,
        # )
        video_shower.frame = frame
        cps.increment()
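Most of the examples on this page lean on the same threaded `VideoGet` and `VideoShow` helper classes, which are never defined here. A minimal sketch of what they appear to look like, assuming a `cv2.VideoCapture`-compatible source and a latest-frame-wins handoff between threads:

import threading

import cv2


class VideoGet:
    """Grab frames from `source` on a dedicated thread (sketch)."""

    def __init__(self, source=0):
        self.stream = cv2.VideoCapture(source)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        threading.Thread(target=self.get, daemon=True).start()
        return self

    def get(self):
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
            if not self.grabbed:
                self.stop()

    def stop(self):
        self.stopped = True


class VideoShow:
    """Show the latest frame with cv2.imshow on a dedicated thread (sketch)."""

    def __init__(self, frame=None):
        self.frame = frame
        self.stopped = False

    def start(self):
        threading.Thread(target=self.show, daemon=True).start()
        return self

    def show(self):
        while not self.stopped:
            cv2.imshow("Video", self.frame)
            if cv2.waitKey(1) == ord("q"):
                self.stop()

    def stop(self):
        self.stopped = True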
Example #2
def main():
    finger_detect = FingerDetection().start()
    key_listener = KeyListener().start()
    video_getter = VideoGet(0).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        frame = cv2.flip(frame, 1)
        img = normalizeFrame(frame, x0, y0)
        frame = drawDetectionArea(frame, x0, y0, width, (0, 255, 0))
        cv2.putText(frame, u"NONE = NONE, ONE = LEFT", (0, 80), font, 1.0,
                    (0, 0, 255), 2, 1)
        cv2.putText(frame, "TWO = RIGHT, FOUR = DOWN", (0, 120), font, 1.0,
                    (0, 0, 255), 2, 1)
        cv2.putText(frame, "UP = FIVE", (0, 160), font, 1.0, (0, 0, 255), 2, 1)
        finger_detect.frame = img
        prediction = handDetect(frame, finger_detect.detect(), x0, y0)

        # if prediction == "NONE" or prediction == "FIVE":
        key_listener.prediction = prediction
        key_listener.handle()

        video_shower.frame = frame
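`FingerDetection`, `KeyListener`, `normalizeFrame`, `drawDetectionArea`, and `handDetect` are project-specific helpers that are not shown. Judging only by the on-screen legend (ONE = LEFT, TWO = RIGHT, FOUR = DOWN, FIVE = UP), a hypothetical `handle()` could map predictions to key presses with pyautogui; this is a sketch, not the project's code:

import pyautogui

# Hypothetical mapping inferred from the overlay text above;
# "NONE" deliberately maps to no key press.
KEY_MAP = {"ONE": "left", "TWO": "right", "FOUR": "down", "FIVE": "up"}


def handle(prediction):
    key = KEY_MAP.get(prediction)
    if key is not None:
        pyautogui.press(key)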
Example #3
def predict_video(data_dir, sess, image_shape, logits, keep_prob, input_image):
    print('Predicting Video...')
    
    video_getter = VideoGet(data_dir, sess, image_shape, logits, keep_prob, input_image).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        video_shower.frame = frame
Example #4
def threadDetect(args):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """
    modelname = 'TFModels/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync_2018_07_03/frozen_inference_graph.pb'
    configname = 'TFModels/ssd_mobilenet_v1_ppn_coco.pbtxt'
    classnames = 'TFModels/coconew.names'
    garbageclasses = [
        "shoe", "hat", "eye glasses", "frisbee",
        "bottle", "plate", "wine glass", "cup", "fork", "spoon", "bowl",
        "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "fruit",
        "hotdog", "pizza", "donut", "cake",
        "vase", "scissors", "toothbrush", "cardboard", "napkin",
        "net", "paper", "plastic", "straw"
    ]
    source = args['source']
    resize_factor = args['resize_factor']
    grayscale = args['grayscale']
    
    with open(classnames, 'rt') as f:
        classes = f.read().rstrip('\n').split('\n')
    cvNet = cv2.dnn.readNetFromTensorflow(modelname, configname)
    threshold = 0.3

    video_getter = VideoGet(source).start()
    object_detector = ObjectDetect(cvNet, threshold, classes, garbageclasses, video_getter).start()
    object_tracker = ObjectTrack(video_getter, object_detector, resize_factor, grayscale).start()
    video_shower = VideoShow(video_getter, object_detector, object_tracker).start()
    
    cps = CountsPerSec().start()

    while True:
        if video_getter.stopped or object_detector.stopped or video_shower.stopped or object_tracker.stopped:
            video_shower.stop()
            video_getter.stop()
            object_detector.stop()
            object_tracker.stop()
            break

        time.sleep(1)
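`ObjectDetect`, `ObjectTrack`, and the multi-input `VideoShow` variant used here are project classes that are not shown. Given the `cvNet` loaded above, the detection step presumably follows the standard OpenCV `cv2.dnn` SSD pipeline; a minimal single-frame sketch under that assumption:

import cv2


def detect_garbage(cvNet, frame, classes, garbageclasses, threshold=0.3):
    # Run one SSD forward pass and return (label, score, box) tuples.
    rows, cols = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, size=(300, 300), swapRB=True, crop=False)
    cvNet.setInput(blob)
    detections = cvNet.forward()
    boxes = []
    # Each row is [_, class_id, score, x1, y1, x2, y2] with normalized coords;
    # how class_id indexes into coconew.names depends on the label file layout.
    for det in detections[0, 0, :, :]:
        class_id, score = int(det[1]), float(det[2])
        if score < threshold or class_id >= len(classes):
            continue
        label = classes[class_id]
        if label in garbageclasses:
            boxes.append((label, score,
                          int(det[3] * cols), int(det[4] * rows),
                          int(det[5] * cols), int(det[6] * rows)))
    return boxes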
Example #5
def threadVideoShow(source=0):
    """
    Dedicated thread for showing video frames with VideoShow object.
    Main thread grabs video frames.
    """

    cap = cv2.VideoCapture(source)
    (grabbed, frame) = cap.read()
    video_shower = VideoShow(frame).start()
    cps = CountsPerSec().start()

    while True:
        (grabbed, frame) = cap.read()
        if not grabbed or video_shower.stopped:
            video_shower.stop()
            break

        # frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame
        cps.increment()
Example #6
def read_zed(sess, image_shape, logits, keep_prob, input_image):
    count = 0
    video_zed = VideoZed(sess, image_shape, logits, keep_prob, input_image).start()

    # wait up to three seconds for the ZED camera to deliver its first frame
    while video_zed.frame is None:
        if count == 3:
            exit("Failed to open ZED")
        print("Waiting for ZED")
        count += 1
        time.sleep(1)

    video_shower = VideoShow(video_zed.frame).start()

    while True:
        if video_zed.stopped or video_shower.stopped:
            video_shower.stop()
            video_zed.stop()
            break

        frame = video_zed.frame
        video_shower.frame = frame
Example #7
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    cps = CountsPerSec().start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame
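`CountsPerSec` and `putIterationsPerSec` are used throughout these examples but never defined on this page. A minimal sketch consistent with how they are called:

from datetime import datetime

import cv2


class CountsPerSec:
    """Track how many times increment() is called per second (sketch)."""

    def __init__(self):
        self._start_time = None
        self._num_occurrences = 0

    def start(self):
        self._start_time = datetime.now()
        return self

    def increment(self):
        self._num_occurrences += 1

    def countsPerSec(self):
        elapsed = (datetime.now() - self._start_time).total_seconds()
        return self._num_occurrences / elapsed if elapsed > 0 else 0


def putIterationsPerSec(frame, iterations_per_sec):
    """Overlay the iteration rate near the bottom-left of the frame."""
    cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec),
                (10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    return frame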
Example #8
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame, '1').start()

    video_getter2 = VideoGet('http://192.168.1.4:8080/video').start()
    video_shower2 = VideoShow(video_getter2.frame, '2').start()
    cps = CountsPerSec().start()

    while True:
        # if video_getter.stopped or video_shower.stopped:
        #     video_shower.stop()
        #     video_getter.stop()
        #     break

        frame = video_getter.frame
        frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame

        frame2 = video_getter2.frame
        frame2 = putIterationsPerSec(frame2, cps.countsPerSec())
        video_shower2.frame = frame2
        cps.increment()
Example #9
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    save_image = SaveImage(video_getter.frame)
    save_image.start()
    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            save_image.stop()
            #save_image.join()
            break

        frame = video_getter.frame
        video_shower.frame = frame
        save_image.frame = frame
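`SaveImage` is not shown; by analogy with `VideoShow` it presumably writes the most recent frame on its own thread. A sketch under that assumption, with the output pattern and save interval as placeholders:

import threading
import time

import cv2


class SaveImage:
    """Write the most recent frame to disk on a dedicated thread (sketch)."""

    def __init__(self, frame=None, out_dir=".", interval=1.0):
        self.frame = frame
        self.stopped = False
        self.out_dir = out_dir
        self.interval = interval  # seconds between saves (assumption)

    def start(self):
        threading.Thread(target=self.save, daemon=True).start()
        return self

    def save(self):
        count = 0
        while not self.stopped:
            if self.frame is not None:
                cv2.imwrite("{}/frame_{:06d}.jpg".format(self.out_dir, count),
                            self.frame)
                count += 1
            time.sleep(self.interval)

    def stop(self):
        self.stopped = True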
Example #10
def gesture_guy():
    model = load_model('my_model.h5')
    model.load_weights('model_weights.h5')

    gb = GestureBrain(model)
    print("start ======")
    counter = 0
    i = 0
    cap = cv2.VideoCapture(0)
    (grabbed, frame) = cap.read()
    video_shower = VideoShow(frame).start()
    mode_i = None
    action2 = None
    while True:
        (grabbed, frame) = cap.read()
        if not grabbed or video_shower.stopped:
            video_shower.stop()
            break
        cv2.putText(frame, "Current Gesture: {}".format(action2), (10, 450),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 255, 255))
        cv2.putText(frame, "Mode: {}".format(mode_i), (10, 400),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 255, 255))
        video_shower.frame = frame
        counter += 1
        screen = np.array(cv2.resize(frame, (88, 50)))

        gb.push_img(screen)
        if counter != 40:
            continue
        else:
            counter = 0

        mode = [
            'Recognition mode', 'Explorer mode', 'Photo mode', 'Video mode'
        ]
        selectMode = mode[i]
        print(selectMode)
        action = gb.regonize()
        print("predict type :", action)
        if selectMode == mode[0]:
            if action == 0:
                print(labels_want[0])
            elif action == 1:
                print(labels_want[1])
            elif action == 2:
                print(labels_want[2])
            elif action == 3:
                print(labels_want[3])
            elif action == 4:
                print(labels_want[4])
            elif action == 5:
                print(labels_want[5])
            elif action == 6:
                print(labels_want[6])
            elif action == 7:
                print(labels_want[7])
            elif action == 8:
                print(labels_want[8])
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
            elif action == 12:
                print(labels_want[12])
        if selectMode == mode[2]:
            if action == 0:
                print(labels_want[0])
                pyautogui.press('left')
            elif action == 1:
                print(labels_want[1])
                pyautogui.press('right')
            elif action == 4:
                print(labels_want[4])
                pyautogui.press('left')
            elif action == 5:
                print(labels_want[5])
                pyautogui.press('right')
            elif action == 6:
                print(labels_want[6])
#                 pyautogui.hotkey('altleft','f4')
            elif action == 7:
                print(labels_want[7])
                pyautogui.hotkey('enter')
            elif action == 8:
                print(labels_want[8])
                pyautogui.hotkey('ctrl', '+')
            elif action == 9:
                print(labels_want[9])
                pyautogui.hotkey('ctrl', '-')

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
                pyautogui.press('winleft')

        if selectMode == mode[3]:
            if action == 0:
                print(labels_want[0])
                video_seekb(0.4)
            elif action == 1:
                print(labels_want[1])
                video_seekf(0.4)
            elif action == 4:
                print(labels_want[4])
                vol_up(0.8)
            elif action == 5:
                print(labels_want[5])
                vol_down(0.8)
            elif action == 6:
                print(labels_want[6])
                pyautogui.press('space')
            elif action == 7:
                print(labels_want[7])
                pyautogui.press('space')
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])

        if selectMode == mode[1]:
            if action == 0:
                print(labels_want[0])
                pyautogui.press('left')
            elif action == 1:
                print(labels_want[1])
                pyautogui.press('right')
            elif action == 2:
                print(labels_want[2])
                pyautogui.press('down')
            elif action == 3:
                print(labels_want[3])
                pyautogui.press('up')
            elif action == 4:
                print(labels_want[4])
                pyautogui.press('enter')
            elif action == 5:
                print(labels_want[5])
                pyautogui.press('backspace')
            elif action == 6:
                print(labels_want[6])
#                 pyautogui.hotkey('altleft','f4')
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
                pyautogui.press('winleft')


#         if selectMode == mode[4]:
#             if action == 0 :
#                 print(labels_want[0])
#                 pyautogui.press('left')
#             elif action == 1 :
#                 print(labels_want[1])
#                 pyautogui.press('right')
#             elif action == 2 :
#                 print(labels_want[2])
#                 pyautogui.press('down')
#             elif action == 3 :
#                 print(labels_want[3])
#                 pyautogui.press('up')
#             elif action == 4 :
#                 print(labels_want[4])
#                 pyautogui.press('pagedown')
#             elif action == 5 :
#                 print(labels_want[5])
#                 pyautogui.press('pageup')

#             elif action == 10:
#                 print(labels_want[10])
#                 i += 1
#                 if i != 5 :
#                     continue
#                 else:
#                     i = 0

#             elif action == 11:
#                 print(labels_want[10])
#                 pyautogui.press('winleft')

#         cv2.putText(frame, "{}".format(action),(10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
        mode_i = mode[i]
        action2 = labels_want[action[0]]
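The per-mode if/elif ladders above all repeat the same print-then-act shape. As a design note, each mode's branches could collapse into a dispatch table; a sketch for 'Explorer mode' (mode[1]), mirroring the branches above:

import pyautogui

# Sketch only: action id -> key for 'Explorer mode'. Actions 6 and 9 print
# their label but press nothing, and action 10 (mode switch) would still
# need special-casing, exactly as in the original branches.
EXPLORER_KEYS = {0: 'left', 1: 'right', 2: 'down', 3: 'up',
                 4: 'enter', 5: 'backspace', 11: 'winleft'}


def handle_explorer(action, labels_want):
    print(labels_want[action])
    key = EXPLORER_KEYS.get(action)
    if key is not None:
        pyautogui.press(key)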
Example #11
def threadBoth(source=1):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """
    video_getter = VideoGet(source).start()
    SIFT = SIFTThread(video_getter.frame).start()
    video_shower = VideoShow(video_getter.frame).start()
    fgbg = cv.createBackgroundSubtractorMOG2()

    frameCount = 0
    oldPoint = None

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        SIFT.frame = frame
        # apply background-subtraction mask to extract the foreground object
        fgmask = fgbg.apply(frame)
        # preprocessing: blur and erode to remove small noisy bits
        kernel = np.ones((5, 5), np.uint8)
        morph = cv.medianBlur(fgmask, 13)
        morph = cv.erode(morph, kernel, iterations=3)

        # get the contours
        _, contours, _ = cv.findContours(morph, cv.RETR_TREE,
                                         cv.CHAIN_APPROX_SIMPLE)
        if len(contours) != 0:
            # find the biggest contour by area
            c = max(contours, key=cv.contourArea)
            area = cv.contourArea(c)
            # ignore contours smaller than the area threshold
            if area > threshold_area:
                # determine the top-most extreme points along the contour
                extTop = tuple(c[c[:, :, 1].argmin()][0])
                # assign first value
                if (oldPoint is None):
                    oldPoint = extTop
                # if the finger stays for a few frames, we know someone is pointing
                if ((oldPoint[0] - 5 <= extTop[0] <= oldPoint[0] + 5)
                        and (oldPoint[1] - 5 <= extTop[1] <= oldPoint[1] + 5)):
                    if (extTop[0] != 0 and extTop[1] != 0):
                        frameCount += 1
                        if frameCount == 20:
                            frameCount = 0
                            SIFT.run()
                            #print([SIFT.newCoords.astype(int)])
                            for i in range(len(SIFT.zone.coords)):
                                res = cv.pointPolygonTest(SIFT.zone.coords[i],
                                                          extTop,
                                                          measureDist=False)
                                print(res)
                                if (res == 1):
                                    webbrowser.open(SIFT.zone.zones[i][1],
                                                    new=2)
                    oldPoint = extTop
                else:
                    frameCount = 0
                    oldPoint = extTop
                # draw a circle at the top-most point
                cv.circle(frame, extTop, 8, (100, 0, 255), -1)

        video_shower.frame = frame
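The example above depends on module-level names that are not shown: the `cv` alias for OpenCV, `np`, `webbrowser`, the `SIFTThread` class, and the `threshold_area` constant. A plausible preamble, with the threshold as a placeholder value:

import webbrowser

import cv2 as cv
import numpy as np

# Minimum contour area (in pixels) to count as a real foreground object;
# 3000 is a placeholder, not the original project's value.
threshold_area = 3000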
Example #12
    def startDetecting(self):
        
        capture = VideoGet(0).start()
        video_shower = VideoShow(capture.frame).start()   

        while capture.isOpened():
            if capture.stopped or video_shower.stopped:
                video_shower.stop()
                capture.stop()
                break
            # grab the frame from the camera
            frame = capture.frame
            
            # capture hand data inside the rectangular sub-window
            cv2.rectangle(frame,(100,100),(300,300),(0,255,0),0)
            crop_image = frame[100:300, 100:300]
            
            # 1. Apply Gaussian blur and build the HSV mask
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exMask = executor.submit(self.maskHSV, crop_image)
                mask = exMask.result()

            # 2. Find the contours in the mask
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exContours = executor.submit(self.findContours, mask)
                contours, hierarchy = exContours.result()
            
            # 3. Take the largest contour and compute its convexity defects
            try:
                contour = max(contours, key=cv2.contourArea)
                
                x,y,w,h = cv2.boundingRect(contour)
                cv2.rectangle(crop_image,(x,y),(x+w,y+h),(0,0,255),0)
                
                hull = cv2.convexHull(contour)
                
                drawing = np.zeros(crop_image.shape,np.uint8)
                cv2.drawContours(drawing,[contour],-1,(0,255,0),0)
                cv2.drawContours(drawing,[hull],-1,(0,0,255),0)
                
                hull = cv2.convexHull(contour, returnPoints=False)
                defects = cv2.convexityDefects(contour,hull)
                
                count_defects = 0
            
                for i in range(defects.shape[0]):
                    s,e,f,d = defects[i,0]
                    start = tuple(contour[s][0])
                    end = tuple(contour[e][0])
                    far = tuple(contour[f][0])

                    # 4. Compute the angle at the defect point (see the sketch after this example)
                    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                        exAngle = executor.submit(self.findAngle, start, end, far)
                        angle = exAngle.result()

                    if angle <= 90:
                        count_defects += 1
                        cv2.circle(crop_image,far,1,[0,0,255],-1)

                    cv2.line(crop_image,start,end,[0,255,0],2)

                if count_defects >= 4:
                    pyautogui.press('space')
                    cv2.putText(frame,"JUMP", (450,110), cv2.FONT_HERSHEY_SIMPLEX, 2, 2, 2)

            except Exception:
                # no usable hand contour in this frame; skip gesture detection
                pass

            video_shower.frame = frame

            if cv2.waitKey(1) == ord('q'):
                capture.release()
                cv2.destroyAllWindows()
                break 
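`self.findAngle(start, end, far)` computes the angle at the defect point `far` between the `start` and `end` contour points; counting defects with angles under 90 degrees is the usual convexity-defect finger-counting recipe. A sketch using the law of cosines:

import math


def findAngle(start, end, far):
    # Side lengths of the start-far-end triangle.
    a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
    b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
    c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
    # Law of cosines: angle at `far`, in degrees.
    return math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 180 / math.pi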
Example #13
import numpy as np
import rpyc
import cv2
from VideoShow import VideoShow

conn = rpyc.connect("localhost", 18861)

y = conn.root.threadBoth(0)

print(len(y))
nparr = np.frombuffer(y[0], np.uint8)  # np.fromstring is deprecated for binary data
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

video_shower = VideoShow(img).start()

for i in y:
    nparr = np.frombuffer(i, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    video_shower.frame = img
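The server side of this rpyc client is not shown anywhere on this page. A service whose `exposed_threadBoth` returns a list of JPEG-encoded frames, matching what the client decodes, might look like this sketch (the batch size and source handling are assumptions):

import cv2
import rpyc
from rpyc.utils.server import ThreadedServer


class VideoService(rpyc.Service):
    def exposed_threadBoth(self, source=0):
        """Grab a batch of frames and return them JPEG-encoded (sketch)."""
        cap = cv2.VideoCapture(source)
        frames = []
        for _ in range(100):  # arbitrary batch size for the sketch
            grabbed, frame = cap.read()
            if not grabbed:
                break
            ok, buf = cv2.imencode(".jpg", frame)
            if ok:
                frames.append(buf.tobytes())
        cap.release()
        return frames


if __name__ == "__main__":
    ThreadedServer(VideoService, port=18861).start()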
Example #15
class RecognitionThread:
    def __init__(self, camera=0, video_show=False, video_streaming=False, video_rec=False):
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "config/configuration.ini")
        self.config = SafeConfigParser()
        self.config.read(path)
        self.running = True
        try:
            self.video_getter = VideoGet(self.config.getint('video_get','camera_address')).start()
        except CameraError:
            raise  # re-raise the original CameraError with its traceback
        self.locked=0
        self.err_x_pix=0
        self.err_y_pix=0
        self.err_x_m=0
        self.err_y_m=0
        self.dist=0
        self.psi_err=0
        self.theta_err=0

        f = self.config.getfloat('recognition_thread','focal_length')  # focal length [m]
        w = self.config.getfloat('recognition_thread','sensor_width')  # sensor width [m]
        h = self.config.getfloat('recognition_thread','sensor_height')  # sensor height [m]

        self.resw = self.config.getfloat('video_get','width')  # width in pixels of the frame to analyze
        self.resh = self.config.getfloat('video_get','height')  # height in pixels of the frame to analyze

        self.KNOWN_DISTANCE = self.config.getfloat('recognition_thread','known_distance')  # [m] measured
        self.KNOWN_RADIUS = self.config.getfloat('recognition_thread','known_radius')  # [m] measured
        self.KNOWN_W = w / f * self.KNOWN_DISTANCE  # Horizontal Field of View - calculated - if possible use measurement
        self.KNOWN_H = h / f * self.KNOWN_DISTANCE  # Vertical Field of View - calculated - if possible use measurement
        self.radius_cal = self.resw / self.KNOWN_W * self.KNOWN_RADIUS  # pixels relative to the resized frame width - calculated without calibration via immagine_calibrazione.jpg

        self.lowerBoundList = [ self.config.getint('recognition_thread','lowerH'), self.config.getint('recognition_thread','lowerS'), self.config.getint('recognition_thread','lowerV')]
        self.upperBoundList = [self.config.getint('recognition_thread','upperH'), self.config.getint('recognition_thread','upperS'), self.config.getint('recognition_thread','upperV')]
        self.memory_frame = memory_class.Memory_Frame()
        self.reference_contour = contours_filter_library.reference_contour(self.config.get('recognition_thread','sagoma_path'))

        activation_flag=0
        if video_show:
            activation_flag = 1
        if video_streaming:
            activation_flag += 2
        if video_rec:
            activation_flag += 4

        if activation_flag == 0:
            self.start_recognize()
        elif activation_flag == 1:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.start_showing()
        elif activation_flag == 2:
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.start_recognize_and_stream()
        elif activation_flag == 3:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.start_recognize_and_stream_and_show()
        elif activation_flag == 4:
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_rec()
        elif activation_flag == 5:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.video_rec = VideoWriterThreaded()
            self.start_showing_and_rec()
        elif activation_flag == 6:
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_stream_and_rec()
        elif activation_flag == 7:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_stream_and_show_and_rec()


    def calc_and_print_err(self, frame, x, y, radius, cframeX, cframeY, wframe, hframe):
        dist = self.KNOWN_DISTANCE * self.radius_cal / radius
        # draw the circle that encloses the balloon
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 1)
        err_x_pix = x - cframeX  # pixel
        err_y_pix = y - cframeY  # pixel

        W = self.KNOWN_W / self.KNOWN_DISTANCE * dist  # [m]
        H = self.KNOWN_H / self.KNOWN_DISTANCE * dist  # [m]
        risolW = W / wframe  # [m/pixel]
        risolH = H / hframe  # [m/pixel]
        err_x_m = risolW * err_x_pix  # [m]

        err_y_m = risolH * err_y_pix  # [m]
        psi_err = 180 / math.pi * math.atan(err_x_m / dist)  # [deg]
        theta_err = 180 / math.pi * math.atan(err_y_m / dist)  # [deg]
        cv2.putText(frame, "White_balloon", (int(x) - 100, int(y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), 2)
        cv2.putText(frame, "dist=%.2fm" % (dist), (0, cframeY + 260), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "radius=%.2fm" % (radius * risolH), (0, cframeY + 230), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0), 2)
        cv2.putText(frame, "radius_pix=%.2fpix" % (radius), (0, cframeY + 200), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0),
                    2)
        cv2.putText(frame, "psi=%.2fdeg" % (psi_err), (0, cframeY + 170), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        cv2.putText(frame, "theta=%.2fdeg" % (theta_err), (0, cframeY + 140), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0), 2)
        cv2.putText(frame, "err_y=%.2fm" % (err_y_m), (0, cframeY + 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        cv2.putText(frame, "err_x=%.2fm" % (err_x_m), (0, cframeY + 80), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        return err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err

    # recognition function: returns the frame-center coordinates and, when the balloon is detected, the target's coordinates and radius
    def acquire_and_process_image(self, frame):

        #global KNOWN_DISTANCE, KNOWN_RADIUS, KNOWN_W, KNOWN_H, radius_cal, resw, resh

        kernelOpen = np.ones((5, 5))
        kernelClose = np.ones((49, 49))
        lowerBound = np.array(self.lowerBoundList)
        upperBound = np.array(self.upperBoundList)

        # target lock flag
        locked = 0

        hframe, wframe, channels = frame.shape
        # frame-center coordinates in pixels
        cframeX = int(wframe / 2)
        cframeY = int(hframe / 2)
        frame_center = (cframeX, cframeY)
        # draw the frame center
        cv2.circle(frame, frame_center, 2, (0, 255, 0), 1)
        # Gaussian blur
        blurred = cv2.GaussianBlur(frame, (7, 7), 0)
        # convert to HSV (note: the code uses COLOR_RGB2HSV although OpenCV frames are BGR)
        frameHSV = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)
        # create the Mask
        mask = cv2.inRange(frameHSV, lowerBound, upperBound)
        # morphology
        maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
        maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)

        maskFinal = maskClose

        contours, hierarchy = cv2.findContours(maskFinal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        cv2.drawContours(frame, contours, -1, (255, 0, 0), 2)

        contours = contours_filter_library.contours_area_shape_filter(frame, contours, 350, 0.25, 0.95)

        cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)

        if len(contours) != 0:

            contours_circles = contours_filter_library.contours_matchShape_nearest_balloon(contours, self.reference_contour)

            if contours_circles:

                target = contours_circles
                x_box, y_box, w_box, h_box = cv2.boundingRect(target)

                cv2.rectangle(frame, (x_box, y_box), (x_box + w_box, y_box + h_box), (0, 0, 255), 2)

                radius = max(w_box, h_box) / 2
                x = x_box + w_box / 2
                y = y_box + h_box / 2

                memory_ballon = self.memory_frame.ballon_insert(x, y, radius)

                if memory_ballon:
                    x = memory_ballon[0]
                    y = memory_ballon[1]
                    radius = memory_ballon[2]

                    err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = \
                        self.calc_and_print_err(frame, x, y, radius, cframeX, cframeY, wframe, hframe)

                    locked = 1
                    return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err
            else:
                memory_ballon = self.memory_frame.last_balloon()
                if memory_ballon:
                    err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = \
                        self.calc_and_print_err(frame, memory_ballon[0], memory_ballon[1], memory_ballon[2],
                                                cframeX, cframeY, wframe, hframe)

                    locked = 1
                    return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err
        else:
            memory_ballon = self.memory_frame.last_balloon()
            if memory_ballon:
                err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = \
                    self.calc_and_print_err(frame, memory_ballon[0], memory_ballon[1], memory_ballon[2],
                                            cframeX, cframeY, wframe, hframe)
                locked = 1
                return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err

        return frame, locked, [], [], [], [], [], [], []

    def ballonchecker(self):
        return self.locked,self.err_x_pix,self.err_y_pix, self.err_x_m, self.err_y_m, self.dist, self.psi_err, \
               self.theta_err

    def start_showing(self):
        prctl.set_name("RecognitionTread")
        # Create another thread to show/save frames
        def start_recognize_and_show_thread():
            prctl.set_name("Interno")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_show_thread,name='recognitionandshowThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_thread():
            prctl.set_name("RecognizeTreadIN")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_thread():
            prctl.set_name("Recognizestream")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_show(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_and_show_thread():
            prctl.set_name("RecognizestreamShow")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err = self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_and_show_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_showing_and_rec(self):
        prctl.set_name("RecognitionTread")
        # Create another thread to show/save frames
        def start_recognize_and_show_thread():
            prctl.set_name("Interno")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_show_thread,name='recognitionandshowThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_thread():
            prctl.set_name("RecognizeTreadIN")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_thread():
            prctl.set_name("Recognizestream")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_show_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_and_show_thread():
            prctl.set_name("RecognizestreamShow")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err = self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_and_show_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()


    def stop(self):
        self.video_getter.stop()
        try:
            self.video_shower.stop()
        except AttributeError:
            pass
        try:
            self.up.close()
        except AttributeError:
            pass
        try:
            self.video_rec.stop()
        except AttributeError:
            pass
        self.running = False

    def stop_recorder_and_start_new(self):
        try:
            old_video_rec = self.video_rec
            self.video_rec = VideoWriterThreaded()
            old_video_rec.stop()
        except AttributeError:
            pass

    def start_recorder(self):
        try:
            self.video_rec.start_recording()
        except AttributeError:
            pass
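A design note on `__init__` above: `activation_flag` encodes show/stream/rec as bits 1, 2, and 4, and then every one of the eight combinations is spelled out by hand. Since the `start_*` worker bodies differ only in which outputs they feed, the composition could instead be done incrementally from the three independent flags; a sketch, using the same classes as above:

# Sketch: build the optional outputs from the three flags instead of
# enumerating all eight activation_flag combinations.
def _build_outputs(self, video_show, video_streaming, video_rec):
    if video_show:
        self.video_shower = VideoShow(self.video_getter.frame).start()
    if video_streaming:
        self.up = Upstreamer("upstreaming thread",
                             self.config.get('recognition_thread', 'server_address'),
                             self.config.getint('recognition_thread', 'server_upstreaming_port'),
                             False)
        self.up.start()
    if video_rec:
        self.video_rec = VideoWriterThreaded()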
Example #16
def thread_video(input):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Main thread shows video frames.
	"""

    video_getter = VideoGet(input).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        rgb_frame = frame[:, :, ::-1]

        # Find all the faces and face encodings in the current frame of video
        face_location = face_recognition.face_locations(rgb_frame)
        if len(face_location) == 0:
            # no face in this frame: face_location[0] below would raise
            # IndexError, so show the raw frame and move on
            video_shower.frame = frame
            fps.update()
            continue
        elif len(face_location) > 1:
            pass
        else:
            unknown_face_encoding = face_recognition.face_encodings(
                rgb_frame, face_location)[0]
            index = utils.recognize_face(unknown_face_encoding,
                                         known_faces_encoding)
            name = known_names[index]
            cv2.putText(frame, name, (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

        top, right, bottom, left = face_location[0]
        face_height = bottom - top

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255))

        # Detect the mouth region and check whether it is open
        (x, y, w,
         h) = mouth_detection.mouth_detection_video(frame, detector, predictor)

        if h < 0.2 * face_height:
            cv2.putText(frame, "close", (30, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 0, 255), 2)
        else:
            cv2.putText(frame, "open", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            d = int(0.35 * h)
            roi = frame[y + d:y + h, x:x + w]
            #cv2.rectangle(frame, (x, y + int(0.2*h)), (x+w, y+h), (0, 255, 0), 2)
            (px, py, pw, ph) = utils.color_detection(roi)
            if pw != 0:
                cv2.rectangle(frame, (x + px, y + d + py),
                              (x + px + pw, y + d + py + ph), (0, 255, 0), 2)
            else:
                cv2.putText(frame, "no pill detected", (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        video_shower.frame = frame
        fps.update()
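`known_faces_encoding`, `known_names`, `utils.recognize_face`, `fps`, and the dlib `detector`/`predictor` are set up outside this function. Building the gallery and a nearest-match helper with the face_recognition API could look like this sketch (the image paths and names are hypothetical):

import face_recognition
import numpy as np

# Hypothetical reference gallery; one encoding per known person.
known_names = ["alice", "bob"]
known_faces_encoding = [
    face_recognition.face_encodings(face_recognition.load_image_file(path))[0]
    for path in ["alice.jpg", "bob.jpg"]
]


def recognize_face(unknown_encoding, known_encodings):
    # Index of the known face with the smallest embedding distance.
    distances = face_recognition.face_distance(known_encodings, unknown_encoding)
    return int(np.argmin(distances))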