Example #1
#me.connect('person_rasp', host='mongodb://grupo14.duckdns.org:1226/Rasp')
#me.connect(host='mongodb://192.168.0.25:27017/Rasp', replicaset='rsdiseno')
#me.connect('Rasp')
# Connect to pymongo
# mongo_client_Rasp = MongoClient('mongodb://grupo14.duckdns.org:1226')
# db_Rasp = mongo_client_Rasp["Test1"]
# col_Rasp = db_Rasp["person"]
# print(f"[INFO] Conecting to DB Rasp with:{col_Rasp.read_preference}...")

time.sleep(1.0)

# initialize the video stream, then allow the camera sensor to warm up

## Start processes

video_getter = VideoGet(src=0, name='Video Getter')

#workers = [FrameProcessing(i, q_video, q_frame, detector, embedder, recognizer, le, fa) for i in range(1, 3)]

video_getter.start()
time.sleep(2.0)
#for item in workers:
#    item.start()

# print('[INFO] Starting VideoGet...')
time.sleep(3.0)

## Set ffmpeg instance
pathIn = './SavedImages/13/'
## TODO: check whether there is a more compressed format
pathOut = 'video_v1.avi'
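
None of the examples on this page includes the VideoGet class itself, and the call sites disagree (some read .frame directly, others call .read() or .update()). A minimal sketch of the threaded grabber the snippets assume, based on the common stop-flag pattern (an assumption, not the original implementation):

from threading import Thread
import cv2

class VideoGet:
    """Minimal threaded frame grabber (a sketch of the common pattern;
    the real VideoGet classes behind these examples are not shown)."""
    def __init__(self, src=0, name='Video Getter'):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.name = name
        self.stopped = False

    def start(self):
        Thread(target=self.get, name=self.name, daemon=True).start()
        return self

    def get(self):
        # Keep grabbing frames until stop() is called or the stream ends.
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
            if not self.grabbed:
                self.stopped = True

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()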
Example #2
net = cv2.dnn.readNet("yolov3_training_4000.weights", "yolov3_testing.cfg")
layer_names = net.getLayerNames()
# On OpenCV >= 4.5.4 getUnconnectedOutLayers() returns a flat array,
# so use layer_names[i - 1] there instead of layer_names[i[0] - 1].
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]

classes = ["not_whisteling", "whisteling"]

colors = np.random.uniform(0, 255, size=(len(classes), 3))

number_of_whistles = setup()

whistle_frame = 0
nowhistle_frame = 0
total_whistles = 0

video_getter = VideoGet().start()
img = video_getter.read()

height, width, channels = img.shape
font = cv2.FONT_HERSHEY_DUPLEX

while True:

    # Detecting objects

    blob = cv2.dnn.blobFromImage(img,
                                 0.00392, (416, 416), (0, 0, 0),
                                 True,
                                 crop=False)

    net.setInput(blob)
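
The snippet stops right after net.setInput(blob). A sketch of the usual continuation, assuming standard YOLOv3 output decoding (not the original code):

outs = net.forward(output_layers)
boxes, confidences, class_ids = [], [], []
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = int(np.argmax(scores))
        confidence = float(scores[class_id])
        if confidence > 0.5:
            # Detections are center-relative; convert to top-left pixel boxes.
            cx, cy = int(detection[0] * width), int(detection[1] * height)
            w, h = int(detection[2] * width), int(detection[3] * height)
            boxes.append([cx - w // 2, cy - h // 2, w, h])
            confidences.append(confidence)
            class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)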
Example #3
    def startDetecting(self):
        
        capture = VideoGet(0).start()
        video_shower = VideoShow(capture.frame).start()   

        while capture.isOpened():
            if capture.stopped or video_shower.stopped:
                video_shower.stop()
                capture.stop()
                break
            # Grab a frame from the camera
            frame = capture.frame
            
            # Take the hand region from the rectangular sub-window
            cv2.rectangle(frame,(100,100),(300,300),(0,255,0),0)
            crop_image = frame[100:300, 100:300]
            
            #1.
            # Apply Gaussian blur
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exMask = executor.submit(self.maskHSV, crop_image)
                mask = exMask.result()

            #2.
            # Find the contours
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exContours = executor.submit(self.findContours, mask)
                contours, hierarchy = exContours.result()
            
            #3.
            try:
                contour = max(contours, key=cv2.contourArea)
                
                x,y,w,h = cv2.boundingRect(contour)
                cv2.rectangle(crop_image,(x,y),(x+w,y+h),(0,0,255),0)
                
                hull = cv2.convexHull(contour)
                
                drawing = np.zeros(crop_image.shape,np.uint8)
                cv2.drawContours(drawing,[contour],-1,(0,255,0),0)
                cv2.drawContours(drawing,[hull],-1,(0,0,255),0)
                
                hull = cv2.convexHull(contour, returnPoints=False)
                defects = cv2.convexityDefects(contour,hull)
                
                count_defects = 0
            
                for i in range(defects.shape[0]):
                    s,e,f,d = defects[i,0]
                    start = tuple(contour[s][0])
                    end = tuple(contour[e][0])
                    far = tuple(contour[f][0])

                    # 4.
                    #angle = 360;
                    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                        exAngle = executor.submit(self.findAngle, start, end, far)
                        angle = exAngle.result()

                    if angle <= 90:
                        count_defects += 1
                        cv2.circle(crop_image,far,1,[0,0,255],-1)

                    cv2.line(crop_image,start,end,[0,255,0],2)

                if count_defects >= 4:
                    pyautogui.press('space')
                    cv2.putText(frame,"JUMP", (450,110), cv2.FONT_HERSHEY_SIMPLEX, 2, 2, 2)

            except (ValueError, AttributeError, cv2.error):
                # max() fails when no contours are found and convexityDefects
                # can return None; skip the gesture logic for this frame.
                pass

            video_shower.frame = frame

            if cv2.waitKey(1) == ord('q'):
                capture.release()
                cv2.destroyAllWindows()
                break 
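
The maskHSV, findContours and findAngle helpers are methods of the surrounding class and are not shown. The angle test is most likely the law of cosines at the defect's far point; a sketch under that assumption:

import math

def findAngle(self, start, end, far):
    # Sides of the triangle around the convexity defect.
    b = math.hypot(far[0] - start[0], far[1] - start[1])
    c = math.hypot(far[0] - end[0], far[1] - end[1])
    a = math.hypot(start[0] - end[0], start[1] - end[1])
    # Law of cosines: angle at 'far' in degrees; <= 90 suggests a finger gap.
    return math.degrees(math.acos((b**2 + c**2 - a**2) / (2 * b * c)))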
Example #4
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    # Facenet-based face recognizer
    detector = Recognizer('yolov2')

    # Flag to choose which model to run
    face_flag = True
    yolosort = False

    writeVideo_flag = False

    # Open stream
    #video_capture = VideoGet("http://192.168.4.106:8080/video").start()
    video_capture = VideoGet("rtsp://192.168.100.1/encavc0-stream").start()

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    frame_no = 0

    while True:
        ret, frame = video_capture.update()
        frame_no += 1
        #print('frame no.', frame_no)
        if not ret:
            break
        t1 = time.time()
        #frame = cv2.flip(frame, 1)
        #frame = cv2.resize(frame, (640, 360))

        # Face recognizer
        if face_flag:
            #pass
            detector.recognize(frame)

        # Body recognizer
        if yolosort:
            image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
            boxs = yolo.detect_image(image)
            features = encoder(frame, boxs)

            # (each detection's confidence score is set to 1.0 here).
            detections = [
                Detection(bbox, 1.0, feature)
                for bbox, feature in zip(boxs, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                            (0, 255, 0), 2)

            for det in detections:
                bbox = det.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

        # Scalable window
        cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)
        cv2.imshow('Camera', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            # boxs is only defined when the yolosort branch ran this frame
            if yolosort and len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Keypress action
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
        if k == ord('t'):
            face_flag = not face_flag
            yolosort = not yolosort

    # Exiting
    video_capture.stop()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
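
Unlike the other snippets, this one calls video_capture.update() for (ret, frame) pairs and video_capture.get(3)/get(4) for the frame size. A thin wrapper consistent with that interface (an assumption; the original class is not shown):

import cv2

class VideoGet:
    """Hypothetical wrapper matching the API this example assumes: update()
    returns (ret, frame) like cv2.VideoCapture.read(), and get() proxies
    capture properties such as width (3) and height (4)."""
    def __init__(self, src):
        self.cap = cv2.VideoCapture(src)

    def start(self):
        return self  # kept for interface parity with the threaded getters

    def update(self):
        return self.cap.read()

    def get(self, prop):
        return self.cap.get(prop)

    def stop(self):
        self.cap.release()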
Example #5
            video_getter.e.set()  # comment out to run without waiting on the event
            frame = video_getter.read()
            frame = cv2.resize(frame, (768, 432))

            ret, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

            # time.sleep(0.035)

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    app.run(host='127.0.0.1', port=61)


if __name__ == '__main__':
    e = threading.Event()  # comment out to run without waiting on the event
    source = 'sunset.mp4'
    cap = cv2.VideoCapture(source)

    if not cap.isOpened():
        print("Error opening video stream or file")

    video_getter = VideoGet(source, e).start()
    # video_getter = VideoGet(source).start()  # variant without the event wait

    app = create_app()
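
Here VideoGet takes a threading.Event, and the consumer calls video_getter.e.set() before each read. A sketch of a getter that only grabs when signalled, assuming a constructor of the form VideoGet(source, e) (not the original class):

import threading
from threading import Thread
import cv2

class VideoGet:
    """Sketch of an Event-gated grabber (assumption): the get loop waits
    until the consumer sets the event, then grabs one frame."""
    def __init__(self, src, event=None):
        self.stream = cv2.VideoCapture(src)
        self.e = event if event is not None else threading.Event()
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        Thread(target=self.get, daemon=True).start()
        return self

    def get(self):
        while not self.stopped:
            self.e.wait()   # block until the consumer requests a frame
            self.e.clear()
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.e.set()        # unblock the get loop so it can exit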
Example #6
    (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))

    return maxVal, t, startX, startY, endX, endY


if __name__ == "__main__":

    # read templates from folder
    templates = glob.glob("templates/*.png")

    templates_gray = [cv2.imread(template, 0) for template in templates]
    names = [(name.split("/")[-1]).split(".")[0] for name in templates]

    fW = 640
    fH = 480
    video_getter = VideoGet(0, fW, fH).start()
    # video_shower = VideoShow(video_getter.frame).start()

    cv2.namedWindow("MyVideo", cv2.WINDOW_AUTOSIZE)
    fgbg = cv2.createBackgroundSubtractorKNN()

    fourcc = cv2.VideoWriter_fourcc(*'MPEG')
    out = cv2.VideoWriter('myOutput.avi', fourcc, 9.0, (fW * 3 // 2, fH))

    while True:
        start = int(round(time.time() * 1000))

        frame = video_getter.frame
        # video_shower.frame = frame
        fgmask = fgbg.apply(frame)
        if DEBUG:
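
Example #6 is cut off inside the while loop, and only the tail of its matching helper survives at the top (the return of maxVal, t, startX, startY, endX, endY). That tuple matches the classic multi-scale template-matching pattern; a sketch of the full helper under that assumption:

import cv2
import imutils
import numpy as np

def match_template(gray, template, t):
    # Search over scales, keep the best correlation, then map the match
    # location back to the original frame (sketch, not the original code).
    tH, tW = template.shape[:2]
    found = None
    for scale in np.linspace(0.2, 1.0, 20)[::-1]:
        resized = imutils.resize(gray, width=int(gray.shape[1] * scale))
        r = gray.shape[1] / float(resized.shape[1])
        if resized.shape[0] < tH or resized.shape[1] < tW:
            break
        result = cv2.matchTemplate(resized, template, cv2.TM_CCOEFF)
        _, maxVal, _, maxLoc = cv2.minMaxLoc(result)
        if found is None or maxVal > found[0]:
            found = (maxVal, maxLoc, r)
    maxVal, maxLoc, r = found
    (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
    (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
    return maxVal, t, startX, startY, endX, endY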
Example #7
from utils.joint_preprocess import *
import sys


import os, signal
pid = os.getpid()
print("\n")
print("PID : ", pid)
print("\n")

cv2.namedWindow("Behavior_detection", cv2.WINDOW_NORMAL)
# select the appropriate source
source = 0

# Start the dedicated thread for video grabbing.
video_getter = VideoGet(source).start()
#cap = cv2.VideoCapture("trimmed_assault.mp4")

global poseEstimator
poseEstimator = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))

while True:
    frame = video_getter.frame
    #ret, frame = cap.read()
    frame = cv2.resize(frame, (settings.winWidth, settings.winHeight))
    humans = poseEstimator.inference(frame)
    frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False) # Humans are plotted on the frame.
    cv2.imshow("Behavior_detection", frame)
        
    if cv2.waitKey(10) == ord('q'):
        video_getter.stop()
        cv2.destroyAllWindows()
        break
Example #9
class RecognitionThread:
    def __init__(self, camera=0, video_show=False, video_streaming=False, video_rec=False):
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "config/configuration.ini")
        self.config = SafeConfigParser()
        self.config.read(path)
        self.running = True
        try:
            self.video_getter = VideoGet(self.config.getint('video_get','camera_address')).start()
        except CameraError:
            raise CameraError
        self.locked=0
        self.err_x_pix=0
        self.err_y_pix=0
        self.err_x_m=0
        self.err_y_m=0
        self.dist=0
        self.psi_err=0
        self.theta_err=0

        f = self.config.getfloat('recognition_thread','focal_length')  # focal length [m]
        w = self.config.getfloat('recognition_thread','sensor_width')  # sensor width [m]
        h = self.config.getfloat('recognition_thread','sensor_height')  # sensor height [m]

        self.resw = self.config.getfloat('video_get','width')  # width in pixels of the frame to analyze
        self.resh = self.config.getfloat('video_get','height')  # height in pixels of the frame to analyze

        self.KNOWN_DISTANCE = self.config.getfloat('recognition_thread','known_distance')  # [m] measured
        self.KNOWN_RADIUS = self.config.getfloat('recognition_thread','known_radius')  # [m] measured
        self.KNOWN_W = w / f * self.KNOWN_DISTANCE  # Horizontal Field of View - calculated - if possible use measurement
        self.KNOWN_H = h / f * self.KNOWN_DISTANCE  # Vertical Field of View - calculated - if possible use measurement
        self.radius_cal = self.resw / self.KNOWN_W * self.KNOWN_RADIUS  # pixel according to resizedframe width - calculated without calibration through immagine_calibrazione.jpg

        self.lowerBoundList = [ self.config.getint('recognition_thread','lowerH'), self.config.getint('recognition_thread','lowerS'), self.config.getint('recognition_thread','lowerV')]
        self.upperBoundList = [self.config.getint('recognition_thread','upperH'), self.config.getint('recognition_thread','upperS'), self.config.getint('recognition_thread','upperV')]
        self.memory_frame = memory_class.Memory_Frame()
        self.reference_contour = contours_filter_library.reference_contour(self.config.get('recognition_thread','sagoma_path'))

        activation_flag=0
        if video_show:
            activation_flag = 1
        if video_streaming:
            activation_flag += 2
        if video_rec:
            activation_flag += 4

        if activation_flag == 0:
            self.start_recognize()
        elif activation_flag == 1:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.start_showing()
        elif activation_flag == 2:
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.start_recognize_and_stream()
        elif activation_flag == 3:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.start_recognize_and_stream_and_show()
        elif activation_flag == 4:
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_rec()
        elif activation_flag == 5:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.video_rec = VideoWriterThreaded()
            self.start_showing_and_rec()
        elif activation_flag == 6:
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_stream_and_rec()
        elif activation_flag == 7:
            self.video_shower = VideoShow(self.video_getter.frame).start()
            self.up = Upstreamer("upstreaming thread", self.config.get('recognition_thread','server_address'), self.config.getint('recognition_thread','server_upstreaming_port'), False)
            self.up.start()
            self.video_rec = VideoWriterThreaded()
            self.start_recognize_and_stream_and_show_and_rec()


    def calc_and_print_err(self, frame, x, y, radius, cframeX, cframeY, wframe, hframe):
        dist = self.KNOWN_DISTANCE * self.radius_cal / radius
        # draw the circle enclosing the balloon
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 1)
        err_x_pix = x - cframeX  # pixel
        err_y_pix = y - cframeY  # pixel

        W = self.KNOWN_W / self.KNOWN_DISTANCE * dist  # [m]
        H = self.KNOWN_H / self.KNOWN_DISTANCE * dist  # [m]
        risolW = W / wframe  # [m/pixel]
        risolH = H / hframe  # [m/pixel]
        err_x_m = risolW * err_x_pix  # [m]

        err_y_m = risolH * err_y_pix  # [m]
        psi_err = 180 / math.pi * math.atan(err_x_m / dist)  # [deg]
        theta_err = 180 / math.pi * math.atan(err_y_m / dist)  # [deg]
        cv2.putText(frame, "White_balloon", (int(x) - 100, int(y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), 2)
        cv2.putText(frame, "dist=%.2fm" % (dist), (0, cframeY + 260), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "radius=%.2fm" % (radius * risolH), (0, cframeY + 230), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0), 2)
        cv2.putText(frame, "radius_pix=%.2fpix" % (radius), (0, cframeY + 200), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0),
                    2)
        cv2.putText(frame, "psi=%.2fdeg" % (psi_err), (0, cframeY + 170), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        cv2.putText(frame, "theta=%.2fdeg" % (theta_err), (0, cframeY + 140), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (0, 255, 0), 2)
        cv2.putText(frame, "err_y=%.2fm" % (err_y_m), (0, cframeY + 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        cv2.putText(frame, "err_x=%.2fm" % (err_x_m), (0, cframeY + 80), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
        return err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err

    # recognition function: returns the frame-center coordinates and, when the balloon is detected, the target's coordinates and radius
    def acquire_and_process_image(self, frame):

        #global KNOWN_DISTANCE, KNOWN_RADIUS, KNOWN_W, KNOWN_H, radius_cal, resw, resh

        kernelOpen = np.ones((5, 5))
        kernelClose = np.ones((49, 49))
        lowerBound = np.array(self.lowerBoundList)
        upperBound = np.array(self.upperBoundList)

        # target-lock flag
        locked = 0

        hframe, wframe, channels = frame.shape
        # frame-center coordinates in pixels
        cframeX = int(wframe / 2)
        cframeY = int(hframe / 2)
        frame_center = (cframeX, cframeY)
        # draw the frame center
        cv2.circle(frame, frame_center, 2, (0, 255, 0), 1)
        # Gaussian blur
        blurred = cv2.GaussianBlur(frame, (7, 7), 0)
        # convert to HSV (note: the frame is BGR but COLOR_RGB2HSV is used,
        # so red and blue are swapped; the HSV bounds in the config
        # presumably account for this)
        frameHSV = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)
        # create the Mask
        mask = cv2.inRange(frameHSV, lowerBound, upperBound)
        # morphology
        maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
        maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)

        maskFinal = maskClose

        contours, hierarchy = cv2.findContours(maskFinal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        cv2.drawContours(frame, contours, -1, (255, 0, 0), 2)

        contours = contours_filter_library.contours_area_shape_filter(frame, contours, 350, 0.25, 0.95)

        cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)

        if len(contours) != 0:

            contours_circles = contours_filter_library.contours_matchShape_nearest_balloon(contours, self.reference_contour)

            if contours_circles:

                target = contours_circles
                x_box, y_box, w_box, h_box = cv2.boundingRect(target)

                cv2.rectangle(frame, (x_box, y_box), (x_box + w_box, y_box + h_box), (0, 0, 255), 2)

                radius = max(w_box, h_box) / 2
                x = x_box + w_box / 2
                y = y_box + h_box / 2

                memory_ballon = self.memory_frame.ballon_insert(x, y, radius)

                if memory_ballon:
                    x = memory_ballon[0]
                    y = memory_ballon[1]
                    radius = memory_ballon[2]

                    err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = self.calc_and_print_err(frame, x, y,
                                                                                                          radius,
                                                                                                          cframeX,
                                                                                                          cframeY,
                                                                                                          wframe,
                                                                                                          hframe)

                    locked = 1
                    return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err
            else:
                memory_ballon = self.memory_frame.last_balloon()
                if memory_ballon:
                    err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = self.calc_and_print_err(frame,
                                                                                                          memory_ballon[
                                                                                                              0],
                                                                                                          memory_ballon[
                                                                                                              1],
                                                                                                          memory_ballon[
                                                                                                              2],
                                                                                                          cframeX,
                                                                                                          cframeY,
                                                                                                          wframe,
                                                                                                          hframe)

                    locked = 1
                    return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err
        else:
            memory_ballon = self.memory_frame.last_balloon()
            if memory_ballon:
                err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err = self.calc_and_print_err(frame,
                                                                                                      memory_ballon[0],
                                                                                                      memory_ballon[1],
                                                                                                      memory_ballon[2],
                                                                                                      cframeX,
                                                                                                      cframeY, wframe,
                                                                                                      hframe)
                locked = 1
                return frame, locked, err_x_pix, err_y_pix, err_x_m, err_y_m, dist, psi_err, theta_err

        return frame, locked, [], [], [], [], [], [], []

    def ballonchecker(self):
        return self.locked,self.err_x_pix,self.err_y_pix, self.err_x_m, self.err_y_m, self.dist, self.psi_err, \
               self.theta_err

    def start_showing(self):
        prctl.set_name("RecognitionTread")
        # Create another thread to show/save frames
        def start_recognize_and_show_thread():
            prctl.set_name("Interno")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_show_thread,name='recognitionandshowThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_thread():
            prctl.set_name("RecognizeTreadIN")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_thread():
            prctl.set_name("Recognizestream")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_show(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_and_show_thread():
            prctl.set_name("RecognizestreamShow")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err = self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_and_show_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_showing_and_rec(self):
        prctl.set_name("RecognitionTread")
        # Create another thread to show/save frames
        def start_recognize_and_show_thread():
            prctl.set_name("Interno")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_show_thread,name='recognitionandshowThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_thread():
            prctl.set_name("RecognizeTreadIN")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_thread():
            prctl.set_name("Recognizestream")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err= self.acquire_and_process_image(frame)
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def start_recognize_and_stream_and_show_and_rec(self):
        prctl.set_name("RecognizeTread")
        # Create another thread to show/save frames
        def start_recognize_and_stream_and_show_thread():
            prctl.set_name("RecognizestreamShow")
            while self.running:
                try:
                    frame = self.video_getter.frame
                    frame_ret, self.locked, self.err_x_pix, self.err_y_pix, self.err_x_m, self.err_y_m, self.dist,\
                    self.psi_err, self.theta_err = self.acquire_and_process_image(frame)
                    self.video_shower.frame = frame_ret
                    self.up.stream_frame(cv2.resize(frame_ret,(self.config.getint('recognition_thread','resize_width_for_upstreaming'),self.config.getint('recognition_thread','resize_height_for_upstreaming'))))
                    self.video_rec.update_frame(frame_ret)
                except AttributeError:
                    pass
        self.recording_thread = threading.Thread(target=start_recognize_and_stream_and_show_thread,name='recognitionThread', args=())
        self.recording_thread.daemon = True
        self.recording_thread.start()


    def stop(self):
        self.video_getter.stop()
        try:
            self.video_shower.stop()
        except AttributeError:
            pass
        try:
            self.up.close()
        except AttributeError:
            pass
        try:
            self.video_rec.stop()
        except AttributeError:
            pass
        self.running = False

    def stop_recorder_and_start_new(self):
        try:
            old_video_rec = self.video_rec
            self.video_rec = VideoWriterThreaded()
            old_video_rec.stop()
        except AttributeError:
            pass

    def start_recorder(self):
        try:
            self.video_rec.start_recording()
        except AttributeError:
            pass
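
The eight-branch if/elif in __init__ enumerates every combination of show/stream/rec even though activation_flag is built as a bitmask (show=1, stream=2, rec=4). A hypothetical refactor, not part of the original class, that composes the setup from the bits instead:

SHOW, STREAM, REC = 1, 2, 4  # the weights used to build activation_flag

def setup_outputs(self, flag):
    # Hypothetical method: create each output sink from its bit instead of
    # enumerating all eight combinations.
    if flag & SHOW:
        self.video_shower = VideoShow(self.video_getter.frame).start()
    if flag & STREAM:
        self.up = Upstreamer(
            "upstreaming thread",
            self.config.get('recognition_thread', 'server_address'),
            self.config.getint('recognition_thread', 'server_upstreaming_port'),
            False)
        self.up.start()
    if flag & REC:
        self.video_rec = VideoWriterThreaded()
    # A single recognition loop can then feed whichever sinks exist,
    # collapsing the eight near-identical start_* methods as well.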
Example #10
def thread_video(input):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Main thread shows video frames.
	"""

    video_getter = VideoGet(input).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        rgb_frame = frame[:, :, ::-1]

        # Find all the faces and face encodings in the current frame of video
        face_location = face_recognition.face_locations(rgb_frame)
        if len(face_location) != 1:
            # Skip frames without exactly one face: face_location[0] below
            # assumes a single detection.
            continue
        else:
            unknown_face_encoding = face_recognition.face_encodings(
                rgb_frame, face_location)[0]
            index = utils.recognize_face(unknown_face_encoding,
                                         known_faces_encoding)
            name = known_names[index]
            cv2.putText(frame, name, (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

        top, right, bottom, left = face_location[0]
        face_height = bottom - top

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255))

        # Display the resulting frame
        #try:
        (x, y, w,
         h) = mouth_detection.mouth_detection_video(frame, detector, predictor)

        if h < 0.2 * face_height:
            cv2.putText(frame, "close", (30, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 0, 255), 2)
        else:
            cv2.putText(frame, "open", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            d = int(0.35 * h)
            roi = frame[y + d:y + h, x:x + w]
            #cv2.rectangle(frame, (x, y + int(0.2*h)), (x+w, y+h), (0, 255, 0), 2)
            (px, py, pw, ph) = utils.color_detection(roi)
            if pw != 0:
                cv2.rectangle(frame, (x + px, y + d + py),
                              (x + px + pw, y + d + py + ph), (0, 255, 0), 2)
            else:
                cv2.putText(frame, "no pill detected", (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        #except:
        #	pass
        video_shower.frame = frame
        fps.update()
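
utils.recognize_face is not shown; with the face_recognition library it is usually an argmin over encoding distances. A sketch under that assumption (hypothetical helper, not the original):

import numpy as np
import face_recognition

def recognize_face(unknown_encoding, known_encodings):
    # Index of the known face whose encoding is closest to the unknown one.
    distances = face_recognition.face_distance(known_encodings, unknown_encoding)
    return int(np.argmin(distances))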
Example #11
def threadVideoGet(source):
    #x,y,w,h = 0,0,175,75

    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Main thread shows video frames.
    """
    number_of_whistles = setup()
    print(number_of_whistles)
    nowhistle_frame = 0
    whistle_frame = 0
    total_whistles = 0
    classes = {0: 'No Whistle',
               1: 'whistle'}
    np.set_printoptions(suppress=True)

    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    video_getter = VideoGet(source).start()
    ret,frame = video_getter.read()
    height, width, nchannels = frame.shape

    while ret:
        # if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
        #     video_getter.stop()
        #     break

        size = (224, 224)
        image = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
        # image = ImageOps.fit(image, size, Image.ANTIALIAS)

        # Turn the image into a numpy array and scale it to roughly [-1, 1].
        image_array = np.asarray(image)
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
        data[0] = normalized_image_array
        #prediction = classes[google_model.predict_classes(data).item()]
        prediction = google_model.predict(data)
        probability = round(prediction[0][1]*100,2)

        if prediction[0][1] > 0.60:
            test = "I think this cooker is whistling with {}% Probability ".format(probability)
            whistle_frame = whistle_frame + 1

        else:
            nowhistleprobability = 100 - probability
            test = "I think this cooker is NOT whistling with {}% Probability ".format(nowhistleprobability)
            nowhistle_frame = nowhistle_frame + 1
            whistle_frame = 0
        if nowhistle_frame > 10:
            if whistle_frame > 15:
                total_whistles = total_whistles + 1
                if total_whistles < number_of_whistles:
                    say = str(total_whistles) + " whistles done"
                    print(say)
                    # save_audio(say)
                    Thread(target=save_audio, args=(say, whistle_frame)).start()
                    # Thread(target=play_audio, args=(local_ip, fname, cast)).start()
                    # pool.apply_async(play_audio, [local_ip, fname, cast], callback)
                    # loop.run_until_complete(play_audio(local_ip, fname, cast))

                nowhistle_frame = 0
                whistle_frame = 0
        text = " Whistle count: {}".format(total_whistles)
        if total_whistles >= number_of_whistles:
            say = str(total_whistles)+" whistles done.Turn off the gas"
            Thread(target=save_audio, args=(say,whistle_frame)).start()



            #Thread(target=play_audio, args=(local_ip,fname,cast)).start()
            #pool.apply_async(play_audio, [local_ip,fname,cast], callback)
            #loop.run_until_complete(play_audio(local_ip,fname,cast))

 


    # Capture frames in the video 
  
    # describe the type of font 
    # to be used. 
        font = cv2.FONT_HERSHEY_SIMPLEX 

     #Use putText() method for 
    #inserting text on video
        
        cv2.putText(frame,  
                    test,  
                    (50, 50),  
                    font, 1,  
                    (139,0,0),  
                    3,  
                    cv2.LINE_4) 
    
        
        cv2.putText(frame,  
                    text,  
                    (40, 100),  
                    font, 1.5,  
                    (139,0,0),  
                    3,  
                    cv2.LINE_4) 
    
     #Display the resulting frame 
        cv2.imshow('video', frame) 
    
        ret,frame = video_getter.read() 
    # creating 'q' as the quit  
    # button for the video 
        if cv2.waitKey(1) & 0xFF == ord('q'): 
            break
        
  
# release the cap object 
    
#out.release()
# close all windows 
    cv2.destroyAllWindows() #
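
Note that the preprocessing divides by 127.0, which maps a pixel value of 255 to about 1.008 rather than exactly 1.0. If the model was trained on inputs scaled to [-1, 1], as Teachable Machine-style exports are (an assumption about this pipeline), the conventional form is:

# Conventional [-1, 1] scaling (assumption about the training pipeline):
normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1.0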
Example #12
def main():
    # lcm
    lc = lcm.LCM()

    # Global Variables
    state = np.matrix('0.0;0.0;0.0;0.0')  # x, y, xd, yd,

    # P and Q matrices for EKF
    P = np.matrix('10.0,0.0,0.0,0.0; \
                0.0,10.0,0.0,0.0; \
                0.0,0.0,10.0,0.0; \
                0.0,0.0,0.0,10.0')

    Q = np.matrix('2.0,0.0,0.0,0.0; \
                0.0,2.0,0.0,0.0; \
                0.0,0.0,2.0,0.0; \
                0.0,0.0,0.0,2.0')

    measurement = np.matrix('0;0')
    np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)})

    # print basic info
    print('python ' + platform.python_version())
    print('opencv ' + cv2.__version__)
    print('numpy ' + np.version.version)

    # launch the getter thread
    print("Start sampling THREADED frames from webcam...")
    vs = VideoGet(src=0).start()

    stop_time = time.time() + TIME_SPAN
    start_time = time.time()

    prev_time = time.time()
    i = 0

    counter = 0

    while True:
        if time.time() > stop_time:
            break

        now_time = time.time()
        dt = now_time - prev_time

        i += 1
        # run the model once at least 1 ms has elapsed
        if dt > 0.001:
            prev_time = now_time

            state, P, J = run_EKF_model(state, P, Q, dt)

        # read camera
        # ret, frame = cap.read()
        frame = vs.read()
        ret = True
        print("frame: {}".format(i))
        if ret:

            # For initialization, process the whole image; otherwise use the predicted position
            isCircleFound = 1
            if i < 10:
                mask, cimg, (x, y, r) = recognize_center_without_EKF(frame)
            else:
                mask, cimg, (x, y, r), isCircleFound = recognize_center(
                    frame, state[0], state[1])

            if isCircleFound == 0:
                counter += 1
            else:
                counter = 0

            # if i == 5:
            #     break
            # if x==0:
            #     continue
            measurement[0] = x
            measurement[1] = y
            if (measurement[0] != 0) and (measurement[1] != 0):
                print("run EKF")
                state, P = run_EKF_measurement(state, measurement, P)
            else:
                print("no motion detected, continue")

            if counter > 5:
                msg = camera_pose_xyt_t()
                msg.x = -1
                msg.y = -1
                lc.publish("CAMERA_POSE_CHANNEL", msg.encode())
                print("Ball went out of frame")
                break

            print("x: {}, y:{}. state 0: {}, state 1: {}".format(
                x, y, state[0], state[1]))
            # if(x != 0):
            #     cv2.circle(cimg, (int(x), int(y)), 50, (255), 5)
            #
            # if(state[0] != 0):
            #     cv2.circle(cimg, (int(state[0]),int(state[1])), 20, (255), 3)

            # pixel_coord = np.array([state[0],state[1],1])
            # world_2d_coord = transform_camera_to_2d(pixel_coord)
            # print(world_2d_coord)
            # cv2.imshow('all',cimg)
            msg = camera_pose_xyt_t()
            msg.x = state[0]
            msg.y = state[1]
            lc.publish("CAMERA_POSE_CHANNEL", msg.encode())

        # close
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        #     break

    print("Time {}, frames: {}".format(time.time() - start_time, i))