Example no. 1
def main(width=640, height=360, k=False):
    last_detected = datetime(1990, 1, 1)
    if k:
        cap = VideoCaptureAsync(0)
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if k:
        cap.start()
    t0 = time.time()
    i = 0
    net = Detector(bytes("cfg/yolo-lite.cfg", encoding="utf-8"),
                   bytes("moirai.weights", encoding="utf-8"), 0,
                   bytes("obj.data", encoding="utf-8"))
    while True:
        r, frame = cap.read()
        if r:
            dark_frame = Image(frame)
            results = net.detect(dark_frame)
            del dark_frame

            for cat, score, bounds in results:
                x, y, w, h = bounds
                cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)),
                              (int(x + w / 2), int(y + h / 2)), (255, 0, 255))
            if len(results) > 0:
                if datetime.now() > last_detected + timedelta(seconds=6):
                    last_detected = datetime.now()
                    prob = results[0][1]
                    requests.post('http://192.168.6.219:8080/area/alert',
                                  data={
                                      "cam_id": 1,
                                      "prob": prob
                                  })
            cv2.imshow('Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    if k:
        cap.stop()
    cv2.destroyAllWindows()
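
Note: none of these examples include the VideoCaptureAsync class itself; they only rely on a cv2.VideoCapture-like set()/read() interface plus start()/stop() for a background reader thread. The block below is a minimal sketch of such a threaded wrapper, added purely for context; it is not the implementation these examples were written against (constructor signatures also vary, e.g. Example no. 11 passes width and height to the constructor).

import threading

import cv2


class VideoCaptureAsync:
    """Illustrative sketch of a threaded cv2.VideoCapture wrapper."""

    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.cap.read()
        self.started = False
        self.read_lock = threading.Lock()
        self.thread = None

    def set(self, prop, value):
        # Forward property changes (width, height, fps, ...) to the capture.
        self.cap.set(prop, value)

    def start(self):
        if self.started:
            return self
        self.started = True
        self.thread = threading.Thread(target=self._update, daemon=True)
        self.thread.start()
        return self

    def _update(self):
        # Grab frames in the background so read() never blocks on the camera.
        while self.started:
            grabbed, frame = self.cap.read()
            with self.read_lock:
                self.grabbed, self.frame = grabbed, frame

    def read(self):
        with self.read_lock:
            frame = None if self.frame is None else self.frame.copy()
            return self.grabbed, frame

    def stop(self):
        self.started = False
        if self.thread is not None:
            self.thread.join()

    def release(self):
        self.cap.release()
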
Example no. 2
def test(n_frames=500, width=1280, height=720, asyncr=False):
    if asyncr:
        cap = VideoCaptureAsync(0)
        print("If async")
    else:
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        print("XD")
    if asyncr:
        cap.start()
    t0 = time.time()
    i = 0
    while i < n_frames:
        _, frame = cap.read()
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
        i += 1
        print('[i] Frames per second: {:.2f}, asyncr={}'.format(
            i / (time.time() - t0), asyncr))
    if asyncr:
        cap.stop()
    cv2.destroyAllWindows()
Example no. 3
def test(n_frames=500, width=1280, height=720, async_flag=False):
    if async_flag:
        cap = VideoCaptureAsync(0)
        # does not support using captured video
        # cap = VideoCaptureAsync('407156.avi')
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if async_flag:
        cap.start()
    t0 = time.time()
    i = 0
    while i < n_frames:
        _, frame = cap.read()
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
        i += 1
    print('[i] Frames per second: {:.2f}, async_flag={}'.format(
        n_frames / (time.time() - t0), async_flag))
    if async_flag:
        cap.stop()
    cv2.destroyAllWindows()
class CameraSource(object):
  def __init__(self, cameraSource, height, output_file=None, startFrame=0,
               async_read=False, outputToServer=False, capture_size=None):
    if async_read:
      self.camera = VideoCaptureAsync(cameraSource)
    else:
      self.camera = cv2.VideoCapture(cameraSource)

    if capture_size is not None:
      print(capture_size)
      self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, capture_size[0])
      self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, capture_size[1])

    if async_read:
      self.ORIGINAL_WIDTH = self.camera.width
      self.ORIGINAL_HEIGHT = self.camera.height
    else:
      self.ORIGINAL_WIDTH = int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
      self.ORIGINAL_HEIGHT = int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

    print('CameraSource')
    print('Requested capture size', capture_size)
    print('Actual capture size', self.ORIGINAL_WIDTH, self.ORIGINAL_HEIGHT)

    self.HEIGHT = height
    self.WIDTH = self.ORIGINAL_WIDTH * self.HEIGHT // self.ORIGINAL_HEIGHT
    self.WIDTH = self.WIDTH + self.WIDTH % 2  # Make it even.

    self.startFrame = startFrame
    self.nFrames = 0
    self.writer = VideoWriter(output_file)

    if async_read:
      self.camera.start()

    self.outputToServer = outputToServer
    if outputToServer:
      # https://robotpy.readthedocs.io/en/stable/vision/code.html
      pass
      #self.outputStream = CameraServer.getInstance().putVideo(
      #    'ProcessedVisionFrame', self.WIDTH, self.HEIGHT)


  def GetFrame(self):  
    # Processing on first call. 
    if self.nFrames == 0:
      # Skip some frames if requested.
      if self.startFrame > 0:
        skippedFrames = 0
        while True:
          ret, frame = self.camera.read()
          if not ret or frame is None:
            print('No more frames')
            return None
          skippedFrames += 1
          if skippedFrames >= self.startFrame:
            break
      # Start timer for first frame.
      self.startTime = time.time()

    # Get frame.
    frame = None
    frameTime = time.time() 
    if self.nFrames > 0 and self.nFrames % 50 == 0:
      print('FPS: ', self.nFrames / (frameTime - self.startTime))
    self.nFrames += 1
    ret, frame = self.camera.read()
    if ret and frame is not None:
      if frame.shape[0] != self.HEIGHT:
        frame = cv2.resize(frame, (self.WIDTH, self.HEIGHT))
    return frame


  def ImageSize(self):
    return (self.WIDTH, self.HEIGHT)


  def OutputFrameAndTestContinue(self, message, frame, height=None):
    self.writer.OutputFrame(frame)
    if self.outputToServer:
      self.outputStream.putFrame(frame)
    return ShowFrameAndTestContinue(message, frame, height)


  def __del__(self):
    self.camera.release()
    cv2.destroyAllWindows()
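
A minimal, hypothetical usage of the CameraSource class above (the camera index, height and capture size are placeholders, and it assumes the project's VideoWriter helper accepts output_file=None):

source = CameraSource(0, height=480, async_read=True, capture_size=(1280, 720))
while True:
    frame = source.GetFrame()
    if frame is None:
        break  # stream ended or camera unavailable
    cv2.imshow('CameraSource preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
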
Example no. 5
def test(n_frames=500, width=1280, height=720):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap3.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap3.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap4.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap4.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    cap1.start()
    cap2.start()
    cap3.start()
    cap4.start()
    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()

        cv2.imshow('Frame1', frame1)
        cv2.imshow('Frame2', frame2)
        cv2.imshow('Frame3', frame3)
        cv2.imshow('Frame4', frame4)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap1.stop()
    cap2.stop()
    cap3.stop()
    cap4.stop()
    cv2.destroyAllWindows()
Example no. 6
class Recognition():

    # global parameters for the second window
    def distance(self, accuracy, name):
        # given a match accuracy and a name, draw and return a loading-bar image
        load = accuracy * 270
        color = (0, 0, 255)
        image = np.zeros((40, 300, 3), np.uint8)
        cv2.rectangle(image, (0, 0), (300, 50), (255, 255, 255), cv2.FILLED)
        cv2.putText(image, name, (10, 15), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (125, 125, 0), 1)
        cv2.rectangle(image, (10, 20), (int(load) + 15, 30), color, cv2.FILLED)
        return image

    def record_date_hour(self, name):
        # insert (name, date, hour) into the database
        date = strftime("%Y/%m/%d", gmtime())
        hour = strftime("%H:%M:%S", gmtime())
        try:
            connection = psycopg2.connect(
                "dbname=registros user=reddy password=123456 host=localhost port=5432"
            )
        except psycopg2.Error:
            print("connection failed")
            return
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO deteccion (name, fecha, hora) VALUES (%s, %s, %s)"""

        record_to_insert = (name, date, hour)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        cursor.close()
        connection.close()

    def dahua(self, name, actual_img, accuracy):
        path = "knowFaces/" + name.lower() + ".png"
        db_img = cv2.imread(path)
        db_img = cv2.resize(db_img, (150, 150), interpolation=cv2.INTER_CUBIC)
        un_img = np.concatenate((db_img, actual_img), axis=1)
        un_img = np.concatenate((un_img, self.distance(accuracy, name)),
                                axis=0)
        self.record_date_hour(name)
        if (self.first):
            self.first = False
            cv2.imshow("Board", un_img)
        else:
            final = np.concatenate((un_img, self.pastUnion), axis=0)
            cv2.imshow("Board", final)
        self.pastUnion = un_img

        return

    def face_enc(self, face_image, known_face_locations=None, num_jitters=1):
        """
        Given an image, return the 128-dimension face encoding for each face in the image.
        :param face_image: The image that contains one or more faces
        :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
        :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
        :return: A list of 128-dimensional face encodings (one for each face in the image)
        """
        raw_landmarks = face_recognition.api._raw_face_landmarks(
            face_image, known_face_locations)
        return [
            np.array(
                face_encoder.compute_face_descriptor(face_image,
                                                     raw_landmark_set,
                                                     num_jitters))
            for raw_landmark_set in raw_landmarks
        ]

    def getEncodingFaces(self, know_persons):
        i = 1
        for imag in know_persons:
            image = face_recognition.load_image_file(imag.getImgSrc())
            #self.faces_encoding.append(face_recognition.face_encodings(image, num_jitters=100)[0])
            self.faces_encoding.append(
                self.face_enc(image, num_jitters=100)[0])

            self.face_names.append(imag.getNombre())
            i = i + 1
        return self.faces_encoding, self.face_names

    def __init__(self):
        self.pastUnion = 2
        self.first = True
        self.path = "knowFaces/reddy.png"
        self.db_img = cv2.imread(self.path)
        self.db_img = cv2.resize(self.db_img, (150, 150),
                                 interpolation=cv2.INTER_CUBIC)

        self.cap = VideoCaptureAsync()
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        self.cap.start()

        frame_width = 1280
        frame_height = 720

        # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
        self.out = cv2.VideoWriter('outpy.avi',
                                   cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                   10, (1280, 720))

        self.face_record1 = "nadies"
        self.nombres = {}
        self.first = True
        self.accuracy = 2

        self.know_persons = getKnowPersonsFromDB()
        self.faces_encoding = []
        self.face_names = []
        self.known_face_encodings, self.known_face_names = self.getEncodingFaces(
            self.know_persons)
        self.face_locations = []
        self.face_encodings = []
        self.face_names = []
        self.process_this_frame = True

        while True:
            ret, frame = self.cap.read()
            small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1)
            # fx=fy=1 keeps the full resolution here
            rgb_small_frame = small_frame[:, :, ::-1]
            if self.process_this_frame:
                self.face_locations = face_recognition.face_locations(
                    rgb_small_frame, model="cnn")
                self.face_encodings = self.face_enc(rgb_small_frame,
                                                    self.face_locations,
                                                    num_jitters=100)
                self.face_names = []
                self.face_values = []

                for face_encoding in self.face_encodings:
                    self.matches = face_recognition.compare_faces(
                        self.known_face_encodings,
                        face_encoding,
                        tolerance=0.30)
                    self.name = "Unknown"
                    self.values = np.linalg.norm(self.known_face_encodings -
                                                 face_encoding,
                                                 axis=1)

                    if True in self.matches:
                        first_match_index = self.matches.index(True)
                        self.accuracy = self.values[
                            first_match_index]  #get the accuracy
                        self.name = self.known_face_names[first_match_index]
                    self.face_names.append(self.name)
                    self.face_values.append(self.accuracy)  #gui

            self.process_this_frame = not self.process_this_frame

            tratar = False
            for (top, right, bottom,
                 left), name, acc in zip(self.face_locations, self.face_names,
                                         self.face_values):
                """top *= 2
                right *= 2
                bottom *= 2
                left *= 2"""
                collight = (123, 123, 123)
                actual_img = cv2.resize(frame[top:bottom, left:right],
                                        (150, 150),
                                        interpolation=cv2.INTER_CUBIC)  #gui
                cv2.rectangle(frame, (left, top), (right, bottom), collight,
                              1)  #face bordes
                # compute the vertical and horizontal extent of the face box
                vertical = bottom - top
                horizontal = right - left
                # draw the corner marks
                r = right
                l = left

                t = top
                b = bottom

                alpha = vertical / 4
                alpha = int(alpha)
                betha = 2 * alpha
                if (name == "Unknown"):
                    col = (255, 255, 255)

                else:
                    col = (241, 175, 14)
                cv2.line(frame, (l, t), (l, t + alpha), col, 2)
                cv2.line(frame, (l, t), (l + alpha, t), col, 2)

                cv2.line(frame, (r, t), (r - alpha, t), col, 2)
                cv2.line(frame, (r, t), (r, t + alpha), col, 2)

                cv2.line(frame, (l, b), (l + alpha, b), col, 2)
                cv2.line(frame, (l, b), (l, b - alpha), col, 2)

                cv2.line(frame, (r, b), (r - alpha, b), col, 2)
                cv2.line(frame, (r, b), (r, b - alpha), col, 2)

                alpha = 10
                cv2.line(frame, (l - alpha, t + betha), (l + alpha, t + betha),
                         collight, 1)
                cv2.line(frame, (r - alpha, t + betha), (r + alpha, t + betha),
                         collight, 1)
                cv2.line(frame, (l + betha, t - alpha), (l + betha, t + alpha),
                         collight, 1)
                cv2.line(frame, (l + betha, b - alpha), (l + betha, b + alpha),
                         collight, 1)

                #print("vertical: ", vertical)
                #print("horizontal: ", horizontal)

                #cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (123, 123, 123), cv2.FILLED) #space for name
                cv2.putText(frame, name, (left + 6, bottom - 6),
                            cv2.FONT_HERSHEY_DUPLEX, 0.70, (255, 255, 0), 1)
                print("nombres: ", self.nombres)

                try:
                    if (self.nombres[self.name] >= 1):
                        self.nombres[self.name] += 1
                    else:
                        self.nombres[self.name] = 1

                except KeyError:
                    print("first detection of this name")
                    self.nombres[self.name] = 1

                if (name != "Unknown" and self.nombres[self.name] == 7):
                    if (self.face_record1 != self.name):
                        self.dahua(self.name, actual_img, acc)
                        # causes 50 fps with 0.02 and the lowest 0.001
                        self.face_record1 = name
                    self.nombres[self.name] = 1
            #self.out.write(frame)
            cv2.imshow('Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        self.cap.stop()
        print(self.out.release())
        #out.release()
        cv2.destroyAllWindows()
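
For reference, the matches / values pair computed in the frame loop above boils down to a single Euclidean-distance test: face_recognition.compare_faces with tolerance=0.30 flags a known encoding as a match when its distance to the probe encoding is at most 0.30, roughly:

distances = np.linalg.norm(
    np.asarray(self.known_face_encodings) - face_encoding, axis=1)
matches = list(distances <= 0.30)
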
Example no. 7
    parser.add_argument(
        '-c',
        '--colormode',
        type=str,
        default="bgr",
        help="RGB vs BGR color sequence. This is network dependent.")

    ARGS = parser.parse_args()

    # Create a VideoCapture object
    # camera = cv2.VideoCapture( ARGS.video )
    camera_1 = VideoCaptureAsync("rtsp://192.168.1.10/1")
    camera_2 = VideoCaptureAsync("rtsp://192.168.1.10/2")

    # Set camera resolution
    # camera.set( cv2.CAP_PROP_FRAME_WIDTH, 352 )
    # camera.set( cv2.CAP_PROP_FRAME_HEIGHT, 288 )
    camera_1.set(cv2.CAP_PROP_FRAME_WIDTH, 352)
    camera_1.set(cv2.CAP_PROP_FRAME_HEIGHT, 288)
    camera_2.set(cv2.CAP_PROP_FRAME_WIDTH, 352)
    camera_2.set(cv2.CAP_PROP_FRAME_HEIGHT, 288)

    # Load the labels file
    labels = [
        line.rstrip('\n') for line in open(ARGS.labels) if line != 'classes\n'
    ]

    main()

# ==== End of file ===========================================================
Example no. 8
def test(width, height):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap3.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap3.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap4.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap4.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    alpha = 90
    beta = 90
    gamma = 90
    focalLength = 500
    dist = 500
    move_upper = 0
    move_left = 0
    move_right = 0
    move_lower = 0
    move_together = 0
    cv2.namedWindow('tune_upper')
    cv2.namedWindow('tune_left')
    cv2.namedWindow('tune_right')
    cv2.namedWindow('tune_lower')
    cv2.namedWindow('move')

    cv2.createTrackbar('Alpha', 'tune_upper', 60, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_upper', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_upper', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_left', 70, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_left', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_left', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_right', 110, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_right', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_right', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_lower', 60, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_lower', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_lower', 500, 2000, nothing)

    cv2.createTrackbar('Move upper', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move left', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move right', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move lower', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move together', 'move', 200, 400, nothing)

    cap1.start()
    cap2.start()
    cap3.start()
    cap4.start()
    h = 480
    w = 720
    vis = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_center = np.zeros(((w + h * 2), h, 3), np.uint8)
    vis_left = np.zeros(((w + h * 2), h, 3), np.uint8)
    vis_right = np.zeros(((w + h * 2), h, 3), np.uint8)
    void = np.zeros((w, w, 3), np.uint8)
    void_side = np.zeros((h, h, 3), np.uint8)

    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()

        frame11 = undistort(frame1)
        frame22 = undistort(frame2)
        frame33 = undistort(frame3)
        frame44 = undistort(frame4)

        alpha_upper = cv2.getTrackbarPos('Alpha', 'tune_upper')
        beta_upper = cv2.getTrackbarPos('Beta', 'tune_upper')
        gamma_upper = cv2.getTrackbarPos('Gamma', 'tune_upper')
        focalLength_upper = cv2.getTrackbarPos('f', 'tune_upper')
        dist_upper = cv2.getTrackbarPos('Distance', 'tune_upper')

        alpha_left = cv2.getTrackbarPos('Alpha', 'tune_left')
        beta_left = cv2.getTrackbarPos('Beta', 'tune_left')
        gamma_left = cv2.getTrackbarPos('Gamma', 'tune_left')
        focalLength_left = cv2.getTrackbarPos('f', 'tune_left')
        dist_left = cv2.getTrackbarPos('Distance', 'tune_left')

        alpha_right = cv2.getTrackbarPos('Alpha', 'tune_right')
        beta_right = cv2.getTrackbarPos('Beta', 'tune_right')
        gamma_right = cv2.getTrackbarPos('Gamma', 'tune_right')
        focalLength_right = cv2.getTrackbarPos('f', 'tune_right')
        dist_right = cv2.getTrackbarPos('Distance', 'tune_right')

        alpha_lower = cv2.getTrackbarPos('Alpha', 'tune_lower')
        beta_lower = cv2.getTrackbarPos('Beta', 'tune_lower')
        gamma_lower = cv2.getTrackbarPos('Gamma', 'tune_lower')
        focalLength_lower = cv2.getTrackbarPos('f', 'tune_lower')
        dist_lower = cv2.getTrackbarPos('Distance', 'tune_lower')

        move_upper = cv2.getTrackbarPos('Move upper', 'move')
        move_left = cv2.getTrackbarPos('Move left', 'move')
        move_right = cv2.getTrackbarPos('Move right', 'move')
        move_lower = cv2.getTrackbarPos('Move lower', 'move')
        move_together = cv2.getTrackbarPos('Move together', 'move')

        alpha_upper = (alpha_upper - 90) * math.pi / 180
        beta_upper = (beta_upper - 90) * math.pi / 180
        gamma_upper = (gamma_upper - 90) * math.pi / 180

        alpha_left = (alpha_left - 90) * math.pi / 180
        beta_left = (beta_left - 90) * math.pi / 180
        gamma_left = (gamma_left - 90) * math.pi / 180

        alpha_right = (alpha_right - 90) * math.pi / 180
        beta_right = (beta_right - 90) * math.pi / 180
        gamma_right = (gamma_right - 90) * math.pi / 180

        alpha_lower = (alpha_lower - 90) * math.pi / 180
        beta_lower = (beta_lower - 90) * math.pi / 180
        gamma_lower = (gamma_lower - 90) * math.pi / 180

        #rotation_mat = cv2.getRotationMatrix2D((360,240), 180, 1)
        #print("Transformation: ", Transformation.shape)
        #print("Rotation: ", rotation_mat.shape)

        transformation_upper = get_transformation(w, h, alpha_upper,
                                                  beta_upper, gamma_upper,
                                                  dist_upper,
                                                  focalLength_upper)
        transformation_left = get_transformation(h, w, alpha_left, beta_left,
                                                 gamma_left, dist_left,
                                                 focalLength_left)
        transformation_right = get_transformation(h, w, alpha_right,
                                                  beta_right, gamma_right,
                                                  dist_right,
                                                  focalLength_right)
        transformation_lower = get_transformation(w, h, alpha_lower,
                                                  beta_lower, gamma_lower,
                                                  dist_lower,
                                                  focalLength_lower)

        left = rotate_bound(frame33, 270)
        right = rotate_bound(frame22, 90)

        result_upper = cv2.warpPerspective(frame11, transformation_upper,
                                           (720, 480))
        result_left = cv2.warpPerspective(left, transformation_left,
                                          (480, 720))
        result_right = cv2.warpPerspective(right, transformation_right,
                                           (480, 720))
        result_lower = cv2.warpPerspective(frame44, transformation_lower,
                                           (720, 480))

        #cv2.imshow('Frame1', frame1)
        #cv2.imshow('Frame11', frame11)
        #cv2.imshow('Frame2', frame2)
        #cv2.imshow('Frame22', frame22)
        #cv2.imshow('Frame3', frame3)
        #cv2.imshow('Frame33', frame33)
        #cv2.imshow('Frame4', frame4)
        #cv2.imshow('Frame44', frame44)
        #left = rotate_bound(result_left, 270)
        #right = rotate_bound(result_right, 90)
        result_lower = cv2.flip(result_lower, 0)
        #cv2.imshow('left', left)
        #cv2.imshow('right',right)
        #vis_center = np.concatenate((result,void,frame44),axis=0)
        #vis_right = np.concatenate((void_side,right,void_side),axis=0)
        #vis_left = np.concatenate((void_side,left,void_side),axis=0)
        #vis = np.concatenate((vis_left,vis_center,vis_right),axis=1)
        # Integer division so the slice indices stay ints under Python 3.
        vis[move_upper + move_together:result_upper.shape[0] + move_upper +
            move_together,
            h - (result_upper.shape[1] - 720) // 2:result_upper.shape[1] + h -
            (result_upper.shape[1] - 720) // 2, :] = result_upper
        vis[h:result_left.shape[0] + h,
            move_left + move_together:result_left.shape[1] + move_left +
            move_together, :] += result_left
        vis[h:result_right.shape[0] + h,
            h + w - move_right - move_together:result_right.shape[1] -
            move_right - move_together + w + h, :] += result_right
        vis[h + w - move_lower - move_together:result_lower.shape[0] -
            move_lower - move_together + w + h,
            h:result_lower.shape[1] + h, :] += result_lower
        height, width = vis.shape[:2]
        res = cv2.resize(vis, (width * 4 // 7, height * 4 // 7),
                         interpolation=cv2.INTER_CUBIC)
        #cv2.imshow('combined', vis_center)
        #cv2.imshow('combined_left', vis_left)
        #cv2.imshow('combined_right', vis_right)
        cv2.imshow('vis', res)
        vis = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break

    cap1.stop()
    cap2.stop()
    cap3.stop()
    cap4.stop()
    cv2.destroyAllWindows()
    print("FirebaseAction Done")


#face_rec_training_data_file = 'training_data_yml/face_rec_training_data_vid1.yml'
face_rec_training_data_file = 'training_data_yml/face_rec_training_data1.yml'

face_recognizer = cv2.face.LBPHFaceRecognizer_create()  #LBPH face recognizer
face_recognizer.read(face_rec_training_data_file)


def start_recogniz(n_frames=500, width=1280, height=720, asyncr=False):
    # "async" is a reserved word in Python 3.7+, so the flag is renamed here.
    if asyncr:
        cap = VideoCaptureAsync(0)
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if asyncr:
        cap.start()
    t0 = time.time()
    i = 0

    while i < n_frames:
        ret, frame = cap.read()

        faces = detect_multiple_o_face(frame)
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            detect_flg = 1
            ts = time.gmtime()
            ptimestamp = time.strftime("%s", ts)
Example no. 10
def test(n_frames=500, width=1280, height=720):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap3.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap3.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap4.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap4.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix,
                                                      dist_coefs, (720, 480),
                                                      1, (720, 480))
    x, y, w, h = roi
    #M = cv2.getRotationMatrix2D((720,480),5,2)

    cap1.start()
    cap2.start()
    cap3.start()
    cap4.start()
    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()
        frame11 = cv2.undistort(frame1, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame11 = frame11[130:390, 110:550]
        frame22 = cv2.undistort(frame2, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame22 = frame22[130:390, 110:550]
        frame33 = cv2.undistort(frame3, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame33 = frame33[130:390, 110:550]
        frame44 = cv2.undistort(frame4, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame44 = frame44[130:390, 110:550]
        cv2.imshow('Frame1', frame1)
        cv2.imshow('Frame11', frame11)
        cv2.imshow('Frame2', frame2)
        cv2.imshow('Frame22', frame22)
        cv2.imshow('Frame3', frame3)
        cv2.imshow('Frame33', frame33)
        cv2.imshow('Frame4', frame4)
        cv2.imshow('Frame44', frame44)

        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break

    cap1.stop()
    cap2.stop()
    cap3.stop()
    cap4.stop()
    cv2.destroyAllWindows()
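
Examples no. 8 and no. 12 call an undistort(frame) helper that is not shown anywhere in this collection. Judging from the explicit calls in this example, it is plausibly a thin wrapper around cv2.undistort with a precomputed optimal camera matrix (camera_matrix and dist_coefs coming from a prior calibration); a sketch under that assumption:

newcameramtx, _roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs,
                                                   (720, 480), 1, (720, 480))


def undistort(frame):
    return cv2.undistort(frame, camera_matrix, dist_coefs, None, newcameramtx)
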
Example no. 11
                        type=float,
                        help='input standard deviation')

    args = parser.parse_args()

    import tensorflow as tf
    interpreter = tf.lite.Interpreter(model_path=args.model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # setup access to the *real* webcam
    print('Opening webcam', args.input, '...')
    #cap = cv2.VideoCapture(args.input)
    cap = VideoCaptureAsync(args.input, args.width, args.height)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
    cap.set(cv2.CAP_PROP_FPS, 25)

    # setup the fake camera
    fake = None
    if args.output != "imshow":
        print('Writing to loopback device', args.output, '...')
        fake = pyfakewebcam.FakeWebcam(args.output, args.width, args.height)

    # load the virtual background
    background_index = 0
    background = None
    bg_cap = None

    import glob
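
The snippet above stops while the setup is still in progress (the background images were presumably loaded with glob next). As a rough sketch of how the pieces could fit together per frame, with the model's input size, normalization and output meaning all being assumptions rather than details taken from the original script:

import numpy as np

# Assumes a 4-D NHWC input tensor.
_, in_h, in_w, _ = input_details[0]['shape']

ret, frame = cap.read()
if ret:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    inp = cv2.resize(rgb, (in_w, in_h)).astype(np.float32)
    inp = (inp - 127.5) / 127.5  # placeholder normalization
    interpreter.set_tensor(input_details[0]['index'], inp[np.newaxis, ...])
    interpreter.invoke()
    mask = interpreter.get_tensor(output_details[0]['index'])  # model-specific

    if fake is not None:
        # pyfakewebcam expects RGB frames at the size passed to FakeWebcam().
        fake.schedule_frame(cv2.resize(rgb, (args.width, args.height)))
    else:
        cv2.imshow('output', frame)
        cv2.waitKey(1)
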
Example no. 12
def test(width, height):

    cap1 = VideoCaptureAsync(
        'udpsrc port=50003 caps = "application/x-rtp,media=(string)video,payload=(int)26,clock-rate=(int)90000,encoding-name=(string)JPEG" ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink '
    )
    cap2 = VideoCaptureAsync(
        'udpsrc port=50004 caps = "application/x-rtp,media=(string)video,payload=(int)26,clock-rate=(int)90000,encoding-name=(string)JPEG" ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink '
    )
    cap3 = VideoCaptureAsync(
        'udpsrc port=50005 caps = "application/x-rtp,media=(string)video,payload=(int)26,clock-rate=(int)90000,encoding-name=(string)JPEG" ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink '
    )
    cap4 = VideoCaptureAsync(
        'udpsrc port=50006 caps = "application/x-rtp,media=(string)video,payload=(int)26,clock-rate=(int)90000,encoding-name=(string)JPEG" ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink '
    )

    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap3.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap3.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap4.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap4.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    alpha = 90
    beta = 90
    gamma = 90
    focalLength = 500
    dist = 500
    move_upper = 0
    move_left = 0
    move_right = 0
    move_lower = 0
    move_together = 0
    cv2.namedWindow('tune_upper')
    cv2.namedWindow('tune_left')
    cv2.namedWindow('tune_right')
    cv2.namedWindow('tune_lower')
    cv2.namedWindow('move')

    cv2.createTrackbar('Alpha', 'tune_upper', 97, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_upper', 279, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_upper', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_left', 106, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_left', 279, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_left', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_right', 105, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_right', 279, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_right', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_lower', 106, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_lower', 279, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_lower', 500, 2000, nothing)

    cv2.createTrackbar('Move upper', 'move', 171, 400, nothing)
    cv2.createTrackbar('Move left', 'move', 162, 400, nothing)
    cv2.createTrackbar('Move right', 'move', 103, 400, nothing)
    cv2.createTrackbar('Move lower', 'move', 165, 400, nothing)
    cv2.createTrackbar('Move together', 'move', 645, 700, nothing)

    cap1.start()
    cap2.start()
    cap3.start()
    cap4.start()
    h = 720
    w = 1280
    vis = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_upper = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_left = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_right = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_lower = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    void = np.zeros((w, w, 3), np.uint8)
    void_side = np.zeros((h, h, 3), np.uint8)

    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()

        frame11 = undistort(frame1)
        frame22 = undistort(frame2)
        frame33 = undistort(frame3)
        frame44 = undistort(frame4)

        alpha_upper = cv2.getTrackbarPos('Alpha', 'tune_upper')
        beta_upper = cv2.getTrackbarPos('Beta', 'tune_upper')
        gamma_upper = cv2.getTrackbarPos('Gamma', 'tune_upper')
        focalLength_upper = cv2.getTrackbarPos('f', 'tune_upper')
        dist_upper = cv2.getTrackbarPos('Distance', 'tune_upper')

        alpha_left = cv2.getTrackbarPos('Alpha', 'tune_left')
        beta_left = cv2.getTrackbarPos('Beta', 'tune_left')
        gamma_left = cv2.getTrackbarPos('Gamma', 'tune_left')
        focalLength_left = cv2.getTrackbarPos('f', 'tune_left')
        dist_left = cv2.getTrackbarPos('Distance', 'tune_left')

        alpha_right = cv2.getTrackbarPos('Alpha', 'tune_right')
        beta_right = cv2.getTrackbarPos('Beta', 'tune_right')
        gamma_right = cv2.getTrackbarPos('Gamma', 'tune_right')
        focalLength_right = cv2.getTrackbarPos('f', 'tune_right')
        dist_right = cv2.getTrackbarPos('Distance', 'tune_right')

        alpha_lower = cv2.getTrackbarPos('Alpha', 'tune_lower')
        beta_lower = cv2.getTrackbarPos('Beta', 'tune_lower')
        gamma_lower = cv2.getTrackbarPos('Gamma', 'tune_lower')
        focalLength_lower = cv2.getTrackbarPos('f', 'tune_lower')
        dist_lower = cv2.getTrackbarPos('Distance', 'tune_lower')

        move_upper = cv2.getTrackbarPos('Move upper', 'move')
        move_left = cv2.getTrackbarPos('Move left', 'move')
        move_right = cv2.getTrackbarPos('Move right', 'move')
        move_lower = cv2.getTrackbarPos('Move lower', 'move')
        move_together = cv2.getTrackbarPos('Move together', 'move')

        alpha_upper = (alpha_upper - 90) * math.pi / 180
        beta_upper = (beta_upper - 90) * math.pi / 180
        gamma_upper = (gamma_upper - 90) * math.pi / 180

        alpha_left = (alpha_left - 90) * math.pi / 180
        beta_left = (beta_left - 90) * math.pi / 180
        gamma_left = (gamma_left - 90) * math.pi / 180

        alpha_right = (alpha_right - 90) * math.pi / 180
        beta_right = (beta_right - 90) * math.pi / 180
        gamma_right = (gamma_right - 90) * math.pi / 180

        alpha_lower = (alpha_lower - 90) * math.pi / 180
        beta_lower = (beta_lower - 90) * math.pi / 180
        gamma_lower = (gamma_lower - 90) * math.pi / 180

        transformation_upper = get_transformation(w, h, alpha_upper,
                                                  beta_upper, gamma_upper,
                                                  dist_upper,
                                                  focalLength_upper)
        transformation_left = get_transformation(w, h, alpha_left, beta_left,
                                                 gamma_left, dist_left,
                                                 focalLength_left)
        transformation_right = get_transformation(w, h, alpha_right,
                                                  beta_right, gamma_right,
                                                  dist_right,
                                                  focalLength_right)
        transformation_lower = get_transformation(w, h, alpha_lower,
                                                  beta_lower, gamma_lower,
                                                  dist_lower,
                                                  focalLength_lower)

        result_upper = cv2.warpPerspective(frame11,
                                           transformation_upper, (w, h),
                                           flags=cv2.INTER_NEAREST)
        result_left = cv2.warpPerspective(frame33,
                                          transformation_left, (w, h),
                                          flags=cv2.INTER_NEAREST)
        result_right = cv2.warpPerspective(frame22,
                                           transformation_right, (w, h),
                                           flags=cv2.INTER_NEAREST)
        result_lower = cv2.warpPerspective(frame44,
                                           transformation_lower, (w, h),
                                           flags=cv2.INTER_NEAREST)

        result_left = rotate_bound(result_left, 270)
        result_right = rotate_bound(result_right, 90)
        result_lower = cv2.flip(result_lower, 0)
        result_lower = cv2.flip(result_lower, 1)

        vis_upper[move_upper + move_together:(int)(result_upper.shape[0] +
                                                   move_upper + move_together),
                  (int)(h - (result_upper.shape[1] - 1280) / 2):
                  (int)(result_upper.shape[1] + h -
                        (result_upper.shape[1] - 1280) / 2), :] = result_upper
        vis_left[h:result_left.shape[0] + h,
                 move_left + move_together:result_left.shape[1] + move_left +
                 move_together, :] = result_left
        vis_right[h:result_right.shape[0] + h,
                  h + w - move_right - move_together:result_right.shape[1] -
                  move_right - move_together + w + h, :] = result_right
        vis_lower[h + w - move_lower - move_together:result_lower.shape[0] -
                  move_lower - move_together + w + h,
                  h:result_lower.shape[1] + h, :] = result_lower

        height, width = vis.shape[:2]
        vis_res1 = cv2.bitwise_or(vis_lower, vis_upper)
        vis_res2 = cv2.bitwise_or(vis_left, vis_right)
        vis_res = cv2.bitwise_or(vis_res1, vis_res2)
        vis_res3 = cv2.resize(vis_res,
                              ((int)(width * 4 / 7), (int)(height * 4 / 7)),
                              interpolation=cv2.INTER_NEAREST)
        cv2.imshow('vis_res', vis_res3)
        vis_res = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break

    cap1.stop()
    cap2.stop()
    cap3.stop()
    cap4.stop()
    cv2.destroyAllWindows()