Example #1
def main(argv):
    global isNewPoint
    global newPoint

    img = cv2.imread("Penguins.jpg")
    img = u.resize(img)

    winName="watershed Testes"
    imgPrev = np.copy(img)
    cv2.namedWindow(winName)

    cv2.imshow(winName, imgPrev)
    cv2.setMouseCallback(winName, onMouse)

    op = -1
    fgRect = []
    bgRect = []
    # cv2.waitKey returns an int (-1 when no key is pressed), so compare
    # key codes rather than strings
    while op not in (ord('p'), ord('P')):
        op = cv2.waitKey(50) & 0xFF
        if isNewPoint == 1:
            isNewPoint = 0
            if op in (ord('f'), ord('F')):
                fgRect.append(newPoint)
            # 'b'/'B' presumably marks background points
            elif op in (ord('b'), ord('B')):
                bgRect.append(newPoint)

    cv2.destroyAllWindows()
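
Note: every example on this page calls a project-local resize helper (u.resize, utilities.resize, util.resize). A minimal sketch of such an aspect-ratio-preserving wrapper around cv2.resize might look like the following; the names and defaults are assumptions, modeled on imutils.resize, not the projects' actual code.

import cv2

def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    (h, w) = image.shape[:2]
    if width is None and height is None:
        # nothing to do
        return image
    if width is not None and height is not None:
        # both dimensions given: resize exactly (aspect ratio not preserved)
        return cv2.resize(image, (width, height), interpolation=inter)
    if width is None:
        # scale by height, keeping the aspect ratio
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        # scale by width, keeping the aspect ratio
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)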
Example #2
    def update(self, dt):
        ret, frame = self.capture.read()
        if frame is None:
            sys.exit()
        frame = utilities.resize(frame, 480, 480)
        # convert the frame to a texture (flip vertically first, since
        # OpenCV rows are top-down while Kivy textures are bottom-up)
        buf1 = cv2.flip(frame, 0)
        buf = buf1.tobytes()
        image_texture = Texture.create(
            size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display the image from the texture
        self.texture = image_texture
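
An update() method like this is typically driven by Kivy's Clock. A minimal sketch of that wiring follows; the class name, fps value, and widget choice are assumptions, not taken from the source.

from kivy.app import App
from kivy.clock import Clock
from kivy.uix.image import Image
import cv2

class KivyCamera(Image):
    # assumes an update(self, dt) method like the one above is defined here
    def __init__(self, capture, fps=30.0, **kwargs):
        super().__init__(**kwargs)
        self.capture = capture
        # call update() roughly fps times per second
        Clock.schedule_interval(self.update, 1.0 / fps)

class CamApp(App):
    def build(self):
        return KivyCamera(capture=cv2.VideoCapture(0))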
Example #3
    def test_resize(self):
        image = decode_base64_image(TEST_BINARY_DATA)
        img = images.Image(resize(image))
        self.assertEqual(img.width, 200)
        self.assertEqual(int(img.height), 170)
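
The test relies on a decode_base64_image helper that is not shown. Under the obvious reading it simply decodes base64 text into raw image bytes; this sketch is a guess, not the project's actual code.

import base64

def decode_base64_image(data):
    # hypothetical helper: base64-encoded text -> raw image bytes
    return base64.b64decode(data)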
Example #4
import imutils
from queue import Queue

firstFrame = None
consecFrames = 0
kcw = KeyClipWriter(bufSize=128)
print("[INFO] Camera is warming up... Please Wait...")

# Use threads to read from camera
cap = VideoStream(src=0).start()
time.sleep(3)

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
while True:
    frame = cap.read()
    frame = utilities.resize(frame, width=500)
    updateConsecFrame = True
    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        cv2.imshow("First Frame", firstFrame)
        continue
    cv2.imshow("Original Video", frame)
    difference = cv2.absdiff(gray, firstFrame)
    thresh = cv2.threshold(difference, 20, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(contours)
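
The example is truncated here. In the usual key-clip motion-detection pattern, the contours would next be filtered by area and used to reset the consecutive-quiet-frame counter; the threshold value and drawing below are assumptions, not the project's code.

    for c in cnts:
        # skip small contours that are likely sensor noise (area threshold assumed)
        if cv2.contourArea(c) < 500:
            continue
        (x, y, bw, bh) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
        # motion seen in this frame: reset the quiet-frame counter
        consecFrames = 0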
Example #5
    def frames():
        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(PTOTO_PATH, CAFFE_PATH)
        camera = cv2.VideoCapture(Camera.video_source, cv2.CAP_DSHOW)
        kcw = KeyClipWriter(bufSize=128)
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        consecFrames = 0
        md = MotionDetector(accumWeight=0.4)
        COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
        total = 0
        frameCount = 16
        frame_rate = 16
        # previous_time = 0
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')

        while True:
            # print("Name of video file from original function: ", Camera.name_of_videoFile)
            updateConsecFrame = True
            # read current frame
            # time_elapsed = time() - previous_time
            _, frame = camera.read()
            if frame is None:
                break
            # if time_elapsed > 1./frame_rate:
            #     previous_time = time()
            # fps = FPS().start()
            # flip and resize first, so (h, w) matches the frame the
            # detection boxes are drawn on
            frame = cv2.flip(frame, 1)
            frame = utilities.resize(frame, width=350)
            (h, w) = frame.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                         0.007843, (300, 300), 127.5)
            frame_for_video = frame.copy()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (7, 7), 0)
            
            if total > frameCount:
                motion = md.detect(gray)
                if motion is not None:
                    (thresh, (minX, minY, maxX, maxY)) = motion
                    cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255), 2)
                    net.setInput(blob)
                    detections = net.forward()
                    consecFrames = 0
                    for i in np.arange(0, detections.shape[2]):
                        confidence = detections[0, 0, i, 2]
                        if confidence > MIN_CONFIDENCE:
                            idx = int(detections[0, 0, i, 1])
                            if CLASSES[idx] in IGNORE:
                                continue
                            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                            (startX, startY, endX, endY) = box.astype("int")
                            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                            cv2.rectangle(frame, (startX, startY), (endX, endY),
                                COLORS[idx], 2)
                            y = startY - 15 if startY - 15 > 15 else startY + 15
                            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                    if not kcw.recording:
                        timestamp = datetime.datetime.now()
                        name_of_videoFile = "{}-{}.avi".format("output",timestamp.strftime("%Y-%m-%d-%H-%M-%S"))
                        kcw.start(name_of_videoFile, fourcc, 30)
            if updateConsecFrame:
                consecFrames += 1
                # # update the key frame clip buffer
                kcw.update(frame_for_video)
                # if we are recording and reached a threshold on consecutive
                # number of frames with no action, stop recording the clip
                if kcw.recording and consecFrames == 64:
                    print("Name from Original Function: ", name_of_videoFile)
                    subprocess.run(['python', 'print_test.py', name_of_videoFile])
                    kcw.finish()
                # encode as a jpeg image and return it
            md.update(gray)
            total += 1
            cv2.waitKey(10)
            # fps.update()
            yield cv2.imencode('.jpg', frame)[1].tobytes()
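
Since frames() yields raw JPEG bytes, it is presumably consumed by a Flask MJPEG streaming route. A minimal sketch of that wiring follows; the route name, app object, and the Camera class's exposure of frames() are assumptions.

from flask import Flask, Response

app = Flask(__name__)

def mjpeg(generator):
    # wrap each JPEG frame in a multipart boundary for the browser
    for jpg in generator:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg(Camera.frames()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')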
            
Example #6
def getdata():
    trainsize = 5000
    testsize = 1000
    folder = ""

    # read training labels
    with open(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +
            "\dataset\\" + "train-labels.idx1-ubyte", "rb") as f:
        data = f.read(4)  # magic number
        data = f.read(4)  # number of samples
        training_samples = struct.unpack('>I', data)[0]  # big-endian uint32
        traininglabels = np.empty(trainsize)
        for i in range(0, trainsize):
            data = f.read(1)
            traininglabels[i] = int(struct.unpack('>B', data)[0])

    # read training images
    with open(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +
            "\dataset\\" + "train-images.idx3-ubyte", "rb") as f:
        data = f.read(4)  # magic number
        data = f.read(4)  # number of samples
        trainingsample = np.zeros(784)
        trainingsamples = np.empty((trainsize, 196))
        data = f.read(4)  # y dimension
        data = f.read(4)  # x dimension
        for i in range(0, trainsize):
            for j in range(0, 784):
                data = f.read(1)
                trainingsample[j] = util.round(struct.unpack('>B', data)[0])
            trainingsamples[i] = util.subsample(util.resize(trainingsample),
                                                14)

    # read test labels
    with open(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +
            "\dataset\\" + "t10k-labels.idx1-ubyte", "rb") as f:
        data = f.read(4)  # magic number
        data = f.read(4)  # number of samples
        test_samples = struct.unpack('>I', data)[0]  # big-endian uint32
        testlabels = np.empty(testsize)
        for i in range(0, testsize):
            data = f.read(1)
            testlabels[i] = int(struct.unpack('>B', data)[0])

    # read test images
    with open(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +
            "\dataset\\" + "t10k-images.idx3-ubyte", "rb") as f:
        data = f.read(4)  # magic number
        data = f.read(4)  # number of samples
        testsample = np.zeros(784)
        testsamples = np.empty((testsize, 196))
        data = f.read(4)  # y dimension
        data = f.read(4)  # x dimension
        for i in range(0, testsize):
            for j in range(0, 784):
                data = f.read(1)
                testsample[j] = util.round(struct.unpack('>B', data)[0])
            testsamples[i] = util.subsample(util.resize(testsample), 14)

    y_train = traininglabels
    X_train = trainingsamples
    y_test = testlabels
    X_test = testsamples
    return X_train, X_test, y_train, y_test
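
getdata() leans on util.round, util.resize, and util.subsample, none of which are shown. From the shapes involved (784 raw pixels in, 196 = 14*14 features out), a plausible sketch of the downsampling pair is the following; the exact behavior is a guess.

import numpy as np

def resize(sample):
    # reshape the flat 784-vector back into its 28x28 image form
    return np.asarray(sample).reshape(28, 28)

def subsample(image, size):
    # block-average the 28x28 image down to size x size (e.g. 14x14 -> 196
    # features), then flatten back into a feature vector
    factor = image.shape[0] // size
    blocks = image.reshape(size, factor, size, factor)
    return blocks.mean(axis=(1, 3)).flatten()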
Example #7
    def update(self, dt):
        ret, frame = self.capture.read()
        # if frame is None:
        #     print("[Info] Frame not Read... Leaving....")
        #     sys.exit()
        if ret:
            frame = utilities.resize(frame, 480, 480)
            # convert it to texture
            (h, w) = frame.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                (300, 300), (104.0, 177.0, 123.0))

            # pass the blob through the network and obtain the detections and
            # predictions
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in range(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated with the
                # prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections
                if confidence > 0.85:
                    # compute the (x, y)-coordinates of the bounding box for
                    # the face and extract the face ROI
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    # ensure the detected bounding box does not fall outside
                    # the dimensions of the frame
                    startX = max(0, startX)
                    startY = max(0, startY)
                    endX = min(w, endX)
                    endY = min(h, endY)

                    # extract the face ROI and then preprocess it in exactly
                    # the same manner as our training data
                    face = frame[startY:endY, startX:endX]
                    if face.size == 0:  # guard against empty ROIs
                        continue
                    face = cv2.resize(face, (32, 32))
                    face = face.astype("float") / 255.0
                    face = img_to_array(face)
                    face = np.expand_dims(face, axis=0)

                    # pass the face ROI through the trained liveness detector
                    # model to determine if the face is "real" or "fake"
                    preds = model.predict(face)[0]
                    j = np.argmax(preds)
                    label = le.classes_[j]
                    if label == "Real":
                        rect_color = (255, 128, 255)
                        """
                        # Important Note
                        # Recognition Runs here
                        # Do not forgot to import face recognition
                        username = RecognizeMe(frame)
                        if username != "unknown":
                            # following lines should be shifted to inside this if block
                            # Then comment/ remove line username = '******'
                        """
                        ev = OnFaceRecognition()
                        ev.bind(on_recognize=custom_event_callback)
                        username = '******'
                        ev.goHome('Real', username)
                        self.capture.release()
                    else:
                        rect_color = (0, 0, 255)
                    # draw the label and bounding box on the frame
                    label = "{}: {:.2f}".format(label, preds[j])
                    cv2.rectangle(frame, (startX - 1, startY - 20), (startX - int((startX - endX) / 1.5), startY),
                        rect_color, -1)
                    cv2.putText(frame, label, (startX, startY - 5),
                        cv2.FONT_HERSHEY_PLAIN, 0.80, (255, 255, 255), 1, cv2.LINE_AA)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                        rect_color, 2)

            buf1 = frame.copy()
            buf1 = cv2.flip(buf1, 0)
            buf = buf1.tobytes()
            image_texture = Texture.create(
                size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
            image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
            # display image from the texture
            self.texture = image_texture
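
The OnFaceRecognition object used above looks like a Kivy EventDispatcher with a custom on_recognize event. A minimal sketch consistent with the call sites follows; the method and event names come from the calls, but the bodies are assumptions.

from kivy.event import EventDispatcher

class OnFaceRecognition(EventDispatcher):
    def __init__(self, **kwargs):
        # register the custom event before the dispatcher initializes
        self.register_event_type('on_recognize')
        super().__init__(**kwargs)

    def goHome(self, label, username):
        # fire on_recognize with the liveness label and the user name
        self.dispatch('on_recognize', label, username)

    def on_recognize(self, *args):
        # default handler; the real work happens in the bound callback
        pass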
Example #8
    def frames():
        camera = cv2.VideoCapture(Camera.video_source)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')

        while True:
            # read current frame
            _, frame = camera.read()
            if frame is None:
                from app import app
                with app.app_context():
                    # return redirect(url_for('ctrl_video'))
                    # from flask import redirect, url_for
                    # redirect = url_for('ctrl_video')
                    msg = {"msg": "success"}
                    r = requests.post("http://127.0.0.1:5000/", json=msg)
                break
            # grab the frame dimensions and convert it to a blob
            frame = utilities.resize(frame, 400)
            (h, w) = frame.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                (300, 300), (104.0, 177.0, 123.0))

            # pass the blob through the network and obtain the detections and
            # predictions
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in range(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated with the
                # prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections
                if confidence > 0.85:
                    # compute the (x, y)-coordinates of the bounding box for
                    # the face and extract the face ROI
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    # ensure the detected bounding box does not fall outside
                    # the dimensions of the frame
                    startX = max(0, startX)
                    startY = max(0, startY)
                    endX = min(w, endX)
                    endY = min(h, endY)

                    # extract the face ROI and then preprocess it in exactly
                    # the same manner as our training data
                    face = frame[startY:endY, startX:endX]
                    if face.size == 0:  # guard against empty ROIs
                        continue
                    face = cv2.resize(face, (32, 32))
                    face = face.astype("float") / 255.0
                    face = img_to_array(face)
                    face = np.expand_dims(face, axis=0)

                    # pass the face ROI through the trained liveness detector
                    # model to determine if the face is "real" or "fake"
                    preds = model.predict(face)[0]
                    j = np.argmax(preds)
                    label = le.classes_[j]
                    if label == "Real":
                        rect_color = (255, 128, 255)
                    else:
                        rect_color = (0, 0, 255)
                    # draw the label and bounding box on the frame
                    label = "{}: {:.2f}".format(label, preds[j])
                    cv2.rectangle(frame, (startX - 1, startY - 20), (startX - int((startX - endX) / 1.5), startY),
                        rect_color, -1)
                    cv2.putText(frame, label, (startX, startY - 5),
                        cv2.FONT_HERSHEY_PLAIN, 0.80, (255, 255, 255), 1, cv2.LINE_AA)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                        rect_color, 2)

            cv2.waitKey(1)
            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', frame)[1].tobytes()
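
When the video source runs dry, the generator above POSTs a JSON message to the app's root URL before breaking. The receiving side would be a small Flask route along these lines; the route and its behavior are assumptions.

from flask import Flask, request

app = Flask(__name__)

@app.route('/', methods=['GET', 'POST'])
def index():
    body = request.get_json(silent=True)
    if request.method == 'POST' and body:
        # the camera generator signals here when its video source ends
        print('[INFO] stream finished:', body)
    return 'ok'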
Example #9
# Ignoring Unwanted classes

COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(PTOTO_PATH, CAFFE_PATH)

# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
# (note: normalization is done via the authors of the MobileNet SSD
# implementation)
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while True:
    ret, image = cap.read()
    image = utilities.resize(image, 350, 350)
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843,
                                 (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    print("[INFO] computing object detections...")
    net.setInput(blob)
    detections = net.forward()
    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is