Example #1
import sys
import time

import cv2
# Camera, LRTracker, and ContourTracker are assumed to live in the project's
# local lib module, as the in-function imports below suggest.
from lib import Camera


def main():
    try:
        idx = sys.argv.index('--in')
        path = sys.argv[idx + 1]
    except (ValueError, IndexError):  # '--in' missing, or given without a value
        path = None

    cam = Camera(path)

    if '--lr' in sys.argv:
        from lib import LRTracker
        ln = LRTracker(cam)
    else:
        from lib import ContourTracker
        ln = ContourTracker(cam)

    if '--preview' in sys.argv:
        ln.preview(True)

    autoplay = '--manual' not in sys.argv

    while True:
        print(ln.track_line())

        if autoplay:
            # Pump the GUI event loop briefly, then pace the loop; Esc quits
            if cv2.waitKey(1) == 27:
                break
            time.sleep(0.1)
        else:
            # Block until a key is pressed; Esc quits
            if cv2.waitKey(0) == 27:
                break

    cv2.destroyAllWindows()
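The hand-rolled sys.argv parsing above is easy to get wrong (the IndexError case shows why). A minimal sketch of the same flag handling with argparse, assuming lib exposes Camera, LRTracker, and ContourTracker as the original implies:

import argparse

import cv2
from lib import Camera, ContourTracker, LRTracker


def main():
    parser = argparse.ArgumentParser()
    # 'in' is a Python keyword, so the destination must be set explicitly
    parser.add_argument('--in', dest='path', default=None, help='input source')
    parser.add_argument('--lr', action='store_true', help='use LRTracker')
    parser.add_argument('--preview', action='store_true')
    parser.add_argument('--manual', action='store_true')
    args = parser.parse_args()

    cam = Camera(args.path)
    ln = LRTracker(cam) if args.lr else ContourTracker(cam)
    if args.preview:
        ln.preview(True)

    while True:
        print(ln.track_line())
        # Block in manual mode, poll in autoplay mode; Esc quits either way
        if cv2.waitKey(0 if args.manual else 1) == 27:
            break

    cv2.destroyAllWindows()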
Example #2
import sys

import cv2
# Camera and the trackers are assumed to come from the project's local lib module.
from lib import Camera


def main():
    cam = Camera()

    if '--lr' in sys.argv:
        from lib import LRTracker
        ln = LRTracker(cam)
    else:
        from lib import ContourTracker
        ln = ContourTracker(cam)

    if '--preview' in sys.argv:
        ln.preview(True)

    while True:
        print(ln.track_line())
        if cv2.waitKey(1) == 27:  # Esc quits
            break

    cv2.destroyAllWindows()
Example #3
def ConvertImageToGrayscale(self):
    """
    Convert every image in self.ImagesList to grayscale and save the result
    into the Cambire subdirectory.
    """
    for ImageName in self.ImagesList:
        # IMREAD_GRAYSCALE converts the image on load
        GrayImage = cv2.imread(ImageName, cv2.IMREAD_GRAYSCALE)
        OutputName = "Grayscale_" + ImageName

        # Write into the subdirectory directly rather than os.chdir'ing into
        # ".\Cambire" and back; the unescaped backslash in that literal is
        # also deprecated Python string syntax
        cv2.imwrite(os.path.join("Cambire", OutputName), GrayImage)

        cv2.imshow(OutputName, GrayImage)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
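Re-reading each file from disk with IMREAD_GRAYSCALE works, but when an image is already in memory, cv2.cvtColor converts it directly. A small self-contained sketch (the file names are illustrative):

import os

import cv2

os.makedirs("Cambire", exist_ok=True)
img = cv2.imread("example.jpg")  # loaded in color, BGR channel order
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join("Cambire", "Grayscale_example.jpg"), gray)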
Example #4
import cv2

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread("mypic.jpg", 1)

# Face detection works better on grayscale images, so convert BGR to grayscale first
grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(grey_img, scaleFactor=1.1, minNeighbors=5)
print(faces)

# Draw a rectangle around each detected face
for x, y, w, h in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

# Show the result at half size so it fits on screen
resized_img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
cv2.imshow("My pic", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
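A bare file name like "haarcascade_frontalface_default.xml" only resolves if the XML happens to sit in the working directory. The opencv-python wheel ships the stock cascades alongside the package, so a more portable sketch is:

import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")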

Example #5

import os
from pathlib import Path

import cv2
import face_recognition
import numpy as np

# authorised() is a project-local helper referenced below but not defined in
# this snippet.


def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # For storing the name corresponding to the encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U',delimiter=',')
    else:
        people = []



    # MAIN WORK

    # Capture video indefinitely
    video_capture = cv2.VideoCapture(0)
    # TODO: load known_encodings and people from the database instead of CSV files

    # State shared across loop iterations
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    # Main processing loop
    while True:

        # 
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED: 
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame 
        # 
        ret, frame = video_capture.read()

        # Shrink the frame to 1/4 of its size for faster processing
        small_frame = cv2.resize(frame, (0,0), fx=.25, fy=.25)

        if process_this_frame:
            # face_recognition expects RGB input, but OpenCV frames are BGR
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # Find the face locations
            face_locations = face_recognition.face_locations(rgb_small_frame)
            # Find the 128-dimensional face encodings
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            other = 0  # Count of unrecognized people seen this frame
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings, face_encoding)
                name = "Unknown"

                # Look up the first matching known encoding
                for i in range(len(match)):
                    if match[i]:
                        name = people[i]
                        break

                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
        
        # Send the recognized names to the parent process
        print(face_names, flush=True)
            
        process_this_frame = not process_this_frame
        

        #Display the border
        for (top, right, bottom, left),name in zip(face_locations, face_names):
            # Scale the coordinates back up by 4 to match the full-size frame
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Assume the person is authorized until proven otherwise
            color =  (0,255,0)  #GREEN
            if not authorised(name):
                # Unauthorized person
                color = (0,0,255) #RED
                # Print so the parent Node.js process can pick it up
                print(name, flush=True)

            #Display border
            cv2.rectangle(frame, (left,top), (right,bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left,bottom-35), (right, bottom), color, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name,(left+6, bottom-6), font, 1.0, (255,255,255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Press Esc to quit (waitKey returns 27 for the Esc key)
        if cv2.waitKey(100) == 27:
            break
            
    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
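None of these snippets define authorised(); it is a project-local helper. A minimal hypothetical implementation, consistent with how the loop above labels strangers "Unknown1", "Unknown2", and so on:

def authorised(name):
    # Hypothetical helper: recognized people keep their CSV name, while
    # unrecognized faces get auto-numbered "Unknown" labels
    return not name.startswith("Unknown")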
Example #6
import cv2 as cv

# Both classifiers must exist before this point; the face cascade file name is
# an assumption (the stock OpenCV cascade), since only the eye classifier's
# load call appears in the original snippet.
face_cascade = cv.CascadeClassifier()
face_cascade.load(cv.samples.findFile('haarcascade_frontalface_alt.xml'))
eyes_cascade = cv.CascadeClassifier()
eyes_cascade.load(cv.samples.findFile('eyeclassifier.xml'))
camInput = cv.VideoCapture(0)

while True:
    booleanCheck, frame = camInput.read()

    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    frame_gray = cv.equalizeHist(frame_gray)
    faces = face_cascade.detectMultiScale(frame_gray)

    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360,
                           (255, 0, 255), 4)
        faceROI = frame_gray[y:y + h, x:x + w]

        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)

    cv.imshow('Capture - Face detection', frame)
    keyPressed = cv.waitKey(1)
    if keyPressed == ord('s'):  # press 's' to stop
        break

camInput.release()

cv.destroyAllWindows()
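Both detectMultiScale calls here use the defaults; scaleFactor and minNeighbors trade detection recall against false positives and runtime. A hedged tuning sketch for the face call inside the loop (the values are typical starting points, not the source's):

# Smaller scaleFactor scans more image-pyramid levels (slower, more thorough);
# larger minNeighbors demands more overlapping hits before reporting a face
faces = face_cascade.detectMultiScale(
    frame_gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))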
Example #7
import os
import time
from pathlib import Path

import cv2
import face_recognition
import numpy as np

# authorised() and send_email() are project-local helpers referenced below but
# not defined in this snippet.


def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # For storing the name corresponding to the encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U', delimiter=',')
    else:
        people = []
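
    # Shape caveat (assumption): np.genfromtxt returns a 1-D array when the CSV
    # holds a single row, but people[i][0] below expects one row per person, so
    # normalize to 2-D when anything was loaded
    if len(people):
        people = np.atleast_2d(people)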

    # Capture Video indefinitely
    video_capture = cv2.VideoCapture(0)

    original_width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    original_height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)

    # Some important variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:

        #
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED:
        #             Criminal
        #         elif PINK:
        #             Missing person
        #         elif BLUE:
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame
        #

        # QR code scanning elsewhere in the app changes the capture size, which
        # also affects this process, so restore the original dimensions first
        if video_capture.get(cv2.CAP_PROP_FRAME_WIDTH
                             ) != original_width or video_capture.get(
                                 cv2.CAP_PROP_FRAME_HEIGHT) != original_height:
            video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, original_width)
            video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, original_height)
        ret, frame = video_capture.read()

        # Don't proceed further until camera is able to capture pics
        if not ret:
            continue
        # Shrink the frame to 1/4 of its size for faster processing
        small_frame = cv2.resize(frame, (0, 0), fx=.25, fy=.25)

        if process_this_frame:
            # face_recognition expects RGB input, but OpenCV frames are BGR
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # Find the face locations
            face_locations = face_recognition.face_locations(rgb_small_frame)
            # Find the 128-dimensional face encodings
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            face_type = []
            face_number = []
            other = 0  # Count of unrecognized people seen this frame
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings,
                                                       face_encoding,
                                                       tolerance=0.5)
                name = "Unknown"
                vtype = ""
                vnumber = ""

                # Look up the first matching known encoding
                for i in range(len(match)):
                    if match[i]:

                        name = people[i][0]
                        vtype = people[i][1]
                        vnumber = people[i][2]
                        break
                # Give each unknown face a distinct numbered label
                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
                face_type.append(vtype)
                face_number.append(vnumber)
            print(face_names, flush=True)
        process_this_frame = not process_this_frame

        # Display the border
        for (top, right, bottom,
             left), name, vtype, vnumber in zip(face_locations, face_names,
                                                face_type, face_number):

            # Scale the coordinates back up by 4 to match the full-size frame
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Assume the person is authorized until proven otherwise
            color = (0, 255, 0)  #GREEN
            if not authorised(name):
                # Unrecognized person
                color = (255, 0, 0)  #Blue
                directory = "./public/photos/"
                filename = directory + time.strftime("%Y%m%d-%H%M%S") + ".jpeg"
                # The hour-stamped directory acts as a marker so that at most
                # one snapshot and alert are produced per hour
                if not os.path.exists(directory + time.strftime("%Y%m%d-%H")):
                    os.makedirs(directory + time.strftime("%Y%m%d-%H"))
                    crop_img = frame[top:bottom, left:right]
                    cv2.imwrite(filename, crop_img)
                    txt = "Hi, Identified at " + time.strftime(
                        "%d-%m-%Y %H:%M:%S"
                    ) + ". Use the link to verify or report http://localhost:3000/load"
                    #send_email(filename,txt)
            else:
                if vtype:
                    if vtype == "Missing":
                        color = (170, 80, 255)  #Pink
                        Missing_txt = "Missing person found, ID :" + vnumber
                        print("Missing_txt: ", Missing_txt)
                        #send_email(filename,Missing_txt)
                    else:
                        color = (0, 0, 255)  #RED

            # Display border
            cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color,
                          cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX

            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Press Esc to quit (waitKey returns 27 for the Esc key)
        if cv2.waitKey(100) == 27:
            break

    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
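send_email() is referenced above (commented out) but never shown. A hypothetical stub using only the standard library, so the calls could be re-enabled; the SMTP host and addresses are placeholders:

import smtplib
from email.message import EmailMessage


def send_email(filename, txt):
    # Hypothetical: attach the snapshot and send the alert text
    msg = EmailMessage()
    msg["Subject"] = "Face alert"
    msg["From"] = "alerts@example.com"      # placeholder
    msg["To"] = "operator@example.com"      # placeholder
    msg.set_content(txt)
    with open(filename, "rb") as f:
        msg.add_attachment(f.read(), maintype="image", subtype="jpeg",
                           filename=filename)
    with smtplib.SMTP("localhost") as server:  # placeholder SMTP host
        server.send_message(msg)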
Example #8
import argparse
import os
from pathlib import Path

import cv2
import face_recognition
import numpy as np
import openface

# dlibModelDir (the directory holding dlib's trained models) and the
# authorised() helper are assumed to be defined elsewhere in the project.


def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # For storing the name corresponding to the encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U', delimiter=',')
    else:
        people = []

    # MAIN WORK

    # Parse the command-line options, then capture video indefinitely
    parser = argparse.ArgumentParser()
    parser.add_argument('--dlibFacePredictor',
                        type=str,
                        help="Path to dlib's face predictor.",
                        default=os.path.join(
                            dlibModelDir,
                            "shape_predictor_68_face_landmarks.dat"))
    parser.add_argument('--networkModel',
                        type=str,
                        help="Path to Torch network model.",
                        default='nn4.small2.3d.v1.t7')
    # Download the 3D model from:
    # https://storage.cmusatyalab.org/openface-models/nn4.small2.3d.v1.t7
    parser.add_argument('--imgDim',
                        type=int,
                        help="Default image dimension.",
                        default=96)
    parser.add_argument(
        '--captureDevice',
        type=int,
        default=0,
        help='Capture device. 0 for laptop webcam and 1 for USB webcam')
    # parser.add_argument('--width', type=int, default=640)
    # parser.add_argument('--height', type=int, default=480)
    parser.add_argument('--width', type=int, default=1280)
    parser.add_argument('--height', type=int, default=800)
    parser.add_argument('--scale', type=float, default=0.25)
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')

    args = parser.parse_args()

    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(args.networkModel,
                                  imgDim=args.imgDim,
                                  cuda=args.cuda)

    video_capture = cv2.VideoCapture(args.captureDevice)

    original_width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    original_height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    # TODO: load known_encodings and people from the database instead of CSV files

    # State shared across loop iterations
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    # Main processing loop
    while True:

        #
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED:
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame
        #

        # QR code scanning elsewhere in the app changes the capture size, which
        # also affects this process, so restore the original dimensions first
        if video_capture.get(cv2.CAP_PROP_FRAME_WIDTH
                             ) != original_width or video_capture.get(
                                 cv2.CAP_PROP_FRAME_HEIGHT) != original_height:
            video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, original_width)
            video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, original_height)
        ret, frame = video_capture.read()

        # Don't proceed further until camera is able to capture pics
        if not ret:
            continue
        # Shrink the frame to 1/4 of its size for faster processing
        small_frame = cv2.resize(frame, (0, 0), fx=.25, fy=.25)

        if process_this_frame:
            # face_recognition expects RGB input, but OpenCV frames are BGR
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # Find the face locations
            face_locations = face_recognition.face_locations(rgb_small_frame)
            # Find the 128-dimensional face encodings
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            other = 0  # Count of unrecognized people seen this frame
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings,
                                                       face_encoding)
                name = "Unknown"

                # Look up the first matching known encoding
                for i in range(len(match)):
                    if match[i]:
                        name = people[i]
                        break
                # Give each unknown face a distinct numbered label
                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
            print(face_names)

        process_this_frame = not process_this_frame

        #Display the border
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale the coordinates back up by 4 to match the full-size frame
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Assume the person is authorized until proven otherwise
            color = (0, 255, 0)  #GREEN
            if not authorised(name):
                # Unauthorized person
                color = (0, 0, 255)  #RED
                # Printing here would let the parent Node.js process react:
                # print(name, flush=True)

            #Display border
            cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color,
                          cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Press Esc to quit (waitKey returns 27 for the Esc key)
        if cv2.waitKey(100) == 27:
            break

    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
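Note that align and net are constructed in this example but never used inside the loop; recognition still goes through face_recognition. A sketch of how they would typically produce an OpenFace embedding, following the upstream OpenFace demo API (assumed available as shown):

import openface


def openface_embedding(rgb_img, align, net, img_dim=96):
    # Detect the largest face, align it to a canonical pose, and run the
    # Torch network to get a 128-dimensional embedding
    bb = align.getLargestFaceBoundingBox(rgb_img)
    if bb is None:
        return None
    aligned = align.align(img_dim, rgb_img, bb,
                          landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if aligned is None:
        return None
    return net.forward(aligned)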