Example #1
 def __init__(self, model, camera_id, cascade_filename, video_file):
     self.model = model
     self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                      minNeighbors=5,
                                      scaleFactor=1.1)
     self.TagGenerator = TagGenerator()
     self.video_file = video_file
Example #2
def find_faces(image):
    """Extract the faces from an image, if any.

    Returns the regions of interest for each detected face.
    """
    log.debug("locating regions of interest")
    ar = numpy.asarray(image)
    detector = CascadedDetector(minSize=(1, 1))
    return detector.detect(ar)
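A minimal usage sketch for the helper above. The image-loading step and the file name are assumptions; the snippet only shows how the returned regions are typically consumed:

# Hypothetical usage of find_faces(); assumes PIL for loading and that
# CascadedDetector is importable, as in the other snippets on this page.
from PIL import Image

image = Image.open("group_photo.jpg")  # placeholder file name
for (x0, y0, x1, y1) in find_faces(image):
    print("face at (%d, %d)-(%d, %d)" % (x0, y0, x1, y1))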
Example #3
	def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
		self.dst_dir = dst_dir
		self.subject_name = subject_name
		self.face_sz = face_sz
		self.cam = create_capture(video_src)
		self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
		self.stored = 0
Example #4
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.cam = create_capture(camera_id)

        self.cap = cv2.VideoCapture('http://localhost:8080/stream.ogg')

        self.fajl = open("../web/data.txt", 'w+')
Example #5
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
        self.cam = create_capture(camera_id)
            
    def run(self):
        while True:
            ret, frame = self.cam.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            for i,r in enumerate(self.detector.detect(img)):
                x0,y0,x1,y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
                # Draw the predicted name (folder name...):
                draw_str(imgout, (x0-20,y0-20), self.model.subject_names[prediction])
            cv2.imshow('videofacerec', imgout)
            # Show image & exit on escape:
            ch = cv2.waitKey(10)
            if ch == 27:
                break
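These App classes are usually wired up from a short entry point; a sketch under the assumption that the model was saved with facerec's save_model/load_model helpers (the paths are placeholders):

# Hypothetical entry point for the App class above.
if __name__ == '__main__':
    from facerec.serialization import load_model  # assumed helper module
    model = load_model('model.pkl')               # placeholder model file
    App(model, camera_id=0,
        cascade_filename='haarcascade_frontalface_alt2.xml').run()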
Example #6
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
        #self.cam = create_capture(camera_id)
        self.cap = cv2.VideoCapture('http://192.168.0.231:8080/stream.ogg')
    def run(self):
        while True:
            #ret, frame = self.cam.read()
            ret, frame = self.cap.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            for i,r in enumerate(self.detector.detect(img)):
                x0,y0,x1,y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
                # Draw the predicted name (folder name...):
                draw_str(imgout, (x0-20,y0-20), self.model.subject_names[prediction])
            cv2.imshow('videofacerec', imgout)
            # Show image & exit on escape:
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Example #7
 def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="haarcascade_frontalface_alt2.xml"):
     self.dst_dir = dst_dir
     self.subject_name = subject_name
     self.face_sz = face_sz
     self.cam = create_capture(video_src)
     self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
     self.stored = 0
Example #8
	def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="/Users/george/job/__webdocs/webguerillas/opencv/project/facereg/opencv/OpenCV-2.4.2/data/haarcascades/haarcascade_frontalface_alt2.xml"):
		self.dst_dir = dst_dir
		self.subject_name = subject_name
		self.face_sz = face_sz
		self.cam = create_capture(video_src)
		self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
		self.stored = 0
Example #9
 def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
     self.dst_dir = dst_dir
     self.subject_name = subject_name
     self.face_sz = face_sz
     self.cam = create_capture(video_src)
     self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
     self.stored = 0
Example #10
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.cam = create_capture(camera_id)

    def run(self):
        while True:
            ret, frame = self.cam.read()

            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r

                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.model.image_size,
                                  interpolation=cv2.INTER_CUBIC)

                prediction = self.model.predict(face)[0]

                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)

                draw_str(imgout, (x0 - 20, y0 - 20),
                         self.model.subject_names[prediction])
            cv2.imshow('videofacerec', imgout)

            ch = cv2.waitKey(10)
            if ch == 27:
                break
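None of these loops release the capture device or close the preview window when they exit; a small cleanup that could follow the while loop, assuming create_capture returns a plain cv2.VideoCapture (a suggestion, not part of the original example):

# Suggested cleanup after the loop breaks:
self.cam.release()
cv2.destroyAllWindows()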
Example #11
class App(object):
    def __init__(self, model, camera_id, cascade_filename, video_file):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.TagGenerator = TagGenerator()
        self.video_file = video_file
        #self.cam = create_capture(camera_id)

    def run(self):
        # Path to video filenames
        vidcap = cv2.VideoCapture(self.video_file)
        while True:
            # Skip 10 frames at a time
            for i in xrange(10):
                vidcap.grab()
            # Retrieve frame for detection
            ret, frame = vidcap.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.model.image_size,
                                  interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction, prediction_meta = self.model.predict(face)
                predict_distance = prediction_meta['distances'][0]

                # Drop detection if threshold "distance" above value
                if predict_distance < 1200:
                    print predict_distance
                    # Draw the face area in image:
                    cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                    # Draw the predicted name (folder name...):
                    draw_str(imgout, (x0 - 20, y0 - 20),
                             self.model.subject_names[prediction])
                    self.TagGenerator.addAthlete(
                        self.model.subject_names[prediction],
                        # property 0 is CAP_PROP_POS_MSEC, so this is the timestamp in seconds:
                        vidcap.get(0) / 1000)
            cv2.imshow('videofacerec', imgout)
            # Show image & exit on escape:
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            # End program near the end of the video (property 2 is CAP_PROP_POS_AVI_RATIO, the relative position):
            if vidcap.get(2) >= 0.90:
                print self.TagGenerator.namedb
                break
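The magic numbers passed to vidcap.get() above are capture property ids. With OpenCV 3+ they have named constants, so the same reads can be written more explicitly (a sketch, not the author's code):

import cv2

vidcap = cv2.VideoCapture('video.avi')  # placeholder path
# Equivalent property reads using named constants (OpenCV 3+):
timestamp_s = vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0  # position in seconds
progress = vidcap.get(cv2.CAP_PROP_POS_AVI_RATIO)         # fraction of the file played, 0.0-1.0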
Example #12
 def __init__(
     self,
     video_src,
     dataset_fn,
     face_sz=(130, 130),
     cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"
 ):
     self.face_sz = face_sz
     self.cam = create_capture(video_src)
     ret, self.frame = self.cam.read()
     self.detector = CascadedDetector(cascade_fn=cascade_fn,
                                      minNeighbors=5,
                                      scaleFactor=1.1)
     # define the feature extraction chain & the classifier
     feature = ChainOperator(TanTriggsPreprocessing(), LBP())
     classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
     # build the predictable model
     self.predictor = PredictableModel(feature, classifier)
     # read the data & compute the predictor
     self.dataSet = DataSet(filename=dataset_fn, sz=self.face_sz)
     self.predictor.compute(self.dataSet.data, self.dataSet.labels)
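Once compute() has run, the predictor classifies a preprocessed face crop. A sketch of the call, hedged because the facerec API differs between versions (some return a bare label, others a [label, metadata] pair; both shapes appear in the examples on this page):

# Hypothetical prediction on a grayscale crop already resized to face_sz.
result = self.predictor.predict(face)
label = result[0] if isinstance(result, (list, tuple)) else result
print(self.dataSet.names[label])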
Example #13
class App(object):
    def __init__(
        self,
        video_src,
        dst_dir,
        subject_name,
        face_sz=(130, 130),
        cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"
    ):
        self.dst_dir = dst_dir
        self.subject_name = subject_name
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        self.detector = CascadedDetector(cascade_fn=cascade_fn,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.stored = 0

    def saveImage(self, src):
        out_fn = "%s_%d.png" % (self.subject_name, self.stored)
        out_fn = os.path.join(self.dst_dir, out_fn)
        cv2.imwrite(out_fn, src)
        self.stored = self.stored + 1

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            faces = []
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1].copy()
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.face_sz,
                                  interpolation=cv2.INTER_CUBIC)
                # draw a rectangle to show the detection
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 1)
                # and append to currently detected faces
                faces.append(face)
            cv2.imshow("detections", imgout)
            # wait for a key press
            ch = cv2.waitKey(10)
            # store the currently detected faces
            if (ch == ord('s')) and (len(faces) > 0):
                for face in faces:
                    self.saveImage(face)
            if ch == 27 or ch == ord('q'):
                break
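A sketch of how this capture tool might be started; the video source, directory and subject name are placeholders:

# Hypothetical invocation of the capture tool above.
if __name__ == '__main__':
    App(video_src=0, dst_dir='/tmp/faces', subject_name='philipp').run()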
Example #14
class App(object):
    def __init__(
            self,
            video_src,
            dst_dir,
            subject_name,
            face_sz=(130, 130),
            cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"
    ):
        self.dst_dir = dst_dir
        self.subject_name = subject_name
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        self.detector = CascadedDetector(
            cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        self.stored = 0

    def saveImage(self, src):
        out_fn = "%s_%d.png" % (self.subject_name, self.stored)
        out_fn = os.path.join(self.dst_dir, out_fn)
        cv2.imwrite(out_fn, src)
        self.stored = self.stored + 1

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # resize the frame to half the original size
            img = cv2.resize(
                frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            faces = []
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1].copy()
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(
                    face, self.face_sz, interpolation=cv2.INTER_CUBIC)
                # draw a rectangle to show the detection
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 1)
                # and append to currently detected faces
                faces.append(face)
            cv2.imshow("detections", imgout)
            # wait for a key press
            ch = cv2.waitKey(10)
            # store the currently detected faces
            if (ch == ord('s')) and (len(faces) > 0):
                for face in faces:
                    self.saveImage(face)
            if ch == 27 or ch == ord('q'):
                break
Example #15
	def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn="/Users/george/job/__webdocs/webguerillas/opencv/project/facereg/opencv/OpenCV-2.4.2/data/haarcascades/haarcascade_frontalface_alt2.xml"):
		self.face_sz = face_sz
		self.cam = create_capture(video_src)
		ret, self.frame = self.cam.read()
		self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
		# define the feature extraction chain & the classifier
		feature = ChainOperator(TanTriggsPreprocessing(), LBP())
		classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
		# build the predictable model
		self.predictor = PredictableModel(feature, classifier)
		# read the data & compute the predictor
		self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
		self.predictor.compute(self.dataSet.data,self.dataSet.labels)
Example #16
 def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
     self.face_sz = face_sz
     self.cam = create_capture(video_src)
     ret, self.frame = self.cam.read()
     self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
     # define the feature extraction chain & the classifier
     feature = ChainOperator(TanTriggsPreprocessing(), LBP())
     classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
     # build the predictable model
     self.predictor = PredictableModel(feature, classifier)
     # read the data & compute the predictor
     self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
     self.predictor.compute(self.dataSet.data,self.dataSet.labels)
Example #17
class App(object):
    def __init__(
        self,
        video_src,
        dataset_fn,
        face_sz=(130, 130),
        cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"
    ):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        # define the feature extraction chain & the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn, sz=self.face_sz)
        self.predictor.compute(self.dataSet.data, self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.face_sz,
                                  interpolation=cv2.INTER_CUBIC)
                # get a prediction
                prediction = self.predictor.predict(face)[0]
                # draw the face area
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # draw the predicted name (folder name...)
                draw_str(imgout, (x0 - 20, y0 - 20),
                         self.dataSet.names[prediction])
            cv2.imshow('videofacerec', imgout)
            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Example #18
    def __init__(self, model, camera_id, cascade_filename):
        signal.signal(signal.SIGINT, self.shutdown)

        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)

        try:
            self.cam = create_capture(camera_id)
        except Exception:
            to_node("error", "Camera '%s' unable to connect." % camera_id)
            sys.exit()

        self.user = None
        self.faces = None
        self.has_changed = {"face_count": False, "user": False}
        to_node(
            "status", {
                "camera": str(camera_id),
                "model": str(model),
                "detector": str(self.detector)
            })
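to_node() is not defined in this snippet. In the MagicMirror-style setups this pattern comes from, such a helper usually serializes a message to stdout for a Node.js host process to read; a sketch of that assumption:

import json
import sys

def to_node(type, message):
    # Assumed helper: send a JSON message to the parent Node.js process.
    print(json.dumps({type: message}))
    sys.stdout.flush()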
Example #19
class App(object):
    def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        # define the feature extraction chain & the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
        self.predictor.compute(self.dataSet.data,self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()

            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            for i,r in enumerate(self.detector.detect(img)):
                x0,y0,x1,y1 = r

                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)

                # get a prediction
                prediction = self.predictor.predict(face)

                # draw the face area
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)

                # draw the predicted name (folder name...)
                draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])

            cv2.imshow('videofacerec', imgout)

            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Example #20
class recognizer(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        #self.cam = create_capture(camera_id)

    def recFace(self, picture):
        #ret, frame = self.cam.read()
        frame = picture
        # Alternative test images, kept commented out for experiments:
        '''
        frame = cv2.imread('juan.pgm')
        frame = cv2.imread('leo.png')
        frame = cv2.imread('leonora.pgm')
        frame = cv2.imread('pedro.pgm')
        '''
        # Resize the frame (kept at full size here; divide by 2 instead to speed up detection):
        img = cv2.resize(frame, (frame.shape[1] / 1, frame.shape[0] / 1),
                         interpolation=cv2.INTER_CUBIC)
        imgout = img.copy()
        name = 'nesuno'  # default return value when no face is recognized
        for i, r in enumerate(self.detector.detect(img)):
            x0, y0, x1, y1 = r
            # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
            face = img[y0:y1, x0:x1]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            face = cv2.resize(face,
                              self.model.image_size,
                              interpolation=cv2.INTER_CUBIC)
            # Get a prediction from the model:
            prediction = self.model.predict(face)[0]
            # Draw the face area in image:
            cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
            # Draw the predicted name (folder name...):
            name = self.model.subject_names[prediction]
            draw_str(imgout, (x0 - 20, y0 - 20),
                     self.model.subject_names[prediction])
        cv2.imshow('NAO CAM', imgout)
        # Show image & exit on escape:
        ch = cv2.waitKey(10)
        return name
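A sketch of a driver loop for recFace(); the capture source and the model object are assumptions:

import cv2

# Hypothetical driver for the recognizer above; the window title suggests a NAO camera feed.
cap = cv2.VideoCapture(0)  # placeholder source
rec = recognizer(model, 0, 'haarcascade_frontalface_alt2.xml')  # model assumed loaded elsewhere
while True:
    ret, picture = cap.read()
    if not ret:
        break
    print(rec.recFace(picture))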
Example #21
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.cam = create_capture(camera_id)

    def run(self):
        # imagePath is assumed to be defined at module level elsewhere in this script:
        frame = cv2.imread(imagePath)
        # Resize the frame to half the original size for speeding up the detection process:
        img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                         interpolation=cv2.INTER_CUBIC)
        imgout = img.copy()
        print "Detect the faces..."
        detection = self.detector.detect(img)
        print "{0} faces detected...".format(len(detection))
        for i, r in enumerate(detection):
            x0, y0, x1, y1 = r
            # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
            face = img[y0:y1, x0:x1]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            face = cv2.resize(face,
                              self.model.image_size,
                              interpolation=cv2.INTER_CUBIC)
            # Get a prediction from the model:
            prediction, confidence = self.model.predict(face)
            distance = confidence['distances'][0]
            print "{0} -> distance {1}".format(
                self.model.subject_names[prediction], distance)
            # Draw the face area in image:
            cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
            # Draw the predicted name (folder name...):
            draw_str(imgout, (x0 - 20, y0 - 20),
                     self.model.subject_names[prediction])
            draw_str(imgout, (x0 + 20, y0 + 20), format(distance))

        cv2.imshow('videofacerec', imgout)
        cv2.imwrite(filename4, imgout)  # filename4 is assumed defined at module level
Example #22
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
        self.cam = create_capture(camera_id)
        startGui(self.cam, self.getFrame)

    def getFrame(self):
        ret, frame = self.cam.read()
        # Resize the frame to half the original size for speeding up the detection process:
        img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
        imgout = img.copy()
        predictions = []
        for i,r in enumerate(self.detector.detect(img)):
            x0,y0,x1,y1 = r
            # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
            face = img[y0:y1, x0:x1]
            face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
            face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
            face = cv2.equalizeHist(face)
            # Get a prediction from the model:
            prediction = self.model.predict(face)
            prediction_id = getWeightedPrediction(prediction, self.model.subject_names)
            #print ("La",prediction)
            #print("Location ", x0, y0 )
            # Draw the face area in image:
            cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
            # Draw the predicted name (folder name...):
            if prediction_id == 0:
                draw_str(imgout, (x0-20,y0-20), "unknown")
            else:
                draw_str(imgout, (x0-20,y0-20), self.model.subject_names[prediction_id])

            # # TODO:
            # # draw stats to see matching
            # draw_stats(imgout, (200,230), prediction, self.model)
            predictions.append(prediction)
        predictions.append(self.model.subject_names)
        return imgout, predictions
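getWeightedPrediction() is project-specific and not shown here. One plausible shape, treating a large nearest-neighbor distance as "unknown" (entirely hypothetical, including the threshold):

def getWeightedPrediction(prediction, subject_names, max_distance=700.0):
    # Hypothetical: prediction is the [label, metadata] pair returned by
    # model.predict(); the example above reserves id 0 for "unknown".
    # subject_names is kept only to match the call site above.
    label, meta = prediction
    if meta['distances'][0] > max_distance:
        return 0
    return label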
Example #23
 def __init__(self, model, camera_id, cascade_filename):
     self.model = model
     self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                      minNeighbors=5,
                                      scaleFactor=1.1)
     self.cam = create_capture(camera_id)
Example #24
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=2, scaleFactor=1.2)
        self.cam = create_capture(camera_id)
            
    def run(self):
        for i in range(vnum_threads):
          worker = Thread(target=speak, args=(vq,))
          worker.setDaemon(True)
          worker.start()
        whosHere = {}
        oldLoc = {}  #Tracking persons old location
        foundPerson = None
        while True:
            ret, frame = self.cam.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            # Clean up image contrast automatically
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            imgout = img.copy()
            img = clahe.apply(cv2.cvtColor(img,cv2.COLOR_BGR2GRAY))
            # See if we've found someone (run the detector once and reuse the result):
            detect = self.detector.detect(img)
            if detect.size == 0:
              foundPerson = None
            else:
              #detectTimeBefore = time.time()
              #detectTimeCalc = time.time() - detectTimeBefore
              #print("Time to Detect: "+str(detectTimeCalc))
              for i,r in enumerate(detect):
                  marker = (0,255,0)
                  x0,y0,x1,y1 = r
                  # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                  face = img[y0:y1, x0:x1]
                  face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
                  blurLevel = cv2.Laplacian(face, cv2.CV_64F).var()
                  # Get a prediction from the model:
                  predInfo = self.model.predict(face)
                  distance = predInfo[1]['distances'][0]
                  prediction = predInfo[0]
                  #cv2.imwrite("faces/"+str(time.time())+".jpg", face)
                  if distance > 200: #and not trainName:
                     foundPerson = 'Unknown'
                     if blurLevel > 600:
                       FACESDIR = "faces/"
                       facesImgCount = len([name for name in os.listdir(FACESDIR) if os.path.isfile(os.path.join(FACESDIR, name))])
                       if facesImgCount < 5000:                                                 
                         cv2.imwrite("faces/"+str(time.time())+".jpg", face)
                       else:
                         print("Max number of images (5000) are in the faces folder. Please delete them")
                  else:
                     foundPerson = self.model.subject_names[prediction]
                  if len(oldLoc) > 0 and distance > 200:  # Determine person with heuristics based on last location
                    for key,value in oldLoc.iteritems():
                       if np.isclose(value, r, atol=50.0).all() and foundPerson != key:  # Within 50 pixels of any direction
                         # Make sure we don't already have enough photos of this person (limit 1000) and the image isn't too blurry.
                         DIR = "pictures/"+key
                         personImgCount = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
                         if personImgCount < 1000 and blurLevel > 600:  # blurLevel higher the better
                           cv2.imwrite(DIR+"/"+str(time.time())+".jpg", face)
                         foundPerson = key
                         marker = (0,0,255)
                  if foundPerson not in whosHere.keys() and foundPerson != 'Unknown':
                    vq.put("Hello "+foundPerson)
                  whosHere.update({foundPerson:str(time.time())})
                  # Draw the face area in image:
                  cv2.rectangle(imgout, (x0,y0),(x1,y1),(marker),2)
                  # Draw the predicted name (folder name...):
                  draw_str(imgout, (x0,y0-5), foundPerson+" "+str(round(distance,0)))
                  if foundPerson != 'Unknown':
                    oldLoc.update({foundPerson:r})  # Update old person location for heuristics
            checkHere = whosHere.copy()
            for key, value in checkHere.iteritems():  # Check when we last saw someone and remove anyone unseen for 5 seconds.
              if float(value) < (time.time() - 5):
                del whosHere[key]
                if key != 'Unknown':  # oldLoc doesn't ever contain Unknown, because we're aiming to eliminate unknowns.
                  del oldLoc[key]
            draw_str(imgout, (1, frame.shape[0]/2 - 5), str(whosHere.keys()))  # draw the list of who's been seen
            cv2.imshow('Jarvis 0.4a', imgout)
            # Show image & exit on escape:
            ch = cv2.waitKey(1)
            if ch == 27:
                break
            vq.join()
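speak, vq and vnum_threads are module globals not shown in this example. A plausible setup is a queue drained by worker threads that hand each phrase to a text-to-speech command; everything here, including the espeak call, is an assumption:

import subprocess
from Queue import Queue  # Python 2, matching the example's print/iteritems usage

vnum_threads = 2
vq = Queue()

def speak(q):
    # Worker: pull phrases off the queue and speak them.
    while True:
        phrase = q.get()
        subprocess.call(['espeak', phrase])  # assumed TTS backend
        q.task_done()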
Example #25
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
        self.cam = create_capture(camera_id)
        self.cap = cv2.VideoCapture('http://192.168.0.136:8080/test')

    def run(self):
        opened = 0  # renamed from "open" to avoid shadowing the builtin
        sending_data = None
        time1 = time.time()
        detection_dict = {}
        count_frame = 0
        #self.model.subject_names[10] = 'No face found'
        while True:
            #ret, frame = self.cam.read()
            ret, frame = self.cap.read()
            print ret, frame
            # Resize the frame to half the original size:
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            detect = self.detector.detect(img)
            count_frame += 1

            for i,r in enumerate(detect):
                x0,y0,x1,y1 = r
                #print x0,y0,x1,y1
                # (1) Get the face, (2) convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model.predict(face)[0]
                #print self.model.predict(face)
                # Draw the face area in the image:
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
                # Write the predicted name (the folder name...):
                draw_str(imgout, (x0-20,y0-20), self.model.subject_names[prediction])
                if self.model.subject_names[prediction] in detection_dict:
                    detection_dict[self.model.subject_names[prediction]] += 1
                else:
                    detection_dict[self.model.subject_names[prediction]] = 1

            cv2.imshow('videofacerec', imgout)
            # Show the image & exit on escape or after 100 frames:
            ch = cv2.waitKey(10) & 0xFF
            if ch == 27 or count_frame == 100:
                break
        # Find the most frequently recognized face (assumes at least one face was recognized):
        max_freq = max(detection_dict.values())
        max_name = None
        for name in detection_dict:
            if detection_dict[name] == max_freq:
                max_name = name
        print detection_dict
        sum_freq = sum(detection_dict.values())
        print max_freq, sum_freq

        '''if max_freq >= 0.6 * sum_freq:
            sending_data = max_name
        else:
            sending_data = 'Unknown face'
        '''
        sending_data = max_name
        print sending_data
        if opened == 0:
            #open_url(data_dict[sending_data])
            open_url(sending_data)
            opened = 1
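open_url() is also left undefined in this example; a minimal sketch, assuming it maps the recognized name to some URL and opens it in the default browser:

import webbrowser

def open_url(name):
    # Hypothetical helper: the URL scheme is a placeholder.
    webbrowser.open('http://example.com/%s' % name)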
Example #26
 def __init__(self, _model, camera_id, cascade_filename):
     self.model = _model
     self.detector = CascadedDetector(
         cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
     self.cam = create_capture(camera_id)
Example #27
class App(object):
    def __init__(self, _model, camera_id, cascade_filename):
        self.model = _model
        self.detector = CascadedDetector(
            cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
        self.cam = create_capture(camera_id)

    def run(self):
        last_face_recognised = 0
        attendees = set()  # accumulate across frames (moved out of the detection loop)
        while True:
            ret, frame = self.cam.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            image_out = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model.image_size,
                                  interpolation=cv2.INTER_CUBIC)

                # Get a prediction from the model (predict once, reuse the label and metadata):
                prediction, classifier_output = self.model.predict(face)
                # print classifier_output
                predicted_label = prediction
                distance = classifier_output['distances'][0]

                # class_list = set([1, 2, 3, 4]) - from the folders db-level

                if last_face_recognised != self.model.subject_names[prediction]:
                    attendees.add(self.model.subject_names[prediction])
                    last_face_recognised = self.model.subject_names[prediction]
                    print 'Attending -> %s' % attendees

                # thresholding prediction values
                if distance > 1000.0:
                    # grab prediction and store in redis - compare lists
                    # grab last face variable - only store in imgae var is diff -
                    # then add to list - grab date missed - count too
                    # Draw the face area in image:
                    cv2.rectangle(image_out, (x0, y0), (x1, y1), (0, 255, 0), 2)
                    # Draw the predicted name (folder name...):
                    draw_str(image_out, (
                        x0 - 20, y0 - 20), "Unknown Person")
                else:
                    print "Person is known with label %i" % (predicted_label)
                    cv2.rectangle(image_out, (x0, y0), (x1, y1), (0, 255, 0), 2)
                    draw_str(image_out, (
                        x0 - 20, y0 - 20),
                        self.model.subject_names[prediction])

            cv2.imshow('recognised', image_out)
            # Show image & exit on escape:
            ch = cv2.waitKey(33)
            # 1048603 is the escape key with modifier bits set on some builds (1048603 & 0xFF == 27)
            if ch == 1048603:
                break
Example #28
 def __init__(self, model, camera_id, cascade_filename):
     self.model = model
     self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.1)
     #self.cam = create_capture(camera_id)
     self.cap = cv2.VideoCapture('http://192.168.0.231:8080/stream.ogg')
Example #29
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        signal.signal(signal.SIGINT, self.shutdown)

        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)

        try:
            self.cam = create_capture(camera_id)
        except Exception:
            to_node("error", "Camera '%s' unable to connect." % camera_id)
            sys.exit()

        self.user = None
        self.faces = None
        self.has_changed = {"face_count": False, "user": False}
        to_node(
            "status", {
                "camera": str(camera_id),
                "model": str(model),
                "detector": str(self.detector)
            })

    def find_faces(self, img):
        faces = self.detector.detect(img)

        self.has_changed['face_count'] = (self.faces is None
                                          or len(faces) != len(self.faces))
        self.faces = faces

        return faces

    def shutdown(self, signum, stack):
        to_node("status", 'Shutdown -- Cleaning up camera...')
        self.cam.release()
        cv2.destroyAllWindows()
        sys.exit(0)

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(
                frame, (int(frame.shape[1] / 2), int(frame.shape[0] / 2)),
                interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            self.find_faces(img)

            if self.has_changed['face_count']:
                to_node("change", {"face_count": len(self.faces)})

            for i, r in enumerate(self.faces):
                x0, y0, x1, y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.model.image_size,
                                  interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model (label plus distance metadata):
                prediction, meta = self.model.predict(face)
                confidence = meta["distances"][0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # Draw the predicted name (folder name...):
                #print (confidence)

                if confidence < 550 and self.model.subject_names[
                        prediction] is not None:
                    user = self.model.subject_names[prediction]
                else:
                    user = ""

                self.has_changed['user'] = self.user is None or user != self.user

                if self.has_changed['user']:
                    self.user = user
                    to_node("change", {"user": user, "confidence": confidence})
Example #30
    def cameraStack(self):
        model_filename = "model_gender_working.pkl"
        image_size = (200,200)
        [images, labels, subject_names] = read_images("gender/", image_size)
        list_of_labels = list(xrange(max(labels)+1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_gender = load_model(model_filename)

        model_filename = "model_emotion.pkl"
        image_size = (200, 200)
        [images, labels, subject_names] = read_images("emotion/", image_size)
        list_of_labels = list(xrange(max(labels) + 1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_emotion = load_model(model_filename)

        faceCascade = 'haarcascade_frontalface_alt2.xml'
        print self.model_gender.image_size
        print "Starting the face detection"

        self.detector = CascadedDetector(cascade_fn=faceCascade, minNeighbors=5, scaleFactor=1.1)
        self.video_capture = cv2.VideoCapture(0)

        while True:
            ret, frame = self.video_capture.read()
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2), interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                self.x0 = x0
                self.y0 = y0
                self.x1 = x1
                self.y1 = y1
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model_gender.image_size, interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model_gender.predict(face)[0]
                emotion = self.model_emotion.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # Draw the predicted name (folder name...):
                # Derive the displayed "distance" from the face's vertical position:
                self.distance = str(np.asscalar(np.int16(self.y0)))
                draw_str(imgout, (x0 - 20, y0 - 5), self.model_emotion.subject_names[emotion])
                draw_str(imgout, (x0 - 20, y0 - 20), self.model_gender.subject_names[prediction])
                draw_str(imgout, (x0 - 20, y0 - 35), "distance: " + self.distance + "cm")
                self.gender = self.model_gender.subject_names[prediction]
                self.changeSetting(self.currently_playing_button)
                self.changeSetting(self.notifications_button)
                self.changeSetting(self.likes_button)
                self.changeSetting(self.collections_button)
            cv2.imshow('video', imgout)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
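read_images() is the usual facerec example helper; a sketch of it under the assumption of one folder per subject, each holding that subject's face images:

import os
import cv2

def read_images(path, sz=None):
    # Walk one subfolder per subject; return images, integer labels and
    # the subject (folder) names, mirroring the call above.
    images, labels, subject_names = [], [], []
    label = 0
    for subdir in sorted(os.listdir(path)):
        subject_path = os.path.join(path, subdir)
        if not os.path.isdir(subject_path):
            continue
        subject_names.append(subdir)
        for filename in sorted(os.listdir(subject_path)):
            img = cv2.imread(os.path.join(subject_path, filename),
                             cv2.IMREAD_GRAYSCALE)
            if img is None:
                continue
            if sz is not None:
                img = cv2.resize(img, sz)
            images.append(img)
            labels.append(label)
        label += 1
    return [images, labels, subject_names]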
Example #31
class Ui_MainWindow(QtGui.QMainWindow):
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.animation = QtCore.QPropertyAnimation(self, "size")
        self.animation.setEndValue(QtCore.QSize(640, 480))


        #TODO: above to be filled up with the rest of the buttons
        self._state = 0
        self._camera_state = 0
        self.gender = ""
        self._background_clicked = 0
        self.setObjectName(_fromUtf8("MainWindow"))
        self.resize(640, 480)
        self.centralwidget = QtGui.QWidget(self)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.verticalLayout_4 = QtGui.QVBoxLayout()
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.stackedWidget = QtGui.QStackedWidget(self.centralwidget)
        self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
        self.page_3 = QtGui.QWidget()
        self.page_3.setObjectName(_fromUtf8("page_3"))
        self.horizontalLayout_12 = QtGui.QHBoxLayout(self.page_3)
        self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.music_list_scrollArea = QtGui.QScrollArea(self.page_3)
        self.music_list_scrollArea.setWidgetResizable(True)
        self.music_list_scrollArea.setObjectName(_fromUtf8("music_list_scrollArea"))
        self.scrollAreaWidgetContents_2 = QtGui.QWidget()
        self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 940, 728))
        self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
        self.gridLayout_4 = QtGui.QGridLayout(self.scrollAreaWidgetContents_2)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.music_table = QtGui.QTableWidget(self.scrollAreaWidgetContents_2)
        self.music_table.setObjectName(_fromUtf8("music_table"))
        self.music_table.setColumnCount(5)
        self.music_table.setRowCount(0)
        self.gridLayout_4.addWidget(self.music_table, 0, 0, 1, 1)
        self.music_list_scrollArea.setWidget(self.scrollAreaWidgetContents_2)
        self.horizontalLayout_2.addWidget(self.music_list_scrollArea)
        self.horizontalLayout_12.addLayout(self.horizontalLayout_2)
        self.stackedWidget.addWidget(self.page_3)
        self.page_4 = QtGui.QWidget()
        self.page_4.setObjectName(_fromUtf8("page_4"))
        self.stackedWidget.addWidget(self.page_4)
        self.verticalLayout_4.addWidget(self.stackedWidget)
        self.verticalLayout_3.addLayout(self.verticalLayout_4)
        self.seekSlider = phonon.Phonon.SeekSlider(self.centralwidget)
        self.seekSlider.setObjectName(_fromUtf8("seekSlider"))
        self.verticalLayout_3.addWidget(self.seekSlider)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.play_button = QtGui.QPushButton(self.centralwidget)
        self.play_button.setMaximumSize(QtCore.QSize(30, 16777215))
        self.play_button.setText(_fromUtf8(""))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/play-button.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.play_button.setIcon(icon)
        self.play_button.setIconSize(QtCore.QSize(20, 20))
        self.play_button.setFlat(True)
        self.play_button.setObjectName(_fromUtf8("play_button"))
        self.horizontalLayout.addWidget(self.play_button)
        self.pause_button = QtGui.QPushButton(self.centralwidget)
        self.pause_button.setMaximumSize(QtCore.QSize(30, 16777215))
        self.pause_button.setText(_fromUtf8(""))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/pause (1).png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pause_button.setIcon(icon1)
        self.pause_button.setIconSize(QtCore.QSize(20, 20))
        self.pause_button.setFlat(True)
        self.pause_button.setObjectName(_fromUtf8("pause_button"))
        self.horizontalLayout.addWidget(self.pause_button)
        self.fast_forward_button = QtGui.QPushButton(self.centralwidget)
        self.fast_forward_button.setMaximumSize(QtCore.QSize(30, 16777215))
        self.fast_forward_button.setText(_fromUtf8(""))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/fast-forward.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.fast_forward_button.setIcon(icon2)
        self.fast_forward_button.setIconSize(QtCore.QSize(20, 20))
        self.fast_forward_button.setFlat(True)
        self.fast_forward_button.setObjectName(_fromUtf8("fast_forward_button"))
        self.horizontalLayout.addWidget(self.fast_forward_button)
        self.label_12 = QtGui.QLabel(self.centralwidget)
        self.label_12.setText(_fromUtf8(""))
        self.label_12.setObjectName(_fromUtf8("label_12"))
        self.horizontalLayout.addWidget(self.label_12)
        self.lcdNumber = QtGui.QLCDNumber(self.centralwidget)
        self.lcdNumber.setMaximumSize(QtCore.QSize(70, 30))
        self.lcdNumber.setObjectName(_fromUtf8("lcdNumber"))
        self.horizontalLayout.addWidget(self.lcdNumber)
        self.line_3 = QtGui.QFrame(self.centralwidget)
        self.line_3.setFrameShape(QtGui.QFrame.VLine)
        self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_3.setObjectName(_fromUtf8("line_3"))
        self.horizontalLayout.addWidget(self.line_3)
        self.volumeSlider = phonon.Phonon.VolumeSlider(self.centralwidget)
        self.volumeSlider.setMinimumSize(QtCore.QSize(150, 0))
        self.volumeSlider.setMaximumSize(QtCore.QSize(150, 16777215))
        self.volumeSlider.setObjectName(_fromUtf8("volumeSlider"))
        self.horizontalLayout.addWidget(self.volumeSlider)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        self.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(self)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1254, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        self.menuAbout = QtGui.QMenu(self.menubar)
        self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
        self.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(self)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        self.setStatusBar(self.statusbar)
        self.Dock_Settings = QtGui.QDockWidget(self)
        self.Dock_Settings.setMinimumSize(QtCore.QSize(194, 386))
        self.Dock_Settings.setMaximumSize(QtCore.QSize(524287, 524287))
        self.Dock_Settings.setObjectName(_fromUtf8("Dock_Settings"))
        self.dockWidgetContents = QtGui.QWidget()
        self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
        self.gridLayout_3 = QtGui.QGridLayout(self.dockWidgetContents)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.horizontalLayout_6 = QtGui.QHBoxLayout()
        self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
        self.likes_button = QtGui.QPushButton(self.dockWidgetContents)
        self.likes_button.setStyleSheet(_fromUtf8("text-align:left;"))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/music-player (2).png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.likes_button.setIcon(icon3)
        self.likes_button.setIconSize(QtCore.QSize(23, 23))
        self.likes_button.setFlat(True)
        self.likes_button.setObjectName(_fromUtf8("likes_button"))
        self.horizontalLayout_6.addWidget(self.likes_button)
        self.gridLayout_3.addLayout(self.horizontalLayout_6, 5, 0, 1, 1)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.saved_settings_box = QtGui.QGroupBox(self.dockWidgetContents)
        self.saved_settings_box.setAlignment(QtCore.Qt.AlignCenter)
        self.saved_settings_box.setObjectName(_fromUtf8("saved_settings_box"))
        self.horizontalLayout_11 = QtGui.QHBoxLayout(self.saved_settings_box)
        self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
        self.horizontalLayout_10 = QtGui.QHBoxLayout()
        self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
        self.list_settings = QtGui.QListWidget(self.saved_settings_box)
        self.list_settings.setTabKeyNavigation(True)
        self.list_settings.setDragEnabled(True)
        self.list_settings.setDragDropOverwriteMode(True)
        self.list_settings.setObjectName(_fromUtf8("list_settings"))
        item = QtGui.QListWidgetItem()
        self.list_settings.addItem(item)
        item = QtGui.QListWidgetItem()
        self.list_settings.addItem(item)
        item = QtGui.QListWidgetItem()
        self.list_settings.addItem(item)
        item = QtGui.QListWidgetItem()
        self.list_settings.addItem(item)
        self.horizontalLayout_10.addWidget(self.list_settings)
        self.horizontalLayout_11.addLayout(self.horizontalLayout_10)
        self.verticalLayout.addWidget(self.saved_settings_box)
        self.gridLayout_3.addLayout(self.verticalLayout, 8, 0, 1, 1)
        self.horizontalLayout_7 = QtGui.QHBoxLayout()
        self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
        self.currently_playing_button = QtGui.QPushButton(self.dockWidgetContents)
        self.currently_playing_button.setStyleSheet(_fromUtf8("text-align:left;"))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/speakers.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.currently_playing_button.setIcon(icon4)
        self.currently_playing_button.setIconSize(QtCore.QSize(23, 23))
        self.currently_playing_button.setFlat(False)
        self.currently_playing_button.setObjectName(_fromUtf8("currently_playing_button"))
        self.horizontalLayout_7.addWidget(self.currently_playing_button)
        self.gridLayout_3.addLayout(self.horizontalLayout_7, 6, 0, 1, 1)
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.collections_button = QtGui.QPushButton(self.dockWidgetContents)
        self.collections_button.setStyleSheet(_fromUtf8("text-align:left;"))
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/music-player (1).png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.collections_button.setIcon(icon5)
        self.collections_button.setIconSize(QtCore.QSize(23, 23))
        self.collections_button.setFlat(False)
        self.collections_button.setObjectName(_fromUtf8("collections_button"))
        self.horizontalLayout_5.addWidget(self.collections_button)
        self.gridLayout_3.addLayout(self.horizontalLayout_5, 4, 0, 1, 1)
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.notifications_button = QtGui.QPushButton(self.dockWidgetContents)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        font.setStrikeOut(False)
        font.setKerning(True)
        self.notifications_button.setFont(font)
        self.notifications_button.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.notifications_button.setStyleSheet(_fromUtf8("text-align:left;"))
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(_fromUtf8("Buttons/alarm (2).png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.notifications_button.setIcon(icon6)
        self.notifications_button.setIconSize(QtCore.QSize(23, 23))
        self.notifications_button.setAutoRepeat(False)
        self.notifications_button.setAutoDefault(False)
        self.notifications_button.setDefault(False)
        self.notifications_button.setFlat(False)
        self.notifications_button.setObjectName(_fromUtf8("notifications_button"))
        self.horizontalLayout_3.addWidget(self.notifications_button)
        self.gridLayout_3.addLayout(self.horizontalLayout_3, 2, 0, 1, 1)
        self.line = QtGui.QFrame(self.dockWidgetContents)
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.gridLayout_3.addWidget(self.line, 3, 0, 1, 1)
        self.line_2 = QtGui.QFrame(self.dockWidgetContents)
        self.line_2.setFrameShape(QtGui.QFrame.HLine)
        self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_2.setObjectName(_fromUtf8("line_2"))
        self.gridLayout_3.addWidget(self.line_2, 7, 0, 1, 1)
        self.Dock_Settings.setWidget(self.dockWidgetContents)
        self.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.Dock_Settings)
        self.actionExit = QtGui.QAction("E&xit", self, shortcut="Ctrl+X",
                                        triggered=self.close)

        self.actionExit.setObjectName(_fromUtf8("actionExit"))
        self.menuFile.triggered[QtGui.QAction].connect(self.processTrigger)
        self.actionAdd_Folder = QtGui.QAction("Add &Folder", self,
                                              shortcut="Ctrl+W", triggered=self.addFolder)
        self.actionAdd_Folder.setObjectName(_fromUtf8("actionAdd_Folder"))
        self.actionAdd_File = QtGui.QAction("Add &Files", self,
                                            shortcut="Ctrl+F", triggered=self.addFiles)

        self.actionAdd_File.setObjectName(_fromUtf8("actionAdd_File"))
        self.actionPreference = QtGui.QAction(self)
        self.actionPreference.setObjectName(_fromUtf8("actionPreference"))
        self.menuFile.addAction(self.actionAdd_Folder)
        self.menuFile.addAction(self.actionAdd_File)
        self.menuFile.addAction(self.actionPreference)
        self.menuFile.addAction(self.actionExit)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuAbout.menuAction())

        self.retranslateUi(self)
        self.stackedWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(self)

        self.notifications_button.setAutoRepeat(True)
        self.notifications_button.setAutoRepeatDelay(1000)
        self.notifications_button.setAutoRepeatInterval(1000)

        self.collections_button.setAutoRepeat(True)
        self.collections_button.setAutoRepeatDelay(1000)
        self.collections_button.setAutoRepeatInterval(1000)
        self.likes_button.setFlat(False)
        self.likes_button.setAutoRepeat(True)
        self.likes_button.setAutoRepeatDelay(1000)
        self.likes_button.setAutoRepeatInterval(1000)

        # self.notifications_button.clicked.connect(lambda: self.onChanged(self.notifications_button))
        # self.collections_button.clicked.connect(lambda: self.onChanged(self.collections_button))
        self.notifications_button.clicked.connect(self.switchingCamera)
        self.collections_button.clicked.connect(self.filterCollections)
        self.likes_button.clicked.connect(self.filterLike)

        # clicked(bool) passes the checked state as the first argument, so wrap
        # the connection in a lambda to hand changeSetting the button itself:
        self.currently_playing_button.clicked.connect(lambda: self.changeSetting(self.currently_playing_button))

        # Media stuff starts here
        self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
        self.mediaObject = Phonon.MediaObject(self)
        self.metaInformationResolver = Phonon.MediaObject(self)

        self.mediaObject.setTickInterval(1000)

        self.mediaObject.tick.connect(self.tick)
        self.mediaObject.stateChanged.connect(self.stateChanged)
        self.metaInformationResolver.stateChanged.connect(self.metaStateChanged)
        self.mediaObject.currentSourceChanged.connect(self.sourceChanged)
        self.mediaObject.aboutToFinish.connect(self.aboutToFinish)

        Phonon.createPath(self.mediaObject, self.audioOutput)
        self.setupActions()  # first method
        # self.setupMenus()  # second method
        self.setupUi()  # third method
        self.lcdNumber.display("00:00")
        self.sources = []
        self.retrieveMusicList()

        self.animation_button_currently = QtCore.QPropertyAnimation(self.currently_playing_button, "size")


    def tick(self, time):
        displayTime = QtCore.QTime(0, (time / 60000) % 60, (time / 1000) % 60)
        self.lcdNumber.display(displayTime.toString('mm:ss'))
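    # Note: Phonon reports 'time' in milliseconds, so (time / 60000) % 60 gives
    # minutes and (time / 1000) % 60 gives seconds. A standalone sketch of the
    # same conversion (assumes PyQt4's QtCore):
    #
    #     ms = 83000  # 1 minute 23 seconds
    #     t = QtCore.QTime(0, (ms / 60000) % 60, (ms / 1000) % 60)
    #     print t.toString('mm:ss')  # "01:23"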

    def sourceChanged(self, source):
        self.music_table.selectRow(self.sources.index(source))
        self.lcdNumber.display('00:00')

    def aboutToFinish(self):
        index = self.sources.index(self.mediaObject.currentSource()) + 1
        if len(self.sources) > index:
            self.mediaObject.enqueue(self.sources[index])

    # def setupMenus(self):

    def metaStateChanged(self, newState, oldState):
        if newState == Phonon.ErrorState:
            QtGui.QMessageBox.warning(self, "Error opening files",
                                      self.metaInformationResolver.errorString())

            while self.sources and self.sources.pop() != self.metaInformationResolver.currentSource():
                pass

        if newState != Phonon.StoppedState and newState != Phonon.PausedState:
            return

        if self.metaInformationResolver.currentSource().type() == Phonon.MediaSource.Invalid:
            return

        metaData = self.metaInformationResolver.metaData()

        title = metaData.get('TITLE', [''])[0]
        print title
        if not title:
            title = self.metaInformationResolver.currentSource().fileName()

        titleItem = QtGui.QTableWidgetItem(title)
        print titleItem
        titleItem.setFlags(titleItem.flags() ^ QtCore.Qt.ItemIsEditable)

        artist = metaData.get('ARTIST', [''])[0]
        print artist
        artistItem = QtGui.QTableWidgetItem(artist)
        artistItem.setFlags(artistItem.flags() ^ QtCore.Qt.ItemIsEditable)

        album = metaData.get('ALBUM', [''])[0]
        print album
        albumItem = QtGui.QTableWidgetItem(album)
        albumItem.setFlags(albumItem.flags() ^ QtCore.Qt.ItemIsEditable)

        year = metaData.get('DATE', [''])[0]
        print year
        yearItem = QtGui.QTableWidgetItem(year)
        yearItem.setFlags(yearItem.flags() ^ QtCore.Qt.ItemIsEditable)

        currentRow = self.music_table.rowCount()
        print currentRow
        self.music_table.insertRow(currentRow)
        self.music_table.setItem(currentRow, 0, titleItem)
        self.music_table.setItem(currentRow, 1, artistItem)
        self.music_table.setItem(currentRow, 2, albumItem)
        self.music_table.setItem(currentRow, 3, yearItem)

        if not self.music_table.selectedItems():
            self.music_table.selectRow(0)
            self.mediaObject.setCurrentSource(self.metaInformationResolver.currentSource())

        source = self.metaInformationResolver.currentSource()
        index = self.sources.index(source) + 1

        if len(self.sources) > index:
            self.metaInformationResolver.setCurrentSource(self.sources[index])
        else:
            self.music_table.resizeColumnsToContents()
            if self.music_table.columnWidth(0) > 300:
                self.music_table.setColumnWidth(0, 300)

    def handleButton(self, button):
        if button == self.play_button:
            print "play"
            self.mediaObject.play()
        elif button == self.pause_button:
            print "pause"
            self.mediaObject.pause()
        elif button == self.fast_forward_button:
            # Note: the fast-forward button is wired to act as a stop button.
            print "stop"
            self.mediaObject.stop()

    def stateChanged(self, newState, oldState):
        if newState == Phonon.ErrorState:
            if self.mediaObject.errorType() == Phonon.FatalError:
                QtGui.QMessageBox.warning(self, "Fatal Error",
                                          self.mediaObject.errorString())
            else:
                QtGui.QMessageBox.warning(self, "Error",
                                          self.mediaObject.errorString())
        elif newState == Phonon.PlayingState:
            self.play_button.setEnabled(False)
            self.pause_button.setEnabled(True)
            self.fast_forward_button.setEnabled(True)

        elif newState == Phonon.StoppedState:
            self.fast_forward_button.setEnabled(False)
            self.play_button.setEnabled(True)
            self.pause_button.setEnabled(False)
            self.lcdNumber.display("00:00")

        elif newState == Phonon.PausedState:
            self.pause_button.setEnabled(False)
            self.fast_forward_button.setEnabled(True)
            self.play_button.setEnabled(True)

    def setupActions(self):
        self.play_button.clicked.connect(lambda: self.handleButton(self.play_button))
        self.pause_button.clicked.connect(lambda: self.handleButton(self.pause_button))
        self.fast_forward_button.clicked.connect(lambda: self.handleButton(self.fast_forward_button))

        self.nextAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaSkipForward),
            "Next", self, shortcut="Ctrl+N"
        )

        self.previousAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaSkipBackward),
            "Previous", self, shortcut="Ctrl+R"
        )

    def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.menuFile.setTitle(_translate("MainWindow", "File", None))
        self.menuAbout.setTitle(_translate("MainWindow", "About", None))
        self.Dock_Settings.setWindowTitle(_translate("MainWindow", "Dock Items", None))
        self.likes_button.setText(_translate("MainWindow", "Likes", None))
        self.saved_settings_box.setTitle(_translate("MainWindow", "Saved settings", None))
        __sortingEnabled = self.list_settings.isSortingEnabled()
        self.list_settings.setSortingEnabled(False)
        item = self.list_settings.item(0)
        item.setText(_translate("MainWindow", "Settings 1", None))
        item = self.list_settings.item(1)
        item.setText(_translate("MainWindow", "Settings 2", None))
        item = self.list_settings.item(2)
        item.setText(_translate("MainWindow", "Settings 3", None))
        item = self.list_settings.item(3)
        item.setText(_translate("MainWindow", "Settings 4", None))
        self.list_settings.setSortingEnabled(__sortingEnabled)
        self.currently_playing_button.setText(_translate("MainWindow", "Currently Playing", None))
        self.collections_button.setText(_translate("MainWindow", "Collections", None))
        self.notifications_button.setText(_translate("MainWindow", "Camera On/Off", None))
        self.notifications_button.setShortcut(_translate("MainWindow", "Return", None))
        self.actionExit.setText(_translate("MainWindow", "Exit", None))
        self.actionAdd_Folder.setText(_translate("MainWindow", "Add Folder", None))
        self.actionAdd_File.setText(_translate("MainWindow", "Add File", None))
        self.actionPreference.setText(_translate("MainWindow", "Preference", None))

    def processTrigger(self, q):
        if q.text() == 'Exit':
            sys.exit()

    def onChanged(self, button):
        if button.isDown():
            if self._state == 0:
                self._state = 1
                button.setAutoRepeatInterval(100)
                print 'press'
            else:
                print 'repeat'
        elif self._state == 1:
            self._state = 0
            button.setAutoRepeatInterval(1000)
            print 'release'
        else:
            print 'click'

    def getBackColor(self):
        return self.currently_playing_button.palette().color(QtGui.QPalette.Button)

    def setBackColor(self, color):
        # Apply the colour to the same button that getBackColor reads:
        pal = self.currently_playing_button.palette()
        pal.setColor(QtGui.QPalette.Button, color)
        self.currently_playing_button.setPalette(pal)

    def addFiles(self):
        files = QtGui.QFileDialog.getOpenFileNames(self, "Select Music Files",
                                                   QtGui.QDesktopServices.storageLocation(
                                                       QtGui.QDesktopServices.MusicLocation))
        if not files:
            return

        index = len(self.sources)

        print files
        # Open the playlist once in append mode so existing entries are kept:
        file = QtCore.QFile('musiclist.txt')
        if not file.open(QtCore.QFile.Append | QtCore.QFile.Text):
            QtGui.QMessageBox.information(None, 'info', file.errorString())
            return

        stream = QtCore.QTextStream(file)
        for string in files:
            stream << string << '\n'
            self.sources.append(Phonon.MediaSource(string))
        file.close()

        print index
        if self.sources:
            self.metaInformationResolver.setCurrentSource(self.sources[index])
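    # A plain-Python sketch equivalent to the QFile/QTextStream append above
    # (hypothetical alternative, not used by this class):
    #
    #     with open('musiclist.txt', 'a') as f:
    #         for path in files:
    #             f.write(unicode(path) + '\n')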

    # TODO: implementing the add folder function
    def addFolder(self):
        dialog = FileDialog()
        if dialog.exec_() == QtGui.QDialog.Accepted:
            print(dialog.selectedFiles())


    def retrieveMusicList(self):
        file = QtCore.QFile('musiclist.txt')
        if not file.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text):
            return  # no saved playlist yet
        index = len(self.sources)

        stream = QtCore.QTextStream(file)
        while not stream.atEnd():
            self.sources.append(Phonon.MediaSource(stream.readLine()))
        file.close()

        print index
        if self.sources:
            self.metaInformationResolver.setCurrentSource(self.sources[0])
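    # A plain-Python sketch of the same playlist read (hypothetical
    # alternative; assumes one path per line in musiclist.txt):
    #
    #     with open('musiclist.txt') as f:
    #         for line in f:
    #             self.sources.append(Phonon.MediaSource(line.strip()))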

    def changeBackground(self):
        color1 = QtGui.QColor(255, 0, 0)
        color2 = QtGui.QColor(0, 255, 0)

        # Animate the 'backColor' pyqtProperty defined on this window:
        self.color = QtCore.QPropertyAnimation(self, 'backColor')
        self.color.setStartValue(color1)
        # self.color.setKeyValueAt(0.5, color2)
        self.color.setEndValue(color2)
        self.color.setDuration(1000)
        # self.color.setLoopCount(-1)
        self.color.start()

    def setupUi(self):
        self.seekSlider.setMediaObject(self.mediaObject)
        self.volumeSlider.setAudioOutput(self.audioOutput)
        self.volumeSlider.setSizePolicy(QtGui.QSizePolicy.Maximum,
                                        QtGui.QSizePolicy.Maximum)
        palette = QtGui.QPalette()
        palette.setBrush(QtGui.QPalette.Light, QtCore.Qt.darkGray)

        self.lcdNumber.setPalette(palette)

        headers = ("Title", "Artist", "Album", "Year", "Liked?")
        self.music_table.setHorizontalHeaderLabels(headers)
        self.music_table.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.music_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.music_table.cellPressed.connect(self.tableClicked)

    def tableClicked(self, row, column):
        wasPlaying = (self.mediaObject.state() == Phonon.PlayingState)

        self.mediaObject.stop()
        self.mediaObject.clearQueue()

        self.mediaObject.setCurrentSource(self.sources[row])

        if wasPlaying:
            self.mediaObject.play()
        else:
            self.mediaObject.stop()

    def contextMenuEvent(self, event):
        self.menu = QtGui.QMenu(self)
        renameAction = QtGui.QAction('Like/Unlike', self)
        renameAction.triggered.connect(self.likeSlot)
        self.menu.addAction(renameAction)
        self.menu.popup(QtGui.QCursor.pos())

    def likeSlot(self):
        print self.music_table.currentRow()
        print self.sources[self.music_table.currentRow()]
        item = QtGui.QTableWidgetItem(str("Yes"))
        item_list = self.music_table.item(self.music_table.currentRow(), 4)
        if not item_list:
            self.music_table.setItem(self.music_table.currentRow(), 4, item)
        else:
            self.music_table.setItem(self.music_table.currentRow(), 4, None)

    def filterLike(self):
        # Show only rows that carry a "Liked?" mark in column 4:
        for i in range(self.music_table.rowCount()):
            item = self.music_table.item(i, 4)
            self.music_table.setRowHidden(i, not item)

    def filterCollections(self):
        for i in range(0, self.music_table.rowCount()):
            self.music_table.setRowHidden(i, False)

    def cameraShutdown(self):
        self.video_capture.release()
        time.sleep(1)
        cv2.destroyWindow('video')
        cv2.waitKey(1)

    def cameraStack(self):
        model_filename = "model_gender_working.pkl"
        image_size = (200,200)
        [images, labels, subject_names] = read_images("gender/", image_size)
        list_of_labels = list(xrange(max(labels)+1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_gender = load_model(model_filename)
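
        # A sketch of skipping the retrain when a pickled model already exists
        # (hypothetical guard; assumes the same load_model/save_model helpers):
        #
        #     import os
        #     if not os.path.exists(model_filename):
        #         model.compute(images, labels)
        #         save_model(model_filename, model)
        #     self.model_gender = load_model(model_filename)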

        model_filename = "model_emotion.pkl"
        image_size = (200, 200)
        [images, labels, subject_names] = read_images("emotion/", image_size)
        list_of_labels = list(xrange(max(labels) + 1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_emotion = load_model(model_filename)

        faceCascade = 'haarcascade_frontalface_alt2.xml'
        print self.model_gender.image_size
        print "Starting the face detection"

        self.detector = CascadedDetector(cascade_fn=faceCascade, minNeighbors=5, scaleFactor=1.1)
        self.video_capture = cv2.VideoCapture(0)

        while True:
            ret, frame = self.video_capture.read()
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2), interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                self.x0 = x0
                self.y0 = y0
                self.x1 = x1
                self.y1 = y1
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model_gender.image_size, interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model_gender.predict(face)[0]
                emotion = self.model_emotion.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # Draw the predicted name (folder name...):
                # Crude distance heuristic: use the face's vertical position in the frame.
                self.distance = str(int(self.y0))
                draw_str(imgout, (x0 - 20, y0 - 5), self.model_emotion.subject_names[emotion])
                draw_str(imgout, (x0 - 20, y0 - 20), self.model_gender.subject_names[prediction])
                draw_str(imgout, (x0 - 20, y0 - 35), "distance: " + self.distance + "cm")
                self.gender = self.model_gender.subject_names[prediction]
                self.changeSetting(self.currently_playing_button)
                self.changeSetting(self.notifications_button)
                self.changeSetting(self.likes_button)
                self.changeSetting(self.collections_button)
            cv2.imshow('video', imgout)
            ch = cv2.waitKey(10)
            if ch == 27:
                break


    def switchingCamera(self):
        if self._camera_state == 0:
            # Note: cameraStack() blocks in its capture loop until Esc is pressed.
            self.cameraStack()
            self._camera_state = 1
        else:
            self.cameraShutdown()
            self._camera_state = 0

    #TODO: Figuring out to animate transitively
    def changeSetting(self, button):
        # Nothing to do until the camera loop has produced a distance/gender:
        if not hasattr(self, 'distance'):
            return
        int_distance = int(self.distance)
        male_style = "background-color: #99ccff; border-radius: 5px; text-align:left;"
        female_style = "background-color: #ff9999; border-radius: 5px; text-align:left;"
        male_bg = "background-color: #ccffff; border-radius: 5px; text-align:left;"
        female_bg = "background-color: #ffcccc; border-radius: 5px; text-align:left;"
        if int_distance > 90:
            self.resize(640, 480)
            if self.gender == "male":
                if button.styleSheet() != male_style:
                    button.setStyleSheet(male_style)
                if self.centralwidget.styleSheet() != male_bg:
                    self.centralwidget.setStyleSheet(male_bg)
                if self.dockWidgetContents.styleSheet() != male_bg:
                    self.dockWidgetContents.setStyleSheet(male_bg)
            elif self.gender == "female":
                if button.styleSheet() != female_style:
                    button.setStyleSheet(female_style)
                if self.centralwidget.styleSheet() != female_bg:
                    self.centralwidget.setStyleSheet(female_bg)
                if self.dockWidgetContents.styleSheet() != female_bg:
                    self.dockWidgetContents.setStyleSheet(female_bg)
        else:
            self.resize(1280, 720)
            if self.gender == "male":
                if button.styleSheet() != male_style:
                    button.setStyleSheet(male_style)
            elif self.gender == "female":
                if button.styleSheet() != female_style:
                    button.setStyleSheet(female_style)
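
    # The TODO above asks for a smooth transition instead of the abrupt
    # resize(); a sketch (hypothetical) using QPropertyAnimation on the
    # window's 'size' property, with one of the two targets used above:
    #
    #     self.size_anim = QtCore.QPropertyAnimation(self, 'size')
    #     self.size_anim.setEndValue(QtCore.QSize(1280, 720))
    #     self.size_anim.setDuration(300)
    #     self.size_anim.start()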

    #TODO: Try with returning self.animation.start()
    def displayStack(self):
        if self.stackedWidget.currentIndex() == 1:
            self.stackedWidget.setCurrentIndex(0)
        else:
            self.stackedWidget.setCurrentIndex(1)

    backColor = QtCore.pyqtProperty(QtGui.QColor, getBackColor, setBackColor)
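
# A minimal sketch (hypothetical helper, not part of the class above) showing
# how the custom 'backColor' pyqtProperty can be animated; 'window' is assumed
# to be an instance of the player window:
def animate_back_color(window):
    anim = QtCore.QPropertyAnimation(window, 'backColor')
    anim.setStartValue(QtGui.QColor(255, 0, 0))
    anim.setEndValue(QtGui.QColor(0, 255, 0))
    anim.setDuration(1000)
    anim.start()
    return anim  # keep a reference so the animation is not garbage-collected
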
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.cam = create_capture(camera_id)

    def run(self):
        count = 0
        inc = 0
        noface = 0
        name = " "
        name1 = " "
        while True:
            ret, frame = self.cam.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                print face
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.model.image_size,
                                  interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # Draw the predicted name (folder name...):
                draw_str(imgout, (x0 - 20, y0 - 20),
                         self.model.subject_names[prediction])
            cv2.imshow('videofacerec', imgout)
            try:
                name = str(self.model.subject_names[prediction])
                #speak(str(name))
            except NameError:
                # No face recognized yet, so 'prediction' is still unbound.
                noface = noface + 1
                if noface == 20:
                    #speech.say("no face found")
                    noface = 0
                continue
            if name == name1:
                inc = inc + 1
                count = count + 1
            else:
                inc = 0
                count = count + 1
            if inc == 10:
                #speech.say("hi"+str(name))
                name = " "
                name1 = " "
                inc = 0
                count = 0
                continue
            if count == 20:
                #speech.say("Hi User. Please tell your name.")
                count = 0
            name1 = name
            # Show image & exit on escape:
            ch = cv2.waitKey(10)
            if ch == 27:
                break
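
# The run() loop above announces a name only after ten identical predictions
# in a row, which debounces the classifier. A standalone sketch of the same
# idea (hypothetical helper, not used by the classes here):
def debounced_names(predictions, threshold=10):
    """Yield a name once it has been predicted 'threshold' times in a row."""
    last, run = None, 0
    for name in predictions:
        run = run + 1 if name == last else 1
        last = name
        if run == threshold:
            yield name
            run = 0
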
class App(object):
    def __init__(self, model, camera_id, cascade_filename):
        self.model = model
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        self.cam = create_capture(camera_id)

        self.cap = cv2.VideoCapture('http://localhost:8080/stream.ogg')

        self.fajl = open("../web/data.txt", 'w+')

    def run(self):
        while True:
            #ret, frame = self.cam.read()
            ret, frame = self.cap.read()
            # Resize the frame to half the original size for speeding up the detection process:
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            # Run the detector once per frame and reuse the result:
            detections = self.detector.detect(img)
            if detections.size == 0:
                # No face: clear the data file read by the web page.
                self.fajl = open("../web/data.txt", 'w+')
                self.fajl.write('')
                self.fajl.close()
                print "no face"
            else:
                for i, r in enumerate(detections):
                    x0, y0, x1, y1 = r
                    # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                    face = img[y0:y1, x0:x1]
                    face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                    face = cv2.resize(face,
                                      self.model.image_size,
                                      interpolation=cv2.INTER_CUBIC)
                    # Get a prediction from the model:
                    prediction = self.model.predict(face)[0]
                    # Draw the face area in image:
                    cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                    print "cao"

                    # Draw the predicted name (folder name...):
                    draw_str(imgout, (x0 - 20, y0 - 20),
                             self.model.subject_names[prediction])
                    #print self.model.subject_names[prediction]  # print the recognized name
                    self.fajl = open("../web/data.txt", 'w+')

                    self.fajl.write('{"pname":"' +
                                    self.model.subject_names[prediction] +
                                    '","x0":"' + str(x0) + '", "y0":"' +
                                    str(y0) + '", "x1":"' + str(x1) +
                                    '", "y1":"' + str(y1) + '"}')
                    self.fajl.close()


            # Turn this off?
            # cv2.imshow('videofacerec', imgout)
            # Exit on escape:
            ch = cv2.waitKey(10)
            if ch == 27:
                break
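
# The hand-built JSON string above breaks if a subject name ever contains a
# quote; a sketch (hypothetical helper, same fields as written by run() above)
# using the json module instead:
import json

def write_detection(path, name, x0, y0, x1, y1):
    # Overwrite the file with one JSON object describing the detection:
    with open(path, 'w') as f:
        json.dump({'pname': name, 'x0': str(x0), 'y0': str(y0),
                   'x1': str(x1), 'y1': str(y1)}, f)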