Example #1
    def on_query_completions(view, prefix, locations):

        logging.debug("on_query_completions view id is %s " % view.buffer_id())
        if not Tools.is_valid_view(view):
            logging.debug(" view id %s is invalid" % view.buffer_id())
            return Tools.SHOW_DEFAULT_COMPLETIONS

        if not completer:
            logging.debug(" completer is invalid")
            return Tools.SHOW_DEFAULT_COMPLETIONS

        if completer.async_completions_ready:
            completer.async_completions_ready = False
            #logging.debug(" completions result is %s", completer.completions)
            return completer.completions

        # Verify that the character under the cursor is an allowed trigger
        pos_status = Tools.get_position_status(locations[0], view, settings)
        if pos_status == PosStatus.WRONG_TRIGGER:
            return Tools.HIDE_DEFAULT_COMPLETIONS
        if pos_status == PosStatus.COMPLETION_NOT_NEEDED:
            logging.debug(" show default completions")
            return Tools.SHOW_DEFAULT_COMPLETIONS

        logging.debug(" starting async auto_complete at pos: %s" %
                      locations[0])
        completion_thread = Thread(
            target=completer.complete,
            args=[view, locations[0]])
        completion_thread.daemon = True
        completion_thread.start()

        logging.debug(" show default completions last")
        return Tools.HIDE_DEFAULT_COMPLETIONS
Example #2
def rpc_server(handler, address, authkey):
    sock = Listener(address, authkey=authkey)
    while True:
        client = sock.accept()
        t = Thread(target=handler.handle_connection, args=(client,))
        t.daemon = True
        t.start()
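
The handler passed to rpc_server is not shown above. A minimal sketch of a matching handler and client call, assuming requests arrive as pickled (name, args, kwargs) tuples; the RPCHandler class and the address/authkey below are illustrative assumptions, not part of the original:

from multiprocessing.connection import Client

class RPCHandler:
    def __init__(self):
        self._functions = {}

    def register_function(self, func):
        self._functions[func.__name__] = func

    def handle_connection(self, connection):
        # Serve (name, args, kwargs) requests until the client disconnects.
        try:
            while True:
                func_name, args, kwargs = connection.recv()
                try:
                    connection.send(self._functions[func_name](*args, **kwargs))
                except Exception as e:
                    connection.send(e)
        except EOFError:
            pass

# Client side (hypothetical address and key):
# c = Client(('localhost', 17000), authkey=b'peekaboo')
# c.send(('add', (2, 3), {}))
# print(c.recv())  # -> 5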
Example #3
def test_make_fetcher():
    symmetric362 = SPHERE_FILES['symmetric362']
    with TemporaryDirectory() as tmpdir:
        stored_md5 = fetcher._get_file_md5(symmetric362)

        # create local HTTP Server
        testfile_url = op.split(symmetric362)[0] + os.sep
        test_server_url = "http://127.0.0.1:8000/"
        print(testfile_url)
        print(symmetric362)
        current_dir = os.getcwd()
        # change pwd to directory containing testfile.
        os.chdir(testfile_url)
        server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)
        server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # test make_fetcher
        sphere_fetcher = fetcher._make_fetcher("sphere_fetcher",
                                               tmpdir, test_server_url,
                                               [op.split(symmetric362)[-1]],
                                               ["sphere_name"],
                                               md5_list=[stored_md5])

        sphere_fetcher()
        assert op.isfile(op.join(tmpdir, "sphere_name"))
        npt.assert_equal(fetcher._get_file_md5(op.join(tmpdir, "sphere_name")),
                         stored_md5)

        # stop local HTTP Server
        server.shutdown()
        # change to original working directory
        os.chdir(current_dir)
Example #4
def dsffsf():
    global process
    if not process:
        process = Thread(target = background_thread)
        process.daemon = True
        process.start()

    return "OK", 200
Example #5
def test_get_clients():
    from multiplayer.server import main as server_main
    from threading import Thread
    t = Thread(target=server_main)
    t.daemon = True
    t.start()
    c = Client(UDPEndpoint(('localhost', 6028)))
    c2 = Client(UDPEndpoint(('localhost', 6028)))
    return c, c2
Example #6
    def broadcast_command(self, command):
        def repeat_pub():
            for i in xrange(10):
                self.publisher.send(pickle.dumps(command))
                time.sleep(1)

        t = Thread(target=repeat_pub)
        t.daemon = True
        t.start()
        return t
Example #7
 def process(self, state):
     """Do whatever you need with the state object state"""
     self.last_state = self.current_state
     self.current_state = state
     world = World(state)
     self.policy = Policy(world)
     thread = Thread(target=self.policy.policyIteration)
     thread.daemon = True
     thread.start()
     self.thread = thread
Example #8
def parallelSort(l, num_threads):
    threads = []
    chunked_list = chunkList(l, num_threads)
    for chunk in chunked_list:
        t = Thread(target=sortList, args=(chunk,))
        t.daemon = True
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

    mergeSortedLists(chunked_list, write_list=l)
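
chunkList, sortList and mergeSortedLists are not defined in this example. One plausible set of definitions, assuming an in-place sort per chunk and a k-way merge written back into the original list; these bodies are assumptions, not the original helpers:

import heapq

def chunkList(l, n):
    # Split l into n contiguous sublists.
    size = (len(l) + n - 1) // n
    return [l[i:i + size] for i in range(0, len(l), size)]

def sortList(chunk):
    # Sort one chunk in place.
    chunk.sort()

def mergeSortedLists(chunks, write_list):
    # k-way merge of the sorted chunks back into the original list.
    for i, value in enumerate(heapq.merge(*chunks)):
        write_list[i] = value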
Example #9
    def streamResponse():
        texFile = ""
        (fRoot, fExt) = os.path.splitext(f)
        if fExt == ".zip":
            yield println("Extracting contents of archive...")
            os.system("unzip %s -d %s" % (fPath, fDir))
            texFile = find_tex(fDir)
        elif fExt == ".tex":
            texFile = fPath
        else:
            yield println("File format not recognized: %s" % fExt)

        if texFile == "":
            yield println("No input file found.")
        else:
            epubFile = "%s.epub" % fRoot
            epubPath = os.path.join(dir_uuid(app.config["DOWNLOAD_FOLDER"], fileid), epubFile)
            yield println("Received file %s, converting to %s" % (os.path.basename(texFile), epubFile))
            # Launch conversion script
            proc = subprocess.Popen(
                ["python", "-u", "tex2ebook.py", "-o", epubPath, texFile],
                stdout=subprocess.PIPE,
                stderr=open(os.devnull, "w"),
            )
            # Start separate thread to read stdout
            q = Queue()
            t = Thread(target=enqueue_output, args=(proc.stdout, q))
            t.daemon = True
            t.start()
            start = time.time()
            running = True
            # Read stdout from other thread every second, stop if timeout
            while running and time.time() - start < app.config["TIMEOUT"]:
                running = proc.poll() is None
                try:
                    line = q.get(timeout=1)
                except Empty:
                    line = ""
                else:
                    yield println(line.rstrip())

            if running:
                yield println("Timed out.")
                # Kill subprocess if timeout occurred
                proc.kill()
            else:
                yield println()
                yield println("Done.")
                # Redirect to result file
                yield '<script type="text/javascript"> $(document).ready(function() { window.location.href = "%s"; }); </script>' % epubPath
                yield 'If the download does not start automatically, please click <a href="%s">here</a>.' % epubPath
Example #10
    def run(self, daemon=True):
        """Executes the command. A thread will be started to collect
        the outputs (stderr and stdout) from that command.
        The outputs will be written to the queue.

        :return: self
        """
        self.process = Popen(self.command, bufsize=1,
                             stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        thread = Thread(target=self._queue_output,
                        args=(self.process.stdout, self.queue))
        thread.daemon = daemon
        thread.start()
        return self
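
A caller can then drain the queue without blocking on the subprocess. A hedged usage sketch, assuming the enclosing class is named Command, is built from a command list, and exposes run(), queue and process as above; all of these names are assumptions for illustration:

from queue import Empty

cmd = Command(["ping", "-c", "3", "localhost"]).run()
while cmd.process.poll() is None or not cmd.queue.empty():
    try:
        line = cmd.queue.get(timeout=0.5)
    except Empty:
        continue
    print(line.decode().rstrip())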
Example #11
    def on_query_completions(view, prefix, locations):
        """Function that is called when user queries completions in the code

        Args:
            view (sublime.View): current view
            prefix (str): completion prefix typed by the user
            locations (list[int]): positions of the cursor (first if many).

        Returns:
            sublime.Completions: completions with a flag
        """
        log.debug(" on_query_completions view id %s", view.buffer_id())

        if not Tools.is_valid_view(view):
            return Tools.SHOW_DEFAULT_COMPLETIONS

        if not completer:
            return Tools.SHOW_DEFAULT_COMPLETIONS

        if completer.async_completions_ready:
            completer.async_completions_ready = False
            return (completer.completions, sublime.INHIBIT_WORD_COMPLETIONS |
                    sublime.INHIBIT_EXPLICIT_COMPLETIONS)

        # Verify that character under the cursor is one allowed trigger
        pos_status = Tools.get_position_status(locations[0], view, settings)
        if pos_status == PosStatus.WRONG_TRIGGER:
            # we are at a wrong trigger, remove all completions from the list
            return Tools.HIDE_DEFAULT_COMPLETIONS
        if pos_status == PosStatus.COMPLETION_NOT_NEEDED:
            # show default completions for now if allowed
            if settings.hide_default_completions:
                log.debug(" hiding default completions")
                return Tools.HIDE_DEFAULT_COMPLETIONS
            return Tools.SHOW_DEFAULT_COMPLETIONS

        # create a daemon thread to update the completions
        log.debug(" starting async auto_complete at pos: %s", locations[0])
        completion_thread = Thread(
            target=completer.complete,
            args=[view, locations[0], settings.errors_on_save])
        completion_thread.daemon = True
        completion_thread.start()

        # show default completions for now if allowed
        if settings.hide_default_completions:
            log.debug(" hiding default completions")
            return Tools.HIDE_DEFAULT_COMPLETIONS
        return Tools.SHOW_DEFAULT_COMPLETIONS
Example #12
 def __init__(self, **kwargs):
     self.workers = []
     self.echonest = None

     self.controller = kwargs.get("controller", None)
     self.echonest = echonest(kwargs.get("echo_nest_api_key", None))
     workers = kwargs.get("workers", 1)

     Queue.Queue.__init__(self, 100)

     for i in range(workers):
         t = Thread(target=self.worker)
         t.daemon = True
         t.start()
         self.workers.append(t)
Example #13
	def run(self, daemon=True):
		"""
		Executes the command. A thread will be started to collect
		the outputs (stderr and stdout) from that command.
		The outputs will be written to the queue.
		"""
		# Start the external process
		if self.status == "ready":
			self.process = Popen(self.command, bufsize=1, shell=True, env=self.environment,
								stdin=PIPE, stdout=PIPE, stderr=STDOUT)
			# Prepare and start a thread to continuously read the output from the process
			thread = Thread(target=self._queue_output,
							args=(self.process.stdout, self.queue))
			thread.daemon = daemon
			thread.start()
		# Return self as the iterator object
		return self
Example #14
def test_fetch_data():
    symmetric362 = SPHERE_FILES['symmetric362']
    with TemporaryDirectory() as tmpdir:
        md5 = fetcher._get_file_md5(symmetric362)
        bad_md5 = '8' * len(md5)

        newfile = op.join(tmpdir, "testfile.txt")
        # Test that the fetcher can get a file
        testfile_url = symmetric362
        print(testfile_url)
        testfile_dir, testfile_name = op.split(testfile_url)
        # create local HTTP Server
        test_server_url = "http://127.0.0.1:8001/" + testfile_name
        current_dir = os.getcwd()
        # change pwd to directory containing testfile.
        os.chdir(testfile_dir + os.sep)
        # use different port as shutdown() takes time to release socket.
        server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)
        server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        files = {"testfile.txt": (test_server_url, md5)}
        fetcher.fetch_data(files, tmpdir)
        npt.assert_(op.exists(newfile))

        # Test that the file is replaced when the md5 doesn't match
        with open(newfile, 'a') as f:
            f.write("some junk")
        fetcher.fetch_data(files, tmpdir)
        npt.assert_(op.exists(newfile))
        npt.assert_equal(fetcher._get_file_md5(newfile), md5)

        # Test that an error is raised when the md5 checksum of the download
        # file does not match the expected value
        files = {"testfile.txt": (test_server_url, bad_md5)}
        npt.assert_raises(fetcher.FetcherError,
                          fetcher.fetch_data, files, tmpdir)

        # stop local HTTP Server
        server.shutdown()
        # change to original working directory
        os.chdir(current_dir)
Example #15
def main():
    host = "10.6.161.65"
    port = 8880

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((host, port))

    connection_addr = Upd_client_address_container()

    d = Thread(target=wait_response, args=(s, connection_addr,))
    d.daemon = True
    d.start()

    print("Server started")
    message = 'Connection with server established'
    while message != 'q':
        if connection_addr.addr:
            s.sendto(message.encode('utf-8'), connection_addr.addr)
            message = raw_input("->")
    s.close()
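
wait_response and Upd_client_address_container are defined elsewhere. A minimal version consistent with how they are used here, assuming the thread only records the source address of the latest datagram and prints the payload; both bodies are assumptions:

class Upd_client_address_container:
    def __init__(self):
        self.addr = None

def wait_response(sock, container):
    # Remember where each incoming datagram came from so the main loop
    # knows the client's address, and echo the payload.
    while True:
        data, addr = sock.recvfrom(1024)
        container.addr = addr
        print(data.decode('utf-8'))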
Example #16
    def parallelize(self, seconds_to_wait=2):
        """Start a parallel thread for receiving messages.

        If :meth:`start` was not called before, start will be called in the
        thread.
        The thread calls :meth:`receive_message` until the :attr:`state`
        :meth:`~AYABInterface.communication.states.State.is_connection_closed`.

        :param float seconds_to_wait: A time in seconds to wait with the
          parallel execution. This is useful to allow the controller time to
          initialize.

        .. seealso:: :attr:`lock`, :meth:`runs_in_parallel`
        """
        with self.lock:
            thread = Thread(target=self._parallel_receive_loop,
                            args=(seconds_to_wait,))
            thread.daemon = True
            thread.start()
            self._thread = thread
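
_parallel_receive_loop itself is not part of this excerpt. A sketch consistent with the docstring, assuming it sleeps for seconds_to_wait, calls start() if that has not happened yet (tracked here by a hypothetical self._started flag), and then polls receive_message() until the connection closes:

    def _parallel_receive_loop(self, seconds_to_wait):
        # Give the controller time to initialize before talking to it.
        time.sleep(seconds_to_wait)
        if not self._started:  # hypothetical flag; the real class may differ
            self.start()
        # Receive until the state reports the connection is closed.
        while not self.state.is_connection_closed:
            self.receive_message()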
Example #17
    def on_query_completions(view, prefix, locations):
        """Function that is called when user queries completions in the code

        Args:
            view (sublime.View): current view
            prefix (str): completion prefix typed by the user
            locations (list[int]): positions of the cursor (first if many).

        Returns:
            sublime.Completions: completions with a flag
        """
        log.debug(" on_query_completions view id %s", view.id())
        if view.is_scratch():
            return None

        if not Tools.is_valid_view(view):
            return None

        if completer.async_completions_ready:
            completer.async_completions_ready = False
            return (completer.completions, sublime.INHIBIT_WORD_COMPLETIONS)

        # Verify that character under the cursor is one allowed trigger
        if not Tools.needs_autocompletion(locations[0], view, settings):
            # send None and show completions from other plugins if available
            return None

        log.debug(" starting async auto_complete at pos: %s", locations[0])

        # create a daemon thread to update the completions
        completion_thread = Thread(
            target=completer.complete,
            args=[view, locations[0], settings.errors_on_save])
        completion_thread.daemon = True
        completion_thread.start()

        # show default completions for now
        return None
Example #18
    def test_partial_logs(self):
        threads = []
        proc = IOProcess(timeout=1, max_threads=10)
        proc._sublog = FakeLogger()

        def worker():
            for i in range(100):
                proc.stat(__file__)

        try:
            for i in range(4):
                t = Thread(target=worker)
                t.daemon = True
                t.start()
                threads.append(t)
        finally:
            for t in threads:
                t.join()
            proc.close()

        for msg in proc._sublog.messages:
            self.assertFalse('DEBUG|' in msg,
                             "Raw log data in log message: %r" % msg)
Example #19
    def main_run(self):
        #1.
        #6. make trained data
        np.random.seed(9)
        power, nomal, short = mtd.start(
            25
        )  # these three values aren't actually used now (with them you could do the plotting)
        # Array where the actual test data is placed.
        test_data = []
        # Array where the actual label data of the test data is placed.
        result_data = []
        # For calculating FPS
        prev_time = 0

        #7.
        print("loading facial landmark predictor...")
        detector = dlib.get_frontal_face_detector()  # detector that finds faces
        predictor = dlib.shape_predictor(
            "drow/shape_predictor_68_face_landmarks.dat"
        )  # model that marks each landmark on the face; holds the coordinates of 68 points on the face ROI

        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[
            "left_eye"]  # pick the left eye among the facial landmarks; holds where the eye starts and ends within each shape
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[
            "right_eye"]  # pick the right eye among the facial landmarks
        # The key is used to fetch the coordinates of that facial region.

        #8.
        print("starting video stream thread...")

        vs = cv2.VideoCapture(0)

        self.camera = vs
        time.sleep(1.0)

        #9.
        self.th_open = Thread(target=self.init_open_ear)
        self.th_open.daemon = True
        self.th_open.start()
        self.th_close = Thread(target=self.init_close_ear)
        self.th_close.daemon = True
        self.th_close.start()

        FPS = 60  # desired frame rate
        prev_time = 0
        #####################################################################################################################
        try:
            while self.trigger is not None:

                ret, frame = vs.read()
                current_time = time.time() - prev_time

                if (ret is not True) or (current_time <= 1. / FPS):
                    continue

                prev_time = time.time()

                frame = cv2.flip(frame, 1)

                L, gray = lr.light_removing(frame)

                rects = detector(gray, 0)

                for rect in rects:  # for each detected face
                    shape = predictor(gray, rect)  # get the 68 landmark coordinates of the face
                    shape = face_utils.shape_to_np(
                        shape)  # convert the 68 coordinates to a NumPy array

                    leftEye = shape[lStart:
                                    lEnd]  # keep only the coordinates belonging to the left eye
                    rightEye = shape[
                        rStart:rEnd]  # keep only the coordinates belonging to the right eye
                    # data used to check whether the eyes are closed
                    leftEAR = self.eye_aspect_ratio(leftEye)
                    rightEAR = self.eye_aspect_ratio(rightEye)

                    #(leftEAR + rightEAR) / 2 => both_ear.
                    self.both_ear = (
                        leftEAR + rightEAR
                    ) * 500  # scaled up by 500 to enlarge the value range
                    # solid evidence of whether the eyes are open or closed

                    leftEyeHull = cv2.convexHull(leftEye)
                    rightEyeHull = cv2.convexHull(rightEye)
                    cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                    cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0),
                                     1)  # compute the convex hulls and draw them on the overlay around the eyes

                    if self.both_ear < self.EAR_THRESH:  # below the threshold means the eyes are closed
                        if not self.TIMER_FLAG:  # only on the first frame the eyes are detected as closed
                            start_closing = timeit.default_timer()
                            self.TIMER_FLAG = True
                        self.COUNTER += 1  # increment the closed-eye frame counter

                        if self.COUNTER >= self.EAR_CONSEC_FRAMES:  # the eyes have been closed for 20 or more frames

                            mid_closing = timeit.default_timer(
                            )  # checkpoint taken while the eyes are still closed
                            closing_time = round(
                                (mid_closing - start_closing), 3
                            )  # once the drowsy count passes 20, measure how long the eyes have been closed
                            # i.e. the time it took to decide the driver is dozing

                            # judged to be dozing, and the eyes are still closed
                            if closing_time >= self.RUNNING_TIME:  # once the closing time exceeds RUNNING_TIME; with the eyes kept closed it keeps exceeding it, so the alarm keeps ringing
                                if self.RUNNING_TIME == 0:  # the very first doze, i.e. the first time the alarm rings
                                    CUR_TERM = timeit.default_timer(
                                    )  # used below to compute the interval up to this doze
                                    OPENED_EYES_TIME = round(
                                        (CUR_TERM - self.PREV_TERM),
                                        3)  # how long the eyes stayed open since the previous alarm fully ended
                                    self.PREV_TERM = CUR_TERM  # record when this alarm was first scheduled
                                    self.RUNNING_TIME = 1.75  # set how long the alarm rings

                                self.RUNNING_TIME += 2
                                self.ALARM_FLAG = True  # set the flag so the alarm can be turned on
                                self.ALARM_COUNT += 1  # increment the alarm count

                                print("{0}st ALARM".format(
                                    self.ALARM_COUNT))  # which alarm this is
                                print(
                                    "The time eyes is being opened before the alarm went off :",
                                    OPENED_EYES_TIME)  # time the eyes stayed open before the alarm went off
                                print("closing time :",
                                      closing_time)  # time the eyes have been closed
                                t = Thread(target=self.wake_up,
                                           args=())  # start the thread that sounds the alarm
                                t.daemon = True
                                t.start()
                            cv2.rectangle(frame, (rect.left(), rect.top()),
                                          (rect.right(), rect.bottom()),
                                          (0, 0, 255), 3)
                            cv2.putText(frame, "You are Drowsy", (300, 130),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (0, 0, 255), 2)

                    else:  # not dozing: the EAR is above the threshold
                        self.COUNTER = 0  # reset the closed-eye counter; the eyes must stay closed for 20+ consecutive frames to count as dozing, and opening them resets the count
                        self.TIMER_FLAG = False  # reset the timer flag
                        self.RUNNING_TIME = 0  # reset the running time as well

                        if self.ALARM_FLAG:  # only while the alarm flag is set, i.e. the alarm was ringing
                            end_closing = timeit.default_timer()
                            (self.closed_eyes_time).append(
                                round((end_closing - start_closing), 3)
                            )  # the moment the eyes opened after the alarm; more precisely, the total time the eyes were kept closed
                            print("The time eyes were being offed :",
                                  (self.closed_eyes_time))

                        self.ALARM_FLAG = False  # clear the alarm flag so it no longer affects calculations such as the closed-eye time

                        cv2.putText(frame,
                                    "EAR : {:.2f}".format(self.both_ear),
                                    (300, 130), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (200, 30, 20), 2)
                    # for each detected face, display the eyelid distance that shows whether the eyes are open or closed

                ret, jpeg = cv2.imencode('.jpg', frame)
                self.frame = jpeg
                self.check = True

        except Exception as ex:
            print('let out', ex)
Example #20
def Yawn():
    def sound_alarm(path):
        # play an alarm sound
        playsound.playsound(path)

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-a",
                    "--alarm",
                    type=str,
                    default="",
                    help="path alarm .WAV file")
    args = vars(ap.parse_args())

    PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(PREDICTOR_PATH)
    #cascade_path='haarcascade_frontalface_default.xml'
    #cascade = cv2.CascadeClassifier(cascade_path)
    detector = dlib.get_frontal_face_detector()

    def get_landmarks(im):
        rects = detector(im, 1)

        if len(rects) > 1:
            return "error"
        if len(rects) == 0:
            return "error"
        return np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])

    def annotate_landmarks(im, landmarks):
        im = im.copy()
        for idx, point in enumerate(landmarks):
            pos = (point[0, 0], point[0, 1])
            cv2.putText(im,
                        str(idx),
                        pos,
                        fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                        fontScale=0.4,
                        color=(0, 0, 255))
            cv2.circle(im, pos, 3, color=(0, 255, 255))
        return im

    def top_lip(landmarks):
        top_lip_pts = []
        for i in range(50, 53):
            top_lip_pts.append(landmarks[i])
        for i in range(61, 64):
            top_lip_pts.append(landmarks[i])
        top_lip_all_pts = np.squeeze(np.asarray(top_lip_pts))
        top_lip_mean = np.mean(top_lip_pts, axis=0)
        return int(top_lip_mean[:, 1])

    def bottom_lip(landmarks):
        bottom_lip_pts = []
        for i in range(65, 68):
            bottom_lip_pts.append(landmarks[i])
        for i in range(56, 59):
            bottom_lip_pts.append(landmarks[i])
        bottom_lip_all_pts = np.squeeze(np.asarray(bottom_lip_pts))
        bottom_lip_mean = np.mean(bottom_lip_pts, axis=0)
        return int(bottom_lip_mean[:, 1])

    def mouth_open(image):
        landmarks = get_landmarks(image)

        if landmarks == "error":
            return image, 0

        image_with_landmarks = annotate_landmarks(image, landmarks)
        top_lip_center = top_lip(landmarks)
        bottom_lip_center = bottom_lip(landmarks)
        lip_distance = abs(top_lip_center - bottom_lip_center)
        return image_with_landmarks, lip_distance

        #cv2.imshow('Result', image_with_landmarks)
        #cv2.imwrite('image_with_landmarks.jpg',image_with_landmarks)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    yawns = 0
    yawn_status = False

    while True:
        ret, frame = cap.read()
        image_landmarks, lip_distance = mouth_open(frame)

        prev_yawn_status = yawn_status

        if lip_distance > 55:
            yawn_status = True

            cv2.putText(frame, "Subject is yawning", (50, 450),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
            # check to see if an alarm file was supplied,
            # and if so, start a thread to have the alarm
            # sound played in the background
            if args["alarm"] != "":
                t = Thread(target=sound_alarm, args=(args["alarm"], ))
                t.daemon = True
                t.start()

            output_text = " Yawn Count: " + str(yawns + 1)

            cv2.putText(frame, output_text, (50, 50), cv2.FONT_HERSHEY_COMPLEX,
                        1, (0, 255, 127), 2)
            if yawns >= 3:
                cv2.putText(frame, "Drowsiness Detected", (150, 150),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 127), 2)
                p = sms.snd()
                yawns = 0

        else:
            yawn_status = False

        if prev_yawn_status == True and yawn_status == False:
            yawns += 1

        cv2.imshow('Live Landmarks', image_landmarks)
        cv2.imshow('Yawn Detection', frame)

        if cv2.waitKey(1) == ord("q"):  # press q to quit
            break

    cap.release()
    cv2.destroyAllWindows()
Example #21
                    if (IOT.stateAir1On == True
                        ):  # if the state is set to ON
                        print 'D'
                        self.sendSignalAir1On(False)  # turn the air conditioner off
                        self.setStateAir1On(False)  # store the state in the database
                elif (IOT.stateAir1On == False):
                    print 'E'
                    self.sendSignalAir1On(True)
                    self.setStateAir1On(True)
            elif (IOT.stateAir1On == True):
                print 'F'
                self.sendSignalAir1On(False)
                self.setStateAir1On(False)

            self.initStateAir1()
            print 'before sleep'
            time.sleep(4)


# START
print('START !')
iot = IOT()

subprocess_configuration1 = Thread(target=iot.checkConfiguration1)
subprocess_configuration1.daemon = True
subprocess_configuration1.start()

subprocess_control1 = Thread(target=iot.controlAir1)
subprocess_control1.daemon = True
subprocess_control1.start()
Example #22
 def run(self):
    t = Thread(target=self.do_work)
    t.daemon = True
    t.start()
    self.serve(on_message)
    t.join()
Example #23
    def process_frame(self, frame=None, eyethresh=0, mouththresh=0):
        global st, er, mr

        #Detect faces in frame
        faces_loc = self.ddestimator.detect_faces(frame, None, True)

        cv2.putText(frame,
                    "TEDDY HACKERS", (20, 25),
                    cv2.FONT_HERSHEY_TRIPLEX,
                    0.9, (36, 36, 240),
                    thickness=2)

        # If at least one face was found...
        if len(faces_loc) > 0:

            # Only interested in the first face found (for this demo)
            face_loc = faces_loc[0]

            #Predict coordinates of 68 points of this face using ML trained model
            points = self.ddestimator.pred_points_on_face(frame, face_loc)

            #All immediate estimations based on points locations
            euler, rotation, translation = self.ddestimator.est_head_dir(
                points)

            if demo1.CALIBRATE_CAMERA_ANGLES and not self.ddestimator.has_calibrated:
                has_calibration, _, meds = self.ddestimator.get_med_eulers()
                if has_calibration:
                    self.ddestimator.calibrate_camera_angles(meds)
            _, _, gaze_D = self.ddestimator.est_gaze_dir(points)
            ear_B, ear_R, ear_L = self.ddestimator.est_eye_openness(points)

            mar = self.ddestimator.est_mouth_openess(points)
            print("MAR", mar)

            if abs(st - int(round(time.time()))) < 8:

                if er < ear_B:
                    er = ear_B

                if mr == 0:
                    mr = mar

                elif mr > mar:
                    mr = mar

                return frame

            #All timescale estimations based on points locations
            head_distraction, _, _ = self.ddestimator.est_head_dir_over_time()
            if not head_distraction:
                gaze_distraction, _, _ = self.ddestimator.est_gaze_dir_over_time(
                )
            else:
                gaze_distraction = False
            eye_drowsiness, _, _, eye_closedness = self.ddestimator.get_eye_closedness_over_time(
                ear_threshold=eyethresh)
            did_yawn, _, _, _ = self.ddestimator.get_mouth_openess_over_time(
                mar_threshold=mouththresh)
            #########################################
            ##########################################
            global alarm_frames_threshold, aft, last_alert, talking_frames_threshold, talking_theshold, right, left

            if mar >= mr and mar < mouththresh:
                talking_theshold += 2
            else:
                talking_theshold -= 1

            if talking_theshold > talking_frames_threshold:
                cv2.putText(frame,
                            "Talking",
                            (frame.shape[1] // 2, frame.shape[0] // 2),
                            cv2.FONT_HERSHEY_COMPLEX,
                            0.8, (8, 8, 183),
                            thickness=2)

            if (eye_drowsiness and eye_closedness) or did_yawn:
                aft += 2
            if head_distraction or gaze_distraction:
                aft += 1

            t = Thread(target=fun)
            if aft > alarm_frames_threshold:
                if abs(last_alert - int(round(time.time()))) > 7:

                    t.daemon = True
                    t.start()
                    t2 = Thread(target=textsms)
                    t2.daemon = True
                    #t2.start()
                    last_alert = int(round(time.time()))
                aft = 0

            kss = self.ddestimator.calc_kss()
            if kss is not None:
                print("\t%.2f" % (kss * 10))

            #Show results on frame
            if self.show_points:
                frame = self.ddestimator.draw_points_on_face(
                    frame, points, (0, 0, 255))

            if self.show_bounding:
                bc_2d_coords = self.ddestimator.proj_head_bounding_cube_coords(
                    rotation, translation)
                frame = self.ddestimator.draw_bounding_cube(
                    frame, bc_2d_coords, (250, 149, 104), euler)

            if self.show_gaze:

                if right and left:
                    pass  # no warning

                elif right:
                    if gaze_D <= 0:
                        gl_2d_coords = self.ddestimator.proj_gaze_line_coords(
                            rotation, translation, gaze_D)
                        self.ddestimator.draw_gaze_line(
                            frame, gl_2d_coords, (0, 70, 150), gaze_D)
                elif left:
                    if gaze_D > 0:
                        gl_2d_coords = self.ddestimator.proj_gaze_line_coords(
                            rotation, translation, gaze_D)
                        self.ddestimator.draw_gaze_line(
                            frame, gl_2d_coords, (0, 70, 150), gaze_D)

                else:
                    gl_2d_coords = self.ddestimator.proj_gaze_line_coords(
                        rotation, translation, gaze_D)
                    self.ddestimator.draw_gaze_line(frame, gl_2d_coords,
                                                    (0, 70, 150), gaze_D)

            if self.show_ear:
                frame = self.ddestimator.draw_eye_lines(
                    frame, points, ear_R, ear_L)

            if self.show_mar:
                frame = self.ddestimator.draw_mouth(frame, points, mar)

            if self.show_dd:
                h = frame.shape[0]
                if head_distraction:
                    print()

                elif self.show_gaze and gaze_distraction:
                    cv2.putText(frame,
                                "Distration due to Gaze",
                                (frame.shape[1] // 2, h - 60),
                                cv2.FONT_HERSHEY_COMPLEX,
                                0.8, (8, 8, 183),
                                thickness=2)
                if did_yawn:
                    cv2.putText(frame,
                                "Yawn", (frame.shape[1] // 2, h - 40),
                                cv2.FONT_HERSHEY_COMPLEX,
                                0.8, (8, 8, 183),
                                thickness=2)

                if eye_closedness:
                    cv2.putText(frame,
                                "DROWSY ", (frame.shape[1] // 2, h - 20),
                                cv2.FONT_HERSHEY_COMPLEX,
                                0.8, (8, 8, 183),
                                thickness=2)
                elif self.show_ear and eye_drowsiness:
                    cv2.putText(frame,
                                "DROWSY", (frame.shape[1] // 2, h - 20),
                                cv2.FONT_HERSHEY_COMPLEX,
                                0.8, (8, 8, 183),
                                thickness=2)

        return frame
Example #24
        #increase the counter
        if eye_aspect_ratio < EAR_limit:
            FRAME_COUNTER += 1

            #if the eyes were closed for >=40 frames alert the driver by playing alert sound
            if FRAME_COUNTER >= EAR_UNDER_LIMIT_FRAMES:
                print("Logs: The driver was found sleepy!")

                if not ALERT:
                    ALERT = True

                    # start the thread to play the sound in the background
                    if alert_sound_file != "":
                        thread = Thread(target=play_audio,
                                        args=(alert_sound_file, ))
                        thread.daemon = True
                        #starting the thread
                        thread.start()
                        print(
                            "Logs: Played the Alert sound to wake up the driver..."
                        )

                        ###Twilio message sending
                        ###Twilio message sending
                        #send the message to a friend/family
                        # Account sid provided by Twilio
                        account_sid = ''
                        #Authentication token provided by Twilio
                        auth_token = ''

                        # Phone number of the friend where we need to send the text message
Example #25
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        lip = shape[48:60]
        cv2.drawContours(frame, [lip], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
            COUNTER += 1

            if COUNTER >= EYE_AR_CONSEC_FRAMES:

                if alarm_status == False:
                    alarm_status = True
                    # t = Thread(target=alarm, args=('wake up sir',))
                    t1 = Thread(target=sound_alarm, args=('alarm.wav', ))
                    # t.daemon = True
                    t1.daemon = True
                    # t.start()
                    t1.start()

                cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        else:
            COUNTER = 0
            alarm_status = False

        if distance > YAWN_THRESH:
            cv2.putText(frame, "Yawn Alert", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            if alarm_status2 == False and saying == False:
                alarm_status2 = True
Example #26
    def show_webcam(vs):

        import datetime
        import numpy as np
        import pandas as pd

        from time import time
        from time import sleep
        import re
        import os
        import math
        import argparse
        from collections import OrderedDict

        ### Image processing ###
        from scipy.ndimage import zoom
        from scipy.spatial import distance
        import imutils
        from scipy import ndimage

        import dlib

        from tensorflow.keras.models import load_model
        from imutils import face_utils

        import requests

        global shape_x
        global shape_y
        global input_shape
        global nClasses

        from threading import Thread
        import playsound

        import simpleaudio as sa

        # assumed sources for exposure.equalize_hist and imsave used below
        from skimage import exposure
        from skimage.io import imsave

        shape_x = 48
        shape_y = 48
        input_shape = (shape_x, shape_y, 1)
        nClasses = 7

        thresh = 0.25
        frame_check = 20

        def prepare(filepath):
            IMG_SIZE = 150
            img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
            return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)

        def sound_alarm():
            filename = 'alarm.wav'
            wave_obj = sa.WaveObject.from_wave_file(filename)
            play_obj = wave_obj.play()

        def eye_aspect_ratio(eye):
            A = distance.euclidean(eye[1], eye[5])
            B = distance.euclidean(eye[2], eye[4])
            C = distance.euclidean(eye[0], eye[3])
            ear = (A + B) / (2.0 * C)
            return ear

        def detect_face(frame):

            #Cascade classifier pre-trained model
            cascPath = 'face_landmarks.dat'
            faceCascade = cv2.CascadeClassifier(cascPath)

            #BGR -> Gray conversion
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            #Cascade MultiScale classifier
            detected_faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=6,
                minSize=(shape_x, shape_y),
                flags=cv2.CASCADE_SCALE_IMAGE)
            coord = []

            for x, y, w, h in detected_faces:
                if w > 100:
                    sub_img = frame[y:y + h, x:x + w]
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255),
                                  1)
                    coord.append([x, y, w, h])

            return gray, detected_faces, coord

        def extract_face_features(faces, offset_coefficients=(0.075, 0.05)):
            gray = faces[0]
            detected_face = faces[1]

            new_face = []

            for det in detected_face:
                #Region dans laquelle la face est détectée
                x, y, w, h = det
                #X et y correspondent à la conversion en gris par gray, et w, h correspondent à la hauteur/largeur

                # Offset coefficient; np.floor takes the lowest integer (trims the image border)
                horizontal_offset = int(np.floor(offset_coefficients[0] * w))
                vertical_offset = int(np.floor(offset_coefficients[1] * h))

                #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # gray holds the grayscale version of the image
                extracted_face = gray[y + vertical_offset:y + h,
                                      x + horizontal_offset:x -
                                      horizontal_offset + w]

                # Zoom in on the extracted face
                new_extracted_face = zoom(extracted_face,
                                          (shape_x / extracted_face.shape[0],
                                           shape_y / extracted_face.shape[1]))
                #cast type float
                new_extracted_face = new_extracted_face.astype(np.float32)
                #scale
                new_extracted_face /= float(new_extracted_face.max())
                #print(new_extracted_face)

                new_face.append(new_extracted_face)

            return new_face

        EYE_AR_THRESH = 0.24

        EYE_AR_CONSEC_FRAMES = 6

        # initialize the frame counter as well as a boolean used to
        # indicate if the alarm is going off
        COUNTER = 0
        ALARM_ON = False

        # initialize dlib's face detector (HOG-based) and then create
        # the facial landmark predictor
        print("[INFO] loading facial landmark predictor...")
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")

        # grab the indexes of the facial landmarks for the left and
        # right eye, respectively
        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

        (nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]
        (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
        (jStart, jEnd) = face_utils.FACIAL_LANDMARKS_IDXS["jaw"]

        (eblStart, eblEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
        (ebrStart, ebrEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]

        model = load_model('video.h5')
        face_detect = dlib.get_frontal_face_detector()
        predictor_landmarks = dlib.shape_predictor("face_landmarks.dat")

        nag = 1
        count = 0
        # Start the video capture
        nav = 0
        prev = 0
        while True:

            while True:
                # Capture frame-by-frame
                '''imageResp=urlopen(url)
				imgNp=np.array(bytearray(imageResp.read()),dtype=np.uint8)
				frame1=cv2.imdecode(imgNp,-1)'''

                ret, frame1 = vs.read()
                count = count + 1
                #frame = imutils.resize(frame1, width=450)
                if datetime.datetime.now().hour >= 18:
                    frame = exposure.equalize_hist(frame1)
                    imsave('test2.jpg', frame1)

                else:
                    cv2.imwrite('test2.jpg', frame1)
                img1 = cv2.imread('test2.jpg')
                '''if nav==1:
					gen(img1)
					nav=0'''
                gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                if count == 1 or count == 300:
                    violence('test2.jpg')
                    count = 2
                face_index = 0

                #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                rects = face_detect(gray, 1)
                #gray, detected_faces, coord = detect_face(frame)
                '''pres=len(rects)
				if pres!=prev:
					prev=pres
					nav=1'''

                # detect faces in the grayscale frame
                #rects = detector(gray, 0)

                for (i, rect) in enumerate(rects):
                    try:
                        shape = predictor_landmarks(gray, rect)
                        shape = face_utils.shape_to_np(shape)

                        # Identify face coordinates
                        (x, y, w, h) = face_utils.rect_to_bb(rect)
                        face = gray[y:y + h, x:x + w]

                        #Zoom on extracted face
                        face = zoom(
                            face,
                            (shape_x / face.shape[0], shape_y / face.shape[1]))

                        #Cast type float
                        face = face.astype(np.float32)

                        #Scale
                        face /= float(face.max())
                        face = np.reshape(face.flatten(), (1, 48, 48, 1))

                        #Make Prediction
                        prediction = model.predict(face)
                        prediction_result = np.argmax(prediction)
                        '''print("Angry : " + str(round(prediction[0][0],3)))
						print("Disgust : " + str(round(prediction[0][1],3)))
						print("Fear : " + str(round(prediction[0][2],3)))
						print("Happy : " + str(round(prediction[0][3],3)))
						print("Sad : " + str(round(prediction[0][4],3)))
						print("Surprise : " + str(round(prediction[0][5],3)))
						print("Neutral : " + str(round(prediction[0][6],3)))'''

                        # Rectangle around the face
                        '''cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
					
						cv2.putText(frame, "Face #{}".format(i + 1), (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
				 
						for (j, k) in shape:
							cv2.circle(frame, (j, k), 1, (0, 0, 255), -1)'''

                        shape1 = predictor(gray, rect)
                        shape1 = face_utils.shape_to_np(shape1)

                        # extract the left and right eye coordinates, then use the
                        # coordinates to compute the eye aspect ratio for both eyes
                        leftEye = shape[lStart:lEnd]
                        rightEye = shape[rStart:rEnd]
                        leftEAR = eye_aspect_ratio(leftEye)
                        rightEAR = eye_aspect_ratio(rightEye)

                        # average the eye aspect ratio together for both eyes
                        ear = (leftEAR + rightEAR) / 2.0
                        #print(1)

                        # compute the convex hull for the left and right eye, then
                        # visualize each of the eyes

                        leftEyeHull = cv2.convexHull(leftEye)
                        rightEyeHull = cv2.convexHull(rightEye)
                        cv2.drawContours(img1, [leftEyeHull], -1, (0, 255, 0),
                                         1)
                        cv2.drawContours(img1, [rightEyeHull], -1, (0, 255, 0),
                                         1)

                        # check to see if the eye aspect ratio is below the blink
                        # threshold, and if so, increment the blink frame counter
                        if ear < EYE_AR_THRESH:
                            COUNTER += 1
                            #print(1)
                            # if the eyes were closed for a sufficient number of
                            # then sound the alarm
                            if COUNTER >= EYE_AR_CONSEC_FRAMES:

                                cv2.putText(img1, "DROWSINESS ALERT!",
                                            (10, 80), cv2.FONT_HERSHEY_SIMPLEX,
                                            0.7, (0, 0, 255), 2)
                                #print('drowsiness alert')
                                # if the alarm is not on, turn it on
                                if not ALARM_ON:
                                    ALARM_ON = True

                                    # check to see if an alarm file was supplied,
                                    # and if so, start a thread to have the alarm
                                    # sound played in the background

                                    t = Thread(target=sound_alarm)
                                    t.daemon = True
                                    t.start()

                            #nag=1
                            # draw an alarm on the frame

                        # otherwise, the eye aspect ratio is not below the blink
                        # threshold, so reset the counter and alarm
                        else:
                            COUNTER = 0
                            ALARM_ON = False
                        '''
						# 1. Add prediction probabilities
						cv2.putText(frame, "----------------",(40,100 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 0)
						cv2.putText(frame, "Emotional report : Face #" + str(i+1),(40,120 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 0)
						cv2.putText(frame, "Angry : " + str(round(prediction[0][0],3)),(40,140 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 0)
						cv2.putText(frame, "Disgust : " + str(round(prediction[0][1],3)),(40,160 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 0)
						cv2.putText(frame, "Fear : " + str(round(prediction[0][2],3)),(40,180 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 1)
						cv2.putText(frame, "Happy : " + str(round(prediction[0][3],3)),(40,200 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 1)
						cv2.putText(frame, "Sad : " + str(round(prediction[0][4],3)),(40,220 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 1)
						cv2.putText(frame, "Surprise : " + str(round(prediction[0][5],3)),(40,240 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 1)
						cv2.putText(frame, "Neutral : " + str(round(prediction[0][6],3)),(40,260 + 180*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 155, 1)'''

                        # 2. Annotate main image with a label
                        if prediction_result == 0:
                            cv2.putText(img1, "Angry", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        elif prediction_result == 1:
                            cv2.putText(img1, "Disgust", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        elif prediction_result == 2:
                            cv2.putText(img1, "Fear", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        elif prediction_result == 3:
                            cv2.putText(img1, "Happy", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        elif prediction_result == 4:
                            cv2.putText(img1, "Sad", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        elif prediction_result == 5:
                            cv2.putText(img1, "Surprise", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                        else:
                            cv2.putText(img1, "Neutral", (x + w - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)

                        # 3. Eye Detection and Blink Count
                        leftEye = shape[lStart:lEnd]
                        rightEye = shape[rStart:rEnd]

                        # Compute Eye Aspect Ratio
                        leftEAR = eye_aspect_ratio(leftEye)
                        rightEAR = eye_aspect_ratio(rightEye)
                        ear = (leftEAR + rightEAR) / 2.0

                        # And plot its contours
                        leftEyeHull = cv2.convexHull(leftEye)
                        rightEyeHull = cv2.convexHull(rightEye)
                        cv2.drawContours(img1, [leftEyeHull], -1, (0, 255, 0),
                                         1)
                        cv2.drawContours(img1, [rightEyeHull], -1, (0, 255, 0),
                                         1)

                        # 4. Detect Nose
                        nose = shape[nStart:nEnd]
                        noseHull = cv2.convexHull(nose)
                        cv2.drawContours(img1, [noseHull], -1, (0, 255, 0), 1)

                        # 5. Detect Mouth
                        mouth = shape[mStart:mEnd]
                        mouthHull = cv2.convexHull(mouth)
                        cv2.drawContours(img1, [mouthHull], -1, (0, 255, 0), 1)

                        # 6. Detect Jaw
                        jaw = shape[jStart:jEnd]
                        jawHull = cv2.convexHull(jaw)
                        cv2.drawContours(img1, [jawHull], -1, (0, 255, 0), 1)

                        # 7. Detect Eyebrows
                        ebr = shape[ebrStart:ebrEnd]
                        ebrHull = cv2.convexHull(ebr)
                        cv2.drawContours(img1, [ebrHull], -1, (0, 255, 0), 1)
                        ebl = shape[eblStart:eblEnd]
                        eblHull = cv2.convexHull(ebl)
                        cv2.drawContours(img1, [eblHull], -1, (0, 255, 0), 1)
                        '''modelx=tf.keras.models.load_model("64x3-CNN.model")

						prediction=modelx.predict([prepare('test2.jpg')])
						print(prediction)'''

                    except:
                        pass

                cv2.putText(img1, 'Number of Faces : ' + str(len(rects)),
                            (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, 155, 1)
                cv2.imshow('Video', img1)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    vs.release()
                    break
Example #27
def callback(subtopic, time_given, data):

    global running

    routing_key = data[1]
    body = data[0]

    a = 0
    go_on = True
    q = queue.Queue()

    while go_on:
        try:
            os.mkdir(os.path.join(log_path, '{}-{}'.format(routing_key, a)))
            go_on = False
        except FileExistsError:
            a += 1

    log_file = os.path.join(log_path, '{}-{}'.format(routing_key, a),
                            'data.{}'.format(body.get('file_type', 'unknown')))
    running[log_file] = True

    print('[{}] streamer connected'.format(log_file))
    with open(
            os.path.join(log_path, '{}-{}'.format(routing_key, a), 'info.txt'),
            'w') as f:
        f.write(json.dumps(body))

    def run(log_file):
        global global_runner, running

        context = zmq.Context()
        s = context.socket(zmq.SUB)
        s.setsockopt_string(zmq.SUBSCRIBE, '')
        # s.RCVTIMEO = 30000
        s.connect(body['address'])
        sockets.append(s)
        t = time.time()

        d = bytes()
        while running[log_file] and global_runner:
            data = s.recv()
            if data == b'CLOSE':
                print('close received')
                running[log_file] = False
                break
            d += data
            if time.time() - t > 5:
                q.put(d)
                d = bytes()
                t = time.time()  # restart the 5-second flush window

        global_runner = True
        if d:
            q.put(d)

        s.close()
        print('[{}] streamer closed'.format(log_file))

    def storage_writer(log_file):
        global global_runner, running
        with open(log_file, 'ab') as f:
            while global_runner or q.qsize() != 0:
                data = q.get()
                f.write(data)
                print('{} writes left to do..'.format(q.qsize()))
        print('[{}] writer closed'.format(log_file))

    _thread = Thread(target=run, args=(log_file, ))
    _thread.daemon = True
    _thread.start()

    thread = Thread(target=storage_writer, args=(log_file, ))
    thread.daemon = True
    thread.start()
def drowsiness_detection(args):
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold for to set off the
    # alarm
    EYE_AR_THRESH = 0.2
    EYE_AR_CONSEC_FRAMES = 20

    # initialize the frame counter as well as a boolean used to
    # indicate if the alarm is going off
    COUNTER = 0
    ALARM_ON = False

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=args["webcam"]).start()
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

                # if the eyes were closed for a sufficient number of
                # then sound the alarm
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    # if the alarm is not on, turn it on
                    if not ALARM_ON:
                        ALARM_ON = True

                        # check to see if an alarm file was supplied,
                        # and if so, start a thread to have the alarm
                        # sound played in the background
                        if args["alarm"] != "":
                            t = Thread(target=sound_alarm,
                                       args=(args["alarm"], ))
                            t.daemon = True
                            t.start()

                    # draw an alarm on the frame
                    cv2.putText(frame, "DROWSINESS DETECTED!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # otherwise, the eye aspect ratio is not below the blink
            # threshold, so reset the counter and alarm
            else:
                COUNTER = 0
                ALARM_ON = False

            # draw the computed eye aspect ratio on the frame to help
            # with debugging and setting the correct eye aspect ratio
            # thresholds and frame counters
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Exemple #29
0
def translate(config, sess):
    model, saver = create_model(config, sess)
    start_time = time.time()
    _, _, _, num_to_target = load_dictionaries(config)
    logging.info("NOTE: Length of translations is capped to {}".format(config.translation_maxlen))

    n_sent = 0
    try:
        batches, idxs = read_all_lines(config, open(config.valid_source_dataset, 'r').readlines())
    except exception.Error as x:
        logging.error(x.msg)
        sys.exit(1)
    in_queue, out_queue = Queue(), Queue()
    model._get_beam_search_outputs(config.beam_size)
    
    def translate_worker(in_queue, out_queue, model, sess, config):
        while True:
            job = in_queue.get()
            if job is None:
                break
            idx, x = job
            y_dummy = numpy.zeros(shape=(len(x),1))
            x, x_mask, _, _ = prepare_data(x, y_dummy, maxlen=None)
            try:
                samples = model.beam_search(sess, x, x_mask, config.beam_size)
                out_queue.put((idx, samples))
            except Exception:
                # requeue the failed job so another worker retries it
                in_queue.put(job)

    threads = [None] * config.n_threads
    for i in xrange(config.n_threads):
        threads[i] = Thread(
                        target=translate_worker,
                        args=(in_queue, out_queue, model, sess, config))
        threads[i].daemon = True
        threads[i].start()

    for i, batch in enumerate(batches):
        in_queue.put((i,batch))
    outputs = [None]*len(batches)
    for _ in range(len(batches)):
        i, samples = out_queue.get()
        outputs[i] = list(samples)
        n_sent += len(samples)
        logging.info('Translated {} sents'.format(n_sent))
    for _ in range(config.n_threads):
        in_queue.put(None)
    outputs = [beam for batch in outputs for beam in batch]
    outputs = numpy.array(outputs, dtype=numpy.object)
    outputs = outputs[idxs.argsort()]

    for beam in outputs:
        if config.normalize:
            beam = map(lambda (sent, cost): (sent, cost/len(sent)), beam)
        beam = sorted(beam, key=lambda (sent, cost): cost)
        if config.n_best:
            for sent, cost in beam:
                print seq2words(sent, num_to_target), '[%f]' % cost
        else:
            best_hypo, cost = beam[0]
            print seq2words(best_hypo, num_to_target)
    duration = time.time() - start_time
    logging.info('Translated {} sents in {} sec. Speed {} sents/sec'.format(n_sent, duration, n_sent/duration))
Exemple #30
0
            cv2.drawContours(frame, [cv2.convexHull(rightEye)], -1,
                             (0, 255, 0), 1)

            # check if eye is closed now
            if ear < EYE_AR_THRESH:
                COUNTER += 1

                # check if eye is closed too long
                if COUNTER >= EYE_AR_CONSEC_FRAMES:

                    # start alarm in new thread
                    if not ALARM_ON:
                        ALARM_ON = True
                        alarm_thread = Thread(target=sound_alarm,
                                              args=("alarm.wav", engine))
                        alarm_thread.daemon = True
                        alarm_thread.start()

                    # draw an alarm on the frame
                    cv2.putText(frame, "DROWSINESS ALERT!", (120, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # otherwise, reset counter
            else:
                COUNTER = 0
                ALARM_ON = False

    # show frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
Exemple #31
0
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))
    if msg.topic == "garage/door/switch" and msg.payload.decode() == "active":
        # payload arrives as bytes, so decode before comparing
        client.publish("garage/door/switch", "idle")
        activate_door_switch()


if __name__ == '__main__':
    port = 5000  #the custom port you want
    # set up periodical check which will send email if garage
    # stays open for too long
    send_email("Garage Dog started.")
    EMAIL_SENT = False

    # set up mqtt client
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect(MQTT_SERVER, 1883, 60)

    thread = Thread(target=periodical_check)
    thread.daemon = True
    thread.start()

    thread2 = Thread(target=client.loop_forever)
    thread2.daemon = True
    thread2.start()

    # start flask main function
    app.run(host='localhost', port=port)
print("loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

#8.
print("starting video stream thread...")
vs = VideoStream(src=0).start()
time.sleep(1.0)

#9.
th_open = Thread(target=init_open_ear)
th_open.daemon = True
th_open.start()
th_close = Thread(target=init_close_ear)
th_close.daemon = True
th_close.start()

# Basic Checks, Functions & Threads Ends here

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    L, gray = lr.light_removing(frame)
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    rects = detector(gray, 0)
Exemple #33
0
def f1():
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 48

    global COUNTER, SND, SND1, TOTAL, TOTAL1, TOTAL2, SPEED, i, j, j1, x
    ALARM_ON = False
    fr = firebase.FirebaseApplication('https://a124-270e9.firebaseio.com/')

    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args['shape_predictor'])

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=0).start()
    fileStream = False
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1

                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    # if the alarm is not on, turn it on
                    fr.put('/', 'Blinkrate', str("ALERT!!"))
                    TOTAL += 1
                    if not ALARM_ON:
                        ALARM_ON = True
                        if args["alarm"] != "":
                            t = Thread(target=sound_alarm,
                                       args=(args["alarm"], ))
                            t.daemon = True
                            t.start()

                    # draw an alarm on the frame
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            else:
                COUNTER = 0
                ALARM_ON = False
                fr.put('/', 'Blinkrate', str("0"))

            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Exemple #34
0
def flash_openocd(targets):
    success = False
    try:

        cmd_start = 'reset init; halt'
        cmd_done = 'core_reset;'  # should be implemented in the config file
        cmd_shutdown = 'shutdown'
        cmd_flash = []

        for target in targets:
            cmd_flash.append(
                ('flash write_image erase ' + target['binary'] + ' '
                 + target['address'] + ';',
                 'verify_image ' + target['binary'] + ' '
                 + target['address'] + ';'))

        # detect running openocd server
        openocdFound = False
        for line in os.popen("ps xa"):
            fields = line.split()
            process = fields[4]
            if (process.find('openocd') >= 0):
                openocdFound = True
                break

        if not openocdFound:

            def run_openocd():
                # NOTE: all targets are assumed to have the same cpu config
                cfg = targets[0]['cpu'] + '.cfg'

                orig_wd = os.getcwd()
                os.chdir(
                    os.path.realpath(os.path.dirname(__file__)) + '/config/')
                os.popen('openocd -f ' + cfg)
                os.chdir(orig_wd)

            thread = Thread(target=run_openocd)
            thread.daemon = True
            thread.start()
            time.sleep(0.1)

        tn = Telnet(args.ip, args.port)
        r = tn.expect([b'Open On-Chip Debugger'], 5)
        if r[0] != 0:
            print("ERROR starting OpenOCD:")
            print(r)

        tn.write(str.encode(cmd_start + '\n'))
        for cmd in cmd_flash:

            tn.write(str.encode(cmd[0] + '\n'))
            tn.write(str.encode(cmd[1] + '\n'))
            r = tn.expect([
                str.encode(s)
                for s in ['verified', 'error', 'checksum', 'mismatch']
            ], 5)
            done = (r[0] == 0)  # wait for 'verified'

            if done:
                success = True
                print('Flashing successful\n')
            else:
                print('Flashing failed\n')
        time.sleep(0.01)
        tn.write(str.encode(cmd_done + '\n'))
        time.sleep(0.01)
        if not openocdFound:
            print('shutdown openocd...')
            tn.write(str.encode(cmd_shutdown + '\n'))
        time.sleep(0.5)
        tn.sock.close()
    except Exception:
        print("Flashing failed\n")

    return success
Exemple #35
0
def runEyeDetection(startTime, totalTime):
    #-----------------------Variable Declaration-----------------------#
    EYE_AR_THRESHOLD = 0.3
    EYE_AR_CONSEC_FRAMES = 48
    COUNTER = 0
    BREAK_ON = False
    global BLINK_NUMBER
    BLINK_CONSEC_FRAMES = 5
    if defaultNapTime == False:
        NAPPING_FRAMES = napTime * 60
    else:
        NAPPING_FRAMES = 5 * 60
    global NAPPING_NUMBER

    #-----------------------Parsing Arguments-----------------------#
    ap = argparse.ArgumentParser()
    ap.add_argument("-p",
                    "--shape-predictor",
                    default="D:\shape_predictor.dat",
                    help="path to facial landmark predictor")

    ap.add_argument("-w",
                    "--webcam",
                    type=int,
                    default=0,
                    help="index of webcam on system")

    ap.add_argument("-b",
                    "--break",
                    type=bool,
                    default=True,
                    help="Do You want a Break Message?")

    ap.add_argument("-a",
                    "--alarm",
                    type=str,
                    default="D:\sound.wav",
                    help="index of Alarm.Wave")

    args = vars(ap.parse_args())

    #-----------------------Initializing Face Detetor and Landmark Predictor-----------------------#
    detector, predictor = loadFaceParameters(args)

    #-----------------------DLib eye Indicies-----------------------#
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    #-----------------------Starting the Video Stream-----------------------#
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=args["webcam"]).start()
    time.sleep(1.0)

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        rects = detector(gray, 0)

        for rect in rects:

            #-----------------------Eye Outlining-----------------------#
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftAspectRatio = eyeAspectRatio(leftEye)
            rightAspectRatio = eyeAspectRatio(rightEye)

            bothAspectRatio = (leftAspectRatio + rightAspectRatio) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)

            outlineColor = (0, 255, 0)
            cv2.drawContours(frame, [leftEyeHull], -1, outlineColor, 1)
            cv2.drawContours(frame, [rightEyeHull], -1, outlineColor, 1)

            #-----------------------Checking if Eyes are Closed-----------------------#
            if bothAspectRatio < EYE_AR_THRESHOLD:
                COUNTER += 1

                if COUNTER >= EYE_AR_CONSEC_FRAMES:

                    if not BREAK_ON:
                        BREAK_ON = True

                        if args["break"] != "":
                            t = Thread(target=eyesAreClosed,
                                       args=(args["break"], ))
                            t.daemon = True
                            t.start()

                    textColor = (0, 0, 255)

                    if COUNTER >= NAPPING_FRAMES:
                        cv2.putText(frame, "NAPPING!", (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, textColor,
                                    2)
                        if args["alarm"] != "":
                            t = Thread(target=wakeUpAlarm,
                                       args=(args["alarm"], ))
                            t.daemon = True
                            t.start()
            else:
                if COUNTER >= BLINK_CONSEC_FRAMES:
                    BLINK_NUMBER += 1
                if COUNTER >= NAPPING_FRAMES:
                    NAPPING_NUMBER += 1

                COUNTER = 0
                BREAK_ON = False

            textColor = (0, 0, 255)
            cv2.putText(frame, "EyeAR: {:.2f}".format(bothAspectRatio),
                        (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, textColor, 2)
            cv2.putText(frame, "Blink: {}".format(BLINK_NUMBER), (300, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, textColor, 2)

        #Show Frame
        cv2.imshow("Frame", frame)

        #-----------------------Exiting Program-----------------------#
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        if time.perf_counter() - startTime >= totalTime:
            break

    #-----------------------Closing Everything Once Stopped-----------------------#
    cv2.destroyAllWindows()
    vs.stop()
    updateDatebase()
    return -1
Exemple #36
0
def handle(arg):
    global count
    count += 1
    print("irq on", arg, count)


def read_serial():
    serial = Serial("/dev/ttyAMA0", baudrate=115200)

    while True:
        c = serial.read().decode('utf-8')
        print("Received from serial", c)


t = Thread(target=read_serial)
t.daemon = True
t.start()

GPIO.setmode(GPIO.BCM)
GPIO.setup(RECV_PIN, GPIO.IN)
GPIO.setup(SEND_PIN, GPIO.OUT)

GPIO.add_event_detect(RECV_PIN, GPIO.RISING, handle)

val = True
while True:
    GPIO.output(SEND_PIN, val)
    val = not val
    time.sleep(0.5)
Exemple #37
0
 def eyeDetectionRun(runTime):
     #Running Eye Detection
     threadEye = Thread(target=runEyeDetection,
                        args=(time.perf_counter(), runTime))
     threadEye.daemon = True
     threadEye.start()
Exemple #38
0
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('localhost', 25000))

n = 0
quit = False

def moniter():
    global n
    while not quit:
        time.sleep(1)
        print(n, ' reqs/second')
        n = 0

    
th = Thread(target=moniter)
th.daemon = True
th.start()

while True:
    try:
        sock.send(b'3')
        resp = sock.recv(100)
        n += 1
    except KeyboardInterrupt:
        print("got KeyboardInterrupt")
        quit = True
        sock.close()
        break

th.join()
Exemple #39
0
import socket
from threading import Thread


def recv_message(sock):
    while True:
        msg = sock.recv(1024)
        print(msg.decode())


sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 12000))

th = Thread(target=recv_message, args=(sock, ))
th.daemon = True
th.start()

while True:
    msg = input("Input: ")
    sock.send(msg.encode())

    if msg == "/bye":
        break

sock.close()
Exemple #40
0
def execute(s):
    t = Thread(target=turn_motor, args=(motors[s['motor']], s['power'], s['degrees']))
    t.daemon = True
    ts.append(t)
    t.start()
def main():
	# import the necessary packages
	from scipy.spatial import distance as dist
	from imutils.video import FileVideoStream
	from imutils.video import VideoStream
	from imutils import face_utils
	from threading import Thread
	import playsound
	import numpy as np
	import argparse
	import imutils
	import time
	import dlib
	import cv2

	def sound_alarm(path):
		# play an alarm sound
		playsound.playsound(path)

	def eye_aspect_ratio(eye):
		# compute the euclidean distances between the two sets of
		# vertical eye landmarks (x, y)-coordinates
		A = dist.euclidean(eye[1], eye[5])
		B = dist.euclidean(eye[2], eye[4])

		# compute the euclidean distance between the horizontal
		# eye landmark (x, y)-coordinates
		C = dist.euclidean(eye[0], eye[3])

		# compute the eye aspect ratio
		ear = (A + B) / (2.0 * C)

		# return the eye aspect ratio
		return ear

	# construct the argument parse and parse the arguments
	# ap = argparse.ArgumentParser()
	# ap.add_argument("-p", "--shape-predictor", required=True,
	# 	help="path to facial landmark predictor")
	# ap.add_argument("-a", "--alarm", type=str, default="",
	# 	help="path alarm .WAV file")
	# ap.add_argument("-w", "--webcam", type=int, default=0,
	# 	help="index of webcam on system")
	# args = vars(ap.parse_args())
	# print (args)

	# define two constants, one for the eye aspect ratio to indicate
	# blink and then a second constant for the number of consecutive
	# frames the eye must be below the threshold
	EYE_AR_THRESH = 0.3
	EYE_AR_CONSEC_FRAMES = 3

	# initialize the frame counters and the total number of blinks
	COUNTER = 0
	ALARM_ON = False

	# initialize dlib's face detector (HOG-based) and then create
	# the facial landmark predictor
	print("[INFO] loading facial landmark predictor...")
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

	# grab the indexes of the facial landmarks for the left and
	# right eye, respectively
	(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
	(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

	# start the video stream thread
	print("[INFO] starting video stream thread...")
	# vs = FileVideoStream(args["video"]).start()
	fileStream = True
	vs = VideoStream(src=0).start()
	# vs = VideoStream(usePiCamera=True).start()
	fileStream = False
	time.sleep(1.0)

	# loop over frames from the video stream
	while True:
		# if this is a file video stream, then we need to check if
		# there any more frames left in the buffer to process
		if fileStream and not vs.more():
			break

		# grab the frame from the threaded video file stream, resize
		# it, and convert it to grayscale
		# channels)
		frame = vs.read()
		frame = imutils.resize(frame, width=450)
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		# detect faces in the grayscale frame
		rects = detector(gray, 0)

		# loop over the face detections
		for rect in rects:
			# determine the facial landmarks for the face region, then
			# convert the facial landmark (x, y)-coordinates to a NumPy
			# array
			shape = predictor(gray, rect)
			shape = face_utils.shape_to_np(shape)

			# extract the left and right eye coordinates, then use the
			# coordinates to compute the eye aspect ratio for both eyes
			leftEye = shape[lStart:lEnd]
			rightEye = shape[rStart:rEnd]
			leftEAR = eye_aspect_ratio(leftEye)
			rightEAR = eye_aspect_ratio(rightEye)

			# average the eye aspect ratio together for both eyes
			ear = (leftEAR + rightEAR) / 2.0

			# compute the convex hull for the left and right eye, then
			# visualize each of the eyes
			leftEyeHull = cv2.convexHull(leftEye)
			rightEyeHull = cv2.convexHull(rightEye)
			cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
			cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

			# check to see if the eye aspect ratio is below the blink
			# threshold, and if so, increment the blink frame counter
			if (ear < EYE_AR_THRESH):
				COUNTER += 1

				# if the eyes were closed for a sufficient number of
				# then sound the alarm
				if COUNTER >= EYE_AR_CONSEC_FRAMES:
					# if the alarm is not on, turn it on
					if not ALARM_ON:
						ALARM_ON = True

						# check to see if an alarm file was supplied,
						# and if so, start a thread to have the alarm
						# sound played in the background
						if "alarm.wav" != "":
							t = Thread(target=sound_alarm,
								args=("alarm.wav",))
							t.daemon = True
							t.start()

					# draw an alarm on the frame
					cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
						cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


			# otherwise, the eye aspect ratio is not below the blink
			# threshold
			else:

				# reset the eye frame counter
				COUNTER = 0
				ALARM_ON = False

			# draw the computed eye aspect ratio on the frame to help
			# with debugging and setting the correct eye aspect ratio
			# thresholds and frame counters
			cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
				cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


		# show the frame
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF

		# if the `q` key was pressed, break from the loop
		if (key == ord("q")):
			break

	# do a bit of cleanup
	cv2.destroyAllWindows()
	vs.stop()
Exemple #42
0
    poller.register(frontend, zmq.POLLIN)
    poller.register(backend, zmq.POLLIN)

    while True:
        socks = dict(poller.poll(1000))

        if socks.get(frontend) == zmq.POLLIN:
            msg = frontend.recv_multipart()
            print("recv msg from server: ")
            print(msg)
            backend.send_multipart(msg)
        elif socks.get(backend) == zmq.POLLIN:
            msg = backend.recv_multipart()
            print("send client msg to server: ")
            print(msg)
            frontend.send_multipart(msg)


if __name__ == "__main__":
    context = zmq.Context()

    t1 = Thread(target=dealer, args=(context,))
    t2 = Thread(target=send_data, args=(context,))
    t1.daemon = True
    t2.daemon = True
    t1.start()
    t2.start()

    t1.join()
    t2.join()

q = Queue()
num_threads = 4
num_folds = 8
threads = []

# Spawn a thread for each fold
k_fold = KFold(n=len(raw_tweets), n_folds=num_folds, indices=True)
for train_indices, test_indices in k_fold:
    if len(threads) >= num_threads:
        while q.empty():
            sleep(2)
        status = q.get()
    t = Thread(target=run_fold, args=(train_indices, test_indices, vectorizer, train_data, pred, q))
    t.daemon = True
    t.start()
    threads.append(t)

# Wait for all threads to complete
for t in threads:
    t.join()

# Calculate (and print) mean squred errors for each variable
print "Mean Squared Errors:"
for variable in variables:
    y_pred = np.array(pred[variable])
    y_actu = np.array(train_data[variable])

    mse = np.average((y_pred - y_actu) ** 2)
    print "'%s': %f" % (variable, mse)
	sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
	host_name = socket.gethostname() #To get the name of host
	port_number = 8888
	print "The name of local machine",host_name

	host_port_pair = (host_name,port_number)
	print host_port_pair
	sock.bind(host_port_pair) #Bind address to the socket

	sock.listen(1)
	conn_obj,addr = sock.accept()
	print "Got a connection from ",addr

	#t1 = Thread(target=recev, args=())
	#t1.daemon = True
	#t1.start()
	t2 = Thread(target=sent, args=())
	t2.daemon = True
	t2.start()

	while True:
		try:
			msg_from_client = conn_obj.recv(1024)
			print "\nClient :", msg_from_client
		except KeyboardInterrupt:
			conn_obj.close()
			sys.exit()

		if not msg_from_client:
			break
Exemple #45
0
 def start_listening(self, callback):
     self._is_listening = True
     thread = Thread(target=self.socket_listener, args=(self.client, callback))
     thread.daemon = True
     thread.start()
                
        if not XYZ_Queue.full():
            if position is not None:
                print valuetosend
                XYZ_Queue.put(position)
        else:
            print 'XYZ Queue is full'
            time.sleep(0.02)

#websocket_running_th = Thread(target=websocket_thread)
websocket_send_th = Thread(target=websocket_send_thread)
rf24_receiver_th = Thread(target=rf24_receive_thread)
calculate_3D_th = Thread(target=calculate_XYZ_thread)

#websocket_running_th.daemon = True
websocket_send_th.daemon = True
rf24_receiver_th.daemon = True
calculate_3D_th.daemon = True



######################################################
###############      EXIT HANDLER      ###############
######################################################

def exit_handler():
    print 'My application is ending!'
    #websocket_running_th.join()
    websocket_send_th.join()
    rf24_receiver_th.join()
    calculate_3D_th.join()
Exemple #47
0
def serverLoop(HTTP_PORT = 8080, WS_PORT = 5678):
    attempts = 0
    while True:
        try:
            '''
            By default, Camera will connect to the first available ToF device.
            Alternatively can specify serial port by using Camera.open('/dev/ttyACM0') to open specific port
            '''
            camera = Camera.open()

            cameraInfo = camera.info()
            print("\nToF camera opened successfully:")

            print("    model:      %s" % cameraInfo.model)
            print("    firmware:   %s" % cameraInfo.firmware)
            print("    uid:        %s" % cameraInfo.uid)
            print("    resolution: %s" % cameraInfo.resolution)
            print("    port:       %s" % cameraInfo.port)

            ## you may simply use camera.setDefaultParameters()
            camera.setModulationFrequency(VALUE_20MHZ) ## frequency: 20MHZ
            camera.setModulationChannel(0)             ## autoChannelEnabled: 0, channel: 0
            camera.setMode(0)                          ## Mode 0, wide fov
            camera.setHdr(0)                           ## HDR off
            camera.setIntegrationTime3d(0, 800)        ## set integration time 0: 1000
            camera.setMinimalAmplitude(0, 10)          ## set minimal amplitude 0: 80
            camera.setOffset(0)                        ## set distance offset: 0
            camera.setRoi(0, 0, 159, 59)               ## set ROI to max width and height

            ## static
            Camera.setColorMode(ColorMode.DISTANCE)    ## use distance for point color
            Camera.setRange(0, 7500)                   ## points in the distance range to be colored

            break

        except Exception as e:
            attempts += 1

            if attempts > 10:
                print("Exiting due to failure to start Tau Camera!")
                print("Error: %s" % str(e))
                try:
                    sys.exit(0)
                except SystemExit:
                    os._exit(0)
            sleep(5)
        sleep(0.1)


    ip_address = '127.0.0.1'

    print("    IP address: %s" % ip_address)
    print("    URL:  %s" % 'http://' + ip_address + ':' + str(HTTP_PORT))

    print("\nPress Ctrl + C keys to shutdown ...")

    _count = 0
    start_time = time()
    running = True

    async def send3DPoints(websocket, path):
        global _count
        global running
        start_time = time()
        _count = 0

        async for message in websocket:
            data = json.loads(message)

            if data['cmd'] == 'read':
                '''
                Camera supports frame type FrameType.DISTANCE, FrameType.DISTANCE_GRAYSCALE and FrameType.DISTANCE_AMPLITUDE,
                default FrameType is FrameType.DISTANCE_GRAYSCALE

                frame = camera.readFrame(FrameType.DISTANCE_AMPLITUDE)

                To get a 3D Frame object directly from calling camera.readFrame() is an expensive call,
                alternatively you may call camera.readFrameRawData to get raw data
                and possibly to compose Frame from a separate thread
                to boost frame rate:

                ...
                from TauLidarCommon.d3 import FrameBuilder
                ...
                frameBuilder = FrameBuilder()
                ...
                dataArray = camera.readFrameRawData(frameType=FrameType.DISTANCE_GRAYSCALE)

                Possibly you may compose Frame from a separate thread
                frame = frameBuilder.composeFrame(dataArray, frameType=FrameType.DISTANCE_GRAYSCALE)

                Default FrameType is FrameType.DISTANCE_GRAYSCALE

                Following examples are how to construct depth map, grayscale and amplitude image accordingly:

                mat_depth_rgb = np.frombuffer(frame.data_depth_rgb, dtype=np.uint16, count=-1, offset=0).reshape(frame.height, frame.width, 3)
                mat_depth_rgb = mat_depth_rgb.astype(np.uint8)

                mat_grayscale = np.frombuffer(frame.data_grayscale, dtype=np.uint16, count=-1, offset=0).reshape(frame.height, frame.width)
                mat_grayscale = mat_grayscale.astype(np.uint8)

                mat_amplitude = np.frombuffer(frame.data_amplitude, dtype=np.float32, count=-1, offset=0).reshape(frame.height, frame.width)
                mat_amplitude = mat_amplitude.astype(np.uint8)
                '''
                frame = camera.readFrame(frameType=FrameType.DISTANCE_AMPLITUDE)
                if frame is None:
                    print('skip frame')
                    continue

                try:
                    payload = json.dumps({
                        'points': frame.points_3d,
                        'depth': frame.data_depth_rgb.tolist(),
                        'grayscale': frame.data_grayscale.tolist(),
                        'amplitude': frame.data_amplitude.tolist(),
                        'h': frame.height,
                        'w': frame.width
                    })
                    await websocket.send(payload)
                    
                except Exception as e:
                    print(e)
            elif data['cmd'] == 'set':
                if data['param'] == 'range':
                    Camera.setRange(0, data['value'])
                elif data['param'] == 'intTime3D':
                    camera.setIntegrationTime3d(0, data['value'])

    ws_server = websockets.serve(send3DPoints, "127.0.0.1", WS_PORT)
    asyncio.get_event_loop().run_until_complete(ws_server)

    ws_t = Thread(target=asyncio.get_event_loop().run_forever)
    ws_t.daemon = True
    ws_t.start()

    # change dir to module directory
    web_dir = Path(__file__).absolute().parent
    os.chdir(web_dir)

    # start http server
    Handler = SimpleHTTPRequestHandler
    Handler.extensions_map.update({
        ".js": "application/javascript",
    })

    httpd = socketserver.TCPServer(("", HTTP_PORT), Handler)

    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        running = False
        print('\nShutting down ...')
        sleep(0.1)
        camera.close()
        try:
            httpd.socket.close()
            httpd.server_close()

            sys.exit(0)
        except SystemExit:
            os._exit(0)
def openScreen(client=None, session=None, thread=None):
    buffer = Buffer()
    inputToSend = Buffer()

    users = []
    for u in client.fetchAllUsers():
        users.append(User(uid=u.uid, name=u.name))
    users.append(
        User(uid=client.uid,
             name=client.fetchThreadInfo(client.uid)[client.uid].name))

    doPrintImage = False
    if thread is not None:
        messages = client.fetchThreadMessages(thread_id=thread.uid, limit=10)
        console_clear()
        buffer.addToBuffer(
            '---- Type /exit to get back to messages list menu ----\n')
        print('---- Type /exit to get back to messages list menu ----')
        try:
            for msg in reversed(messages):
                names = [u.name for u in users if u.uid == msg.author]
                toPrintMsg = ''
                if len(names) > 0:
                    toPrintMsg = '[' + names[0] + ']: ' \
                                 + toUTF8(msg.text)
                else:
                    toPrintMsg = '[' + client.fetchThreadInfo(msg.author)[msg.author].name+']: '\
                                 + toUTF8(msg.text)
                print(toPrintMsg)
                buffer.addToBuffer(toPrintMsg + '\n')
                try:
                    if msg.attachments:
                        if doPrintImage:
                            response = requests.get(
                                msg.attachments[0]['preview']['uri'])
                            img = Image.open(BytesIO(response.content))
                            baseheight = 22
                            basewidth = 20
                            img = img.resize((basewidth, baseheight),
                                             Image.ANTIALIAS)
                            printImage(img)
                        else:
                            print('{attachment}: ',
                                  msg.attachments[0]['preview']['uri'])
                            buffer.addToBuffer(
                                '{attachment}: ' +
                                msg.attachments[0]['preview']['uri'] + '\n')
                    if msg.extensible_attachment is not None:
                        if doPrintImage:
                            uriPreview = msg.extensible_attachment[
                                'story_attachment']['media']['image']['uri']
                            response = requests.get(uriPreview)
                            img = Image.open(BytesIO(response.content))
                            baseheight = 22
                            basewidth = 20
                            img = img.resize((basewidth, baseheight),
                                             Image.ANTIALIAS)
                            printImage(img)
                        else:
                            uriVideo = msg.extensible_attachment[
                                'story_attachment']['media']['playable_url']
                            print('{attachment}: ', uriVideo)
                            buffer.addToBuffer('{attachment}: ' + uriVideo +
                                               '\n')
                except Exception:
                    # attachment rendering is best-effort; skip on failure
                    pass

            client.startThread(thread.uid)
            receiveThread = Thread(target=receive,
                                   args=(
                                       client,
                                       buffer,
                                       inputToSend,
                                   ))
            receiveThread.daemon = True
            receiveThread.start()
            sendThread = Thread(target=send,
                                args=(
                                    client,
                                    session,
                                    thread,
                                    buffer,
                                    inputToSend,
                                ))
            sendThread.daemon = True
            sendThread.start()
            #printThread = Thread(target=printLoop, args=(client,buffer,session,))
            #printThread.daemon = True
            #printThread.start()
            client.listen()

        except KeyboardInterrupt:
            client.stopThread()
            messageListScreen.openScreen(client, session)

    else:

        console_clear()
        buffer.addToBuffer(
            '---- Type /exit to get back to the main menu ----\n')
        print('---- Type /exit to get back to the main menu ----')
        print('search for a user: '******'', flush=True)
        buffer.addToBuffer('search for a user: '******'/exit':
                    principalScreen.openScreen(client, session)
                    return
                else:
                    choice = input('choose a person\'s index: ')
                    if isInt(choice):
                        v = to_int(choice)
                        if len(result) >= v >= 1:
                            break
            if ord(c) == 3:
                principalScreen.openScreen(client, session)
            inputToSend.addChar(c)

            result = sorted([
                u for u in users
                if containByWords(inputToSend.getBuffer(), u.name)
            ],
                            key=sort_key_result)[:10]
            resultBuffer.clearBuffer()
            for i, u in enumerate(result):
                resultBuffer.addToBuffer(str(i + 1) + ' - ' + u.name + '\n')
            reprintScreen(buffer=buffer,
                          inputBuffer=inputToSend,
                          lastInput=resultBuffer)
        #buffer.addToBuffer('[' + client.getUser().name + ']: ' + inputToSend.getBuffer() + '\n')
        chosenThread = client.fetchThreadInfo(
            result[to_int(choice) - 1].uid)[result[to_int(choice) - 1].uid]

        openScreen(client=client, session=session, thread=chosenThread)
def main():
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 30
    ALARM_ON = False
    COUNTER = 0

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    # print("-> Starting Video Stream")
    vs = VideoStream(0).start()
    # vs= VideoStream(usePiCamera=True).start()       //For Raspberry Pi
    time.sleep(1.0)

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            eye = final_ear(shape)
            ear = eye[0]
            leftEye = eye[1]
            rightEye = eye[2]



            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1

                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    if not ALARM_ON:
                        ALARM_ON = True
                        t = Thread(target=sound_alarm,
                                   args=(["alarm"],))
                        t.daemon = True
                        t.start()
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


            else:
                COUNTER = 0
                ALARM_ON = False

            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
def move_car(action=None):
    if queue.empty():
        worker = Thread(target=move, args=(action, queue,))
        worker.daemon = True
        worker.start()
        queue.put(action)
Exemple #51
0
def main():
    for i in range(10):
        t = Thread(target=createClient, args=(i, ))
        t.daemon = True
        time.sleep(0.1)
        t.start()
Exemple #52
0
        if line != '':
            WebSocketHandler.broadcast(line)
            print line
            
            remaining_lines = pipein.read()
            pipein.close()
            pipeout = open(PUBLIC_PIPE, 'w')
            pipeout.write(remaining_lines)
            pipeout.close()
        else:
            pipein.close()
        
        time.sleep(0.05)

public_broadcaster_thread = Thread(target=public_broadcaster, args=[])
public_broadcaster_thread.daemon = True


def private_messenger():
    """
    Thread which runs in parallel and constantly checks for new messages
    in the private pipe and sends them to the specific client.
    If client is not connected the message is discarded.
    """
    while __websocket_server_running__:
        pipein = open(PRIVATE_PIPE, 'r')
        line = pipein.readline().replace('\n', '').replace('\r', '')
        if line != '':
            message = json.loads(line)
            WebSocketHandler.send_private_message(user_id=message['user_id'],
                                                  message=message)
Exemple #53
0
WheelDiam = 126
TrackWidth = 594.202898551
Circ = WheelDiam * 3.14
EncCnt = 120
DistA = 0
DistB = 0
# Set variables for the motor controller

cv2.namedWindow("right")
#Clear the counters of the motors
Clear_Counters()
#mythreads = []

# thread for Logitech controller
t = Thread(target=HID_Controller, args=(Get_Controller(), ))
t.daemon = True
t.start()

# thread for MotorController RX serial
thread2 = Thread(target=read_from_port, args=(MotorController, ))
thread2.daemon = True
thread2.start()

print "start GPS"
# thread for GPS RX serial
GPS_Thread = Thread(target=GPS_RX, args=(GPS, ))
GPS_Thread.daemon = True
GPS_Thread.start()
print "started"

print "start IMU"
Exemple #54
0
def test_fetch_data():
    symmetric362 = SPHERE_FILES['symmetric362']
    with TemporaryDirectory() as tmpdir:
        md5 = fetcher._get_file_md5(symmetric362)
        bad_md5 = '8' * len(md5)

        newfile = op.join(tmpdir, "testfile.txt")
        # Test that the fetcher can get a file
        testfile_url = symmetric362
        print(testfile_url)
        testfile_dir, testfile_name = op.split(testfile_url)
        # create local HTTP Server
        test_server_url = "http://127.0.0.1:8001/" + testfile_name
        current_dir = os.getcwd()
        # change pwd to directory containing testfile.
        os.chdir(testfile_dir + os.sep)
        # use different port as shutdown() takes time to release socket.
        server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)
        server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        files = {"testfile.txt": (test_server_url, md5)}
        try:
            fetcher.fetch_data(files, tmpdir)
        except Exception as e:
            print(e)
            # stop local HTTP Server
            server.shutdown()
        npt.assert_(op.exists(newfile))

        # Test that the file is replaced when the md5 doesn't match
        with open(newfile, 'a') as f:
            f.write("some junk")
        try:
            fetcher.fetch_data(files, tmpdir)
        except Exception as e:
            print(e)
            # stop local HTTP Server
            server.shutdown()
        npt.assert_(op.exists(newfile))
        npt.assert_equal(fetcher._get_file_md5(newfile), md5)

        # Test that an error is raised when the md5 checksum of the download
        # file does not match the expected value
        files = {"testfile.txt": (test_server_url, bad_md5)}
        npt.assert_raises(fetcher.FetcherError,
                          fetcher.fetch_data, files, tmpdir)

        # stop local HTTP Server
        server.shutdown()
        # change to original working directory
        os.chdir(current_dir)

def test_dipy_home():
    test_path = 'TEST_PATH'
    if 'DIPY_HOME' in os.environ:
        old_home = os.environ['DIPY_HOME']
        del os.environ['DIPY_HOME']
    else:
        old_home = None

    reload(fetcher)

    npt.assert_string_equal(fetcher.dipy_home,
                            op.join(os.path.expanduser('~'), '.dipy'))
    os.environ['DIPY_HOME'] = test_path
    reload(fetcher)
    npt.assert_string_equal(fetcher.dipy_home, test_path)

    # return to previous state
    if old_home:
        os.environ['DIPY_HOME'] = old_home
Exemple #55
0
        cv2.drawContours(frame, [leftEyeHull], -1, (255, 69, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (255, 69, 0), 1)

        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1

            # if the eyes were closed for a sufficient number of
            # then sound the alarm
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                # if the alarm is not on, turn it on
                if not ALARM_ON:
                    ALARM_ON = True
                    c = Thread(target=send_alarm, args=())
                    c.daemon = True
                    c.start()
                    #r = requests.put(url, json=[{"alarm":1,"accident":0}])
                    # check to see if an alarm file was supplied,
                    # and if so, start a thread to have the alarm
                    # sound played in the background
                    if args["alarm"] != "":
                        t = Thread(target=sound_alarm, args=(args["alarm"], ))
                        t.daemon = True
                        t.start()
                # draw an alarm on the frame
                cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (250, 250, 210), 2)
                SMS += 1
                if SMS == 1:
                    f = Thread(target=send_sms, args=())
def relay_call(msg):
    start_th = Thread(target=relay_control.led_control, args=(msg,))
    start_th.daemon = True
    start_th.start()
#!/usr/bin/env python3
from queue import Queue
from threading import Thread


q = Queue(maxsize=1)


def worker():
    print("Work")
    item = q.get()
    print("Work done was: ", do_work(item))
    q.task_done()
    

def do_work(item):
    return (item, item*item)




if __name__ == '__main__':
    worker_t = Thread(target=worker)
    worker_t.daemon = True
    worker_t.start()

    q.put(3)
    q.join()