Example #1
def get_frames(file_path, type):
    #start video capture
    videoStream = VideoStream(file_path).start()
    time.sleep(1.0)

    start_time = time.time()
    count = 0
    while videoStream.more():
        frame = videoStream.read()
        count += 1
        if count % SKIP != 0:
            continue

        h, w = frame.shape[:2]
        #face detection
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        net.setInput(blob)
        detections = net.forward()

        if len(detections) > 0:
            i = np.argmax(detections[0, 0, :, 2])
            confidence = detections[0, 0, i, 2]

            if confidence > 0.5:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = frame[startY - 100:endY + 100, startX - 100:endX + 100]
                face = cv2.resize(face, (224, 224))
                name = str(uuid.uuid4())
                file_name = './output/' + name + '.png'
                cv2.imwrite(file_name, face)
                result.append((name, type))
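
The snippet above leans on module-level names (net, SKIP, result) that the surrounding project defines elsewhere. A minimal sketch of the setup it appears to assume, using OpenCV's Caffe-based SSD face detector (the file paths and the SKIP value are placeholders, not taken from the original project):

import cv2

# Hypothetical setup for Example #1; the real project supplies its own paths and values.
net = cv2.dnn.readNetFromCaffe('deploy.prototxt',
                               'res10_300x300_ssd_iter_140000.caffemodel')
SKIP = 5      # process every 5th frame (assumed value)
result = []   # (image name, label) pairs collected by get_frames()
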
Example #2
    def handle(self):
        util = Util(self.sock)
        self.cutil = util

        # get video filename
        # TODO: handle unknown filename
        filename = util.recv().decode('ascii')

        cmd = util.recv().decode('ascii')
        if cmd != SETUP:
            self.terminate()
            return

        util.send(OK)

        # Start video capture
        self.vs = VideoStream(filename)
        
        while True:
            cmd = util.recv().decode('ascii')
            if cmd == EXIT:
                self.terminate()
                return
            elif cmd == PLAY:
                self.streamer = threading.Thread(target=self.start_streaming, args=())
                self.streamer.start()
            elif cmd == HEARTBEAT:
                # TODO: handle if no HEARTBEAT received for long
                pass
            if self.completed:
                self.terminate()
                return
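
The handler above depends on a Util framing helper and on command constants (SETUP, PLAY, HEARTBEAT, EXIT, OK) that are defined elsewhere in that project. A plausible sketch of such a helper, assuming simple length-prefixed messages over TCP (the 4-byte header and the constant values are assumptions):

import struct

# Hypothetical command constants; the real project defines its own values.
SETUP, PLAY, HEARTBEAT, EXIT, OK = b'SETUP', b'PLAY', b'HEARTBEAT', b'EXIT', b'OK'

class Util:
    """Length-prefixed send/recv over a TCP socket (assumed design)."""

    def __init__(self, sock):
        self.sock = sock

    def send(self, payload):
        # Prefix each message with its length so the peer knows how much to read.
        self.sock.sendall(struct.pack('!I', len(payload)) + payload)

    def recv(self):
        (length,) = struct.unpack('!I', self._recv_exact(4))
        return self._recv_exact(length)

    def _recv_exact(self, n):
        data = b''
        while len(data) < n:
            chunk = self.sock.recv(n - len(data))
            if not chunk:
                raise ConnectionError('socket closed')
            data += chunk
        return data
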
Example #3
def main():
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUB)
    sock.connect("tcp://127.0.0.1:3002")

    logfile = open(
        'showarucoboard.py_log_%s.json' %
        datetime.datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S"), 'w')

    vs = VideoStream(src=1).start()
    pe = RealAntPoseEstimator()

    last_time = None
    while True:
        # Read latest image
        img, utcnowms = vs.read()

        # Compute fps
        fps = 0
        if last_time is not None:
            if last_time == utcnowms:
                continue
            fps = 1000 / (utcnowms - last_time)
        last_time = utcnowms

        # Estimate pose
        d, img = pe.get_pose(img, utcnowms)

        if d is not None:
            sock.send_json(d)
            d["sent"] = True

            print("fps %5.1f " % fps, end='')
            print(
                "- %d dist %2.1f x %1.3fm y %1.3fm z %1.3fm roll %3.0f pitch %3.0f yaw %3.0f"
                % tuple([
                    d[x] for x in [
                        "server_epoch_ms", "dist", "x", "y", "z", "roll",
                        "pitch", "yaw"
                    ]
                ]),
                end='')
            if "xvel" in d:
                print(" xvel%6.3f %6.3f y%6.3f z%6.3f" %
                      (d["xvel"], d["xvel_raw"], d["yvel"], d["zvel"]))
            else:
                print()

            logfile.write(json.dumps(d) + '\n')

        cv2.imshow('ant', img)
        if cv2.waitKey(1) == 27:
            break  # esc to quit

    vs.stop()
    cv2.destroyAllWindows()
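
The PUB socket in main() connects to tcp://127.0.0.1:3002, so the process consuming the pose messages presumably binds on that port. A minimal sketch of such a subscriber (everything about the receiving side is an assumption):

import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.bind("tcp://127.0.0.1:3002")          # the PUB side connects, so this side binds
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to every message

while True:
    pose = sub.recv_json()                # matches sock.send_json(d) above
    print(pose["server_epoch_ms"], pose["yaw"], pose["pitch"], pose["roll"])
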
Example #4
    def __init__(self):
        self.stop = False
        logging.basicConfig()
        self.logger = logging.getLogger("MMNT")
        if DEBUG:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing")

        self.masterSampleTime = time.time()
        self.slaveSampleTime = time.time()

        self.humanSampleTime = time.time()
        self.micSampleTime = time.time()

        self.logger.debug("Initializing motor control")
        self.mc = MotorControl()
        self.mc.resetMotors()

        self.logger.debug("Initializing microphone")
        dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
        if not dev:
            sys.exit("Could not find ReSpeaker Mic Array through USB")
        self.mic = Tuning(dev)
        self.mic.write("NONSTATNOISEONOFF", 1)
        self.mic.write("STATNOISEONOFF", 1)
        self.mic.write("ECHOONOFF", 1)

        self.logger.debug("Initializing video streams")
        self.topCamStream = VideoStream(1)
        self.botCamStream = VideoStream(2)

        self.logger.debug("Initializing models")
        self.ht_model = ht.get_model()
        self.tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                                      target_size=(VideoStream.DEFAULT_WIDTH,
                                                   VideoStream.DEFAULT_HEIGHT))
        self.logger.info("Initialization complete")

        self.topCamState = State.IDLE
        self.botCamState = State.IDLE

        self.topCamAngle = 0
        self.topAngleUpdated = False
        self.botCamAngle = 180
        self.botAngleUpdated = False
        self.master = Cams.TOP
        self.lastMaster = Cams.TOP

        self.botCamProc = None
        self.topCamProc = None

        self.audioMap = Map(15)
        self.checkMic()
Example #5
    def __init__(self, url, **properties):
        super(SensorFactory, self).__init__(**properties)
        self.vs = VideoStream(url).start()
        self.number_frames = 0
        self.fps = 20
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        self.shape = (720, 1280)
        self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width={},height={} ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc ! queue ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96'.format(self.shape[1], self.shape[0])
Example #6
    def __init__(self, **kwargs: dict):
        super().__init__()
        # Required variables in __del__
        self.command_socket = self.state_socket = self.videostream_socket = None
        self.state_thread = self.ack_thread = self.video_thread = None
        #Store all moves done by the drone to reverse them and allow drones to return to base
        self.all_instructions = []
        #Store the last state from the drone
        self.last_parameters = []

        self._end_connection = False

        self.local_address_command = ('', 9010)
        self.local_address_state = ('', 8890)
        self.local_address_video = ('', 11111)

        self.video_stream = kwargs.get('video_stream', False)
        self.state_listener = kwargs.get('state_listener', False)
        self.back_to_base = kwargs.get('back_to_base', False)

        self.flight_mode: AbstractFlightMode = None

        self.last_frame: bytes = None
        self.video_frames = VideoStream()
        self.av_target_opened = False
Example #7
    def __init__(self, mainWindow, parent=None):
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow

        self.videoStream = VideoStream(0)
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)

        self.inputFilename = None
        self.outputFilename = None

        self.setupUi()

        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()
Example #8
    def setup(self):
        if self.state == self.INIT:
            print("SETUP request recv")
            try:
                self.client['video_stream'] = VideoStream(
                    self.client['filename'])
                self.state = self.READY
            except IOError:
                # Report the missing file and stop; don't fall through to the OK reply
                self.rtsp_socket.send("RTSP/1.0 404\n".encode('utf-8'))
                return
            self.rtsp_socket.send(
                f"RTSP/1.0 200 OK\n{self.client['sequence_number']}\n".encode(
                    'utf-8'))
Example #9
    def __init__(self, mainWindow, parent=None):
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow

        self.videoStream = VideoStream(0)
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)

        self.inputFilename = None
        self.outputFilename = None

        self.setupUi()

        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()
Example #10
class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, url, **properties):
        super(SensorFactory, self).__init__(**properties)
        self.vs = VideoStream(url).start()
        self.number_frames = 0
        self.fps = 20
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        self.shape = (720, 1280)
        self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width={},height={} ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc ! queue ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96'.format(self.shape[1], self.shape[0])

    def on_need_data(self, src, length):
        # time.sleep(0.1)
        frame = self.vs.read()
        if frame is None:
            return
        # print(frame)
        frame = cv2.resize(frame, self.shape[::-1])
        data = frame.tobytes()  # tostring() is deprecated; tobytes() returns the raw frame bytes
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        buf.duration = self.duration
        timestamp = self.number_frames * self.duration
        buf.pts = buf.dts = int(timestamp)
        buf.offset = timestamp
        self.number_frames += 1
        retval = src.emit('push-buffer', buf)
        print('pushed buffer, frame {}, duration {} ns ({} s)'.format(self.number_frames, self.duration, self.duration / Gst.SECOND))
        if retval != Gst.FlowReturn.OK:
            print(retval)

    def do_create_element(self, url):
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.number_frames = 0
        appsrc = rtsp_media.get_element().get_child_by_name('source')
        appsrc.connect('need-data', self.on_need_data)
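
SensorFactory only does something once it is mounted on a running RTSP server. A minimal sketch of that wiring with GstRtspServer (the port, mount point, and source URL are assumptions):

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib

Gst.init(None)

server = GstRtspServer.RTSPServer()
server.set_service('8554')                       # RTSP port (assumed)
factory = SensorFactory('rtsp://camera/stream')  # placeholder source URL
factory.set_shared(True)                         # share one pipeline between clients
server.get_mount_points().add_factory('/stream', factory)
server.attach(None)

GLib.MainLoop().run()                            # serve rtsp://<host>:8554/stream
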
Example #11
def main():
    vs = VideoStream()
    vs.start()
    names, known_encodings = load_known_faces('./faces/known_faces')
    print(len(known_encodings))
    while vs.isOpened():
        image = vs.read()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        face_locations = fr.face_locations(image, model='hog')
        img_face_encodings = fr.face_encodings(image, face_locations)
        match_matrix = [
            fr.compare_faces(known_encodings, f, tolerance=0.6)
            for f in img_face_encodings
        ]
        print(match_matrix)
        img_with_faces = draw_bbox_on_img(image, face_locations)

        cv2.imshow('frame', img_with_faces)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    vs.close()
    cv2.destroyAllWindows()
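
The load_known_faces helper used above is not shown. A plausible sketch, assuming one reference photo per person named <person>.jpg/.png inside the given directory:

import os
import face_recognition as fr

def load_known_faces(directory):
    # Build parallel lists of names and 128-d face encodings from reference photos.
    names, encodings = [], []
    for file_name in sorted(os.listdir(directory)):
        if not file_name.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        image = fr.load_image_file(os.path.join(directory, file_name))
        face_encodings = fr.face_encodings(image)
        if face_encodings:                       # skip photos with no detectable face
            names.append(os.path.splitext(file_name)[0])
            encodings.append(face_encodings[0])  # assume one face per reference photo
    return names, encodings
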
Example #12
def calculate_sequence(in_path, out_path, debug=False):
    sequence = []
    detections = deque(10 * [False], 10)
    object_present = False
    with VideoStream(in_path, loop=False) as stream:
        while True:
            image = stream.read()
            if image is None:
                break

            thresh = process_image(image)
            rect = find_mug(thresh)
            detections.append(rect is not None)
            if rect is not None:
                x, y, w, h = rect
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

            object_detected = sum(detections) >= 5
            if object_detected != object_present:
                text = 'Appeared' if object_detected else 'Disappeared'
                sequence_frame = cv2.resize(image, None, fx=0.25, fy=0.25)
                cv2.putText(sequence_frame, text, (0, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)
                sequence.append(sequence_frame)
            object_present = object_detected

            if debug:
                if len(sequence):
                    cv2.imshow("Emergence", sequence[-1])

                cv2.imshow("Card Detector", image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break
    if debug:
        cv2.destroyAllWindows()
    cv2.imwrite(out_path, image_grid(sequence))
Example #13

# Initialize frame rate calculation and timer
fps = 1
timer = round(time.time())
freq = cv2.getTickFrequency()


# Initialize flags and timestamp for sending data to database
object_detected = False
send_data = False
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")


# Initialize video stream
videostream = VideoStream(resolution=(CAMERA_RES[0], CAMERA_RES[1])).start()
time.sleep(1)

# Create list of parking spots
parkingspots = [
    ParkingSpot("PS1", 100, 100, 300, 500),
    ParkingSpot("PS2", 350, 100, 550, 500)]


while True:
    # Start frame rate timer
    t1 = cv2.getTickCount()

    # Get snapshot of video stream, convert to correct color space and resize according to input requirements
    frame = videostream.read()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
Example #14
	def processRtspRequest(self, data):
		"""Process RTSP request sent from the client."""
		# Get the request type
		request = data.split('\n')
		line1 = request[0].split(' ')
		requestType = line1[0]
		# Get the media file name
		filename = line1[1]
		# Get the RTSP sequence number
		seq = request[1].split(' ')

		# Process SETUP request
		if requestType == self.SETUP:
			if self.state == self.INIT:
				# Update state
				print "SETUP Request received\n"

				try:
					self.clientInfo['videoStream'] = VideoStream(filename)
					self.state = self.READY

				except IOError:
					self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])

				# Generate a randomized RTSP session ID
				self.clientInfo['session'] = randint(100000, 999999)

				# Send RTSP reply
				self.replyRtsp(self.OK_200, seq[0])  #seq[0] the sequenceNum received from Client.py
				print "sequenceNum is " + seq[0]
				# Get the RTP/UDP port from the last line
				self.clientInfo['rtpPort'] = request[2].split(' ')[3]
				print "\nrtpPort is :" + self.clientInfo['rtpPort'] + "\n"
				print "filename is " + filename

		# Process PLAY request
		elif requestType == self.PLAY:
			if self.state == self.READY:
				print "\nPLAY Request Received\n"
				self.state = self.PLAYING

				# Create a new socket for RTP/UDP
				self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

				self.replyRtsp(self.OK_200, seq[0])
				print "\nSequence Number ("+ seq[0] + ")\nReplied to client\n"

				# Create a new thread and start sending RTP packets
				self.clientInfo['event'] = threading.Event()
				self.clientInfo['worker']= threading.Thread(target=self.sendRtp)
				self.clientInfo['worker'].start()
		# Process RESUME request
			elif self.state == self.PAUSE:
				print "\nRESUME Request Received\n"
				self.state = self.PLAYING

		# Process PAUSE request
		elif requestType == self.PAUSE:
			if self.state == self.PLAYING:
				print "\nPAUSE Request Received\n"
				self.state = self.READY

				self.clientInfo['event'].set()

				self.replyRtsp(self.OK_200, seq[0])

		# Process TEARDOWN request
		elif requestType == self.TEARDOWN:
			print "\nTEARDOWN Request Received\n"

			self.clientInfo['event'].set()

			self.replyRtsp(self.OK_200, seq[0])

			# Close the RTP socket
			self.clientInfo['rtpSocket'].close()
Example #15
from video_stream import VideoStream
from video_config import config
from curses_image import CursesWindow, CursesFrameRenderer

if __name__ == "__main__":
    with CursesWindow() as curses_window:
        url = config['url']
        auth = (config['user'], config['password'])
        video_stream = VideoStream(url, auth)
        curses_renderer = CursesFrameRenderer(curses_window.shape)
        for frame in video_stream.iter_frames():
            curses_frame = curses_renderer.render_frame(frame)
            curses_window.draw(curses_frame)
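
The config mapping imported from video_config is not shown; judging by its usage above it only needs a stream URL and basic-auth credentials. A placeholder sketch (all values are made up):

# video_config.py -- placeholder values; the real project supplies its own.
config = {
    'url': 'http://192.0.2.10/mjpeg',   # camera stream URL (placeholder)
    'user': 'viewer',                   # auth username (placeholder)
    'password': 'changeme',             # auth password (placeholder)
}
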
Example #16
def recognize_video(detector,
                    embedder: Embedder,
                    recognizer: Recognizer,
                    detector_params='default',
                    source=0):

    # initialize the video stream
    print('Starting video stream...')
    vs = VideoStream(src=source).start()

    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')

    # warm up the camera
    time.sleep(0.5)

    # start the FPS throughput estimator
    fps = FPS().start()

    # loop over the frames from the video
    while True:

        frame = vs.read()

        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')

        elif type(detector) == DetectorSSD:
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame,
                                                   confidence=confidence,
                                                   return_mode='both')

        elif type(detector) == DetectorVJ or type(detector) == DetectorLBP:
            [scale_factor, min_neighbors] = detector_params
            faces_roi, boxes = detector.calc_image(frame,
                                                   scale_factor=scale_factor,
                                                   min_neighbors=min_neighbors,
                                                   return_mode='both')

        elif type(detector) == DetectorHOG or type(detector) == DetectorMMOD:
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(
                frame, upsampling_times=upsampling_times, return_mode='both')

        for i in range(len(faces_roi)):

            embeddings = embedder.calc_face(faces_roi[i])
            name = recognizer.recognize(embeddings)
            start_x, start_y, end_x, end_y = boxes[i]

            text = '{}'.format(name)
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)

        # update the FPS counter
        fps.update()

        # show the output frame
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF

        # quit when 'q' is pressed
        if key == ord("q"):
            break

    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #17
def m():
    global output, heatmap_image, total_people, field1_count, field2_count

    frame_rate_calc = 1
    freq = cv2.getTickFrequency()

    videostream = VideoStream(VIDEO_PATH).start()

    color_f1 = (0, 0, 255)
    color_f2 = (255, 0, 0)
    heatmap = np.zeros((720, 1270, 3), dtype=np.uint8)
    ht_color = (191, 255, 1)

    while True:
        t1 = cv2.getTickCount()

        frame1 = videostream.read()
        frame = frame1.copy()
        boxes, classes, scores = generate_detections(frame, interpreter)

        total_people = 0
        field1_count = 0
        field2_count = 0
        for i in range(len(scores)):
            if ((scores[i] > THRESHOLD) and (scores[i] <= 1.0)):
                ymin = int(max(1, (boxes[i][0] * imH)))
                xmin = int(max(1, (boxes[i][1] * imW)))
                ymax = int(min(imH, (boxes[i][2] * imH)))
                xmax = int(min(imW, (boxes[i][3] * imW)))

                total_people = total_people + 1

                center_x = int((xmin + xmax) / 2)
                center_y = int((ymin + ymax) / 2)
                center_coor = (center_x, center_y)

                color_bb = (10, 200, 10)
                cv2.circle(frame, center_coor, 10, color_bb, cv2.FILLED)

                pts_f1 = [[522, 138], [1066, 522], [1200, 270], [580, 30]]
                pts_f2 = [[172, 142], [410, 607], [657, 440], [319, 142]]

                create_polygon(pts_f1, frame, color_f1)
                create_polygon(pts_f2, frame, color_f2)

                center_point = Point(center_x, center_y)
                polygon_f1 = Polygon(pts_f1)
                polygon_f2 = Polygon(pts_f2)

                if is_field_contain_center(polygon_f1, center_point):
                    field1_count = field1_count + 1
                    color_bb = color_f1

                if is_field_contain_center(polygon_f2, center_point):
                    field2_count = field2_count + 1
                    color_bb = color_f2

                draw_bounding_boxes(frame, classes, xmin, xmax, ymin, ymax,
                                    color_bb, labels)

                if (heatmap[center_y, center_x][0] !=
                        0) and (heatmap[center_y, center_x][1] !=
                                0) and (heatmap[center_y, center_x][2] != 0):
                    b = heatmap[center_y, center_x][0]
                    g = heatmap[center_y, center_x][1]
                    r = heatmap[center_y, center_x][2]

                    b = b - b * 0.5
                    g = g - g * 0.2
                    r = r + r * 0.5

                    cv2.circle(heatmap, center_coor, 10, (b, g, r), cv2.FILLED)
                else:
                    cv2.circle(heatmap, center_coor, 10, ht_color, cv2.FILLED)

        cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
        frame = cv2.resize(frame, (698, 396))

        t2 = cv2.getTickCount()
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1

        overlay = frame1
        alpha_background = 0.7
        alpha_heatmap = 0.9
        cv2.addWeighted(overlay, alpha_heatmap, frame1, 1 - alpha_heatmap, 0,
                        frame1)
        cv2.addWeighted(heatmap, alpha_background, frame1, 1 - alpha_background,
                        0, frame1)

        frame2 = cv2.resize(frame1, (698, 396))

        output = frame.copy()
        heatmap_image = frame2
Example #18
class ClientHandler(threading.Thread):

    def __init__(self, sock, addr):
        threading.Thread.__init__(self)
        self.sock = sock
        self.addr = addr
        self.data_sock = None
        self.streaming = False
        self.completed = False
        self.fnbr = 0

    def run(self):
        self.handle()

    def terminate(self):
        # try to send EXIT cmd
        try:
            self.cutil.send(EXIT)
        except:
            # error if client closed
            pass
        self.sock.close()
        # if stream is live, quit stream
        if self.streaming:
            self.streaming = False
            self.streamer.join()
        if self.data_sock:
            self.data_sock.close()
    
    # control socket
    def handle(self):
        util = Util(self.sock)
        self.cutil = util

        # get video filename
        # TODO: handle unknown filename
        filename = util.recv().decode('ascii')

        cmd = util.recv().decode('ascii')
        if cmd != SETUP:
            self.terminate()
            return

        util.send(OK)

        # Start video capture
        self.vs = VideoStream(filename)
        
        while True:
            cmd = util.recv().decode('ascii')
            if cmd == EXIT:
                self.terminate()
                return
            elif cmd == PLAY:
                self.streamer = threading.Thread(target=self.start_streaming, args=())
                self.streamer.start()
            elif cmd == HEARTBEAT:
                # TODO: handle if no HEARTBEAT received for long
                pass
            if self.completed:
                self.terminate()
                return
    
    # data socket
    def start_streaming(self):
        self.streaming = True
        print('Started streaming...', self.addr)
        while self.streaming:
            data = self.vs.read()
            if data is None:
                print("Finished streaming...", self.addr)
                self.streaming = False
                break
            try:
                self.data_sock.sendall(data)
            except Exception as e:
                print('Client closed...')
                self.streaming = False
                self.completed = True
                break
            self.fnbr += 1
            print(f'Sent frame {self.fnbr}', end='\r')
        self.data_sock.close()

    def attach_data_sock(self, sock):
        self.data_sock = sock
Example #19
class Moment(object):
    def __init__(self):
        self.stopped = False
        logging.basicConfig()
        self.logger = logging.getLogger("MMNT")
        if DEBUG:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing")

        self.masterSampleTime = time.time()
        self.slaveSampleTime = time.time()

        self.humanSampleTime = time.time()
        self.micSampleTime = time.time()

        self.logger.debug("Initializing motor control")
        self.mc = MotorControl()
        self.mc.resetMotors()

        self.logger.debug("Initializing microphone")
        dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
        if not dev:
            sys.exit("Could not find ReSpeaker Mic Array through USB")
        self.mic = Tuning(dev)
        self.mic.write("NONSTATNOISEONOFF", 1)
        self.mic.write("STATNOISEONOFF", 1)
        self.mic.write("ECHOONOFF", 1)

        self.logger.debug("Initializing video streams")
        self.topCamStream = VideoStream(1)
        self.botCamStream = VideoStream(2)

        self.logger.debug("Initializing models")
        self.ht_model = ht.get_model()
        self.tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                                      target_size=(VideoStream.DEFAULT_WIDTH,
                                                   VideoStream.DEFAULT_HEIGHT))
        self.logger.info("Initialization complete")

        self.topCamState = State.IDLE
        self.botCamState = State.IDLE

        self.topCamAngle = 0
        self.topAngleUpdated = False
        self.botCamAngle = 180
        self.botAngleUpdated = False
        self.master = Cams.TOP
        self.lastMaster = Cams.TOP

        self.botCamProc = None
        self.topCamProc = None

        self.audioMap = Map(15)
        self.checkMic()

    def stop(self):
        # Signal the run() loop to exit; keep the flag name distinct from this method
        self.stopped = True

    def updateTopAngle(self, angle):
        if abs(angle - self.topCamAngle) > ANGLE_THRESHOLD and abs(
                angle - self.botCamAngle) > OVERLAP_THRESHOLD:
            self.topCamAngle = angle
            self.topAngleUpdated = True

    def updateBotAngle(self, angle):
        if abs(angle - self.botCamAngle) > ANGLE_THRESHOLD and abs(
                angle - self.topCamAngle) > OVERLAP_THRESHOLD:
            self.botCamAngle = angle
            self.botAngleUpdated = True

    def updatePositions(self):
        # Send Serial Commands
        if self.topAngleUpdated and self.botAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.topAngleUpdated = False
            self.botAngleUpdated = False
            self.mc.runMotors(self.topCamAngle, self.botCamAngle)
        elif self.topAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.topAngleUpdated = False
            self.mc.runTopMotor(self.topCamAngle)
        elif self.botAngleUpdated:
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.botAngleUpdated = False
            self.mc.runBotMotor(self.botCamAngle)

    def isWithinNoiseFov(self, angle):
        topDiff = abs(angle - self.topCamAngle)
        botDiff = abs(angle - self.botCamAngle)

        if topDiff < NOISE_ANGLE_THRESHOLD:
            self.topCamState |= State.NOISE
            if self.topCamState == State.BOTH:
                self.master = Cams.TOP
            return True
        else:
            self.topCamState &= ~State.NOISE
        if botDiff < NOISE_ANGLE_THRESHOLD:
            self.botCamState |= State.NOISE
            if self.botCamState == State.BOTH:
                self.master = Cams.BOT
            return True
        else:
            self.botCamState &= ~State.NOISE

        return False

    def checkMic(self):
        speechDetected, micDOA = self.mic.speech_detected(), self.mic.direction
        if not speechDetected:
            # self.audioMap.update_map_with_no_noise()
            self.topCamState &= ~State.NOISE
            self.botCamState &= ~State.NOISE
            return
        self.logger.debug("speech detected from {}".format(micDOA))
        self.audioMap.update_map_with_noise(micDOA)

        primaryMicDOA, secondaryMicDOA = self.audioMap.get_POI_location()
        if DEBUG:
            self.audioMap.print_map()
        if primaryMicDOA == -1:
            self.logger.debug("no good audio source")
            return

        self.logger.debug("mapped audio from {}".format(primaryMicDOA))

        # Check if camera is already looking at the primary noise source
        if self.isWithinNoiseFov(primaryMicDOA):
            # If camera is already looking, check the secondary noise source
            if secondaryMicDOA == -1:
                self.logger.debug("no good secondary audio source")
                return
            elif self.isWithinNoiseFov(secondaryMicDOA):
                return
            else:
                micDOA = secondaryMicDOA
        else:
            micDOA = primaryMicDOA

        topDiff = abs(micDOA - self.topCamAngle)
        botDiff = abs(micDOA - self.botCamAngle)

        # Camera is NOT looking at the noise source at this point
        # If both Cameras are not tracking a human,
        # move the closest camera
        if self.topCamState < State.HUMAN and self.botCamState < State.HUMAN:
            if botDiff < topDiff:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                if self.botCamState == State.IDLE:
                    self.master = Cams.TOP
            else:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                if self.topCamState == State.IDLE:
                    self.master = Cams.BOT
        # One of the cameras is on a human; if the other camera is not, move it
        elif self.topCamState < State.HUMAN:
            self.topCamState |= State.NOISE
            self.updateTopAngle(micDOA)
            self.master = Cams.BOT
        elif self.botCamState < State.HUMAN:
            self.botCamState |= State.NOISE
            self.updateBotAngle(micDOA)
            self.master = Cams.TOP
        # The cameras are on a human
        else:
            # If both are on a human, move the one that's not master
            if self.topCamState == State.HUMAN and self.botCamState == State.HUMAN:
                if self.master != Cams.BOT:
                    self.botCamState |= State.NOISE
                    self.updateBotAngle(micDOA)
                else:
                    self.topCamState |= State.NOISE
                    self.updateTopAngle(micDOA)
            # One of the cameras is on HUMAN+NOISE; move the one that is only on a HUMAN
            elif self.topCamState == State.HUMAN:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                self.master = Cams.BOT
            elif self.botCamState == State.HUMAN:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                self.master = Cams.TOP

    def getBestFace(self, humans):
        midX = -1
        bestHuman = humans[0]
        maxScore = 0
        for human in humans:
            gotMidX = False
            score = 0
            currMidX = -1
            for part in headParts:
                if part in human.body_parts:
                    score += human.body_parts[part].score
                    if not gotMidX:
                        currMidX = human.body_parts[
                            part].x * VideoStream.DEFAULT_WIDTH
                        gotMidX = True
            if score > maxScore:
                maxScore = score
                midX = currMidX
                bestHuman = human

        return bestHuman, midX

    def checkHumans(self, frame, camera):
        humans = self.tfPose.inference(frame,
                                       resize_to_default=True,
                                       upsample_size=RESIZE_RATIO)
        if len(humans):
            if camera == Cams.TOP:
                self.topCamState |= State.HUMAN
                if self.topCamState == State.BOTH:
                    self.master = Cams.TOP
            else:
                self.botCamState |= State.HUMAN
                if self.botCamState == State.BOTH:
                    self.master = Cams.BOT

            if DISPLAY_VIDEO and DRAW_ON_FRAME:
                TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
            human, midX = self.getBestFace(humans)

            if (ht.is_hands_above_head(human)):
                self.logger.debug("HANDS ABOVE HEAD!!!")

            if midX != -1:
                centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH / 2)
                if centerDiff > FACE_THRESHOLD:
                    if midX < VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CCW
                        if camera == Cams.TOP:
                            self.updateTopAngle(
                                (self.topCamAngle +
                                 centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle(
                                (self.botCamAngle +
                                 centerDiff * degreePerPixel) % 360)
                    elif midX > VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CW
                        if camera == Cams.TOP:
                            self.updateTopAngle(
                                (self.topCamAngle -
                                 centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle(
                                (self.botCamAngle -
                                 centerDiff * degreePerPixel) % 360)
        else:
            if camera == Cams.TOP:
                self.topCamState &= ~State.HUMAN
            else:
                self.botCamState &= ~State.HUMAN
        return frame

    def playVideo(self, cam):
        if cam == Cams.TOP:
            # Kill the bottom-camera relay if it is still running before starting the top one
            if self.botCamProc is not None and self.botCamProc.poll() is None:
                self.botCamProc.kill()
            self.topCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video3 -f v4l2 /dev/video5",
                shell=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL)
        elif cam == Cams.BOT:
            # Kill the top-camera relay if it is still running before starting the bottom one
            if self.topCamProc is not None and self.topCamProc.poll() is None:
                self.topCamProc.kill()
            self.botCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video4 -f v4l2 /dev/video5",
                shell=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL)

    def start(self):
        Thread(target=self.run, args=()).start()

    def run(self):
        self.stopped = False
        while not self.stopped:
            try:
                topFrame = self.topCamStream.read()
                botFrame = self.botCamStream.read()
                if time.time() - self.humanSampleTime > HUMAN_SAMPLE_FREQ:
                    if topFrame is not None:
                        topFrame = self.checkHumans(topFrame, Cams.TOP)
                    if botFrame is not None:
                        botFrame = self.checkHumans(botFrame, Cams.BOT)
                    self.humanSampleTime = time.time()

                if time.time() - self.micSampleTime > MIC_SAMPLE_FREQ:
                    self.checkMic()
                    self.micSampleTime = time.time()

                self.updatePositions()

                # if DISPLAY_VIDEO and topFrame is not None and botFrame is not None:
                #     if self.master == Cams.TOP:
                #         if topFrame is not None:
                #             cv.imshow('Master', topFrame)
                #         if botFrame is not None:
                #             cv.imshow('Slave', botFrame)
                #     else:
                #         if botFrame is not None:
                #             cv.imshow('Master', botFrame)
                #         if topFrame is not None:
                #             cv.imshow('Slave', topFrame)
                #     if cv.waitKey(1) == 27:
                #         pass
                if DISPLAY_VIDEO and topFrame is not None and botFrame is not None:
                    if self.master == Cams.TOP:
                        top_master = np.concatenate((topFrame, botFrame),
                                                    axis=1)
                        cv.imshow('Master + Slave', top_master)
                    else:
                        bot_master = np.concatenate((botFrame, topFrame),
                                                    axis=1)
                        cv.imshow('Master + Slave', bot_master)
                    if cv.waitKey(1) == 27:
                        pass

            except KeyboardInterrupt:
                self.logger.debug("Keyboard interrupt! Terminating.")
                break

        self.mc.resetMotors()
Example #20
File: main.py Project: cudnn/mmnt
def main():
    logging.basicConfig()
    logger = logging.getLogger("MMNT")
    logger.setLevel(logging.INFO)
    logger.info("Initializing")

    masterSampleTime = time.time()
    slaveSampleTime = time.time()

    logger.debug("Initializing motor control")
    mc = MotorControl()
    mc.resetMotors()
    logger.debug("Initializing microphone")
    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
    if not dev:
        sys.exit("Could not find ReSpeaker Mic Array through USB")
    mic = Tuning(dev)
    mic.write("NONSTATNOISEONOFF", 1)
    mic.write("STATNOISEONOFF", 1)

    logger.debug("Initializing models")
    ht_model = ht.get_model()
    tfPose = TfPoseEstimator(get_graph_path(TF_MODEL), target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))

    logger.debug("Initializing video streams")
    topCamStream = VideoStream(1)
    botCamStream = VideoStream(2)

    topCamStream.start()
    botCamStream.start()


    masterCamID = TOP_CAM_ID
    masterStream = topCamStream
    slaveCamID = BOT_CAM_ID
    slaveStream = botCamStream

    masterTargetAngle = 0
    slaveTargetAngle = 180

    updateMasterAngle = False
    updateSlaveAngle = False

    masterTracking = False

    logger.info("Initialization complete")
    while True:
        try:
            # MASTER
            masterFrame = masterStream.read()
            if time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                humans = tfPose.inference(masterFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                if len(humans):
                    logger.debug("Master tracking")
                    masterTracking = True
                    if DISPLAY_VIDEO:
                        TfPoseEstimator.draw_humans(masterFrame, humans, imgcopy=False)
                    human = humans[0]
                    if (ht.is_hands_above_head(human)):
                        logger.debug("HANDS ABOVE HEAD!!!")
                    midX = -1
                    for part in headParts:
                        if part in human.body_parts:
                            midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                            break
                    if midX != -1:
                        centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                        if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle += centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle -= centerDiff * degreePerPixel

                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                else:
                    logger.debug("Master stopped tracking")
                    masterTracking = False

                # If master is not tracking a human, move towards speech
                if not masterTracking:
                    speechDetected, micDOA = mic.speech_detected(), mic.direction
                    logger.debug("master speech detected:", speechDetected, "diff:", abs(micDOA - masterTargetAngle))
                    if speechDetected and abs(micDOA - masterTargetAngle) > ANGLE_THRESHOLD:
                        masterTargetAngle = micDOA
                        logger.debug("Update master angle:", masterTargetAngle)
                        masterSampleTime = time.time()
                        updateMasterAngle = True

            # SLAVE
            slaveFrame = slaveStream.read()
            if time.time() - slaveSampleTime > SLAVE_SAMPLE_FREQ:
                # If master is not tracking a human and a slave sees a human, move master to the visible human and move slave away
                if not masterTracking and time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                    humans = tfPose.inference(slaveFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                    if len(humans):
                        logger.debug("slave found mans")
                        if DISPLAY_VIDEO:
                            TfPoseEstimator.draw_humans(slaveFrame, humans, imgcopy=False)
                        human = humans[0]
                        if (ht.is_hands_above_head(human)):
                            logger.debug("HANDS ABOVE HEAD!!!")
                        midX = -1
                        for part in headParts:
                            if part in human.body_parts:
                                midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                                break
                        if midX != -1:
                            centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                            # if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle = slaveTargetAngle + centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle = slaveTargetAngle - centerDiff * degreePerPixel

                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                            slaveTargetAngle = (masterTargetAngle + 180) % 360
                            updateSlaveAngle = True
                            logger.debug("Moving master to slave:", masterTargetAngle)

                speechDetected, micDOA = mic.speech_detected(), mic.direction
                speechMasterDiff = abs(micDOA - masterTargetAngle)
                if speechDetected and speechMasterDiff > SLAVE_MASTER_THRESHOLD and abs(micDOA - slaveTargetAngle) > ANGLE_THRESHOLD:
                    slaveTargetAngle = micDOA
                    logger.debug("Update slave angle:", slaveTargetAngle)
                    slaveSampleTime = time.time()
                    updateSlaveAngle = True


            # Send Serial Commands
            if updateSlaveAngle and updateMasterAngle:
                logger.debug("Slave Angle:", slaveTargetAngle)
                logger.debug("Master Angle:", masterTargetAngle)
                updateSlaveAngle = False
                updateMasterAngle = False
                if slaveCamID == BOT_CAM_ID:
                    mc.runMotors(masterTargetAngle, slaveTargetAngle)
                else:
                    mc.runMotors(slaveTargetAngle, masterTargetAngle)
            elif updateSlaveAngle:
                mc.runMotor(slaveCamID, slaveTargetAngle)
                logger.debug("Slave Angle:", slaveTargetAngle)
                updateSlaveAngle = False
            elif updateMasterAngle:
                mc.runMotor(masterCamID, masterTargetAngle)
                logger.debug("Master Angle:", masterTargetAngle)
                updateMasterAngle = False

            if DISPLAY_VIDEO:
                cv.imshow('Master Camera', masterFrame)
                cv.imshow('Slave Camera', slaveFrame)
                if cv.waitKey(1) == 27:
                    pass

        except KeyboardInterrupt:
            logger.debug("Keyboard interrupt! Terminating.")
            mc.stopMotors()
            slaveStream.stop()
            masterStream.stop()
            mic.close()
            time.sleep(2)
            break

    cv.destroyAllWindows()
Example #21

def draw_rec(frame, color, bndbox):
    cv2.rectangle(frame, bndbox[0], bndbox[1], color, 2)


def get_brightness(img):
    img = Image.fromarray(img)
    stat = ImageStat.Stat(img)
    return stat.mean[0]


model = load_model()
counter = 0
total = 0
vs = VideoStream(src=0).start()
while True:
    frame = vs.read()
    face, bndbox = eye_blink.get_face(frame)
    if face is None:
        continue
    try:
        face = cv2.resize(face, (224, 224))
    except:
        continue
    # cv2.putText(frame, "Brightness: {}".format(get_brightness(face)), (300, 30),
    #             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
    face = read_preprocess_image(face)
    j = model.predict(face)
    counter, total = eye_blink.detect_blink(frame, bndbox, counter, total)
    draw_rec(frame,
Example #22
class VideoWidget(QtGui.QWidget):
    newFrame = QtCore.pyqtSignal(np.ndarray)
    sourceChanged = QtCore.pyqtSignal()
    stateChanged = QtCore.pyqtSignal(int)

    def __init__(self, mainWindow, parent=None):
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow

        self.videoStream = VideoStream(0)
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)

        self.inputFilename = None
        self.outputFilename = None

        self.setupUi()

        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()

    def setupUi(self):
        self.videoScreen = VideoScreen(self.videoStream, self, self)

        self.fileWidget = FileWidget(self.mainWindow, self, 'Set input')
        self.fileWidget.setObjectName(_fromUtf8("fileWidget"))
        self.fileWidget.fileChanged.connect(self.onInputChanged)
        self.switchToWebCamBtn = QtGui.QPushButton(QtGui.QIcon("webcam.png"), "", self)
        self.switchToWebCamBtn.setStyleSheet("margin-right: 7px; padding: 6px")
        self.switchToWebCamBtn.clicked.connect(self.onSwitchToWebCamBtnClicked)
        self.switchToWebCamBtn.setFlat(True)
        self.switchToWebCamBtn.setCheckable(True)

        self.spacer1 = QtGui.QWidget()
        self.spacer1.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)

        self.playAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaPlay), "Play",
            self, shortcut="Ctrl+P", enabled=True)
        self.playAction.triggered.connect(self.play)

        self.pauseAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaPause), "Pause",
            self, shortcut="Ctrl+A", enabled=True)
        self.pauseAction.triggered.connect(self.pause)

        self.stopAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaStop), "Stop",
            self, shortcut="Ctrl+S", enabled=True)
        self.stopAction.triggered.connect(self.stop)

        self.inputBar = QtGui.QToolBar(self)
        self.inputBar.addWidget(self.fileWidget)
        self.inputBar.addWidget(self.spacer1)
        self.inputBar.addAction(self.playAction)
        self.inputBar.addAction(self.pauseAction)
        self.inputBar.addAction(self.stopAction)
        self.inputBar.addWidget(self.switchToWebCamBtn)

        self.saveAsWidget = FileWidget(self.mainWindow, self, 'Set output', True)
        self.saveAsWidget.fileChanged.connect(self.onOutputChanged)

        self.recordAction = QtGui.QAction(
            QtGui.QIcon("record.png"), "Record",
            self, shortcut="Ctrl+R", enabled=True)
        self.recordAction.triggered.connect(self.record)
        self.recordAction.setVisible(True)

        self.recordStopAction = QtGui.QAction(
            QtGui.QIcon("recording.png"), "Stop recording",
            self, shortcut="Ctrl+R", enabled=True)
        self.recordStopAction.triggered.connect(self.recordStop)
        self.recordStopAction.setVisible(False)

        self.spacer2 = QtGui.QWidget()
        self.spacer2.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)

        self.processBtn = QtGui.QPushButton("Process", self)
        self.processBtn.setStyleSheet("margin-right: 7px; padding: 5px; width: 140px")
        self.processBtn.clicked.connect(self.onProcessBtnClicked)

        self.outputBar = QtGui.QToolBar(self)
        self.outputBar.addWidget(self.saveAsWidget)
        self.outputBar.addWidget(self.spacer2)
        self.outputBar.addAction(self.recordAction)
        self.outputBar.addAction(self.recordStopAction)
        self.outputBar.addWidget(self.processBtn)

        hbox = QtGui.QVBoxLayout(self)
        hbox.addWidget(self.videoScreen)
        hbox.addStretch()
        hbox.addWidget(self.inputBar)
        hbox.addWidget(self.outputBar)
        hbox.setContentsMargins(-1, -1, -1, 2)

    def onProcessBtnClicked(self):
        self.play()
        self.videoStream.record()

    def onSwitchToWebCamBtnClicked(self):
        if self.switchToWebCamBtn.isChecked():
            self.videoStream.resetSource(0)
            self.play()
        else:
            self.videoStream.resetSource()

    def onInputChanged(self, filename):
        self.inputFilename = filename
        self.switchToWebCamBtn.setChecked(False)
        self.setRecordingControlsState()
        self.videoStream.resetSource(str(filename))

    def onOutputChanged(self, filename):
        self.outputFilename = filename
        self.setRecordingControlsState()
        self.videoStream.resetOutput(str(filename))

    def pause(self):
        self.videoStream.pause()

    def play(self):
        self.videoStream.play()

    def stop(self):
        self.videoStream.stop()

    def record(self):
        self.recordAction.setVisible(False)
        self.recordStopAction.setVisible(True)
        self.videoStream.record()

    def recordStop(self):
        self.recordAction.setVisible(True)
        self.recordStopAction.setVisible(False)
        self.videoStream.recordStop()

    @QtCore.pyqtSlot(np.ndarray)
    def onNewFrame(self, frame):
        self.newFrame.emit(frame)

    def onSourceChanged(self):
        self.sourceChanged.emit()

    @QtCore.pyqtSlot(int)
    def onStateChanged(self, state):
        if state == VideoStream.State.CLOSED:
            self.setClosedState()
        elif state == VideoStream.State.STOPPED:
            self.setStoppedState()
        elif state == VideoStream.State.PAUSED:
            self.setPausedState()
        elif state == VideoStream.State.PLAYING:
            self.setPlayingState()
        self.stateChanged.emit(state)

    def setClosedState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setStoppedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setPausedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(True)

    def setPlayingState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(True)
        self.stopAction.setEnabled(True)

    def setRecordingControlsState(self):
        self.recordAction.setEnabled(bool(self.outputFilename))
        self.recordStopAction.setEnabled(bool(self.outputFilename))
        self.processBtn.setEnabled(bool(self.outputFilename) and bool(self.inputFilename))
Example #23
class VideoWidget(QtGui.QWidget):
    newFrame = QtCore.pyqtSignal(np.ndarray)
    sourceChanged = QtCore.pyqtSignal()
    stateChanged = QtCore.pyqtSignal(int)

    def __init__(self, mainWindow, parent=None):
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow

        self.videoStream = VideoStream(0)
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)

        self.inputFilename = None
        self.outputFilename = None

        self.setupUi()

        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()

    def setupUi(self):
        self.videoScreen = VideoScreen(self.videoStream, self, self)

        self.fileWidget = FileWidget(self.mainWindow, self, 'Set input')
        self.fileWidget.setObjectName(_fromUtf8("fileWidget"))
        self.fileWidget.fileChanged.connect(self.onInputChanged)
        self.switchToWebCamBtn = QtGui.QPushButton(QtGui.QIcon("webcam.png"),
                                                   "", self)
        self.switchToWebCamBtn.setStyleSheet("margin-right: 7px; padding: 6px")
        self.switchToWebCamBtn.clicked.connect(self.onSwitchToWebCamBtnClicked)
        self.switchToWebCamBtn.setFlat(True)
        self.switchToWebCamBtn.setCheckable(True)

        self.spacer1 = QtGui.QWidget()
        self.spacer1.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)

        self.playAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaPlay),
                                        "Play",
                                        self,
                                        shortcut="Ctrl+P",
                                        enabled=True)
        self.playAction.triggered.connect(self.play)

        self.pauseAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaPause),
                                         "Pause",
                                         self,
                                         shortcut="Ctrl+A",
                                         enabled=True)
        self.pauseAction.triggered.connect(self.pause)

        self.stopAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaStop),
                                        "Stop",
                                        self,
                                        shortcut="Ctrl+S",
                                        enabled=True)
        self.stopAction.triggered.connect(self.stop)

        self.inputBar = QtGui.QToolBar(self)
        self.inputBar.addWidget(self.fileWidget)
        self.inputBar.addWidget(self.spacer1)
        self.inputBar.addAction(self.playAction)
        self.inputBar.addAction(self.pauseAction)
        self.inputBar.addAction(self.stopAction)
        self.inputBar.addWidget(self.switchToWebCamBtn)

        self.saveAsWidget = FileWidget(self.mainWindow, self, 'Set output',
                                       True)
        self.saveAsWidget.fileChanged.connect(self.onOutputChanged)

        self.recordAction = QtGui.QAction(QtGui.QIcon("record.png"),
                                          "Record",
                                          self,
                                          shortcut="Ctrl+R",
                                          enabled=True)
        self.recordAction.triggered.connect(self.record)
        self.recordAction.setVisible(True)

        self.recordStopAction = QtGui.QAction(QtGui.QIcon("recording.png"),
                                              "Stop recording",
                                              self,
                                              shortcut="Ctrl+R",
                                              enabled=True)
        self.recordStopAction.triggered.connect(self.recordStop)
        self.recordStopAction.setVisible(False)

        self.spacer2 = QtGui.QWidget()
        self.spacer2.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)

        self.processBtn = QtGui.QPushButton("Process", self)
        self.processBtn.setStyleSheet(
            "margin-right: 7px; padding: 5px; width: 140px")
        self.processBtn.clicked.connect(self.onProcessBtnClicked)

        self.outputBar = QtGui.QToolBar(self)
        self.outputBar.addWidget(self.saveAsWidget)
        self.outputBar.addWidget(self.spacer2)
        self.outputBar.addAction(self.recordAction)
        self.outputBar.addAction(self.recordStopAction)
        self.outputBar.addWidget(self.processBtn)

        vbox = QtGui.QVBoxLayout(self)
        vbox.addWidget(self.videoScreen)
        vbox.addStretch()
        vbox.addWidget(self.inputBar)
        vbox.addWidget(self.outputBar)
        vbox.setContentsMargins(-1, -1, -1, 2)

    def onProcessBtnClicked(self):
        self.play()
        self.videoStream.record()

    def onSwitchToWebCamBtnClicked(self):
        if self.switchToWebCamBtn.isChecked():
            self.videoStream.resetSource(0)
            self.play()
        else:
            self.videoStream.resetSource()

    def onInputChanged(self, filename):
        self.inputFilename = filename
        self.switchToWebCamBtn.setChecked(False)
        self.setRecordingControlsState()
        self.videoStream.resetSource(str(filename))

    def onOutputChanged(self, filename):
        self.outputFilename = filename
        self.setRecordingControlsState()
        self.videoStream.resetOutput(str(filename))

    def pause(self):
        self.videoStream.pause()

    def play(self):
        self.videoStream.play()

    def stop(self):
        self.videoStream.stop()

    def record(self):
        self.recordAction.setVisible(False)
        self.recordStopAction.setVisible(True)
        self.videoStream.record()

    def recordStop(self):
        self.recordAction.setVisible(True)
        self.recordStopAction.setVisible(False)
        self.videoStream.recordStop()

    @QtCore.pyqtSlot(np.ndarray)
    def onNewFrame(self, frame):
        self.newFrame.emit(frame)

    def onSourceChanged(self):
        self.sourceChanged.emit()

    @QtCore.pyqtSlot(int)
    def onStateChanged(self, state):
        if state == VideoStream.State.CLOSED:
            self.setClosedState()
        elif state == VideoStream.State.STOPPED:
            self.setStoppedState()
        elif state == VideoStream.State.PAUSED:
            self.setPausedState()
        elif state == VideoStream.State.PLAYING:
            self.setPlayingState()
        self.stateChanged.emit(state)

    def setClosedState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setStoppedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setPausedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(True)

    def setPlayingState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(True)
        self.stopAction.setEnabled(True)

    def setRecordingControlsState(self):
        self.recordAction.setEnabled(bool(self.outputFilename))
        self.recordStopAction.setEnabled(bool(self.outputFilename))
        self.processBtn.setEnabled(
            bool(self.outputFilename) and bool(self.inputFilename))
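
# A minimal sketch (an assumption, not the project's actual VideoStream class) of the
# interface the VideoWidget above relies on: the signal names, the State constants and
# the control methods are inferred from the calls the widget makes, nothing more.
from PyQt4 import QtCore
import numpy as np

class VideoStreamSketch(QtCore.QObject):
    newFrame = QtCore.pyqtSignal(np.ndarray)
    sourceChanged = QtCore.pyqtSignal()
    stateChanged = QtCore.pyqtSignal(int)

    class State:
        CLOSED, STOPPED, PAUSED, PLAYING = range(4)

    def __init__(self, source=None):
        super(VideoStreamSketch, self).__init__()
        self.source = source
        self.state = self.State.CLOSED

    def _setState(self, state):
        self.state = state
        self.stateChanged.emit(state)

    def play(self):
        self._setState(self.State.PLAYING)

    def pause(self):
        self._setState(self.State.PAUSED)

    def stop(self):
        self._setState(self.State.STOPPED)

    def resetSource(self, source=None):
        # switch between a webcam index and a file path, as the widget does
        self.source = source
        self.sourceChanged.emit()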
Example #24
0
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, GRAPH_NAME)

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, LABELMAP_NAME)

# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Initialize video stream
videostream = VideoStream(ip_cam_link=IP_CAM_LINK, resolution=(imW, imH), save_mode=SAVE_MODE, save_filename=save_filename).start()
time.sleep(1)

# Initialize model
model = SSDMobileNet(PATH_TO_CKPT)

# Initialize servo
servo = ServoControl(imW, imH, SERVO_PIN)

# Coordinates the servo rotates toward; initialized to the frame center
target_coords = [int(imW/2), int(imH/2)]

# Bounding boxes for cv2 to draw on frame
bboxes = []
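
# Hypothetical helper (the name and the box format are assumptions, not part of the
# script above) showing one way the detector output could be turned into the
# target_coords the servo tracks: take the center of the highest-scoring box, or fall
# back to the frame center when nothing confident is detected.
import numpy as np

def pick_target(boxes, scores, imW, imH, min_score=0.5):
    """boxes: (ymin, xmin, ymax, xmax) in normalized [0, 1] coordinates."""
    if len(scores) == 0 or max(scores) < min_score:
        return [int(imW / 2), int(imH / 2)]  # nothing confident: re-center
    ymin, xmin, ymax, xmax = boxes[int(np.argmax(scores))]
    return [int((xmin + xmax) / 2 * imW), int((ymin + ymax) / 2 * imH)]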

# For fps plotting
Example #25
0
def main():
    ''' Main method '''

    global RUNNING

    # Create a socket and connect to the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))

    PRINT('Connected to ' + ENC_VALUE(HOST + ':' + str(PORT)) + '.', SUCCESS)

    # connect to the arduino via serial
    ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
    ser.flush()

    cr = '\n'

    # Start the camera video thread
    stream = VideoStream().start()
    last_serial_time = time.time()  # stores the last time a serial value was sent

    while RUNNING:
        # Get the current frame read by the video stream
        try:
            stream_frame = stream.read()
            _, frame = cv2.imencode('.jpg', stream_frame, ENCODE_PARAM)

            # Send data
            send(s, [frame])

        except Exception as e:  # print the error and keep the loop running
            PRINT(str(e), ERROR)

        # Receive data
        recv_data = recv(s)

        print(recv_data[1])

        # Check if a command was sent
        if recv_data[DATA_IDX_COMMAND] == COMMAND_QUIT:  # quit command received: stop running
            PRINT('Received command ' + ENC_VALUE(COMMAND_QUIT) + '.', INFO)

            # reset all joystick axes to their neutral midpoint (4) before quitting
            joy_vrt = round(4 * (1 - 0))
            joy_fwd = round(4 * (1 - 0))
            joy_rot = round(4 * (1 + 0))

            submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)

            ser.write(submit.encode('utf-8'))
            ser.write(cr.encode('utf-8'))

            RUNNING = False
        elif time.time() - last_serial_time >= 1:
            if recv_data and len(recv_data[1]) == 3:  # make sure data was received and is complete
                print('running' + str(recv_data))
                joy_vrt = round(4 * (1 - recv_data[1][1][3]))
                joy_fwd = round(4 * (1 - recv_data[1][1][1]))
                joy_rot = round(4 * (1 + recv_data[1][1][2]))

                submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)
                print(submit)

                ser.write(submit.encode('utf-8'))
                ser.write(cr.encode('utf-8'))
                line = ser.readline().decode('utf-8').rstrip()
                print(line)

            last_serial_time = time.time()

    s.close()  # Closes socket
    stream.stop()  # Stops stream

    PRINT('Quit.', SUCCESS)
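
# The serial protocol above packs three joystick axes, each quantized by
# round(4 * (1 +/- value)) into the range 0..8, into a single decimal number of up to
# three digits.  A small sketch of that encoding and of how the receiving side could
# unpack it (the unpacking half is an assumption; the Arduino sketch is not shown here):
def pack_axes(vrt, fwd, rot):
    # each axis must already be an integer in 0..8 so the digits never collide
    return str(vrt * 100 + fwd * 10 + rot)

def unpack_axes(msg):
    value = int(msg)
    return value // 100, (value // 10) % 10, value % 10

assert unpack_axes(pack_axes(4, 2, 7)) == (4, 2, 7)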
Example #26
0
import cv2

from video_stream import VideoStream
from gray_converter import GrayConverter
from face_detector import FaceDetector
from face_marker import FaceMarker
from face_decorator import allEAR
from face_decorator import FaceDecorator
from eye_aspect_ratio import EyeAspectRatio
from config import args

vs = VideoStream(args["path"]).start()
workers = [ GrayConverter(), FaceDetector(), FaceMarker(args["landmark_model"]), EyeAspectRatio(), FaceDecorator() ]

winname = args["display"]
graphWin = "Graph"
cv2.namedWindow(winname)
cv2.moveWindow(winname, 200, 300)
cv2.namedWindow(graphWin)
cv2.moveWindow(graphWin, 900, 300)
while True:
    frame = vs.read()
    if frame is None: break

    info = {}
    for w in workers: w.workon(frame, info)
    
    cv2.imshow(winname, info["output"])
    cv2.imshow(graphWin, info["graph"])
    key = cv2.waitKey(10) & 0xFF
    if key == ord('q') or key == 27:
        break
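
# A minimal sketch of the worker interface the loop above assumes: every worker exposes
# workon(frame, info) and communicates through the shared info dict.  GrayConverterSketch
# is an illustration only (an assumption), not the project's actual gray_converter module.
import cv2

class GrayConverterSketch:
    def workon(self, frame, info):
        # publish the grayscale frame so downstream workers (detector, marker, ...)
        # can read it from the shared dict
        info["gray"] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)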
Example #27
0
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5
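
# A small helper sketch (the function name is mine, not the original script's) showing
# how the values above are normally applied in TFLite preprocessing: resize the frame to
# the model's input size, add a batch dimension and, for a floating-point model,
# normalize with input_mean / input_std.
def preprocess(frame_rgb):
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)
    if floating_model:
        # floating-point models expect normalized input
        input_data = (np.float32(input_data) - input_mean) / input_std
    return input_data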

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Initialize video stream
videostream = VideoStream(resolution=(imW, imH), framerate=30).start()
time.sleep(1)

# Create window
cv2.namedWindow('Object detector', cv2.WINDOW_NORMAL)

#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]