Code Example #1
File: ball_tracking.py Project: myshepherd/tritoneye
		# otherwise, compute the thickness of the line and
		# draw the connecting lines
		thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
		cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

	# show the frame to our screen
	cv2.imshow("Frame", frame)

	if record_video and record_track:
		video_writer.write(frame)

	key = cv2.waitKey(1) & 0xFF

	# if the 'q' key is pressed, stop the loop
	if key == ord("q"):
		break

# cleanup the camera and close any open windows
if not from_vfile:
	if hasattr(camera, 'release'): # Pycamera may not have the release function
		camera.release()
else:
	video_file.release()

if video_writer is not None:
	video_writer.release()

cv2.destroyAllWindows()

Code Example #2
    if writer is not None:
        writer.write(frame)
    
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key was pressed, break from the loop
    if key == ord('q'):
        break
    
    # increment the total number of frames processed thus far and
    # then update the FPS counter
    totalFrames += 1
    fps.update()

# stop the counter and display FPS information
fps.stop()
print("[INFO] elapsed time: {: .2f}".format(fps.elapsed()))
print("[INFO] approx FPS: {: .2f}".format(fps.fps()))

if writer is not None:
    writer.release()

if not args.get("input", False):
    vs.stop()
else:
    vs.release()

# close any open windows
cv2.destroyAllWindows()
Code Example #3
    if len(faces) > 0:
        print("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    return frame

while True:
    # Capture frame-by-frame
    _, frame = video_capture.read()

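    # convert the frame to grayscale for face detection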
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if ENABLE_FACE_DETECT:
        resultFrame = faceDetect(gray)
        # Display the resulting frame
        cv2.imshow('Video', resultFrame)

    if ENABLE_FPS:
        print("fps:",t.fps())

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
Code Example #4
File: videoutil.py Project: myshepherd/tritoneye
class TEVideoHandler:
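	# Thin wrapper that hides whether frames come from a video file, a webcam, or a Pi camera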
	def __init__(self):
		self.FRAME_WIDTH = conf.DEF_FRAME_WIDTH
		self.FRAME_HEIGHT = conf.DEF_FRAME_HEIGHT

		# devices
		self.video_file = None
		self.camera = None
		self.picamera = None
		
	def set_frame_size(self, w, h):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Frame size needs to be set before initialization")

		self.FRAME_WIDTH = w
		self.FRAME_HEIGHT = h

	def initialize_with_file(self, filename):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.video_file = cv2.VideoCapture(filename)

	def initialize_with_configured_cam(self):
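		# dispatch table: map the configured camera type to the matching initializer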
		cam_selector = {
			conf.CameraType.PYCAMERA:			lambda: self.initialize_with_pycamera(),
			conf.CameraType.PYCAMERA_ROBUST:	lambda: self.initialize_with_pycamera2(),
			conf.CameraType.WEBCAM:				lambda: self.initialize_with_webcam(),
		}

		cam_selector[conf.CAMERA_TYPE]()


	def initialize_with_pycamera(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream(usePiCamera=True).start()
		time.sleep(2.0)

	# Uses the picamera library directly so the auto-exposure and auto-white-balance features can be disabled
	def initialize_with_pycamera2(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.picamera = PiCamera()
		self.picamera.resolution = (self.FRAME_WIDTH, self.FRAME_HEIGHT)
		self.picamera.framerate = 30
		self.rawCapture = PiRGBArray(self.picamera, size=(self.FRAME_WIDTH, self.FRAME_HEIGHT))

		time.sleep(0.1)

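		# freeze the current exposure and white-balance gains to disable automatic adjustment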
		self.picamera.shutter_speed = self.picamera.exposure_speed
		self.picamera.exposure_mode = 'off'
		g = self.picamera.awb_gains
		self.picamera.awb_mode = 'off'
		self.picamera.awb_gains = g
		self.stream = self.picamera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)


	# Tested with an external monitor webcam; not yet verified with a MacBook's built-in webcam
	def initialize_with_webcam(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream().start()
		time.sleep(2.0)

	# Read a frame
	# Return: frame (pixel array)
	# Note: if the frame is not grabbed (for a video file), raise an exception
	def read(self):
		frame = None
		if self.video_file is not None:
			(grabbed, frame) = self.video_file.read()
			if not grabbed:
				raise TEInvalidFrameException()
		elif self.camera is not None:
			frame = self.camera.read()
		elif self.picamera is not None:
			data = next(self.stream)
			frame = data.array
			self.rawCapture.truncate(0)

		# If still null frame,
		if frame is None:
			raise TEInvalidFrameException()

		# resize the frame
		frame = imutils.resize(frame, width=self.FRAME_WIDTH)

		return frame

	def release(self):
		if self.video_file is not None:
			self.video_file.release()
		elif self.camera is not None:
			# Pycamera may not have the release function
			if hasattr(self.camera, 'release'): 
				self.camera.release()
Code Example #5
File: ball_tracking.py Project: BYU-ELC/StalkerCar
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()

# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
Code Example #6
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame,
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
if args.get("video", None) is None:
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
Code Example #7
			cv2.circle(frame, (int(x), int(y)), int(radius),
				(0, 255, 255), 2)
			cv2.circle(frame, center, 5, (0, 0, 255), -1)

	# update the points queue
	pts.appendleft(center)

	# loop over the set of tracked points
	for i in range(1, len(pts)):
		# if either of the tracked points are None, ignore
		# them
		if pts[i - 1] is None or pts[i] is None:
			continue

		# otherwise, compute the thickness of the line and
		# draw the connecting lines
		thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
		cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

	# show the frame to our screen
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF

	# if the 'q' key is pressed, stop the loop
	if key == ord("q"):
		break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
Code Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        help='Path of the video you want to test on.',
                        default=0)
    args = parser.parse_args()

    MINSIZE = 20
    THRESHOLD = [0.6, 0.7, 0.7]
    FACTOR = 0.709
    IMAGE_SIZE = 182
    INPUT_IMAGE_SIZE = 160
    CLASSIFIER_PATH = 'Models/facemodel.pkl'
    VIDEO_PATH = args.path
    FACENET_MODEL_PATH = 'Models/20180402-114759.pb'

    # Load The Custom Classifier
    with open(CLASSIFIER_PATH, 'rb') as file:
        model, class_names = pickle.load(file)
    print("Custom Classifier, Successfully loaded")

    with tf.Graph().as_default():

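        # cap TensorFlow at 60% of the GPU memory so the GPU can be shared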
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))

        with sess.as_default():

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(FACENET_MODEL_PATH)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

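            # build the three-stage MTCNN face detector (P-Net, R-Net, O-Net)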
            pnet, rnet, onet = align.detect_face.create_mtcnn(
                sess, "src/align")

            people_detected = set()
            person_detected = collections.Counter()

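            # open the default camera (note: VIDEO_PATH from --path is not used here)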
            cap = VideoStream(src=0).start()

            while True:
                frame = cap.read()
                frame = imutils.resize(frame, width=600)
                frame = cv2.flip(frame, 1)

                bounding_boxes, _ = align.detect_face.detect_face(
                    frame, MINSIZE, pnet, rnet, onet, THRESHOLD, FACTOR)

                faces_found = bounding_boxes.shape[0]
                try:
                    if faces_found > 1:
                        cv2.putText(frame,
                                    "Only one face", (0, 100),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                    1, (255, 255, 255),
                                    thickness=1,
                                    lineType=2)
                    elif faces_found > 0:
                        det = bounding_boxes[:, 0:4]
                        bb = np.zeros((faces_found, 4), dtype=np.int32)
                        for i in range(faces_found):
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]
                            print(bb[i][3] - bb[i][1])
                            print(frame.shape[0])
                            print((bb[i][3] - bb[i][1]) / frame.shape[0])
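                            # only process faces whose height exceeds 25% of the frame height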
                            if (bb[i][3] - bb[i][1]) / frame.shape[0] > 0.25:
                                cropped = frame[bb[i][1]:bb[i][3],
                                                bb[i][0]:bb[i][2], :]
                                scaled = cv2.resize(
                                    cropped,
                                    (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE),
                                    interpolation=cv2.INTER_CUBIC)
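                                # prewhiten (normalize) the crop before feeding it to FaceNet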
                                scaled = facenet.prewhiten(scaled)
                                scaled_reshape = scaled.reshape(
                                    -1, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3)
                                feed_dict = {
                                    images_placeholder: scaled_reshape,
                                    phase_train_placeholder: False
                                }
                                emb_array = sess.run(embeddings,
                                                     feed_dict=feed_dict)
                                print(emb_array.shape)
                                predictions = model.predict_proba(emb_array)
                                best_class_indices = np.argmax(predictions,
                                                               axis=1)
                                best_class_probabilities = predictions[
                                    np.arange(len(best_class_indices)),
                                    best_class_indices]
                                best_name = class_names[best_class_indices[0]]
                                print("Name: {}, Probability: {}".format(
                                    best_name, best_class_probabilities))

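                                # only annotate the face when the classifier confidence exceeds 0.8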
                                if best_class_probabilities > 0.8:
                                    cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                                  (bb[i][2], bb[i][3]),
                                                  (0, 255, 0), 2)
                                    text_x = bb[i][0]
                                    text_y = bb[i][3] + 20

                                    name = class_names[best_class_indices[0]]
                                    cv2.putText(frame,
                                                name, (text_x, text_y),
                                                cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                                1, (255, 255, 255),
                                                thickness=1,
                                                lineType=2)
                                    cv2.putText(
                                        frame,
                                        str(
                                            round(best_class_probabilities[0],
                                                  3)), (text_x, text_y + 17),
                                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                        1, (255, 255, 255),
                                        thickness=1,
                                        lineType=2)
                                    person_detected[best_name] += 1
                                else:
                                    name = "Unknown"

                except Exception:
                    # if detection or recognition fails on this frame, skip it
                    pass

                cv2.imshow('Face Recognition', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()