def start(self): """ Starts sending the stream to the Viewer. Creates a camera, takes a image frame converts the frame to string and sends the string across the network :return: None """ print("Streaming Started...") camera = Camera() camera.start_capture() self.keep_running = True while self.footage_socket and self.keep_running: try: time.sleep( 0.05 ) # Sleep to decrease processing time per frame, VERY EFFECTIVE frame = camera.current_frame.read() # grab the current frame scale_percent = 100 # percent of original size, SET BY YOUR DESIRE width = int(frame.shape[1] * scale_percent / 100) height = int(frame.shape[0] * scale_percent / 100) dim = (width, height) frameSmall = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) frameSmall = cv2.cvtColor( frameSmall, cv2.COLOR_BGR2GRAY ) #Convert the image to grey scale, decreasing processing time and network trafic, YOU CAN DELETE THIS LINE TO GET A COLOR IMAGE image_as_string = image_to_string(frameSmall) self.footage_socket.send(image_as_string) except KeyboardInterrupt: cv2.destroyAllWindows() break print("Streaming Stopped!") cv2.destroyAllWindows()
def start(self, framerate):
    """
    Starts sending the stream to the Viewer.
    Creates a camera, takes an image frame, converts the frame to a string
    and sends the string across the network.
    :param framerate: target frames per second for the full-rate stream
    :return: None
    """
    self.framerates = [framerate / 3., framerate]
    print("Streaming Started...")
    camera = Camera()
    camera.start_capture()
    self.keep_running = True
    frame_id = 0
    separator = "______".encode()
    time.sleep(2)
    start = time.time()
    while self.footage_socket and self.footage_socket_tiny and self.keep_running:
        try:
            try:
                # Non-blocking check for a framerate change requested by the viewer
                normal_framerate = int(self.footage_socket_tiny.recv(flags=zmq.NOBLOCK))
                framerate = self.framerates[normal_framerate]
            except zmq.error.Again:
                pass
            time.sleep(0.6 / framerate)
            frame = camera.current_frame.read()  # grab the current frame
            small_frame = resize_img(frame)[0]  # resize
            image_as_string = image_to_string(frame)  # encode the frame
            small_image_as_string = image_to_string(small_frame)  # encode the small frame
            self.footage_socket_viewer.send(
                image_as_string + separator + str(frame_id).encode())
            self.footage_socket.send(
                small_image_as_string + separator + str(frame_id).encode() +
                separator + str(round(time.time(), 2)).encode())  # send it
            frame_id += 1
            print('Frame: %d, framerate: %2.2f fps' %
                  (frame_id, round(frame_id / (time.time() - start), 2)))
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            break
    print("Streaming Stopped!")
    cv2.destroyAllWindows()
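# Hypothetical viewer-side counterpart to the adaptive-framerate sender above:
# the sender reads an index from the "tiny" socket and looks it up in
# self.framerates, so the viewer only needs to send "0" (reduced rate) or
# "1" (full rate). The PUSH socket type and endpoint below are assumptions
# for illustration, not taken from the original project.
import zmq

context = zmq.Context()
feedback_socket = context.socket(zmq.PUSH)
feedback_socket.connect("tcp://127.0.0.1:5557")  # placeholder endpoint


def request_framerate(full_speed):
    """Ask the sender to switch to framerates[1] (full) or framerates[0] (reduced)."""
    feedback_socket.send_string("1" if full_speed else "0")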
def frame_capture():
    global buffer
    camera = Camera()
    camera.start_capture()
    while True:
        frame = camera.current_frame.read()  # grab the current frame
        frame = rescale_frame(frame)
        buffer.append(image_to_string(frame))  # queue the encoded frame
        print(len(buffer))
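# Hypothetical companion to frame_capture() above: a sender loop that drains
# the shared buffer and publishes each encoded frame over ZeroMQ. The socket
# type, the address, and the assumption that image_to_string() returns bytes
# are illustrative only.
import threading
import time

import zmq


def frame_send(address="tcp://*:5555"):
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind(address)
    while True:
        if buffer:
            socket.send(buffer.pop(0))  # send the oldest queued frame
        else:
            time.sleep(0.01)  # avoid busy-waiting while the buffer is empty


# Example wiring: capture in a background thread, send in the main thread.
# threading.Thread(target=frame_capture, daemon=True).start()
# frame_send()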
def main():
    person_group_id = FACE_GROUP_ID
    display_image = False
    face_api_wrapper = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)
    create_dir_if_not_exists('captured_images/' + CAMERA_NAME)
    print("Capturing Image every ", CAPTURE_INTERVAL, " seconds...")
    i = 0
    while True:
        try:
            image_filename = 'captured_images/' + CAMERA_NAME + "/" + \
                current_time_to_string() + ".jpg"
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)
            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)
            image_link = upload_to_s3(image_filename)
            image_url = ('https://mybuckfucket.s3-eu-west-1.amazonaws.com/'
                         'public_folder/' + image_filename)
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", current_time_to_string())
            print("S3 url", image_url)
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids)
                contents = load_dict_from_file('json.txt')
                # my_data = contents[0]['faceAttributes']
                payload = {
                    'attributes': contents,
                    'name': "Darragh",
                    'time_date': current_time_to_string(),
                    'face_ids': person_ids,
                    'image_link': image_url,
                }
                requests.post('http://127.0.0.1:8000/students/', data=payload)
                print("#####", payload)
                # print(contents[0]['faceAttributes']['emotion'])
                # requests.post('http://127.0.0.1:8000/students/', data=contents)
            time.sleep(CAPTURE_INTERVAL)
        except Exception as e:
            print(e)
def main():
    init_logging()
    print("Starting model")
    cam = Camera(resolution=(1024, 768))
    model = CNN(cam, 'cnn/model/complexCNN.h5')

    # Capture image stream and classify the parking spots
    while True:
        model.detect()
        time.sleep(5)

    # When everything is done, release the capture
    cv2.destroyAllWindows()
def main(): info = {"parkspot_id": "", "token": ""} # Load Parkspot ID and Token infoPath = Path(settings.infoFile) if infoPath.is_file(): info = (pickle.load(open(settings.infoFile, "rb"))) tokenAvailable = True if (info['token'] == ""): tokenAvailable = False networkAvailable = hasNetwork() if networkAvailable is False or tokenAvailable is False: setupParkspot(info) api = ParkspotApi(info) # Create a new parkspot if we did not create one yet if info["parkspot_id"] == "": if api.createParkspot() is False: print("Could not create Parkspot. Resetting") reset() pickle.dump(info, open(settings.infoFile, "w+b")) # Tell the backend our IP if api.updateLocalIp() is False: print("Could update IP. Resetting") shutdown() # Initialize the camera and the model print("Starting model") cam = Camera(usePiCamera=False, resolution=(1280, 720)) model = CNN(cam, 'cnn/model/complexCNN.h5', api) # Create the parkspot grpc service service = ParkspotService(cam, model, api) # Run model while True: model.detect() time.sleep(5) # When everything is done, release everything cv2.destroyAllWindows() service.stop()
def main():
    person_group_id = FACE_GROUP_ID
    display_image = False
    face_api_wrapper = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)
    create_dir_if_not_exists('captured_images/' + CAMERA_NAME)
    print("Capturing Image every ", CAPTURE_INTERVAL, " seconds...")
    i = 0
    while True:
        try:
            image_filename = 'captured_images/' + CAMERA_NAME + "/" + \
                current_time_to_string() + ".jpg"
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)
            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)
            image_link = upload_to_s3(image_filename)
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", time.time())
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids)
                requests.post(REST_SERVER_URL + 'time-face-id',
                              data={
                                  'lecture_number': get_lecture_number(),
                                  'face_ids': req_ids,
                                  'image-link': image_link,
                                  'camera-name': CAMERA_NAME,
                              })
                print("Present IDs:", req_ids)
            time.sleep(CAPTURE_INTERVAL)
        except Exception as e:
            print(e)
def start(self): """ Starts sending the stream to the Viewer. Creates a camera, takes a image frame converts the frame to string and sends the string across the network :return: None """ print("Streaming Started...") camera = Camera() camera.start_capture() self.keep_running = True while self.footage_socket and self.keep_running: try: frame = camera.current_frame.read() # grab the current frame image_as_string = image_to_string(frame) self.footage_socket.send(image_as_string) except KeyboardInterrupt: cv2.destroyAllWindows() break print("Streaming Stopped!") cv2.destroyAllWindows()
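# Minimal sketch of a matching viewer for the sender above, assuming that
# image_to_string() base64-encodes a JPEG buffer; the string_to_image()
# helper, socket type and port are assumptions for illustration only.
import base64

import cv2
import numpy as np
import zmq


def string_to_image(string):
    """Hypothetical inverse of image_to_string: base64 payload -> BGR frame."""
    img_bytes = base64.b64decode(string)
    np_array = np.frombuffer(img_bytes, dtype=np.uint8)
    return cv2.imdecode(np_array, cv2.IMREAD_COLOR)


def view_stream(port=5555):
    context = zmq.Context()
    footage_socket = context.socket(zmq.SUB)
    footage_socket.bind("tcp://*:%d" % port)
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, "")
    while True:
        frame = string_to_image(footage_socket.recv())
        cv2.imshow("Stream", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()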
import os
import time

from api.API import APIClient
from camera.Camera import Camera
from flask import Flask

app = Flask(__name__)

SAVE_PICTURE_PATH = "/data"
cam = Camera()


@app.route('/picture')
def take_picture():
    picture_path = cam.shoot(SAVE_PICTURE_PATH, "python-test")
    print("Picture taken")
    api = APIClient("https://api.pizzia.k8s.jeremychauvin.fr")
    api.predict(picture_path)
    # A Flask view must return a response; report the saved picture path
    return "Picture taken: " + picture_path
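# A quick way to exercise the /picture route locally; the host and port are
# assumed Flask defaults, not taken from the original project:
#   $ python app.py
#   $ curl http://127.0.0.1:5000/picture
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)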
def test_camera_image(self):
    self.assertIsInstance(Camera().capture_image(), numpy.ndarray)
@classmethod
def setUpClass(cls):
    super(TestCameraStream, cls).setUpClass()
    cls.camera = Camera()
    cls.camera.start_capture()
def test_camera_image_not_null(self):
    self.assertIsNotNone(Camera().capture_image())
from camera.Camera import Camera

c = Camera()
c.rotate(300)
def main():
    person_group_id = 'students'
    display_image = False
    face_api_wrapper = FaceAPIWrapper(
        "##MY_KEY###",
        'https://projectface.cognitiveservices.azure.com/face/v1.0')
    create_dir_if_not_exists('capturedImages/' + "Camera 1")
    print("Capturing Image every ", 60, " seconds...")
    i = 0
    while True:
        try:
            image_filename = 'capturedImages/' + "Camera 1" + "/" + \
                current_time_to_string() + ".jpg"
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)
            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)
            image_link = upload_to_s3(image_filename)
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", time.time())
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids)
            time.sleep(60)
        except Exception as e:
            print(e)


def current_time_to_string():
    from datetime import datetime
    return datetime.now().strftime("%Y%m%d_%H%M%S%f")


def upload_to_s3(key):
    print("Uploading file to S3...")
    bucket_name = 'mybuckfucket'
    folder_name = "public_folder"
    output_name = folder_name + "/" + key
    location = 'us-west-1'
    s3 = boto3.client('s3')
    s3.upload_file(key, bucket_name, output_name,
                   ExtraArgs={'ACL': 'public-read'})
    url = "https://s3.amazonaws.com/%s/%s/%s" % (bucket_name, folder_name, key)
    return url


def create_dir_if_not_exists(output_dir):
    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.isdir(output_dir):
            raise


if __name__ == '__main__':
    # initial_setup()
    main()