def get_jpg(camera_id):
    if not motionctl.running():
        return None

    if camera_id not in MjpgClient.clients:
        # mjpg client not started yet for this camera
        logging.debug('creating mjpg client for camera %(camera_id)s' % {
                'camera_id': camera_id})

        camera_config = config.get_camera(camera_id)
        if not camera_config['@enabled'] or not utils.local_motion_camera(camera_config):
            logging.error('could not start mjpg client for camera id %(camera_id)s: not enabled or not local' % {
                    'camera_id': camera_id})

            return None

        port = camera_config['stream_port']
        username, password = None, None
        if camera_config.get('stream_auth_method') > 0:
            username, password = camera_config.get('stream_authentication', ':').split(':')

        client = MjpgClient(camera_id, port, username, password)
        client.connect()

    MjpgClient.last_access[camera_id] = datetime.datetime.utcnow()

    return MjpgClient.last_jpgs.get(camera_id)

def cleanup_media(media_type):
    logging.debug('cleaning up %(media_type)ss...' % {'media_type': media_type})

    if media_type == 'picture':
        exts = _PICTURE_EXTS

    else:  # media_type == 'movie'
        exts = _MOVIE_EXTS + ['.thumb']

    for camera_id in config.get_camera_ids():
        camera_config = config.get_camera(camera_id)
        if not utils.is_local_motion_camera(camera_config):
            continue

        preserve_media = camera_config.get('@preserve_%(media_type)ss' % {'media_type': media_type}, 0)
        if preserve_media == 0:
            continue  # preserve forever

        still_images_enabled = bool(camera_config['picture_filename']) or bool(camera_config['snapshot_filename'])
        movies_enabled = bool(camera_config['ffmpeg_output_movies'])

        if media_type == 'picture' and not still_images_enabled:
            continue  # only cleanup pictures for cameras with still images enabled

        elif media_type == 'movie' and not movies_enabled:
            continue  # only cleanup movies for cameras with movies enabled

        preserve_moment = datetime.datetime.now() - datetime.timedelta(days=preserve_media)

        target_dir = camera_config.get('target_dir')
        if os.path.exists(target_dir):
            # create a sentinel file to make sure the target dir is never removed
            open(os.path.join(target_dir, '.keep'), 'w').close()

            _remove_older_files(target_dir, preserve_moment, exts=exts)

def make_next_movie_preview():
    global _previewless_movie_files

    logging.debug('making preview for the next movie...')

    if _previewless_movie_files:
        (camera_config, path) = _previewless_movie_files.pop(0)

        make_movie_preview(camera_config, path)

    else:
        logging.debug('gathering movies without preview...')

        count = 0
        for camera_id in config.get_camera_ids():
            camera_config = config.get_camera(camera_id)
            if not utils.local_motion_camera(camera_config):
                continue

            target_dir = camera_config['target_dir']

            for (full_path, st) in _list_media_files(target_dir, _MOVIE_EXTS):  # @UnusedVariable
                if os.path.exists(full_path + '.thumb'):
                    continue

                logging.debug('found a movie without preview: %(path)s' % {
                        'path': full_path})

                _previewless_movie_files.append((camera_config, full_path))
                count += 1

        logging.debug('found %(count)d movies without preview' % {'count': count})

        if count:
            make_next_movie_preview()

def cleanup_media(media_type):
    logging.debug('cleaning up %(media_type)ss...' % {'media_type': media_type})

    if media_type == 'picture':
        exts = _PICTURE_EXTS

    elif media_type == 'movie':
        exts = _MOVIE_EXTS + ['.thumb']

    for camera_id in config.get_camera_ids():
        camera_config = config.get_camera(camera_id)
        if not utils.local_motion_camera(camera_config):
            continue

        preserve_media = camera_config.get('@preserve_%(media_type)ss' % {'media_type': media_type}, 0)
        if preserve_media == 0:
            continue  # preserve forever

        still_images_enabled = bool(
                ((camera_config['emulate_motion'] or camera_config['output_pictures']) and camera_config['picture_filename']) or
                (camera_config['snapshot_interval'] and camera_config['snapshot_filename']))

        movies_enabled = camera_config['ffmpeg_output_movies']

        if media_type == 'picture' and not still_images_enabled:
            continue  # only cleanup pictures for cameras with still images enabled

        elif media_type == 'movie' and not movies_enabled:
            continue  # only cleanup movies for cameras with movies enabled

        preserve_moment = datetime.datetime.now() - datetime.timedelta(days=preserve_media)

        target_dir = camera_config.get('target_dir')
        _remove_older_files(target_dir, preserve_moment, exts=exts)

def get_jpg(camera_id):
    if camera_id not in MjpgClient.clients:
        # mjpg client not started yet for this camera
        logging.debug('creating mjpg client for camera %(camera_id)s' % {
                'camera_id': camera_id})

        camera_config = config.get_camera(camera_id)
        if not camera_config['@enabled'] or not utils.is_local_motion_camera(camera_config):
            logging.error('could not start mjpg client for camera id %(camera_id)s: not enabled or not local' % {
                    'camera_id': camera_id})

            return None

        port = camera_config['stream_port']
        username, password = None, None
        auth_mode = None
        if camera_config.get('stream_auth_method') > 0:
            username, password = camera_config.get('stream_authentication', ':').split(':')
            auth_mode = 'digest' if camera_config.get('stream_auth_method') > 1 else 'basic'

        client = MjpgClient(camera_id, port, username, password, auth_mode)
        client.connect()

        MjpgClient.clients[camera_id] = client

    client = MjpgClient.clients[camera_id]

    return client.get_last_jpg()

def get_jpg(camera_id):
    if camera_id not in MjpgClient.clients:
        # mjpg client not started yet for this camera
        logging.debug("creating mjpg client for camera %(camera_id)s" % {"camera_id": camera_id})

        camera_config = config.get_camera(camera_id)
        if not camera_config["@enabled"] or not utils.local_motion_camera(camera_config):
            logging.error(
                "could not start mjpg client for camera id %(camera_id)s: not enabled or not local"
                % {"camera_id": camera_id}
            )

            return None

        port = camera_config["stream_port"]
        username, password = None, None
        if camera_config.get("stream_auth_method") > 0:
            username, password = camera_config.get("stream_authentication", ":").split(":")

        client = MjpgClient(camera_id, port, username, password)
        client.connect()

    MjpgClient.last_access[camera_id] = datetime.datetime.utcnow()

    return MjpgClient.last_jpgs.get(camera_id)

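# Illustrative usage sketch (the camera id and output path are assumptions, not part
# of the original code): each get_jpg() variant above returns the most recently
# received JPEG frame for a local camera as a byte string, or None when no frame is
# cached yet, so callers are expected to poll and handle the None case themselves.
jpg = get_jpg(1)
if jpg is None:
    logging.debug('no frame available yet for camera 1')
else:
    with open('/tmp/camera1-last-frame.jpg', 'wb') as f:
        f.write(jpg)
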
def admincapture():
    if request.method == 'GET':
        # Prefix for positive training image filenames.
        POSITIVE_FILE_PREFIX = 'positive_'

        camera = config.get_camera()

        # Create the directory for positive training images if it doesn't exist.
        if not os.path.exists(config.POSITIVE_DIR):
            os.makedirs(config.POSITIVE_DIR)

        # Find the largest ID of existing positive images.
        # Start new images after this ID value.
        files = sorted(glob.glob(os.path.join(config.POSITIVE_DIR,
                                              POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
        count = 0
        if len(files) > 0:
            # Grab the count from the last filename.
            count = int(files[-1][-7:-4]) + 1

        while True:
            print 'Capturing image...'
            image = camera.read()

            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                print 'Could not detect single face! Check the image in capture.pgm' \
                      ' to see what was captured and try again with only one face visible.'
                return 'Could not detect single face! Check the image in capture.pgm' \
                       ' to see what was captured and try again with only one face visible.'

            x, y, w, h = result

            # Crop image as close as possible to desired face aspect ratio.
            # Might be smaller if face is near edge of image.
            crop = face.crop(image, x, y, w, h)

            # Save image to file.
            filename = os.path.join(config.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
            cv2.imwrite(filename, crop)
            print 'Found face and wrote training image', filename
            print 'successful capture'
            delay()

            # TODO: this capture count still needs to be revisited.
            count += 1
            if count > 4:
                break

        # return redirect(url_for('/new'))
        return 'Found face and wrote training image'

def _disable_initial_motion_detection():
    for camera_id in config.get_camera_ids():
        camera_config = config.get_camera(camera_id)
        if not utils.local_motion_camera(camera_config):
            continue

        if not camera_config['@motion_detection']:
            logging.debug('motion detection disabled by config for camera with id %s' % camera_id)
            set_motion_detection(camera_id, False)

def main():
    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'

    # Initialize camera and box.
    camera = config.get_camera()
    door = hardware.Door()

    # Move box to locked position.
    door.lock()

    print 'Running Lock...'
    print 'Press button to lock (if unlocked), or unlock if the correct face is detected.'
    print 'Press Ctrl-C to quit.'

    while True:
        try:
            # Check if capture should be made.
            # TODO: Check if button is pressed.
            if door.is_button_up() or is_letter_input('l'):
                if not door.is_locked:
                    # Lock the door if it is unlocked
                    door.lock()
                    print 'Door is now locked.'
                else:
                    print 'Button pressed, looking for face...'
                    # Check for the positive face and unlock if found.
                    image = camera.read()

                    # Convert image to grayscale.
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

                    # Get coordinates of single face in captured image.
                    result = face.detect_single(image)
                    if result is None:
                        print 'Could not detect single face! Check the image in capture.pgm' \
                              ' to see what was captured and try again with only one face visible.'
                        soundChannelC.play(soundC)
                        sleep(.01)
                        continue

                    x, y, w, h = result

                    # Crop and resize image to face.
                    crop = face.resize(face.crop(image, x, y, w, h))

                    # Test face against model.
                    label, confidence = model.predict(crop)
                    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                        'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)

                    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                        print 'Recognized face! Unlocking Door Now...'
                        door.unlock()
                        soundChannelA.play(soundA)
                        sleep(.01)
                    else:
                        print 'Did not recognize face!'
                        soundChannelB.play(soundB)
                        sleep(.01)
        except KeyboardInterrupt:
            door.clean()
            sys.exit()

def _check_ws():
    # schedule the next call
    io_loop = IOLoop.instance()
    io_loop.add_timeout(datetime.timedelta(seconds=10), _check_ws)

    if not motionctl.running():
        return

    def on_motion_detection_status(camera_id, must_be_enabled, working_schedule_type, enabled=None, error=None):
        if error:  # could not detect current status
            return logging.warn("skipping motion detection status update for camera with id %(id)s" % {"id": camera_id})

        if enabled and not must_be_enabled:
            logging.debug(
                "must disable motion detection for camera with id %(id)s (%(what)s working schedule)"
                % {"id": camera_id, "what": working_schedule_type}
            )

            motionctl.set_motion_detection(camera_id, False)

        elif not enabled and must_be_enabled:
            logging.debug(
                "must enable motion detection for camera with id %(id)s (%(what)s working schedule)"
                % {"id": camera_id, "what": working_schedule_type}
            )

            motionctl.set_motion_detection(camera_id, True)

    now = datetime.datetime.now()
    for camera_id in config.get_camera_ids():
        camera_config = config.get_camera(camera_id)
        if not utils.local_motion_camera(camera_config):
            continue

        working_schedule = camera_config.get("@working_schedule")
        motion_detection = camera_config.get("@motion_detection")
        working_schedule_type = camera_config.get("@working_schedule_type") or "outside"

        if not working_schedule:  # working schedule disabled, motion detection left untouched
            continue

        if not motion_detection:  # motion detection explicitly disabled
            continue

        now_during = _during_working_schedule(now, working_schedule)
        must_be_enabled = (now_during and working_schedule_type == "during") or (
            not now_during and working_schedule_type == "outside"
        )

        motionctl.get_motion_detection(
            camera_id,
            functools.partial(on_motion_detection_status, camera_id, must_be_enabled, working_schedule_type)
        )

def thread_id_to_camera_id(thread_id):
    # find the corresponding camera_id
    # (which can be different from thread_id)
    camera_ids = config.get_camera_ids()
    tid = 0
    for cid in camera_ids:
        camera_config = config.get_camera(cid)
        if utils.local_motion_camera(camera_config):
            tid += 1
            if tid == thread_id:
                return cid

    return None

def main(argv):
    pid = int(sys.argv[1])
    print 'PID is: ', pid

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    print 'Model created'
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'

    # Initialize camera and box.
    camera = config.get_camera()
    print 'Press Ctrl-C to quit.'

    goodpicture = False
    while not goodpicture:
        print 'Looking for face...'
        print 'Check for the positive face and unlock if found.'
        image = camera.read()

        print 'Convert image to grayscale.'
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        print 'Get coordinates of single face in captured image.'
        result = face.detect_single(image)
        if result is None:
            print 'Could not detect single face! Check the image in capture.pgm to see what was captured and try again with only one face visible.'
            # continue
        else:
            goodpicture = True
            x, y, w, h = result

            print 'Crop and resize image to face.'
            crop = face.resize(face.crop(image, x, y, w, h))

            print 'Test face against model.'
            label, confidence = model.predict(crop)
            print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)

            print 'Starting to print in file'
            fo = open("foo.txt", "w")
            if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                print 'Recognized face!'
                fo.write("recognized")
            else:
                print 'Did not recognize face!'
                fo.write("echec")  # French for "failure"; kept verbatim because the reading process expects this token
            fo.close()

            # notify the process whose PID was passed on the command line that a result is ready
            os.kill(pid, signal.SIGUSR2)

def camera_id_to_thread_id(camera_id):
    # find the corresponding thread_id
    # (which can be different from camera_id)
    camera_ids = config.get_camera_ids()
    thread_id = 0
    for cid in camera_ids:
        camera_config = config.get_camera(cid)
        if utils.local_motion_camera(camera_config):
            thread_id += 1

        if cid == camera_id:
            return thread_id or None

    return None

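# Illustrative round-trip sketch (the loop below is an assumption for demonstration,
# not part of the original module): motion assigns consecutive thread ids only to
# local cameras, so camera ids and thread ids diverge once remote cameras exist;
# for local cameras the two helpers above are expected to invert each other.
for camera_id in config.get_camera_ids():
    if not utils.local_motion_camera(config.get_camera(camera_id)):
        continue

    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is not None:
        assert thread_id_to_camera_id(thread_id) == camera_id
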
def make_message(subject, message, camera_id, moment, timespan, callback):
    camera_config = config.get_camera(camera_id)

    # we must start the IO loop for the media list subprocess polling
    io_loop = IOLoop.instance()

    def on_media_files(media_files):
        io_loop.stop()

        timestamp = time.mktime(moment.timetuple())

        if media_files:
            logging.debug('got media files')

            media_files = [m for m in media_files if abs(m['timestamp'] - timestamp) < timespan]  # filter out non-recent media files
            media_files.sort(key=lambda m: m['timestamp'], reverse=True)
            media_files = [os.path.join(camera_config['target_dir'], re.sub('^/', '', m['path'])) for m in media_files]

            logging.debug('selected %d pictures' % len(media_files))

        format_dict = {
            'camera': camera_config['@name'],
            'hostname': socket.gethostname(),
            'moment': moment.strftime('%Y-%m-%d %H:%M:%S'),
        }

        if settings.LOCAL_TIME_FILE:
            format_dict['timezone'] = tzctl.get_time_zone()

        else:
            format_dict['timezone'] = 'local time'

        m = message % format_dict
        s = subject % format_dict
        s = s.replace('\n', ' ')

        m += '\n\n'
        m += 'motionEye.'

        callback(s, m, media_files)

    if not timespan:
        return on_media_files([])

    logging.debug('waiting for pictures to be taken')
    time.sleep(timespan)  # give motion some time to create motion pictures

    logging.debug('creating email message')

    mediafiles.list_media(camera_config, media_type='picture', callback=on_media_files)

    io_loop.start()

def _set_streameye_settings(camera_id, s):
    s = dict(s)
    s.setdefault('sePort', 8081)
    s.setdefault('seAuthMode', 'disabled')

    main_config = config.get_main()
    username = main_config['@normal_username']
    password = main_config['@normal_password']
    realm = 'motionEyeOS'

    logging.debug('writing streameye settings to %s' % STREAMEYE_CONF)

    lines = [
        'PORT="%s"' % s['sePort'],
        'AUTH="%s"' % s['seAuthMode'],
        'CREDENTIALS="%s:%s:%s"' % (username, password, realm)
    ]

    with open(STREAMEYE_CONF, 'w') as f:
        for line in lines:
            f.write(line + '\n')

    if 1 in config.get_camera_ids():
        # a workaround to update the camera username and password
        # since we cannot call set_camera() from here
        if s['seAuthMode'] == 'basic':
            url = 'http://%s:%s@127.0.0.1:%s/' % (username, password, s['sePort'])

        else:
            url = 'http://127.0.0.1:%s/' % s['sePort']

        if 1 in config._camera_config_cache:
            logging.debug('updating streaming authentication in config cache')
            config._camera_config_cache[1]['@url'] = url

        lines = config.get_camera(1, as_lines=True)
        for i, line in enumerate(lines):
            if line.startswith('# @url'):
                lines[i] = '# @url %s' % url

        config_file = os.path.join(settings.CONF_PATH, config._CAMERA_CONFIG_FILE_NAME % {'id': 1})

        logging.debug('updating streaming authentication in camera config file %s' % config_file)
        with open(config_file, 'w') as f:
            for line in lines:
                f.write(line + '\n')

    logging.debug('restarting streameye')
    if os.system('streameye.sh restart'):
        logging.error('streameye restart failed')

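# For illustration only (all values below are assumptions): with sePort=8081,
# seAuthMode='basic' and a normal user 'user' with password 'secret',
# _set_streameye_settings() would rewrite STREAMEYE_CONF with exactly these lines:
#
#   PORT="8081"
#   AUTH="basic"
#   CREDENTIALS="user:secret:motionEyeOS"
#
# and, if camera 1 exists, point its remembered @url at http://user:secret@127.0.0.1:8081/
# before restarting streameye.
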
def make_media_folders():
    import config

    config.get_main()  # just to have main config already loaded

    camera_ids = config.get_camera_ids()
    for camera_id in camera_ids:
        camera_config = config.get_camera(camera_id)
        if 'target_dir' in camera_config:
            if not os.path.exists(camera_config['target_dir']):
                try:
                    os.makedirs(camera_config['target_dir'])

                except Exception as e:
                    logging.error('failed to create root media folder "%s" for camera with id %s: %s' % (
                            camera_config['target_dir'], camera_id, e))

def _check_ws():
    # schedule the next call
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.add_timeout(datetime.timedelta(seconds=10), _check_ws)

    if not motionctl.running():
        return

    now = datetime.datetime.now()
    for camera_id in config.get_camera_ids():
        camera_config = config.get_camera(camera_id)
        if not utils.local_motion_camera(camera_config):
            continue

        working_schedule = camera_config.get('@working_schedule')
        motion_detection = camera_config.get('@motion_detection')
        working_schedule_type = camera_config.get('@working_schedule_type') or 'outside'

        if not working_schedule:  # working schedule disabled, motion detection left untouched
            continue

        if not motion_detection:  # motion detection explicitly disabled
            continue

        now_during = _during_working_schedule(now, working_schedule)
        must_be_enabled = (now_during and working_schedule_type == 'during') or (not now_during and working_schedule_type == 'outside')

        currently_enabled = motionctl.get_motion_detection(camera_id)
        if currently_enabled is None:  # could not detect current status
            logging.warn('skipping motion detection status update for camera with id %(id)s' % {'id': camera_id})
            continue

        if currently_enabled and not must_be_enabled:
            logging.debug('must disable motion detection for camera with id %(id)s (%(what)s working schedule)' % {
                    'id': camera_id,
                    'what': working_schedule_type})

            motionctl.set_motion_detection(camera_id, False)

        elif not currently_enabled and must_be_enabled:
            logging.debug('must enable motion detection for camera with id %(id)s (%(what)s working schedule)' % {
                    'id': camera_id,
                    'what': working_schedule_type})

            motionctl.set_motion_detection(camera_id, True)

def comp():
    # import cv2
    # import config
    # import face

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'

    # Initialize camera.
    camera = config.get_camera()

    print 'Capturing Profile...'
    while True:
        image = camera.read()

        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print 'Could not detect one face! Check the image capture.pgm'
            return "User Not Detected"

        x, y, w, h = result

        # Crop and resize image to face.
        crop = face.resize(face.crop(image, x, y, w, h))

        # Test face against model.
        label, confidence = model.predict(crop)
        print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
            'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)

        # user_login for the redirect refers to the def user_login, not /user_login
        # return redirect(url_for('user_login'))
        if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
            break

        else:
            print 'Did not recognize face!'
            return 'User Not Accepted !'

def cleanup_media(media_type): logging.debug("cleaning up %(media_type)ss..." % {"media_type": media_type}) if media_type == "picture": exts = _PICTURE_EXTS elif media_type == "movie": exts = _MOVIE_EXTS + [".thumb"] for camera_id in config.get_camera_ids(): camera_config = config.get_camera(camera_id) if not utils.local_motion_camera(camera_config): continue preserve_media = camera_config.get("@preserve_%(media_type)ss" % {"media_type": media_type}, 0) if preserve_media == 0: return # preserve forever still_images_enabled = bool( ( (camera_config["emulate_motion"] or camera_config["output_pictures"]) and camera_config["picture_filename"] ) or (camera_config["snapshot_interval"] and camera_config["snapshot_filename"]) ) movies_enabled = camera_config["ffmpeg_output_movies"] if media_type == "picture" and not still_images_enabled: continue # only cleanup pictures for cameras with still images enabled elif media_type == "movie" and not movies_enabled: continue # only cleanup movies for cameras with movies enabled preserve_moment = datetime.datetime.now() - datetime.timedelta(days=preserve_media) target_dir = camera_config.get("target_dir") if os.path.exists(target_dir): # create a sentinel file to make sure the target dir is never removed open(os.path.join(target_dir, ".keep"), "w").close() _remove_older_files(target_dir, preserve_moment, exts=exts)
def _get_streameye_enabled():
    global _streameye_enabled

    if _streameye_enabled is not None:
        return _streameye_enabled

    camera_ids = config.get_camera_ids(filter_valid=False)  # filter_valid prevents infinite recursion
    if len(camera_ids) != 1:
        _streameye_enabled = False
        return False

    camera_config = config.get_camera(camera_ids[0], as_lines=True)  # as_lines prevents infinite recursion
    camera_config = config._conf_to_dict(camera_config)
    if camera_config.get('@proto') != 'mjpeg':
        _streameye_enabled = False
        return False

    if '127.0.0.1:' not in camera_config.get('@url', ''):
        _streameye_enabled = False
        return False

    _streameye_enabled = True
    return True

def capture():
    camera = config.get_camera()

    # Create the directory for positive training images if it doesn't exist.
    if not os.path.exists(config.TRAINING_DIR + CAPTURE_DIR):
        os.makedirs(config.TRAINING_DIR + CAPTURE_DIR)

    # Find the largest ID of existing positive images.
    # Start new images after this ID value.
    files = sorted(glob.glob(os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '[0-9][0-9][0-9].pgm')))
    count = 0
    if len(files) > 0:
        # Grab the count from the last filename.
        count = int(files[-1][-7:-4]) + 1

    print 'Capturing positive training images.'
    print 'Type c (and press enter) to capture an image.'
    print 'Press Ctrl-C to quit.'

    while True:
        # Check if button was pressed or 'c' was received, then capture image.
        if is_letter_input('c'):
            print 'Capturing image...'
            image = camera.read()

            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                print 'Could not detect single face! Check the image in capture.pgm' \
                      ' to see what was captured and try again with only one face visible.'
                continue

            x, y, w, h = result

            # Crop image as close as possible to desired face aspect ratio.
            # Might be smaller if face is near edge of image.
            crop = face.crop(image, x, y, w, h)

            # Save image to file.
            filename = os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '%03d.pgm' % count)
            cv2.imwrite(filename, crop)
            print 'Found face and wrote training image', filename

            count += 1

def make_message(subject, message, camera_id, moment, timespan, callback):
    camera_config = config.get_camera(camera_id)

    def on_media_files(media_files):
        timestamp = time.mktime(moment.timetuple())

        media_files = [m for m in media_files if abs(m['timestamp'] - timestamp) < timespan]  # filter out non-recent media files
        media_files.sort(key=lambda m: m['timestamp'], reverse=True)
        media_files = [os.path.join(camera_config['target_dir'], re.sub('^/', '', m['path'])) for m in media_files]

        format_dict = {
            'camera': camera_config['@name'],
            'hostname': socket.gethostname(),
            'moment': moment.strftime('%Y-%m-%d %H:%M:%S'),
        }

        if settings.LOCAL_TIME_FILE:
            format_dict['timezone'] = tzctl._get_time_zone()

        else:
            format_dict['timezone'] = 'local time'

        m = message % format_dict
        s = subject % format_dict
        s = s.replace('\n', ' ')

        m += '\n\n'
        m += 'motionEye.'

        callback(s, m, media_files)

    if not timespan:
        return on_media_files([])

    time.sleep(timespan)  # give motion some time to create motion pictures

    mediafiles.list_media(camera_config, media_type='picture', callback=on_media_files)

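# Hypothetical caller sketch (the subject/body templates, camera id and the _send
# helper are assumptions, not part of the original code): make_message() fills the
# %(camera)s / %(moment)s / %(timezone)s placeholders, keeps only pictures taken
# within `timespan` seconds of `moment` and hands everything to the callback.
def _send(subject, body, attachment_paths):
    logging.debug('sending "%s" with %d attachments' % (subject, len(attachment_paths)))

make_message('motionEye: motion detected on %(camera)s',
             'Motion was detected at %(moment)s (%(timezone)s).',
             camera_id=1, moment=datetime.datetime.now(), timespan=5, callback=_send)
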
def _set_streameye_enabled(enabled):
    global _streameye_enabled

    if enabled:
        logging.debug('removing all cameras from cache')
        config._camera_config_cache = {}
        config._camera_ids_cache = []

        logging.debug('disabling all cameras in motion.conf')
        cmd = 'sed -r -i "s/^thread (.*)/#thread \\1/" /data/etc/motion.conf &>/dev/null'
        if os.system(cmd):
            logging.error('failed to disable cameras in motion.conf')

        logging.debug('renaming thread files')
        for name in os.listdir(settings.CONF_PATH):
            if re.match('^thread-\d+.conf$', name):
                os.rename(os.path.join(settings.CONF_PATH, name),
                          os.path.join(settings.CONF_PATH, name + '.bak'))

        logging.debug('adding simple mjpeg camera')
        streameye_settings = _get_streameye_settings(1)
        main_config = config.get_main()
        device_details = {
            'proto': 'mjpeg',
            'host': '127.0.0.1',
            'port': streameye_settings['sePort'],
            'username': '',
            'password': '',
            'scheme': 'http',
            'uri': '/'
        }

        if streameye_settings['seAuthMode'] == 'basic':
            device_details['username'] = main_config['@normal_username']
            device_details['password'] = main_config['@normal_password']

        _streameye_enabled = True

        config._additional_structure_cache = {}
        camera_config = config.add_camera(device_details)

        # call set_camera again so that the streamEye-related defaults are saved
        config.set_camera(camera_config['@id'], camera_config)

        _set_motioneye_add_remove_cameras(False)

    else:  # disabled
        logging.debug('removing simple mjpeg camera')
        for camera_id in config.get_camera_ids():
            camera_config = config.get_camera(camera_id)
            if camera_config.get('@proto') == 'mjpeg':
                config.rem_camera(camera_id)

        logging.debug('renaming thread files')
        for name in os.listdir(settings.CONF_PATH):
            if re.match('^thread-\d+.conf.bak$', name):
                os.rename(os.path.join(settings.CONF_PATH, name),
                          os.path.join(settings.CONF_PATH, name[:-4]))

        _streameye_enabled = False

        config.invalidate()

        logging.debug('enabling all cameras')
        for camera_id in config.get_camera_ids():
            camera_config = config.get_camera(camera_id)
            camera_config['@enabled'] = True
            config.set_camera(camera_id, camera_config)

        _set_motioneye_add_remove_cameras(True)

# Prefix for positive training image filenames.
POSITIVE_FILE_PREFIX = 'louis_'


def is_letter_input(letter):
    # Utility function to check if a specific character is available on stdin.
    # Comparison is case insensitive.
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_char = sys.stdin.read(1)
        return input_char.lower() == letter.lower()
    return False


if __name__ == '__main__':
    camera = config.get_camera()

    # Create the directory for positive training images if it doesn't exist.
    if not os.path.exists(config.POSITIVE_DIR):
        os.makedirs(config.POSITIVE_DIR)

    # Find the largest ID of existing positive images.
    # Start new images after this ID value.
    files = sorted(glob.glob(os.path.join(config.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
    count = 0
    if len(files) > 0:
        # Grab the count from the last filename.
        count = int(files[-1][-7:-4]) + 1

    print 'Capturing positive training images.'
    print 'Press button or type c (and press enter) to capture an image.'
    print 'Press Ctrl-C to quit.'

    while True:

def is_letter_input(letter):
    # Utility function to check if a specific character is available on stdin.
    # Comparison is case insensitive.
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_char = sys.stdin.read(1)
        return input_char.lower() == letter.lower()
    return False


if __name__ == "__main__":
    # Load training data into model
    print "Loading training data..."
    model = cv2.createEigenFaceRecognizer()
    model.load(config.TRAINING_FILE)
    print "Training data loaded!"

    # Initialize camera and box.
    camera = config.get_camera()
    personPresent = 0

    print "Running Hal 9000 recognition"
    print "Press Ctrl-C to quit."

    while True:
        # Check if capture should be made.
        # TODO: Check if button is pressed.
        # if is_letter_input('c'):

        # Check for the positive face and unlock if found.
        image = camera.read()

        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:

def _set_streameye_enabled(enabled):
    global _streameye_enabled

    if enabled:
        logging.debug('removing all cameras from cache')
        config._camera_config_cache = {}
        config._camera_ids_cache = []

        logging.debug('disabling all cameras in motion.conf')
        cmd = 'sed -r -i "s/^thread (.*)/#thread \\1/" /data/etc/motion.conf &>/dev/null'
        if os.system(cmd):
            logging.error('failed to disable cameras in motion.conf')

        logging.debug('renaming thread files')
        for name in os.listdir(settings.CONF_PATH):
            if re.match('^thread-\d+.conf$', name):
                os.rename(os.path.join(settings.CONF_PATH, name),
                          os.path.join(settings.CONF_PATH, name + '.bak'))

        logging.debug('adding simple mjpeg camera')
        streameye_settings = _get_streameye_settings(1)
        main_config = config.get_main()
        device_details = {
            'proto': 'mjpeg',
            'host': '127.0.0.1',
            'port': streameye_settings['sePort'],
            'username': '',
            'password': '',
            'scheme': 'http',
            'uri': '/'
        }

        if streameye_settings['seAuthMode'] == 'basic':
            device_details['username'] = main_config['@normal_username']
            device_details['password'] = main_config['@normal_password']

        _streameye_enabled = True

        config._additional_structure_cache = {}
        camera_config = config.add_camera(device_details)

        # call set_camera again so that the streamEye-related defaults are saved
        config.set_camera(camera_config['@id'], camera_config)

        _set_motioneye_add_remove_cameras(False)

    else:  # disabled
        logging.debug('removing simple mjpeg camera')
        for camera_id in config.get_camera_ids():
            camera_config = config.get_camera(camera_id)
            if camera_config.get('@proto') == 'mjpeg':
                config.rem_camera(camera_id)

        logging.debug('renaming thread files')
        for name in os.listdir(settings.CONF_PATH):
            if re.match('^thread-\d+.conf.bak$', name):
                os.rename(os.path.join(settings.CONF_PATH, name),
                          os.path.join(settings.CONF_PATH, name[:-4]))

        _streameye_enabled = False

        config.invalidate()

        logging.debug('enabling all cameras')
        for camera_id in config.get_camera_ids():
            camera_config = config.get_camera(camera_id)
            camera_config['@enabled'] = True
            config.set_camera(camera_id, camera_config)

        _set_motioneye_add_remove_cameras(True)