Example #1
def recognise_faces_many(fc,
                         img_dir_path,
                         person_group_id,
                         out_dir_path,
                         label_and_save=False):
    """
    Identify a face against a defined PersonGroup for all images in a specified directory
    """
    logger.info(f'Preparing images in {img_dir_path} ...')
    test_image_array = glob.glob(f'{img_dir_path}/*.*')
    no_files = len(test_image_array)
    no_fails = 0
    result_dict = {}

    for image_path in test_image_array:
        if not image_path.endswith('.jpg'):
            continue
        basename = os.path.basename(image_path)
        logger.info(f'Processing {image_path}...')
        try:
            faces = recognise_faces(fc, image_path, person_group_id)
            if label_and_save:
                label_image(faces, image_path,
                            os.path.join(out_dir_path, basename))
            result_dict[basename] = faces
        except (APIErrorException, APIError) as ex:
            logger.error(f'Failed to process {basename}: {ex}')
            no_fails += 1
    logger.info(f'Result: {no_files} images in total, {no_fails} failed to process')
    # Returns the face & coord dict
    return result_dict
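A minimal usage sketch (not from the source): the endpoint, key variable, group ID, and directory paths are placeholders, and it assumes the azure-cognitiveservices-vision-face SDK that APIErrorException comes from.

import os
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# Placeholder endpoint and key; the PersonGroup must already be trained
fc = FaceClient('https://<region>.api.cognitive.microsoft.com',
                CognitiveServicesCredentials(os.environ['AZURE_FACE_KEY']))
faces_by_image = recognise_faces_many(fc, 'input_frames', 'my-person-group',
                                      'labelled_frames', label_and_save=True)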
Example #2
def bulk_insert_csv(self, file_path, table_name, cols):
    try:
        # Read only the requested columns from the CSV file
        df = pd.read_csv(file_path, encoding='utf8', usecols=cols)
        rows = df.values.tolist()
        if not rows:
            logger.info('No entries to insert into database.')
            return
        logger.info('Successfully read {} rows from CSV file {}'.format(
            len(rows), file_path))
    except pd.errors.EmptyDataError as ex:
        logger.error(ex)
        raise
    try:
        # Build e.g. '("col_a", "col_b")' and '(?, ?)'; join() also handles
        # the single-column case, where str(tuple(...)) would leave a
        # trailing comma and break the SQL
        column_str = '({})'.format(', '.join(f'"{c}"' for c in cols))
        wildcard_str = '({})'.format(', '.join('?' for _ in cols))
        query_template = 'INSERT INTO {} {} VALUES {}'.format(
            table_name, column_str, wildcard_str)
        logger.debug(f"executemany query template: '{query_template}'")
        # Performing bulk insert into RDS
        logger.debug(rows)
        self.cursor.executemany(query_template, rows)
        self.cursor.commit()
        logger.info('Insert success.')
    except pyodbc.Error as ex:
        logger.error(ex)
        raise
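A hypothetical call, assuming the method lives on the pyodbc wrapper whose constructor appears in Example #6 (called Database here purely for illustration); file, table, and column names are placeholders.

db = Database(db_endpoint, db_name, db_uid, db_pw)  # see Example #6
db.bulk_insert_csv('results/episode12.csv', 'skull_results',
                   cols=['episode', 'frame', 'confidence'])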
Example #3
def run(self):
    try:
        logger.info('Estimating burned members...')
        self.process_results()
        logger.info('Updating database...')
        self.update_database()
        self.upload_cached_files()
        self.save_cached_files()
    except Exception:
        # logger.exception records the traceback; bare raise preserves it
        logger.exception('Phase 3 failed')
        raise
Example #4
def s3_download(region_name, bucket_name, filename):
    # adapted from
    # https://www.thetechnologyupdates.com/image-processing-opencv-with-aws-lambda/
    s3 = boto3.client('s3', region_name=region_name)
    logger.info(f'Downloading: [{bucket_name}/{filename}]')
    try:
        file_obj = s3.get_object(Bucket=bucket_name, Key=filename)
        file_data = file_obj['Body'].read()
        logger.info(f'Complete: [{bucket_name}/{filename}]')
        return file_data
    except ClientError:  # from botocore.exceptions import ClientError
        # catching ClientError instead of BaseException avoids swallowing
        # KeyboardInterrupt and SystemExit
        logger.exception(f'Download failed: [{bucket_name}/{filename}]')
        raise
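Since the function is adapted from an OpenCV-on-Lambda tutorial, a plausible consumer decodes the returned bytes into an image; the region, bucket, and key below are placeholders.

import cv2
import numpy as np

data = s3_download('ap-southeast-1', 'my-episode-bucket',
                   'frames/frame_0001.jpg')
# Decode the raw JPEG bytes into a BGR image array
image = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)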
Example #5
def run(self):
    try:
        logger.info('Phase 2 start')
        paths = self.get_imagepaths()
        logger.info('Processing images in input directory')
        results = self.process_images(paths)
        logger.info('Updating result CSV file')
        self.update_results(results)
        self.upload_cached_files()
        self.save_cached_files()
        logger.info('Phase 2 complete')
    except Exception:
        # logger.exception records the traceback; bare raise preserves it
        logger.exception('Phase 2 failed')
        raise
Example #6
    def __init__(self, db_endpoint, db_name, db_uid, db_pw):
        # Double braces keep the literal { } around the driver name
        connection_string = (f'DRIVER={{ODBC Driver 17 for SQL Server}}; '
                             f'SERVER={db_endpoint}; '
                             f'DATABASE={db_name}; '
                             f'UID={db_uid}; '
                             f'PWD={db_pw}')

        try:
            # Redact the password before logging the connection string
            logger.debug(connection_string.replace(db_pw, '***'))
            self.conn = pyodbc.connect(connection_string)
            self.cursor = self.conn.cursor()
            logger.info('Able to connect.')
        except pyodbc.Error:
            logger.exception('Failed to connect.')
            raise
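Constructing the wrapper might look like this, assuming the class is called Database and credentials are kept in environment variables (both assumptions, not in the source):

import os

db = Database(
    db_endpoint=os.environ['DB_ENDPOINT'],  # hypothetical variable names
    db_name=os.environ['DB_NAME'],
    db_uid=os.environ['DB_UID'],
    db_pw=os.environ['DB_PW'])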
Example #7
def execute(self, query, query_type):
    # renamed from 'type' to avoid shadowing the built-in
    logger.info(query)
    try:
        self.cursor.execute(query)
        # SELECT: map each row onto a {column: value} dict
        if query_type == 'SELECT':
            columns = [column[0] for column in self.cursor.description]
            results = [
                dict(zip(columns, row)) for row in self.cursor.fetchall()
            ]
            logger.info(results)
            return results
        # single INSERT: commit the transaction
        elif query_type == 'INSERT':
            self.conn.commit()
    except pyodbc.Error as ex:
        logger.error(ex)
        raise
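Both branches in use, continuing the hypothetical Database wrapper from Example #6; the table and column names are placeholders.

rows = db.execute('SELECT episode, frame FROM skull_results', 'SELECT')
db.execute('INSERT INTO skull_results (episode, frame) VALUES (12, 345)',
           'INSERT')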
Example #8
def __init__(self, config, episode_filename):
    logger.info('Initializing phase 1 parameters')
    self.episode_filename = episode_filename
    self.episode_number = get_episode_number_from_filename(
        episode_filename)
    # prepare directory for caching
    self.cache_dir = TemporaryDirectory()
    self.results = Results.blank()
    # prepare directory for local saving
    self.save_images = config.getboolean('save_images')
    self.save_results = config.getboolean('save_results')
    if self.save_images or self.save_results:
        out_dir_path = os.path.join(config['output_directory_path'],
                                    f'episode{self.episode_number}')
        # exist_ok=True already tolerates an existing directory,
        # so no separate os.path.exists() check is needed
        os.makedirs(out_dir_path, exist_ok=True)
        self.output_directory_path = out_dir_path
    # for uploading cached files
    self.upload_unlabelled = config.getboolean('upload_unlabelled')
    self.upload_labelled = config.getboolean('upload_labelled')
    self.upload_results = config.getboolean('upload_results')
    # for video processing
    self.display = config.getboolean('display')
    self.video_sample_rate = config.getint('video_sample_rate')
    self.skull_confidence_threshold = config.getfloat(
        'skull_confidence_threshold')
    self.skull_model_version = config['skull_model_version']
    try:
        self.azure_key = os.environ['IC_AZURE_KEY_SKULL']
    except KeyError:
        logger.error('Missing required environment variable '
                     'IC_AZURE_KEY_SKULL')
        raise
    # for google drive
    self.gdrive = GDrive(
        token_path=os.environ['IC_GDRIVE_AUTH_TOKEN_PATH'],
        client_secrets_path=os.environ['IC_GDRIVE_CLIENT_SECRETS_PATH'])
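For reference, a config section that supplies every key this initializer reads; the section name and all values are illustrative, not from the source.

import configparser

cp = configparser.ConfigParser()
cp.read_dict({'phase1': {
    'save_images': 'yes',
    'save_results': 'yes',
    'output_directory_path': '/data/output',
    'upload_unlabelled': 'no',
    'upload_labelled': 'yes',
    'upload_results': 'yes',
    'display': 'no',
    'video_sample_rate': '5',
    'skull_confidence_threshold': '0.8',
    'skull_model_version': 'v2',
}})
section = cp['phase1']  # a SectionProxy: getboolean/getint/getfloat take the key alone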
Example #9
    def run(self):
        try:
            logger.info('Phase 1 start')
            ep_no = self.episode_number
            # get episode from google drive
            logger.info(f'Downloading episode {ep_no} from Google Drive')
            episode_filepath = self.download_episode()
            # process episode
            logger.info(f'Finding frames with skulls in episode {ep_no}')
            extracted_frames = self.process_episode(episode_filepath)
            # update results and cache image locally on container
            logger.info(f'Caching frames with skulls in episode {ep_no}')
            self.cache_extracted_frames(extracted_frames)

            logger.info('Updating results CSV file')
            self.update_results(extracted_frames)

            self.upload_cached_files()
            self.save_cached_files()

            logger.info('Phase 1 complete')
        except Exception:
            # logger.exception records the traceback; bare raise preserves it
            logger.exception('Phase 1 failed')
            raise
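Tying Examples #8 and #9 together, a minimal driver, assuming the class is called Phase1 (an assumption) and reusing the config sketch from Example #8; the episode filename is a placeholder, and IC_AZURE_KEY_SKULL plus the IC_GDRIVE_* environment variables must be set.

phase1 = Phase1(section, 'episode12.mp4')
phase1.run()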