コード例 #1
0
def main():
    """Entry point: set up logging, read CLI arguments, and evaluate a video."""
    # Working folder comes from the WORK_DIR env var, defaulting to the CWD.
    working_folder = os.getenv('WORK_DIR', './')

    # Configure logging from the app's YAML config before anything else runs.
    setup_logging(working_folder, 'app-logging.yaml')
    logger = logging.getLogger(__name__)

    # Parse the command line; the only option consumed here is the video path.
    cli_args = get_cmd_args()

    logger.debug('the app started with the following parameters: %s', cli_args)

    # Run the evaluation against the requested video file.
    VideoEvaluator().evaluate(cli_args.video_path)

    logger.debug('the app operation is completed')
コード例 #2
0
def main():
    """Entry point: set up logging, read CLI options, and run model training."""
    # Working folder comes from the WORK_DIR env var, defaulting to the CWD.
    working_folder = os.getenv('WORK_DIR', './')

    # Configure logging from the training YAML config before anything else runs.
    setup_logging(working_folder, 'learning-logging.yaml')
    logger = logging.getLogger(__name__)

    cli_args = get_cmd_args()
    epochs = int(cli_args.no_epochs)
    # NOTE(review): unlike no_epochs, no_steps is not cast to int here —
    # presumably argparse already delivers the right type; confirm.
    steps_per_epoch = cli_args.no_steps
    model_name = cli_args.model
    # The executor is resolved under the same key as the model.
    executor_name = model_name

    logger.debug('learning operation started with the following parameters: %s', cli_args)

    # Build the model, wrap it in its executor, and run the training loop.
    executor = get_executor(executor_name, get_model(model_name))
    executor.train_model(epochs, steps_per_epoch)

    logger.debug('learning operation is completed')
コード例 #3
0
def main():
    """Entry point: set up logging, read CLI arguments, and run the scraper."""
    # Working folder comes from the WORK_DIR env var, defaulting to the CWD.
    working_folder = os.getenv('WORK_DIR', './')

    # Configure logging from the scraper YAML config before anything else runs.
    setup_logging(working_folder, 'scraping-logging.yaml')
    logger = logging.getLogger(__name__)

    cli_args = get_cmd_args()
    base_url = cli_args.base_url
    start_url = cli_args.url
    storage_location = cli_args.storage_location

    # Both options are optional on the command line; log only what was given.
    if base_url:
        logger.debug('Files will be scraped from: %s', base_url)
    if storage_location:
        logger.debug('Scraped files to be stored at: %s', storage_location)

    # Kick off the scrape starting at the requested URL.
    SignsLanguageScraper(base_url, storage_location).scrap(start_url)
コード例 #4
0
# Module bootstrap: apply the project's logging configuration at import time.
import logger_config

# Side effect on start-up — all subsequent getLogger() calls pick this up.
logger_config.setup_logging()
コード例 #5
0
    # NOTE(review): this is the tail of a function whose signature (and the
    # origin of `args`) is outside this view — presumably parsed CLI options.
    dataset_type = args.dataset_type
    # A COMBINED dataset is supplied as a comma-separated list of paths;
    # otherwise a single path string is used as-is.
    dataset_path = args.dataset_path.split(',') if dataset_type == COMBINED else args.dataset_path
    output_dir_path = args.output_dir_path
    output_prefix = args.output_prefix
    # Per-file size cap for the generated output, coerced to float.
    output_max_size: float = float(args.output_max_size)
    shuffle_buffer_size = args.shuffle_buffer_size

    logger.debug('Source dataset path: %s', dataset_path)
    logger.debug('Source dataset type: %s', dataset_type)
    logger.debug('Output dir path: %s', output_dir_path)
    logger.debug('Output prefix: %s', output_prefix)
    logger.debug('Max size per output file: %s', output_max_size)
    logger.debug('Shuffle buffer size: %s', shuffle_buffer_size)

    # obtain an instance of a dataset creator
    dataset_creator = TFRecordDatasetCreator(dataset_type, dataset_path, shuffle_buffer_size)
    # serialize samples into the TFRecord format for better I/O
    dataset_creator.create(output_dir_path, output_prefix, output_max_size)

    logger.info('Dataset generation process completed')


if __name__ == '__main__':
    # Resolve the working folder from the WORK_DIR env var (defaults to CWD)
    # and configure logging before main() runs.
    working_folder = os.getenv('WORK_DIR', './')
    setup_logging(working_folder, 'dataset-logging.yaml')
    # Module-level logger — also referenced from inside main() above.
    logger = logging.getLogger(__name__)

    logger.info('Dataset generation process started')

    main()
コード例 #6
0
    # NOTE(review): interior of a function whose signature (and the origin of
    # `incoming_queue`) is outside this view.
    # Drain files already waiting in the incoming queue before watching it.
    existing_files = [f for f in os.listdir(incoming_queue) if os.path.isfile(os.path.join(incoming_queue, f))]
    for file_name in existing_files:
        # NOTE(review): Python concatenates adjacent string literals, so this
        # format string collapses to 'processing %s' — the doubled quotes do
        # NOT escape a quote character; use "processing '%s'" if quoted output
        # was intended.
        logger.debug('processing ''%s''', file_name)
        with open(os.path.join(incoming_queue, file_name)) as f:
            data = json.load(f)
            FileProcessingHandler().handle(data)

    # Watch the queue directory and dispatch new files as they arrive.
    observer = Observer()
    observer.schedule(IncomingQueueWatcher(), path=incoming_queue)
    observer.start()

    try:
        # Keep the main thread alive while the observer thread does the work;
        # Ctrl-C stops the observer cleanly.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    # Wait for the observer thread to finish shutting down.
    observer.join()


if __name__ == '__main__':
    # obtain working folder from WORK_DIR environment variable
    working_folder = os.getenv('WORK_DIR', './')

    # configure logging
    setup_logging(working_folder, 'pre-processing-logging.yaml')
    # Module-level logger — also referenced from inside main() above.
    logger = logging.getLogger(__name__)

    # execute video pre_processing logic
    main()