def test_transaction(self):
    @transaction(Core.logger, Contexts.em)
    def test_commit():
        self.em._session_.add(RepositoryORM(None, 'test', '127.0.0.1'))

    wrapper = ResultWrapper()
    wrapper2 = ResultWrapper()
    wrapper.EXPECTED_RESPONSE = 'Commit'
    wrapper2.EXPECTED_RESPONSE = 'Close session'
    when(self.em._session_).commit().thenAnswer(wrapper.exec)
    when(self.em._session_).close().thenAnswer(wrapper2.exec)

    test_commit()

    self.assertEqual(wrapper.RESPONSE, wrapper.EXPECTED_RESPONSE)
    self.assertEqual(wrapper2.RESPONSE, wrapper2.EXPECTED_RESPONSE)
    unstub(self.em._session_)

    @transaction(Core.logger, Contexts.em)
    def test_rollback():
        raise Exception('Rollback')

    wrapper = ResultWrapper()
    wrapper.EXPECTED_RESPONSE = 'Rollback'
    when(self.em._session_).rollback().thenAnswer(wrapper.exec)
    when(Core.logger()).warning(...).thenReturn(None)

    with self.assertRaises(Exception):
        test_rollback()

    self.assertEqual(wrapper.RESPONSE, wrapper.EXPECTED_RESPONSE)
    unstub(self.em._session_)
    unstub(Core.logger())
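# A minimal sketch of a @transaction decorator that would satisfy the
# behaviour exercised by test_transaction above; this is an assumption for
# illustration, not the project's actual implementation. It takes the same
# (logger provider, context provider) pair as the test, commits and closes
# the session on success, and rolls back, logs a warning and re-raises on
# failure.
from functools import wraps


def transaction(logger_provider, context_provider):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            session = context_provider()._session_
            try:
                result = func(*args, **kwargs)
                session.commit()
                return result
            except Exception as exc:
                session.rollback()
                logger_provider().warning(exc)
                raise
            finally:
                session.close()
        return wrapper
    return decorator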
def __init__(self, configfile):
    if not os.path.exists(configfile):
        raise IOError('No such configuration file: %s' % configfile)
    self.configparser.read(configfile)
    check_configuration_keys(self.expected_keys, self.configparser)

    logging_config = self.configparser.get('logging', 'configuration')
    if not os.path.exists(logging_config):
        raise IOError('No such configuration file: %s' % logging_config)
    setup_logging(logging_config)

    database_dsn = self.configparser.get('database', 'dsn')
    broker_dsn = self.configparser.get('broker', 'dsn')
    storage_uri = '%(proto)s://%(uri)s' % ({
        'proto': self.configparser.get('storage', 'proto'),
        'uri': self.configparser.get('storage', 'uri')
    })
    component_token = self.configparser.get('application', 'cpnttoken')

    data = {
        'broker': {'dsn': broker_dsn},
        'database': {'dsn': database_dsn},
        'app': {'storage_uri': storage_uri},
        'tokens': {'component': component_token}
    }
    set_configuration(data)

    celery_app = Factories.celery_factory()
    celery_app.config_from_object(celery_config())
    celery_app.conf.task_routes = ROUTES

    Core.logger.override(providers.Singleton(logging.getLogger, 'bpm'))
    Core.profiler.override(
        providers.Singleton(logging.getLogger, 'bpm_profiling'))

    self.loglevel = (logging.getLevelName(
        logging.getLogger().getEffectiveLevel()) or logging.WARN)

    profiling = (
        self.configparser.getboolean('application', 'profiling')
        if (self.configparser.has_section('application')
            and self.configparser.has_option('application', 'profiling'))
        else False)

    if profiling:
        Core.logger().info('Running with profiling active')
        setup_task_profiling()
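# For reference, a hedged example of a worker configuration file that would
# satisfy the options read above (section and option names come from the
# getters; every value below is an illustrative placeholder, not taken from
# the source):
#
#   [logging]
#   configuration = logging.ini
#
#   [database]
#   dsn = postgresql://user:password@localhost:5432/bpm
#
#   [broker]
#   dsn = amqp://guest:guest@localhost:5672//
#
#   [storage]
#   proto = http
#   uri = localhost:9080
#
#   [application]
#   cpnttoken = <component-token>
#   profiling = false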
def process_image(self, uid, metadata):
    """
    Image processing that generates KPIs, overlay & image.

    Keyword arguments:
    uid -- Image UID
    metadata -- Computation request payload sent without the pixelmap
    """
    logger = Core.logger()
    logger.info('Running image processing for image uid %(uid)s' % (
        {'uid': uid}))
    try:
        metrics, processed_image = wrapped_process_image(uid, metadata)
        if processed_image:
            req = StorageClient.post_metrics_image(
                {'Content-Type': 'application/json'},
                json.dumps({'metrics_uid': metrics.uid,
                            'image': list(processed_image)}))
            if req.status_code != 200:
                raise ValueError('Could not persist processed image')
        logger.info('Image %(uid)s has been processed' % ({'uid': uid}))
    except Exception as e:
        logger.exception(e)
        self.retry(exc=e)
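# process_image (and match_procedure_images below) calls self.retry(exc=e),
# which is only available on bound Celery tasks. A hedged sketch of how such
# a handler could be registered; the app name, task name and retry options
# are illustrative assumptions, and the project presumably wires its real app
# through Factories.celery_factory().
from celery import Celery

example_app = Celery('bpm_example')


@example_app.task(bind=True, max_retries=3, default_retry_delay=30)
def example_task(self, uid, metadata):
    try:
        ...  # the real processing work would go here
    except Exception as exc:
        # With bind=True Celery passes the task instance as `self`, so the
        # handler can re-queue itself; retry() raises a Retry exception.
        raise self.retry(exc=exc)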
def test_match_procedure_images(self):
    (issuer, repository, exam, exam_status, procedure, procedure_status,
     modality_type, sop_class, image_metadata, image, image_status,
     image_metrics,
     image_metrics_display) = self.create_processed_image_stack()
    wf = Services.workflow()
    wrapper = ResultWrapper()
    wrapper.EXPECTED_RESPONSE = 'Retry called'
    # Prevent the logger from logging the exception expected by this test
    when(Core.logger()).exception(...).thenReturn(None)
    when(Core.logger()).warning(...).thenReturn(None)
    when(wf).match_procedure(...).thenRaise(Exception())
    (when(matching.match_procedure_images).retry(...)
        .thenAnswer(wrapper.exec))

    matching.match_procedure_images(procedure.uid)

    self.assertEqual(wrapper.RESPONSE, wrapper.EXPECTED_RESPONSE)
    unstub()
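# These tests rely on a small ResultWrapper helper whose exec() is handed to
# mockito's thenAnswer. A hedged sketch of what such a helper could look like
# (an assumption; the real helper may differ): exec() records that the stubbed
# call actually happened by copying EXPECTED_RESPONSE into RESPONSE, which the
# assertions then compare.
class ResultWrapper(object):

    def __init__(self):
        self.EXPECTED_RESPONSE = None
        self.RESPONSE = None

    def exec(self, *args, **kwargs):
        # Invoked by mockito in place of the stubbed call (e.g. session.commit
        # or task.retry); mark the call as seen and echo the expected value.
        self.RESPONSE = self.EXPECTED_RESPONSE
        return self.RESPONSE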
def test_process_image(self):
    when(Core.logger()).warning(...).thenReturn(None)
    (issuer, repository, exam, exam_status, procedure, procedure_status,
     modality_type, sop_class, image_metadata, image,
     image_status) = self.create_image_stack()

    bytes_json = SimpleImageModel(bytes=[])
    bytes_ = mock({
        'status_code': 200,
        'text': json.dumps(bytes_json)
    }, spec=requests.Response)
    when(bytes_).json().thenReturn(bytes_json)

    status = StatusModel(message='success')
    metrics_resp = mock({
        'status_code': 200,
        'text': json.dumps(status)
    }, spec=requests.Response)
    when(metrics_resp).json().thenReturn(status)

    when(StorageClient).get_image(...).thenReturn(bytes_)
    when(StorageClient).post_metrics_image(...).thenReturn(metrics_resp)
    when(BPMBusiness).process_image(...).thenReturn(({}, {}, None))

    processing.process_image(image.uid, COMPUTATION_REQUEST)

    image_status = self.em.get_image_status('image_uid', image.uid)
    self.assertEqual(image_status.status, ImageStatusEnum.Processed.value)
    unstub(BPMBusiness)

    error = mock({
        'status_code': 500,
        'text': 'Failed'
    }, spec=requests.Response)
    wrapper = ResultWrapper()
    wrapper.EXPECTED_RESPONSE = {'retry': 'ok'}
    # Prevent the logger from logging the exception expected by this test
    when(Core.logger()).exception(...).thenReturn(None)
    when(error).json().thenReturn({'status': 'Failed'})
    when(StorageClient).post_metrics_image(...).thenReturn(error)
    when(processing.process_image).retry(...).thenAnswer(wrapper.exec)

    processing.process_image(image.uid, COMPUTATION_REQUEST)

    self.assertEqual(wrapper.RESPONSE, wrapper.EXPECTED_RESPONSE)
    unstub()
def identity_and_match_processed_procedures():
    broker = Factories.celery_factory()
    wf = Services.workflow()
    logger = Core.logger()
    procedures = wf.update_processed_procedures()
    for proc in procedures:
        logger.info('Processed procedure %(uid)s has been sent to matching' % (
            {'uid': proc}))
        produce_matching_task(broker, proc)
def match_procedure_images(self, procedure_uid):
    """
    Matches up to two sets of two images for a procedure.

    Keyword arguments:
    procedure_uid -- Int, Procedure UID
    """
    logger = Core.logger()
    logger.info('Running matching for procedure %(uid)s' % (
        {'uid': procedure_uid}))
    try:
        wrapped_match_procedure_images(procedure_uid)
        logger.info('Procedure %(uid)s has been matched' % (
            {'uid': procedure_uid}))
    except Exception as e:
        logger.exception(e)
        self.retry(exc=e)
def tearDown(self):
    super(TasksTestCase, self).tearDown()
    unstub(Core.logger())
def setUp(self):
    when(Core.logger()).warning(...).thenReturn(None)
    super(TasksTestCase, self).setUp()
def __init__(self, configfile):
    if not os.path.exists(configfile):
        raise IOError('No such configuration file: %s' % configfile)
    self.configparser.read(configfile)
    check_configuration_keys(self.expected_keys, self.configparser)

    logging_config = self.configparser.get('logging', 'configuration')
    if not os.path.exists(logging_config):
        raise IOError('No such configuration file: %s' % logging_config)
    setup_logging(logging_config)

    database_dsn = self.configparser.get('database', 'dsn')
    host = (self.configparser.get('application', 'host')
            if (self.configparser.has_section('application')
                and self.configparser.has_option('application', 'host'))
            else '0.0.0.0')
    port = (int(self.configparser.get('application', 'port'))
            if (self.configparser.has_section('application')
                and self.configparser.has_option('application', 'port'))
            else 8080)
    storage_uri = '%(proto)s://%(uri)s' % ({
        'proto': (self.configparser.get('storage', 'proto')
                  if (self.configparser.has_section('storage')
                      and self.configparser.has_option('storage', 'proto'))
                  else 'http'),
        'uri': (self.configparser.get('storage', 'uri')
                if (self.configparser.has_section('storage')
                    and self.configparser.has_option('storage', 'uri'))
                else 'localhost:9080')
    })
    role_token = self.configparser.get('application', 'roletoken')
    component_token = self.configparser.get('application', 'cpnttoken')

    data = {
        'database': {'dsn': database_dsn},
        'app': {'identifier': self.identifier,
                'title': self.title,
                'host': host,
                'port': port,
                'storage_uri': storage_uri},
        'tokens': {'role': role_token, 'component': component_token},
        'webdav': {'options': {}, 'client_options': {}},
        'swagger': {'security_definitions': self.security_defs},
    }
    set_configuration(data)
    self.build_webdav_opts()

    Core.logger.override(providers.Singleton(logging.getLogger, 'bpm'))
    Core.profiler.override(
        providers.Singleton(logging.getLogger, 'bpm_profiling'))

    profiling = (self.configparser.getboolean('application', 'profiling')
                 if (self.configparser.has_section('application')
                     and self.configparser.has_option(
                         'application', 'profiling'))
                 else False)
    ignite = (self.configparser.getboolean('application', 'ignite')
              if (self.configparser.has_section('application')
                  and self.configparser.has_option('application', 'ignite'))
              else False)

    if profiling:
        Core.logger().info('Running with profiling active')
        setup_resource_profiling()
    if ignite:
        Core.logger().info('Igniting application')
        metadata.create_all(Gateways.session().bind)
def setUp(self):
    when(Core.logger()).warning(...).thenReturn(None)
    super(WorkflowTestCase, self).setUp()
    self.wf = Services.workflow()
def _logger_(self):
    return Core.logger()