def test(self):
    session = boto3.Session(profile_name='dev')
    sns = session.client('sns')
    listener_arn = 'arn:aws:sns:us-east-1:726075243133:leech-dev-Listener-1EV4D8VOW7L37'
    bull_horn = Bullhorn(sns)
    schema = Schema.retrieve()
    schema_entry = schema['ExternalId']
    message = ajson.dumps({
        'task_name': 'generate_source_vertex',
        'task_args': None,
        'task_kwargs': {
            'schema': schema,
            'schema_entry': schema_entry,
            'extracted_data': {
                'source': {
                    'id_source': 'Algernon',
                    'id_type': 'Employees',
                    'id_name': 'emp_id',
                    'id_value': 1001
                }
            }
        }
    })
    message_id = bull_horn.publish('new_event', listener_arn, message)
    assert message_id
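# Not part of the source: a minimal sketch of the Lambda handler that could sit
# behind listener_arn and unpack the message published above. The handler name,
# and the use of ajson.loads to reverse ajson.dumps, are assumptions.
from algernon import ajson

def listener_handler(event, context):
    for record in event['Records']:
        # standard SNS -> Lambda envelope: the payload sits under Sns.Message
        payload = ajson.loads(record['Sns']['Message'])
        task_name = payload['task_name']
        task_kwargs = payload['task_kwargs']
        # dispatch here, e.g. to generate_source_vertex(**task_kwargs)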
def test_bullhorn_batch_send(self):
    with patch('botocore.client.BaseClient._make_api_call') as mock:
        bullhorn = Bullhorn({'test': 'test_arn'})
        with bullhorn as batch:
            for _ in range(100):
                batch.publish('test', 'some_arn', f'hello world, {_}')
        assert mock.called
        assert mock.call_count == 100
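# A companion sketch (not from the source): the patch target above intercepts
# every boto3 call at the botocore layer, so the mock also records the
# operation name and parameters of each SNS call. Whether Bullhorn issues
# 'Publish' or 'PublishBatch' internally is an assumption to verify against
# its source.
def test_bullhorn_batch_send_operation(self):
    with patch('botocore.client.BaseClient._make_api_call') as mock:
        bullhorn = Bullhorn({'test': 'test_arn'})
        with bullhorn as batch:
            batch.publish('test', 'some_arn', 'hello world')
        operation_name, api_params = mock.call_args[0]
        assert operation_name in ('Publish', 'PublishBatch')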
import rapidjson
from multiprocessing.pool import ThreadPool

from algernon.aws import Bullhorn


def _process_queued_parse_task(event, context):
    bullhorn = Bullhorn.retrieve()
    topic_arn = bullhorn.find_task_arn('aio_leech')
    batch = []
    for entry in event['Records']:
        # each SQS record wraps an SNS envelope; unwrap both layers
        entry_body = rapidjson.loads(entry['body'])
        original_payload = rapidjson.loads(entry_body['Message'])
        original_payload['topic_arn'] = topic_arn
        batch.append((original_payload, context))
    pool = ThreadPool(len(batch))
    pool.starmap(parse_batch_encounters, batch)
    pool.close()
    pool.join()
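# A defensive variant (a sketch, not from the source): ThreadPool raises
# ValueError when given 0 workers, so an empty Records list would crash the
# handler above. Capping the pool size also bounds concurrency for large
# deliveries; the MAX_WORKERS value is an assumption.
from multiprocessing.pool import ThreadPool

MAX_WORKERS = 10  # assumed limit, not taken from the source

def _fan_out(task, batch):
    if not batch:
        return
    pool = ThreadPool(min(len(batch), MAX_WORKERS))
    try:
        pool.starmap(task, batch)
    finally:
        pool.close()
        pool.join()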
import os

from algernon.aws import Bullhorn


def _publish_results(encounter_id, parser_id, id_source, parser_results):
    bullhorn = Bullhorn.retrieve(profile=os.getenv('AWS_PROFILE'))
    publish_kwargs = {
        'encounter_id': encounter_id,
        'id_source': id_source,
        'parser_id': parser_id,
        'bullhorn': bullhorn
    }
    _publish_documentation_node(**publish_kwargs)
    for field_name, field_documentation in parser_results.items():
        publish_kwargs.update({
            'field_name': field_name,
            'field_documentation': field_documentation
        })
        _publish_documentation_field_node(**publish_kwargs)
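# A hypothetical invocation of _publish_results (the argument values and the
# shape of each field's documentation are illustrative, not from the source):
# one documentation node is published for the encounter, then one field node
# per entry in parser_results.
_publish_results(
    encounter_id='1001',
    parser_id='keyword_parser',  # assumed parser name
    id_source='Algernon',
    parser_results={
        'chief_complaint': {'hits': 3},  # assumed documentation shape
        'assessment': {'hits': 1},
    },
)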
def audit_surgeon_graph(id_source, client_id, encounter_id, encounter_data, **kwargs):
    legacy_flow_ids = [
        f"leech-psi-201905291215#get_client_encounter_ids-{client_id}#get_encounter-{encounter_id}"
    ]
    bullhorn = Bullhorn.retrieve()
    gql_endpoint = _retrieve_soft_variable('graph_gql_endpoint', kwargs)
    state_gql_endpoint = _retrieve_soft_variable('state_gql_endpoint', kwargs)
    bucket_name = _retrieve_soft_variable('encounter_bucket', kwargs)
    encounter = gql_tasks.check_encounter_id(id_source, encounter_id, gql_endpoint)
    publish_kwargs = {
        'bullhorn': bullhorn,
        'flow_id': f'review_surgeon#{client_id}#{encounter_id}',
        'id_source': id_source,
        'patient_id': client_id,
        'provider_id': encounter_data['Staff ID'],
        'encounter_id': encounter_id,
        'encounter_datetime_in': encounter_data['Time In'],
        'encounter_datetime_out': encounter_data['Time Out'],
        'encounter_type': encounter_data['Visit Type'],
        'patient_last_name': encounter_data['Last Name'],
        'patient_first_name': encounter_data['First Name'],
        'patient_dob': encounter_data['DOB']
    }
    results = {'client_id': client_id, 'encounter_id': encounter_id}
    if not encounter:
        encounter = aws_tasks.check_for_archived_encounter(
            bucket_name, id_source, client_id, encounter_id)
    if not encounter:
        encounter = _check_legacy_flows(legacy_flow_ids, state_gql_endpoint)
    if not encounter:
        publish_results = aws_tasks.publish_to_incredible(**publish_kwargs)
        results.update({
            'publish_results': publish_results,
            'destination': 'get_encounter'
        })
        return results
    publish_kwargs.update({'encounter_text': encounter})
    publish_results = aws_tasks.publish_to_leech(**publish_kwargs)
    results.update({
        'publish_results': publish_results,
        'destination': 'leech'
    })
    return results
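# _retrieve_soft_variable is not shown in the source; a minimal sketch of the
# behavior it appears to provide (an explicit kwarg wins, otherwise fall back
# to a variable of the same name). The env-var fallback is an assumption, not
# a confirmed implementation.
import os

def _retrieve_soft_variable(variable_name, kwargs):
    if variable_name in kwargs:
        return kwargs[variable_name]
    return os.environ[variable_name]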
def test_deployment(self):
    bullhorn = Bullhorn.retrieve(profile='dev')
    msg = {'task_name': 'review', 'task_kwargs': {}, 'flow_id': ''}
    # assumed completion (the original snippet stops after building msg),
    # mirroring the publish pattern used in the other tests
    message_id = bullhorn.publish('test_event', bullhorn.find_task_arn('review'), ajson.dumps(msg))
    assert message_id
import rapidjson

from algernon.aws import Bullhorn
from algernon import ajson


file_name = '413dadbf-d939-42da-9e3d-3634cee1388d!1561142042.186607.json'
bullhorn = Bullhorn.retrieve()
with open(file_name) as file:
    json_file = rapidjson.load(file)
encounters = json_file['data_string']['encounters']
id_source = json_file['data_string']['id_source']
task_name = 'check_credible_object'
topic_arn = bullhorn.find_task_arn(task_name)
progress = 122635
total = len(encounters)
with bullhorn as batch:
    for pointer, encounter in enumerate(encounters):
        # skip everything already published on a previous run; the offset
        # matches the hard-coded starting value of progress above
        if pointer <= 122635:
            continue
        encounter['id_source'] = id_source
        msg = {
            'task_name': task_name,
            'task_kwargs': {
                'obj': encounter,
                'object_type': 'Encounter'
            }
        }
        # publish through the batch context, matching the other snippets; the
        # original called bullhorn.publish here and recomputed topic_arn on
        # every iteration
        results = batch.publish('test_event', topic_arn, ajson.dumps(msg))
        progress += 1
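# The resume offset above is hard-coded; a small sketch (the checkpoint file
# name and helpers are assumptions, not from the source) that persists progress
# so the script can be re-run after a crash without editing the constant:
import os

CHECKPOINT = 'publish_progress.txt'  # hypothetical checkpoint file

def load_progress(default=0):
    if os.path.exists(CHECKPOINT):
        with open(CHECKPOINT) as fh:
            return int(fh.read().strip())
    return default

def save_progress(pointer):
    with open(CHECKPOINT, 'w') as fh:
        fh.write(str(pointer))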
def test_bullhorn_retrieve(self):
    bullhorn = Bullhorn.retrieve(profile='dev')
    assert bullhorn
def test_bullhorn_batch_live_send(self):
    bullhorn = Bullhorn.retrieve(profile='dev')
    arn = 'arn:aws:sns:us-east-1:726075243133:test'
    with bullhorn as batch:
        for _ in range(100):
            batch.publish('test', arn, f'hello world, {_}')
def test_bullhorn_live_send(self):
    bullhorn = Bullhorn.retrieve(profile='dev')
    arn = 'arn:aws:sns:us-east-1:726075243133:test'
    bullhorn.publish('test', arn, 'hello world, 1')