def __init__(self):
    """Open a connection to the event_processor database.

    On failure the error is printed, ``self.conn`` is left as ``None``,
    and the exception is re-raised for the caller to handle.
    """
    try:
        db_params = config(section='event_processor')
        self.conn = psycopg2.connect(**db_params)
    except Exception as exc:
        print(exc)
        self.conn = None
        raise
def __init__(self):
    """Open a connection to the bc_registries database.

    On failure the error is printed, ``self.conn`` is left as ``None``,
    and the exception is re-raised for the caller to handle.
    """
    try:
        db_params = config(section='bc_registries')
        self.conn = psycopg2.connect(**db_params)
    except Exception as exc:
        print(exc)
        self.conn = None
        raise
def dump_corp_credential_queue(path):
    """Dump every CREDENTIAL_LOG row to a JSON file under *path*.

    Each file is named ``<corp_num>.<record_id>.<schema_name>.<schema_version>.json``
    and contains the row's CREDENTIAL_JSON payload.  Database errors are
    printed; the cursor and connection are always closed.

    :param path: directory the credential files are written into (via dumpfile)
    """
    sql = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT, LAST_EVENT, CORP_NUM, CREDENTIAL_TYPE_CD, SCHEMA_NAME, SCHEMA_VERSION, CREDENTIAL_JSON, ENTRY_DATE FROM CREDENTIAL_LOG"""
    conn = None
    cur = None
    try:
        # read connection parameters and connect to the PostgreSQL server
        params = config(section='event_processor')
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        cur.execute(sql)
        # collect all queued credentials before writing any files
        creds = []
        for row in iter(cur.fetchone, None):
            creds.append({
                'RECORD_ID': row[0],
                'SYSTEM_TYPE_CD': row[1],
                'PREV_EVENT_ID': row[2],
                'LAST_EVENT_ID': row[3],
                'CORP_NUM': row[4],
                'CREDENTIAL_TYPE_CD': row[5],
                'SCHEMA_NAME': row[6],
                'SCHEMA_VERSION': row[7],
                'CREDENTIAL_JSON': row[8],
                'ENTRY_DATE': row[9],
            })
        cur.close()
        cur = None
        # write one file per credential
        for cred in creds:
            filename = '{}.{}.{}.{}.json'.format(
                cred['CORP_NUM'], cred['RECORD_ID'],
                cred['SCHEMA_NAME'], cred['SCHEMA_VERSION'])
            dumpfile(path, filename, cred['CREDENTIAL_JSON'])
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if cur is not None:
            cur.close()
            print('Cursor closed.')
        if conn is not None:
            conn.close()
            print('Database connection closed.')
def load_json_mappers(system_typ_cd):
    """Load the credential transform mappings for one system type.

    Reads CREDENTIAL_TRANSFORM rows matching *system_typ_cd* and returns
    them as a list of dicts (one per credential type).

    :param system_typ_cd: value matched against CREDENTIAL_TRANSFORM.SYSTEM_TYPE_CD
    :return: list of transform dicts; empty list when none match or on a
             database error (previously a DB error fell through the except
             branch and implicitly returned None, crashing callers that
             iterate the result)
    """
    sql = """SELECT RECORD_ID, SYSTEM_TYPE_CD, CREDENTIAL_TYPE_CD, MAPPING_TRANSFORM, SCHEMA_NAME, SCHEMA_VERSION FROM CREDENTIAL_TRANSFORM WHERE SYSTEM_TYPE_CD = %s"""
    conn = None
    cur = None
    transforms = []
    try:
        # read connection parameters and connect to the PostgreSQL server
        params = config(section='event_processor')
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        cur.execute(sql, (system_typ_cd, ))
        row = cur.fetchone()
        while row is not None:
            transforms.append({
                'SYSTEM_TYPE_CD': row[1],
                'CREDENTIAL_TYPE_CD': row[2],
                'MAPPING_TRANSFORM': row[3],
                'SCHEMA_NAME': row[4],
                'SCHEMA_VERSION': row[5]
            })
            row = cur.fetchone()
        cur.close()
        cur = None
    except (Exception, psycopg2.DatabaseError) as error:
        # bug fix: return whatever was collected (possibly []) instead of
        # silently returning None after logging the error
        print(error)
    finally:
        if cur is not None:
            cur.close()
            print('Cursor closed.')
        if conn is not None:
            conn.close()
            print('Database connection closed.')
    return transforms
def process_corp_history_queue(transforms):
    """Convert queued corp history rows into credential rows.

    For every unprocessed CORP_HISTORY_LOG row, apply each mapping in
    *transforms* to the stored corp JSON, insert one CREDENTIAL_LOG row per
    transform, then stamp the history row's PROCESS_DATE.  A single commit
    at the end makes the whole run one transaction; on error nothing is
    committed.

    :param transforms: list of transform dicts as returned by load_json_mappers()
    """
    # unprocessed corp history entries
    sql = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT_ID, LAST_EVENT_ID, CORP_NUM, CORP_JSON, ENTRY_DATE FROM CORP_HISTORY_LOG WHERE PROCESS_DATE is null"""
    # queue a generated credential
    sql2 = """INSERT INTO CREDENTIAL_LOG (SYSTEM_TYPE_CD, PREV_EVENT_ID, LAST_EVENT_ID, CORP_NUM, CREDENTIAL_TYPE_CD, SCHEMA_NAME, SCHEMA_VERSION, CREDENTIAL_JSON, ENTRY_DATE) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING RECORD_ID;"""
    # mark a history entry as processed
    sql3 = """UPDATE CORP_HISTORY_LOG SET PROCESS_DATE = %s WHERE RECORD_ID = %s"""
    """ Connect to the PostgreSQL database server """
    conn = None
    cur = None
    try:
        # read connection parameters
        params = config(section='event_processor')
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        # create a cursor
        cur = conn.cursor()
        cur.execute(sql)
        row = cur.fetchone()
        # materialize the full queue first so the read cursor can be closed
        # before the insert/update cursors are opened on the same connection
        corps = []
        while row is not None:
            # print(row)
            corps.append({
                'RECORD_ID': row[0],
                'SYSTEM_TYPE_CD': row[1],
                'PREV_EVENT_ID': row[2],
                'LAST_EVENT_ID': row[3],
                'CORP_NUM': row[4],
                'CORP_JSON': row[5],
                'ENTRY_DATE': row[6]
            })
            row = cur.fetchone()
        cur.close()
        cur = None
        for i, corp in enumerate(corps):
            print(corp)
            # one credential row per (corp, transform) pair
            for j, transform in enumerate(transforms):
                corp_creds = get_corp_credentials(
                    corp['CORP_NUM'], corp['CORP_JSON'],
                    transform['MAPPING_TRANSFORM'])
                print(corp_creds)
                # create row(s) for corp creds json info
                cur = conn.cursor()
                cur.execute(sql2, (
                    corp['SYSTEM_TYPE_CD'],
                    corp['PREV_EVENT_ID'],
                    corp['LAST_EVENT_ID'],
                    corp['CORP_NUM'],
                    transform['CREDENTIAL_TYPE_CD'],
                    transform['SCHEMA_NAME'],
                    transform['SCHEMA_VERSION'],
                    json.dumps(corp_creds, cls=DateTimeEncoder),
                    datetime.datetime.now(),
                ))
                cur.close()
                cur = None
            # update process date
            cur = conn.cursor()
            cur.execute(sql3, (
                datetime.datetime.now(),
                corp['RECORD_ID'],
            ))
            cur.close()
            cur = None
        # commit the changes to the database
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if cur is not None:
            cur.close()
            print('Cursor closed.')
        if conn is not None:
            conn.close()
            print('Database connection closed.')
async def process_credential_queue(self, single_thread=False):
    """Post all queued credentials from CREDENTIAL_LOG to the issuer.

    Reads unprocessed rows in batches of CREDS_BATCH_SIZE, preferring rows
    for active ('ACT') corps when any exist.  Rows for the same corp are
    grouped into one post; posts run as concurrent asyncio tasks capped at
    MAX_CREDS_REQUESTS in flight (or strictly serial when *single_thread*).
    Processing stops when the queue drains or after ~10 minutes.

    :param single_thread: when True, await each post before starting the next
    """
    sql1 = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT, LAST_EVENT, CORP_NUM, CORP_STATE, CREDENTIAL_TYPE_CD, CREDENTIAL_ID, CREDENTIAL_JSON, CREDENTIAL_REASON, SCHEMA_NAME, SCHEMA_VERSION, ENTRY_DATE FROM CREDENTIAL_LOG WHERE RECORD_ID IN ( SELECT RECORD_ID FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null ORDER BY RECORD_ID LIMIT """ + str(CREDS_BATCH_SIZE) + """ ) ORDER BY RECORD_ID;"""
    sql1a = """SELECT count(*) cnt FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null"""
    sql1_active = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT, LAST_EVENT, CORP_NUM, CORP_STATE, CREDENTIAL_TYPE_CD, CREDENTIAL_ID, CREDENTIAL_JSON, CREDENTIAL_REASON, SCHEMA_NAME, SCHEMA_VERSION, ENTRY_DATE FROM CREDENTIAL_LOG WHERE RECORD_ID IN ( SELECT RECORD_ID FROM CREDENTIAL_LOG WHERE CORP_STATE = 'ACT' and PROCESS_DATE is null ORDER BY RECORD_ID LIMIT """ + str(CREDS_BATCH_SIZE) + """ ) ORDER BY RECORD_ID;"""
    sql1a_active = """SELECT count(*) cnt FROM CREDENTIAL_LOG WHERE corp_state = 'ACT' and PROCESS_DATE is null;"""
    cur = None
    # bug fix: http_client must be bound before the try block; previously a
    # failure during setup (e.g. config()) raised NameError in the finally
    # clause, masking the original error.
    http_client = None
    # Track the current set of tasks.
    # When gathering tasks at the end we don't want to include these in the list.
    external_tasks = asyncio.Task.all_tasks()
    try:
        params = config(section='event_processor')
        # NOTE: a never-used mpool.ThreadPool(MAX_CREDS_REQUESTS) was created
        # here; it leaked worker threads and has been removed.
        loop = asyncio.get_event_loop()
        tasks = []
        http_client = aiohttp.ClientSession()
        # count the credentials waiting in the queue
        cred_count = 0
        cur = self.conn.cursor()
        cur.execute(sql1a)
        row = cur.fetchone()
        if row is not None:
            cred_count = row[0]
        cur.close()
        cur = None
        i = 0
        cred_count_remaining = cred_count
        start_time = time.perf_counter()
        processing_time = 0
        processed_count = 0
        max_processing_time = 10 * 60  # stop after ~10 minutes of work
        while 0 < cred_count_remaining and processing_time < max_processing_time:
            # prefer credentials for active corps when any are queued
            active_cred_count = 0
            cur = self.conn.cursor()
            cur.execute(sql1a_active)
            row = cur.fetchone()
            if row is not None:
                active_cred_count = row[0]
            cur.close()
            cur = None
            # create a cursor for the next batch
            cur = self.conn.cursor()
            if 0 < active_cred_count:
                cur.execute(sql1_active)
            else:
                cur.execute(sql1)
            row = cur.fetchone()
            credentials = []
            cred_owner_id = ''
            while row is not None:
                i = i + 1
                processed_count = processed_count + 1
                if processed_count >= 100:
                    # periodic progress report
                    print('>>> Processing {} of {} credentials.'.format(i, cred_count))
                    processing_time = time.perf_counter() - start_time
                    print('Processing: ' + str(processing_time))
                    processed_count = 0
                credential = {'RECORD_ID': row[0], 'SYSTEM_TYP_CD': row[1],
                              'PREV_EVENT': row[2], 'LAST_EVENT': row[3],
                              'CORP_NUM': row[4], 'CORP_STATE': row[5],
                              'CREDENTIAL_TYPE_CD': row[6], 'CREDENTIAL_ID': row[7],
                              'CREDENTIAL_JSON': row[8], 'CREDENTIAL_REASON': row[9],
                              'SCHEMA_NAME': row[10], 'SCHEMA_VERSION': row[11],
                              'ENTRY_DATE': row[12]}
                # make sure to include all credentials for the same client id
                # within the same batch (flush when the batch is full AND the
                # corp changes, or unconditionally at double the batch size)
                if (CREDS_REQUEST_SIZE <= len(credentials) and credential['CORP_NUM'] != cred_owner_id) or (len(credentials) >= 2 * CREDS_REQUEST_SIZE):
                    post_creds = credentials.copy()
                    creds_task = loop.create_task(post_credentials(http_client, self.conn, post_creds))
                    tasks.append(creds_task)
                    if single_thread:
                        # running single threaded - wait for each task to complete
                        await creds_task
                    else:
                        # multi-threaded, check if we are within MAX_CREDS_REQUESTS active requests
                        active_tasks = len([task for task in tasks if not task.done()])
                        while active_tasks >= MAX_CREDS_REQUESTS:
                            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
                            active_tasks = len(pending)
                    credentials = []
                    cred_owner_id = ''
                credentials.append(credential)
                cred_owner_id = credential['CORP_NUM']
                row = cur.fetchone()
            cur.close()
            cur = None
            # flush any partial batch left over
            if 0 < len(credentials):
                post_creds = credentials.copy()
                tasks.append(loop.create_task(post_credentials(http_client, self.conn, post_creds)))
                credentials = []
                cred_owner_id = ''
            # wait for the current batch of credential posts to complete
            for response in await asyncio.gather(*tasks):
                pass
            tasks = []
            print('>>> Processing {} of {} credentials.'.format(i, cred_count))
            processing_time = time.perf_counter() - start_time
            print('Processing: ' + str(processing_time))
            # refresh the outstanding-credential count for the loop condition
            cur = self.conn.cursor()
            cur.execute(sql1a)
            row = cur.fetchone()
            if row is not None:
                cred_count_remaining = row[0]
            cur.close()
            cur = None
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        log_error('An exception was encountered while processing the credential queue:\n{}'.format(str(error)))
        print(traceback.print_exc())
    finally:
        if http_client is not None:
            await http_client.close()
        # Gather all remaining tasks that were spawned during processing ...
        remaining_tasks = asyncio.Task.all_tasks()
        for task in external_tasks:
            # Remove any that were not created during processing ...
            remaining_tasks.discard(task)
        if len(remaining_tasks) > 0:
            await asyncio.gather(*remaining_tasks)
        if cur is not None:
            cur.close()
async def process_credential_queue(self, single_thread=False):
    """Post queued, non-expired credentials with adaptive concurrency.

    Differences from the basic queue processor:

    * skips rows whose CREDENTIAL_JSON 'expiry_date' is after the UTC cutoff,
    * pages through the queue by RECORD_ID (max_rec_id) instead of re-reading,
    * throttles the in-flight task cap down when errors accumulate
      (progressive_error_factor) and ramps it back up after sustained success,
    * aborts after MAX_PROCESSING_MINS minutes or CONTROLLER_MAX_ERRORS
      failed posts, and verifies issuer-controller health up front.

    :param single_thread: when True, await each post before starting the next
    """
    sql1 = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT, LAST_EVENT, CORP_NUM, CORP_STATE, CREDENTIAL_TYPE_CD, CREDENTIAL_ID, CREDENTIAL_JSON, CREDENTIAL_REASON, SCHEMA_NAME, SCHEMA_VERSION, ENTRY_DATE FROM CREDENTIAL_LOG WHERE RECORD_ID IN ( SELECT RECORD_ID FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null AND RECORD_ID > %s AND (CREDENTIAL_JSON->>'expiry_date' = '' or CREDENTIAL_JSON->>'expiry_date' is null or CREDENTIAL_JSON->>'expiry_date' <= %s) ORDER BY RECORD_ID LIMIT """ + str(CREDS_BATCH_SIZE) + """ ) ORDER BY RECORD_ID;"""
    sql1a = """SELECT count(*) cnt FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null AND RECORD_ID > %s AND (CREDENTIAL_JSON->>'expiry_date' = '' or CREDENTIAL_JSON->>'expiry_date' is null or CREDENTIAL_JSON->>'expiry_date' <= %s) """
    cur = None
    # Track the current set of tasks.
    # When gathering tasks at the end we don't want to include these in the list.
    external_tasks = asyncio.Task.all_tasks()
    # only issue credentials already effective as of "now" (UTC)
    cutoff_time = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    cutoff_time_str = cutoff_time.strftime("%Y-%m-%dT%H:%M:%S")
    try:
        params = config(section='event_processor')
        # NOTE: a never-used mpool.ThreadPool(MAX_CREDS_REQUESTS) was created
        # here; it leaked worker threads and has been removed.
        loop = asyncio.get_event_loop()
        tasks = []
        max_rec_id = 0
        # ensure controller is available
        if not await check_controller_health(wait=True):
            # bug fix: was "raise Excecption(...)" which raised NameError
            # instead of the intended Exception
            raise Exception("Error Issuer Controller is not available")
        # count the credentials waiting in the queue
        cred_count = 0
        cur = self.conn.cursor()
        cur.execute(sql1a, (max_rec_id, cutoff_time_str, ))
        row = cur.fetchone()
        if row is not None:
            cred_count = row[0]
        cur.close()
        cur = None
        i = 0
        cred_count_remaining = cred_count
        start_time = time.perf_counter()
        processing_time = 0
        processed_count = 0
        perf_proc_count = 0
        max_processing_time = 60 * MAX_PROCESSING_MINS
        success_count = 0
        failed_count = 0
        progressive_error_count = 0
        progressive_success_count = 0
        progressive_error_factor = 0
        success_seq = 0
        current_max_creds_requests = MAX_CREDS_REQUESTS
        while 0 < cred_count_remaining and processing_time < max_processing_time and failed_count <= CONTROLLER_MAX_ERRORS:
            # create a cursor for the next page of credentials
            cur = self.conn.cursor()
            cur.execute(sql1, (max_rec_id, cutoff_time_str, ))
            row = cur.fetchone()
            credentials = []
            cred_owner_id = ''
            while row is not None:
                i = i + 1
                processed_count = processed_count + 1
                perf_proc_count = perf_proc_count + 1
                if processed_count >= PROCESS_LOOP_REPORT_CT:
                    # periodic progress report
                    print('>>> Processing {} of {} credentials.'.format(i, cred_count))
                    processing_time = time.perf_counter() - start_time
                    print('Processing: ' + str(processing_time))
                    processed_count = 0
                credential = {
                    'RECORD_ID': row[0],
                    'SYSTEM_TYP_CD': row[1],
                    'PREV_EVENT': row[2],
                    'LAST_EVENT': row[3],
                    'CORP_NUM': row[4],
                    'CORP_STATE': row[5],
                    'CREDENTIAL_TYPE_CD': row[6],
                    'CREDENTIAL_ID': row[7],
                    'CREDENTIAL_JSON': row[8],
                    'CREDENTIAL_REASON': row[9],
                    'SCHEMA_NAME': row[10],
                    'SCHEMA_VERSION': row[11],
                    'ENTRY_DATE': row[12]
                }
                # remember the high-water RECORD_ID for the next page
                if max_rec_id < row[0]:
                    max_rec_id = row[0]
                # make sure to include all credentials for the same client id within the same batch
                # but also - limit batch size to avoid timeouts
                if (CREDS_REQUEST_SIZE <= len(credentials) and credential['CORP_NUM'] != cred_owner_id) or (len(credentials) >= 2 * CREDS_REQUEST_SIZE):
                    post_creds = credentials.copy()
                    creds_task = loop.create_task(post_credentials(self.conn, post_creds))
                    tasks.append(creds_task)
                    if single_thread:
                        # running single threaded - wait for each task to complete
                        await creds_task
                    else:
                        # If we start getting too many errors, drop down our concurrent request count.
                        # This will introduce a delay in posting any new credentials, because we need
                        # to wait for many of the active threads to complete.
                        if current_max_creds_requests > 1 and progressive_error_factor > PROGRESSIVE_FAIL_THRESHOLD:
                            current_max_creds_requests = int(0.5 + current_max_creds_requests * FAILURE_REDUCTION_FACTOR / 100)
                            progressive_error_count = 0
                            progressive_success_count = 0
                            progressive_error_factor = 0
                            success_seq = 0
                            print(">>> Too many errors, reducing concurrent threads to:", current_max_creds_requests)
                        elif progressive_error_factor == 0:
                            # if we have a long run of successful posts then bump our throughput back up
                            success_seq = success_seq + 1
                            if success_seq > SUCCESS_INCREASE_FACTOR and current_max_creds_requests < MAX_CREDS_REQUESTS:
                                current_max_creds_requests = min(MAX_CREDS_REQUESTS, int(0.5 + 1.6 * current_max_creds_requests))
                                success_seq = 0
                                print(">>> Stable success, increasing concurrent threads to:", current_max_creds_requests)
                        else:
                            success_seq = 0
                        # multi-threaded, check if we are within MAX_CREDS_REQUESTS active requests
                        active_tasks = len([task for task in tasks if not task.done()])
                        while active_tasks >= current_max_creds_requests:
                            # done is cumulative, includes the full set of "done" tasks
                            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
                            active_tasks = len(pending)
                            # reset counters since we are counting *all* done tasks
                            prev_failed_count = failed_count
                            failed_count = 0
                            prev_success_count = success_count
                            success_count = 0
                            prev_progressive_error_factor = progressive_error_factor
                            for finished in done:
                                done_result = finished.result()
                                failed_count = failed_count + done_result['failed']
                                success_count = success_count + done_result['success']
                            progressive_error_count = failed_count - prev_failed_count
                            progressive_success_count = success_count - prev_success_count
                            # errors weigh PROGRESSIVE_FAIL_FACTOR each; successes pay the factor down
                            progressive_error_factor = max(0, prev_progressive_error_factor + (progressive_error_count * PROGRESSIVE_FAIL_FACTOR) - progressive_success_count)
                    credentials = []
                    cred_owner_id = ''
                credentials.append(credential)
                cred_owner_id = credential['CORP_NUM']
                row = cur.fetchone()
            cur.close()
            cur = None
            # flush any partial batch left over
            if 0 < len(credentials):
                post_creds = credentials.copy()
                tasks.append(loop.create_task(post_credentials(self.conn, post_creds)))
                credentials = []
                cred_owner_id = ''
            print('>>> Processing {} of {} credentials.'.format(i, cred_count))
            processing_time = time.perf_counter() - start_time
            print('*** Processing: ' + str(processing_time))
            if perf_proc_count > 2 * (CREDS_REQUEST_SIZE * MAX_CREDS_REQUESTS):
                cpm = 60 * (perf_proc_count - (0.5 * CREDS_REQUEST_SIZE * MAX_CREDS_REQUESTS)) / processing_time
                print(cpm, "credentials per minute")
            # ensure controller is (still) available
            if 0 < failed_count and not await check_controller_health(wait=True):
                # bug fix: was "raise Excecption(...)" (NameError)
                raise Exception("Error Issuer Controller is not available")
            # refresh the outstanding-credential count for the loop condition
            cur = self.conn.cursor()
            cur.execute(sql1a, (max_rec_id, cutoff_time_str, ))
            row = cur.fetchone()
            if row is not None:
                cred_count_remaining = row[0]
            cur.close()
            cur = None
        # wait for the current batch of credential posts to complete
        print(">>> Waiting for all outstanding tasks to complete ...")
        for response in await asyncio.gather(*tasks):
            pass
        tasks = []
        print('>>> Completed.')
        processing_time = time.perf_counter() - start_time
        print('Processing: ' + str(processing_time))
        print(60 * perf_proc_count / processing_time, "credentials per minute")
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        print(traceback.format_exc())
        log_error('An exception was encountered while processing the credential queue:\n{}'.format(str(error)))
    finally:
        # Gather all remaining tasks that were spawned during processing ...
        remaining_tasks = asyncio.Task.all_tasks()
        for task in external_tasks:
            # Remove any that were not created during processing ...
            remaining_tasks.discard(task)
        if len(remaining_tasks) > 0:
            await asyncio.gather(*remaining_tasks)
        if cur is not None:
            cur.close()
async def process_credential_queue(http_client):
    """Post queued credentials one at a time and record each outcome.

    Processes CREDENTIAL_LOG rows (active 'ACT' corps first) by submitting
    each credential via submit_cred(), then writing PROCESS_SUCCESS = 'Y'
    or 'N' (with a truncated PROCESS_MSG) back to the row.  Repeats until
    the queue is empty.

    :param http_client: HTTP client session passed through to submit_cred()
    """
    sql1 = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT_ID, LAST_EVENT_ID, CORP_NUM, CORP_STATE, CREDENTIAL_TYPE_CD, CREDENTIAL_ID, CREDENTIAL_JSON, SCHEMA_NAME, SCHEMA_VERSION, ENTRY_DATE FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null ORDER BY RECORD_ID LIMIT """ + str(CREDS_BATCH_SIZE)
    sql1a = """SELECT count(*) cnt FROM CREDENTIAL_LOG WHERE PROCESS_DATE is null"""
    sql1_active = """SELECT RECORD_ID, SYSTEM_TYPE_CD, PREV_EVENT_ID, LAST_EVENT_ID, CORP_NUM, CORP_STATE, CREDENTIAL_TYPE_CD, CREDENTIAL_ID, CREDENTIAL_JSON, SCHEMA_NAME, SCHEMA_VERSION, ENTRY_DATE FROM CREDENTIAL_LOG WHERE corp_state = 'ACT' and PROCESS_DATE is null ORDER BY RECORD_ID LIMIT """ + str(CREDS_BATCH_SIZE)
    sql1a_active = """SELECT count(*) cnt FROM CREDENTIAL_LOG WHERE corp_state = 'ACT' and PROCESS_DATE is null"""
    sql2 = """UPDATE CREDENTIAL_LOG SET PROCESS_DATE = %s, PROCESS_SUCCESS = 'Y' WHERE RECORD_ID = %s"""
    sql3 = """UPDATE CREDENTIAL_LOG SET PROCESS_DATE = %s, PROCESS_SUCCESS = 'N', PROCESS_MSG = %s WHERE RECORD_ID = %s"""

    def _clip(msg):
        # PROCESS_MSG is limited (the failure path below truncated results
        # over 255 chars); apply the same limit everywhere
        return msg[:250] + '...' if 255 < len(msg) else msg

    conn = None
    cur = None
    cur2 = None
    try:
        # connect to the PostgreSQL database server
        params = config(section='event_processor')
        conn = psycopg2.connect(**params)
        # count the credentials waiting in the queue
        cred_count = 0
        cur = conn.cursor()
        cur.execute(sql1a)
        row = cur.fetchone()
        if row is not None:
            cred_count = row[0]
        cur.close()
        cur = None
        i = 0
        cred_count_remaining = cred_count
        while 0 < cred_count_remaining:
            # prefer credentials for active corps when any are queued
            active_cred_count = 0
            cur = conn.cursor()
            cur.execute(sql1a_active)
            row = cur.fetchone()
            if row is not None:
                active_cred_count = row[0]
            cur.close()
            cur = None
            # create a cursor for the next batch
            cur = conn.cursor()
            if 0 < active_cred_count:
                cur.execute(sql1_active)
            else:
                cur.execute(sql1)
            row = cur.fetchone()
            while row is not None:
                i = i + 1
                print('>>> Processing {} of {} credentials.'.format(i, cred_count))
                credential = {'RECORD_ID': row[0], 'SYSTEM_TYP_CD': row[1],
                              'PREV_EVENT_ID': row[2], 'LAST_EVENT_ID': row[3],
                              'CORP_NUM': row[4], 'CORP_STATE': row[5],
                              'CREDENTIAL_TYPE_CD': row[6], 'CREDENTIAL_ID': row[7],
                              'CREDENTIAL_JSON': row[8], 'SCHEMA_NAME': row[9],
                              'SCHEMA_VERSION': row[10], 'ENTRY_DATE': row[11]}
                # post credential
                try:
                    result_json = await submit_cred(http_client, credential['CREDENTIAL_JSON'], credential['SCHEMA_NAME'], credential['SCHEMA_VERSION'])
                    result = result_json  # json.loads(result_json)[0]
                    if result['success']:
                        print("log success to database")
                        cur2 = conn.cursor()
                        cur2.execute(sql2, (datetime.datetime.now(), credential['RECORD_ID'],))
                        conn.commit()
                        cur2.close()
                        cur2 = None
                    else:
                        print("log error to database")
                        cur2 = conn.cursor()
                        res = _clip(result['result'])
                        cur2.execute(sql3, (datetime.datetime.now(), res, credential['RECORD_ID'],))
                        conn.commit()
                        cur2.close()
                        cur2 = None
                except (Exception) as error:
                    print("log exception to database")
                    cur2 = conn.cursor()
                    # bug fix: exception text was written unbounded and could
                    # overflow the PROCESS_MSG column - truncate like the
                    # failure path does
                    cur2.execute(sql3, (datetime.datetime.now(), _clip(str(error)), credential['RECORD_ID'],))
                    conn.commit()
                    cur2.close()
                    cur2 = None
                row = cur.fetchone()
            cur.close()
            cur = None
            # refresh the outstanding-credential count for the loop condition
            cur = conn.cursor()
            cur.execute(sql1a)
            row = cur.fetchone()
            if row is not None:
                cred_count_remaining = row[0]
            cur.close()
            cur = None
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if cur is not None:
            cur.close()
        if cur2 is not None:
            cur2.close()
        if conn is not None:
            conn.commit()
            conn.close()
p_corp_num = 'BC' + corp_num elif corp_typ_cd == 'BEN': p_corp_num = 'BC' + corp_num return p_corp_num def bare_corp_num(corp_num): if corp_num.startswith("BC"): return corp_num[2:] else: return corp_num conn = None try: params = config(section='org_book') conn = psycopg2.connect(**params) except (Exception) as error: print(error) raise # get all the corps from orgbook print("Get corp stats from OrgBook DB", datetime.datetime.now()) sql4 = """select topic.source_id, attribute.value from topic left join credential on credential.topic_id = topic.id and credential.latest = true and credential_type_id = 1 left join attribute on attribute.credential_id = credential.id and attribute.type = 'entity_type'""" corp_types = {} try: cur = conn.cursor() cur.execute(sql4) for row in cur: