def cycle_through_syncs():
    """Fetch LimeSurvey tokens page by page and print the token mapping.

    Side effects: appends progress to the report file and mails it when done.
    """
    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('INFO: cycle started at ' + str(start_time))
    # read configuration file for usernames and passwords and other parameters
    config = readDictFile('oli.config')
    # set from this config the survey id, sid, because it is used everywhere
    sid = int(config['sid'])
    # create a connection to the postgresql database
    conn = ConnToOliDB()
    my_report.append_to_report(conn.init_result)
    # initialize the oc-webservice
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # read the tokens in pages of 10; the original had the same fetch-and-store
    # loop copy-pasted twice (offsets 0 and 10) — folded into one loop here
    tokens = {}
    for offset in (0, 10):
        tokens_list = read_ls_tokens(config, offset, 10)
        for token in tokens_list:
            # map token -> firstname (presumably the study subject id, as in
            # the sibling variants of this function — TODO confirm)
            tokens[token['token']] = token['participant_info']['firstname']
    print(tokens)
    # close the file so we can send it
    my_report.close_file()
    MailThisLogFile('logs/report.txt')
def cycle_through_syncs():
    """Continuously pull LimeSurvey responses and import them into OpenClinica.

    Builds the study subject id from the ChildCode answer, resolves/stores the
    study subject OID, composes an ODM request per response and imports it via
    the data webservice. Loops until the configured maximum run time
    ('loop_this_long', H:M:S) has elapsed, then mails the report file.
    """
    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('INFO: cycle started at ' + str(start_time))
    # read configuration file for usernames and passwords and other parameters
    config = readDictFile('oli.config')
    # set from this config the sid, because it is used everywhere
    sid = int(config['sid'])
    # create a connection to the postgresql database
    conn = ConnToOliDB()
    my_report.append_to_report(conn.init_result)
    # initialize the oc-webservice
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # start the cycling here
    while True:
        # get the responses as a list
        responses_list = read_ls_responses(config)
        # process the responses one by one
        for one_response in responses_list:
            # get the response_id, for administrative purposes
            response_id = one_response['id']
            # check if this combination sid-response-id already exists and if not, add it
            conn.TryToAddSubjectToDB(sid, response_id)
            # now see if we can do something with the data: start with the child code
            # reset study_subject_id and study_subject_oid
            study_subject_id = None
            study_subject_oid = None
            if (one_response['ChildCode'] is None):
                # write this to error report
                my_report.append_to_report('ERROR: Missing ChildCode for resp.id. %i' % response_id)
            else:
                # add leading zero's and the study prefix
                study_subject_id = config['childcode_prefix'] + ('0000' + str(int(float(one_response['ChildCode']))))[-8:]
                if (len(study_subject_id) != 13):
                    # write this to error report
                    my_report.append_to_report('ERROR: Incorrect ChildCode for resp.id. %i: %i' % (response_id, int(float(one_response['ChildCode']))))
                else:
                    # write the child-code / study subject id to the database
                    if (conn.DLookup('study_subject_id', 'ls_responses', 'sid=%i and response_id=%i' % (sid, response_id)) is None):
                        conn.WriteStudySubjectID(sid, response_id, study_subject_id)
                    # check if we already have a valid study subject oid
                    study_subject_oid = conn.DLookup('study_subject_oid', 'ls_responses', 'sid=%i and response_id=%i' % (sid, response_id))
                    if (study_subject_oid is None or study_subject_oid == 'None'):
                        # try to get a valid study subject oid
                        study_subject_oid = PGSubject(study_subject_id).GetSSOID()
                        # we don't know if we now have study_subject_oid,
                        # but the procedure only writes the study subject oid
                        # to the database for later use if it is not null
                        conn.WriteStudySubjectOID(sid, response_id, study_subject_oid)
                    # only continue if we have both study subject id and study subject oid
                    if (study_subject_oid is None):
                        # write this to error report
                        my_report.append_to_report('ERROR: missing OID for resp.id. %i : ChildCode %s' % (response_id, study_subject_id))
                    else:
                        # only compose the odm and try to import the result
                        # if this wasn't done before, so look at date_completed
                        if (conn.DLookup('date_completed', 'ls_responses', 'sid=%i and response_id=%i' % (sid, response_id)) is None):
                            print('resp.id. %i' % response_id)
                            # we try to compose the request, but if we can't convert an item
                            # to the correct data type, then we put that in the report
                            ws_request = compose_odm(study_subject_oid, one_response)
                            if (ws_request.find('CONVERSION-ERROR') != -1):
                                item_starts_at = ws_request.find('CONVERSION-ERROR')
                                my_report.append_to_report('ERROR: conversion for resp.id. %i %s failed with message "%s" and more' % (response_id, study_subject_id, ws_request[item_starts_at:item_starts_at + 100]))
                            else:
                                conn.WriteDataWSRequest(sid, response_id, ws_request)
                                import_result = myDataWS.importData(ws_request)
                                # strip single quotes so the response can be stored safely
                                import_result = import_result.replace("'", "")
                                conn.WriteDataWSResponse(sid, response_id, import_result)
                                if (import_result.find('Success') == 0):
                                    my_report.append_to_report('INFO: Successfully imported data for %s (%s)' % (study_subject_id, study_subject_oid))
                                    conn.SetResponseComplete(sid, response_id)
                                else:
                                    item_starts_at = import_result.find('I_')
                                    # BUGFIX: find() returns -1 when 'I_' is absent, and
                                    # slicing with [-1:] would log only the last character;
                                    # log the whole message instead (consistent with the
                                    # newer variant of this routine)
                                    if (item_starts_at == -1):
                                        my_report.append_to_report('ERROR: import for resp.id %i %s failed with message "%s"' % (response_id, study_subject_id, import_result))
                                    else:
                                        my_report.append_to_report('ERROR: import for resp.id %i %s failed with message "%s" and more' % (response_id, study_subject_id, import_result[item_starts_at:]))
        # move on with the next response
        # check if we must continue looping, or break the loop
        # first sleep a bit, so we do not eat up all CPU
        time.sleep(int(config['sleep_this_long']))
        current_time = datetime.datetime.now()
        difference = current_time - start_time
        loop_this_long = config['loop_this_long']
        max_diff_list = loop_this_long.split(sep=':')
        max_difference = datetime.timedelta(hours=int(max_diff_list[0]),
                                            minutes=int(max_diff_list[1]),
                                            seconds=int(max_diff_list[2]))
        if difference > max_difference:
            break
    my_report.append_to_report('INFO: finished looping from %s till %s.' % (start_time, current_time))
    # close the file so we can send it
    my_report.close_file()
    MailThisLogFile('logs/report.txt')
def cycle_through_syncs():
    """Synchronise OpenClinica study subject events with LimeSurvey tokens.

    For every subject event in OpenClinica, makes sure a LimeSurvey token
    exists for the mapped survey, then imports the LimeSurvey completion
    status back into OpenClinica (one ls_data blob per subject). Loops until
    the configured maximum run time has elapsed, then mails the report file.
    """

    def collect_all_tokens(api, session_key, event_survey_pairs):
        """Return (firstname, event_oid, sid, token, completed) tuples for all surveys.

        Extracted from the two verbatim copies in the original body
        (the '# TODO: let's make this a method' note).
        """
        all_tokens = []
        for event_oid, sid in event_survey_pairs.items():
            participants_req = api.tokens.list_participants(session_key, sid)
            participants = participants_req.get('result')
            for participant in participants:
                # loop through the participants, but only if there are any:
                # an empty result carries a 'status' entry instead
                if participant != 'status':
                    p_info = participant.get('participant_info')
                    all_tokens.append((p_info.get('firstname'), event_oid, sid,
                                       participant.get('token'),
                                       participant.get('completed')))
        return all_tokens

    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('cycle started at ' + str(start_time))
    # read configuration file for usernames and passwords and other parameters
    config = readDictFile('oli.config')
    # read with combinations of StudyEventOIDs and LimeSurvey sids
    event_survey_pairs = readDictFile('event_survey_pairs')
    # initialise the oc-webservice
    myWebService = studySubjectWS(config['userName'], config['password'], config['baseUrl'])
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # create a connection to the postgresql database
    conn = ConnToOliDB()
    my_report.append_to_report(conn.init_result)
    while True:
        # retrieve all StudySubjectEvents, using the webservice
        allStudySubjectEvents = myWebService.getListStudySubjectEvents(config['studyIdentifier'])
        # now we have the StudySubjectIDs, run them against the postgresql table subjects
        subjects_in_db = conn.ReadSubjectsFromDB()
        for studysubject_event in allStudySubjectEvents:
            # check if StudySubjectID is already in pg_database
            add_subject_to_db = True
            for subject_in_db in subjects_in_db:
                if (studysubject_event[0] == subject_in_db[1]):
                    add_subject_to_db = False
            if (add_subject_to_db):
                myPgSubject = PGSubject(studysubject_event[0])
                conn.AddSubjectsToDB([(myPgSubject.GetSSOID(), studysubject_event[0])])
                my_report.append_to_report('added %s to database' % studysubject_event[0])
        # now all StudySubjects in OpenClinica are also in our postgresql-database
        # so we refresh our list
        subjects_in_db = conn.ReadSubjectsFromDB()
        # collecting LimeSurvey data
        # Make a session, which is a bit of overhead, but the script will be running for hours.
        api = LimeSurveyRemoteControl2API(config['lsUrl'])
        session_req = api.sessions.get_session_key(config['lsUser'], config['lsPassword'])
        session_key = session_req.get('result')
        # all tokens of all surveys, so we can check if a new token must be created
        all_tokens = collect_all_tokens(api, session_key, event_survey_pairs)
        for studysubject_event in allStudySubjectEvents:
            # check if we must check this event
            if studysubject_event[1] in event_survey_pairs:
                # yes, we must check this event
                blnAddTokens = True
                for one_token in all_tokens:
                    if one_token[0] == studysubject_event[0] and one_token[1] == studysubject_event[1]:
                        # a token exists
                        blnAddTokens = False
                if blnAddTokens:
                    print("add token for " + studysubject_event[0] + " " +
                          str(event_survey_pairs[studysubject_event[1]]) + ", " + studysubject_event[1])
                    participant_data = {'firstname': studysubject_event[0]}
                    # NOTE(review): the actual creation call was commented out in the
                    # original — confirm whether it should be re-enabled:
                    #add_participant_req = api.tokens.add_participants(session_key, event_survey_pairs[studysubject_event[1]], participant_data)
                    # BUGFIX: the original logged the stale loop variable `sid`
                    # (the last survey of the collection loop); report the survey
                    # that actually belongs to this event instead
                    my_report.append_to_report('created token for survey %s for subject %s' %
                                               (event_survey_pairs[studysubject_event[1]], studysubject_event[0]))
        # we may have added tokens, so refresh all_tokens
        all_tokens = collect_all_tokens(api, session_key, event_survey_pairs)
        # now import the LimeSurvey results into OpenClinica, sorted by study subject id
        sorted_tokens = sorted(all_tokens, key=itemgetter(0))
        last_ssid = 'x'
        lime_survey_header = 'ev. token completed --------------------------- '
        lime_survey_data_to_import = lime_survey_header
        for token in sorted_tokens:
            survey_friendly_name = conn.DLookup("friendly_name", "ls_sids", "ls_sid=%d" % (int(token[2])))
            if last_ssid != token[0]:
                # new study subject ID, so write the previous one
                ssoid = conn.DLookup("study_subject_oid", "subjects", "study_subject_id='%s'" % (last_ssid))
                # skip the start-value
                if last_ssid != 'x':
                    ls_data_in_db = conn.DLookup("ls_data", "subjects", "study_subject_oid='%s'" % (ssoid))
                    # only import when something actually changed
                    if lime_survey_data_to_import != ls_data_in_db:
                        myImport = myDataWS.importLSData(ssoid, lime_survey_data_to_import)
                        conn.WriteLSDataToDB(ssoid, lime_survey_data_to_import, myImport)
                        my_report.append_to_report('wrote ls_data for subject %s 1' % (ssoid))
                # reset the variables
                last_ssid = token[0]
                lime_survey_data_to_import = lime_survey_header + survey_friendly_name + ' ' + token[3] + ' ' + token[4] + ' '
            else:
                lime_survey_data_to_import = lime_survey_data_to_import + survey_friendly_name + ' ' + token[3] + ' ' + token[4] + ' '
        # write the last one
        ssoid = conn.DLookup("study_subject_oid", "subjects", "study_subject_id='%s'" % (last_ssid))
        ls_data_in_db = conn.DLookup("ls_data", "subjects", "study_subject_oid='%s'" % (ssoid))
        if lime_survey_data_to_import != ls_data_in_db:
            myImport = myDataWS.importLSData(ssoid, lime_survey_data_to_import)
            conn.WriteLSDataToDB(ssoid, lime_survey_data_to_import, myImport)
            my_report.append_to_report('wrote ls_data for subject %s 2' % (ssoid))
        # some book keeping to check if we must continue looping, or break the loop
        # first sleep a bit, so we do not eat up all CPU
        time.sleep(int(config['sleep_this_long']))
        current_time = datetime.datetime.now()
        difference = current_time - start_time
        loop_this_long = config['loop_this_long']
        max_diff_list = loop_this_long.split(sep=':')
        max_difference = datetime.timedelta(hours=int(max_diff_list[0]),
                                            minutes=int(max_diff_list[1]),
                                            seconds=int(max_diff_list[2]))
        if difference > max_difference:
            break
    my_report.append_to_report('finished looping from %s till %s.' % (start_time, current_time))
    # close the file so we can send it
    my_report.close_file()
    MailThisLogFile('logs/report.txt')
def cycle_through_syncs():
    """Synchronise ODK form submissions (READER, SCREENING, LAMP) into OpenClinica.

    For each form: read the rows from the odk_prod tables, create/schedule any
    missing study subjects in OpenClinica, compose an ODM document per row and
    import it, marking the row's URI complete on success. Loops until the
    configured maximum run time ('loop_this_long', H:M:S) has elapsed.
    """
    # we start by reading the config file and preparing the connections to the databases
    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('cycle started at ' + str(start_time))
    # read configuration file for usernames and passwords and other parameters
    config = readDictFile('odkoc.config')
    # initialise the oc-webservices
    myWebService = studySubjectWS(config['userName'], config['password'], config['baseUrl'])
    myEventWS = studyEventWS(config['userName'], config['password'], config['baseUrl'])
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # create connections to the postgresql databases
    conn_util = ConnToOdkUtilDB()
    my_report.append_to_report('try to connect to util database, result: %s ' % conn_util.init_result)
    conn_odk = ConnToOdkDB()
    my_report.append_to_report('try to connect to odk database, result: %s ' % conn_odk.init_result)
    # our cycle starts here and ends at the break
    while True:
        ''' start with form READER '''
        # 1: start with retrieving the rows of odk-table HS_RDT_READER_1_V1_CORE
        odk_results = conn_odk.ReadDataFromOdkTable("odk_prod.\"HS_RDT_READER_1_V1_CORE\"")
        # 2: create subject in oc, if necessary
        # retrieve all StudySubjectEvents from oc, using the webservice
        allStudySubjectsInOC = myWebService.getListStudySubjects(config['studyIdentifier'])
        for odk_result in odk_results:
            # check if StudySubjectID from odk is already in oc
            add_subject_to_db = True
            study_subject_id = odk_result['GENERAL_INFORMATION_STUDY_SUBJECT_ID']
            # compare with all oc subjects events
            for studysubjectid_oc in allStudySubjectsInOC:
                if (studysubjectid_oc == study_subject_id):
                    add_subject_to_db = False
            if (add_subject_to_db):
                # add study_subject_id to the oc
                add_results = myWebService.addStudySubject(config['studyIdentifier'], config['siteIdentifier'], study_subject_id)
                # TODO: add error-handling for fail of creating subject
                # and schedule the event
                study_subject_oid = myEventWS.scheduleEvent(config['studyIdentifier'], study_subject_id,
                                                            config['studyEventOID'], 'def', '1980-01-01')
                # TODO: add errorhandling for fail of scheduling event
                # now add the combination id oid to the util database
                # only add the pair if the oid starts with SS_
                if (study_subject_oid.find('SS_') == 0):
                    conn_util.AddSubjectToDB(study_subject_oid, study_subject_id)
            # extra check: maybe we somehow missed the study subject oid and
            # then there will be no record in table study_subject_oc
            if (conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                  'study_subject_id=\'%s\'' % (study_subject_id)) == ''):
                new_subject = PGSubject(study_subject_id)
                conn_util.AddSubjectToDB(new_subject.GetSSOID(), study_subject_id)
            # only import the data if this hasn't been done before
            if (not conn_util.UriComplete(odk_result['_URI'])):
                # now we should have the study subject id plus oid, so we can compose the odm for import
                study_subject_id = odk_result['GENERAL_INFORMATION_STUDY_SUBJECT_ID']
                study_subject_oid = conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                                      'study_subject_id=\'%s\'' % (study_subject_id))
                complete_odm = compose_reader(study_subject_oid, odk_result)
                import_results = myDataWS.importData(complete_odm)
                # if our import was successful, then the result should start with Success
                # and if so, we can mark this uri as complete
                if (import_results.find('Success') == 0):
                    conn_util.MarkUriComplete(odk_result['_URI'], 'reader')
                my_report.append_to_report('reader ' + study_subject_id + ': ' + import_results)
        ''' go on with with form SCREENING '''
        odk_results = conn_odk.ReadDataFromOdkTable("odk_prod.\"SCREEN19M__V3_CORE\"",
                                                    'not \"INFORMED_CONSENT_STUDY_SUBJECT_ID\" is null')
        # retrieve all StudySubjectEvents from oc, using the webservice
        allStudySubjectsInOC = myWebService.getListStudySubjects(config['studyIdentifier'])
        for odk_result in odk_results:
            # check if StudySubjectID from odk is already in oc
            add_subject_to_db = True
            study_subject_id = odk_result['INFORMED_CONSENT_STUDY_SUBJECT_ID']
            print(study_subject_id)
            # compare with all oc subjects events
            for studysubjectid_oc in allStudySubjectsInOC:
                if (studysubjectid_oc == study_subject_id):
                    add_subject_to_db = False
            if (add_subject_to_db):
                # add study_subject_id to the oc
                add_results = myWebService.addStudySubject(config['studyIdentifier'], config['siteIdentifier'], study_subject_id)
                # TODO: add error-handling for fail of creating subject
                # and schedule the event
                study_subject_oid = myEventWS.scheduleEvent(config['studyIdentifier'], study_subject_id,
                                                            config['studyEventOID'], 'def', '1980-01-01')
                # TODO: add errorhandling for fail of scheduling event
                # only add the pair if the oid starts with SS_
                if (study_subject_oid.find('SS_') == 0):
                    conn_util.AddSubjectToDB(study_subject_oid, study_subject_id)
            # extra check: maybe we somehow missed the study subject oid
            if (conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                  'study_subject_id=\'%s\'' % (study_subject_id)) == ''):
                new_subject = PGSubject(study_subject_id)
                conn_util.AddSubjectToDB(new_subject.GetSSOID(), study_subject_id)
            print('we have study subject id %s and oid %s' %
                  (study_subject_id, conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                                       'study_subject_id=\'%s\'' % (study_subject_id))))
            print(odk_result['_URI'], conn_util.UriComplete(odk_result['_URI']))
            # only import the data if this hasn't been done before
            if (not conn_util.UriComplete(odk_result['_URI'])):
                print('dive into it')
                # now we should have the study subject id plus oid, so we can compose the odm for import
                study_subject_id = odk_result['INFORMED_CONSENT_STUDY_SUBJECT_ID']
                study_subject_oid = conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                                      'study_subject_id=\'%s\'' % (study_subject_id))
                complete_odm = compose_screening(study_subject_oid, odk_result)
                print(complete_odm)
                # we'll make an exception for I_MA006_OTHER_DISEASE_HX, because this is a
                # group of check-boxes; in complete_odm we have a placeholder {OTHER_DISEASE_HX}
                parent_uri = odk_result['_URI']
                hx_results = conn_odk.ReadDataFromOdkTable(
                    "odk_prod.\"SCREEN19M__V3_MED_HISTO_CONCO_MED_OTHER_DISEASE_HX\"",
                    '\"_PARENT_AURI\"=\'%s\'' % (parent_uri))
                other_disease_hx = ''
                for hx in hx_results:
                    other_disease_hx = other_disease_hx + hx['VALUE'] + ','
                if (other_disease_hx != ''):
                    # take off the last comma
                    other_disease_hx = other_disease_hx[:-1]
                # finally we can replace the placeholder with the actual values
                # (with '' when there were no entries, so no placeholder is left behind)
                complete_odm = complete_odm.replace('{OTHER_DISEASE_HX}', other_disease_hx)
                # import the odm data
                import_results = myDataWS.importData(complete_odm)
                if (import_results.find('Success') != 0):
                    # if something went wrong, print it
                    print(import_results)
                    import_screening_core_success = False
                else:
                    # if our import was successful, then make a note of it
                    import_screening_core_success = True
                my_report.append_to_report('screening ' + study_subject_id + ': ' + import_results)
                ''' now we can look at the repeating item group for miscarriages '''
                odk_misca_results = conn_odk.ReadDataFromOdkTable(
                    "odk_prod.\"SCREEN19M__V3_OBSETRIC_INFO_EXAM_BABY_DIED\"",
                    '\"_PARENT_AURI\"=\'%s\'' % (parent_uri))
                # BUGFIX: initialise before the loop — with zero misca rows the flag
                # was previously unbound (NameError) or stale from an earlier record;
                # no misca rows means nothing could fail, so default to True
                import_screening_misca_success = True
                for misca in odk_misca_results:
                    complete_odm = compose_misca(study_subject_oid, misca)
                    import_results = myDataWS.importData(complete_odm)
                    if (import_results.find('Success') != 0):
                        # if something went wrong, print it
                        print(import_results)
                        import_screening_misca_success = False
                    else:
                        import_screening_misca_success = True
                    my_report.append_to_report('misca ' + study_subject_id + ': ' + import_results)
                # now do the bookkeeping
                if (import_screening_core_success and import_screening_misca_success):
                    conn_util.MarkUriComplete(odk_result['_URI'], 'screening')
        ''' go on with with form LAMP '''
        odk_results = conn_odk.ReadDataFromOdkTable("odk_prod.\"LAMP_TESTING_V1_CORE\"",
                                                    'not \"GENERAL_INFORMATION_STUDY_SUBJECT_ID\" is null')
        # retrieve all StudySubjectEvents from oc, using the webservice
        allStudySubjectsInOC = myWebService.getListStudySubjects(config['studyIdentifier'])
        for odk_result in odk_results:
            # check if StudySubjectID from odk is already in oc
            add_subject_to_db = True
            study_subject_id = odk_result['GENERAL_INFORMATION_STUDY_SUBJECT_ID']
            # compare with all oc subjects events
            for studysubjectid_oc in allStudySubjectsInOC:
                if (studysubjectid_oc == study_subject_id):
                    add_subject_to_db = False
            if (add_subject_to_db):
                # add study_subject_id to the oc
                add_results = myWebService.addStudySubject(config['studyIdentifier'], config['siteIdentifier'], study_subject_id)
                # TODO: add error-handling for fail of creating subject
                # and schedule the event
                study_subject_oid = myEventWS.scheduleEvent(config['studyIdentifier'], study_subject_id,
                                                            config['studyEventOID'], 'def', '1980-01-01')
                # TODO: add errorhandling for fail of scheduling event
                # only add the pair if the oid starts with SS_
                if (study_subject_oid.find('SS_') == 0):
                    conn_util.AddSubjectToDB(study_subject_oid, study_subject_id)
            # extra check: maybe we somehow missed the study subject oid
            if (conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                  'study_subject_id=\'%s\'' % (study_subject_id)) == ''):
                new_subject = PGSubject(study_subject_id)
                conn_util.AddSubjectToDB(new_subject.GetSSOID(), study_subject_id)
            # only import the data if this hasn't been done before
            if (not conn_util.UriComplete(odk_result['_URI'])):
                # now we should have the study subject id plus oid, so we can compose the odm for import
                study_subject_id = odk_result['GENERAL_INFORMATION_STUDY_SUBJECT_ID']
                study_subject_oid = conn_util.DLookup('study_subject_oid', 'odkoc.study_subject_oc',
                                                      'study_subject_id=\'%s\'' % (study_subject_id))
                complete_odm = compose_lamp(study_subject_oid, odk_result)
                import_results = myDataWS.importData(complete_odm)
                if (import_results.find('Success') != 0):
                    # if something went wrong, print it
                    print(import_results)
                    import_lamp_success = False
                else:
                    # if our import was successful, then make a note of it
                    import_lamp_success = True
                my_report.append_to_report('lamp ' + study_subject_id + ': ' + import_results)
                if (import_lamp_success):
                    conn_util.MarkUriComplete(odk_result['_URI'], 'lamp')
        # some book keeping to check if we must continue looping, or break the loop
        # first sleep a bit, so we do not eat up all CPU
        time.sleep(int(config['sleep_this_long']))
        current_time = datetime.datetime.now()
        difference = current_time - start_time
        loop_this_long = config['loop_this_long']
        max_diff_list = loop_this_long.split(sep=':')
        max_difference = datetime.timedelta(hours=int(max_diff_list[0]),
                                            minutes=int(max_diff_list[1]),
                                            seconds=int(max_diff_list[2]))
        if difference > max_difference:
            break
    my_report.append_to_report('finished looping from %s till %s.' % (start_time, current_time))
    # close the file so we can send it
    my_report.close_file()
def cycle_through_syncs():
    """Poll LimeSurvey and import completed responses into OpenClinica.

    Each cycle rebuilds the token -> study-subject-id map, walks the new
    responses, resolves/stores the subject OID and imports the composed ODM.
    Stops once the configured 'loop_this_long' (H:M:S) window has passed,
    then mails the report file.
    """
    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('INFO: cycle started at ' + str(start_time))
    # configuration: credentials, survey id and loop parameters
    config = readDictFile('oli.config')
    # the survey id is needed in nearly every call below
    sid = int(config['sid'])
    # postgresql connection plus the OpenClinica data webservice
    conn = ConnToOliDB()
    my_report.append_to_report(conn.init_result)
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # main polling loop; the break at the bottom ends it
    while True:
        # map each token to the study subject id kept in the firstname field
        tokens_list = read_ls_tokens(config)
        tokens = {entry['token']: entry['participant_info']['firstname']
                  for entry in tokens_list}
        # the list is no longer needed once the dict exists
        tokens_list = []
        # fetch and handle the responses
        responses_list = read_ls_responses(config)
        for one_response in responses_list:
            # each element is itself a mapping of response records
            for one_response_data in one_response.values():
                # lastpage == 0 means the survey was never started; skip those
                if (one_response_data['lastpage'] != 0):
                    # response id is used as the administrative key everywhere
                    response_id = one_response_data['id']
                    # register the sid/response-id pair if it is new
                    conn.TryToAddSubjectToDB(sid, response_id)
                    # start from a clean slate for this response
                    study_subject_id = ''
                    study_subject_oid = None
                    if (one_response_data['token'] is None):
                        my_report.append_to_report(
                            'ERROR: Missing token for response id %i' % response_id)
                    else:
                        # look the study subject id up via the token map
                        this_token = one_response_data['token']
                        if this_token in tokens:
                            study_subject_id = tokens[this_token]
                        else:
                            my_report.append_to_report(
                                'ERROR: No study subject id for token %s' % this_token)
                    # a valid study subject id is exactly 13 characters long
                    if (len(study_subject_id) != 13):
                        my_report.append_to_report(
                            'ERROR: Incorrect study subject id for response id %i: %s'
                            % (response_id, study_subject_id))
                    else:
                        # persist the study subject id if it is not stored yet
                        where = 'sid=%i and response_id=%i' % (sid, response_id)
                        if (conn.DLookup('study_subject_id', 'ls_responses', where) is None):
                            conn.WriteStudySubjectID(sid, response_id, study_subject_id)
                        # reuse a stored OID when available, otherwise fetch one
                        study_subject_oid = conn.DLookup('study_subject_oid', 'ls_responses', where)
                        if (study_subject_oid is None or study_subject_oid == ''):
                            study_subject_oid = PGSubject(study_subject_id).GetSSOID()
                            # the write only persists non-null OIDs; we may still
                            # be without one after this call
                            conn.WriteStudySubjectOID(sid, response_id, study_subject_oid)
                        if (study_subject_oid is None):
                            my_report.append_to_report(
                                'ERROR: Missing OID for ChildCode %s' % study_subject_id)
                        else:
                            # date_completed set means this response was imported before
                            if (conn.DLookup('date_completed', 'ls_responses', where) is None):
                                ws_request = compose_odm(study_subject_oid, one_response_data)
                                conn.WriteDataWSRequest(sid, response_id, ws_request)
                                import_result = myDataWS.importData(ws_request)
                                conn.WriteDataWSResponse(sid, response_id, import_result)
                                if (import_result.find('Success') == 0):
                                    my_report.append_to_report(
                                        'INFO: Successfully imported data for %s (%s)'
                                        % (study_subject_id, study_subject_oid))
                                    conn.SetResponseComplete(sid, response_id)
                                else:
                                    # trim the message to the failing item when possible
                                    item_starts_at = import_result.find('I_')
                                    if (item_starts_at == -1):
                                        my_report.append_to_report(
                                            'ERROR: import for %s failed with message "%s"'
                                            % (study_subject_id, import_result))
                                    else:
                                        my_report.append_to_report(
                                            'ERROR: import for %s failed with message "%s" and more'
                                            % (study_subject_id, import_result[item_starts_at:]))
        # sleep before checking whether the allotted run time has passed
        time.sleep(int(config['sleep_this_long']))
        current_time = datetime.datetime.now()
        difference = current_time - start_time
        loop_this_long = config['loop_this_long']
        max_diff_list = loop_this_long.split(sep=':')
        max_difference = datetime.timedelta(hours=int(max_diff_list[0]),
                                            minutes=int(max_diff_list[1]),
                                            seconds=int(max_diff_list[2]))
        if difference > max_difference:
            break
    my_report.append_to_report('INFO: finished looping from %s till %s.'
                               % (start_time, current_time))
    # close the report so it can be mailed
    my_report.close_file()
    MailThisLogFile('logs/report.txt')
def cycle_through_syncs():
    """Sync ODK READER and SCREENING submissions into OpenClinica.

    Per cycle: read the form rows from the odk database, create and schedule
    any study subjects missing from OpenClinica, then compose and import one
    ODM document per row. Runs until the configured 'loop_this_long' (H:M:S)
    window has elapsed.
    """
    # report and timing bookkeeping
    my_report = Reporter()
    start_time = datetime.datetime.now()
    my_report.append_to_report('cycle started at ' + str(start_time))
    # credentials and parameters come from the config file
    config = readDictFile('odkoc.config')
    # the three OpenClinica webservices we need
    myWebService = studySubjectWS(config['userName'], config['password'], config['baseUrl'])
    myEventWS = studyEventWS(config['userName'], config['password'], config['baseUrl'])
    myDataWS = dataWS(config['userName'], config['password'], config['baseUrl'])
    # postgresql connections: util database and odk database
    conn_util = ConnToOdkUtilDB()
    my_report.append_to_report('try to connect to util database, result: %s ' % conn_util.init_result)
    conn_odk = ConnToOdkDB()
    my_report.append_to_report('try to connect to odk database, result: %s ' % conn_odk.init_result)
    # the cycle runs until the break at the bottom
    while True:
        # --- form READER: rows of odk-table HS_RDT_READER_1_V1_CORE ---
        odk_results = conn_odk.ReadDataFromOdkTable('odk_prod."HS_RDT_READER_1_V1_CORE"')
        # subject ids currently known in OpenClinica
        allStudySubjectsInOC = myWebService.getListStudySubjects(config['studyIdentifier'])
        for odk_result in odk_results:
            study_subject_id = odk_result['GENERAL_INFORMATION_STUDY_SUBJECT_ID']
            # create the subject only when OpenClinica does not know it yet
            if study_subject_id not in allStudySubjectsInOC:
                add_results = myWebService.addStudySubject(config['studyIdentifier'], study_subject_id)
                # TODO: add error-handling for fail of creating subject
                study_subject_oid = myEventWS.scheduleEvent(config['studyIdentifier'], study_subject_id,
                                                            config['studyEventOID'], 'def', '1980-01-01')
                # TODO: add errorhandling for fail of scheduling event
                # store the id/oid pair, but only for a plausible oid (SS_ prefix)
                if study_subject_oid.find('SS_') == 0:
                    conn_util.AddSubjectToDB(study_subject_oid, study_subject_id)
            # with id and oid available, compose the odm and import it
            study_subject_oid = conn_util.DLookup('study_subject_oid', 'study_subject_oc',
                                                  "study_subject_id='%s'" % (study_subject_id))
            complete_odm = compose_reader(study_subject_oid, odk_result)
            import_results = myDataWS.importData(complete_odm)
            my_report.append_to_report(study_subject_id + ': ' + import_results)
        # --- form SCREENING: rows with a non-null subject id ---
        odk_results = conn_odk.ReadDataFromOdkTable('odk_prod."SCREEN19M__V3_CORE"',
                                                    'not "INFORMED_CONSENT_STUDY_SUBJECT_ID" is null')
        # refresh the list of subjects known in OpenClinica
        allStudySubjectsInOC = myWebService.getListStudySubjects(config['studyIdentifier'])
        for odk_result in odk_results:
            study_subject_id = odk_result['INFORMED_CONSENT_STUDY_SUBJECT_ID']
            # create the subject only when OpenClinica does not know it yet
            if study_subject_id not in allStudySubjectsInOC:
                add_results = myWebService.addStudySubject(config['studyIdentifier'], study_subject_id)
                # TODO: add error-handling for fail of creating subject
                study_subject_oid = myEventWS.scheduleEvent(config['studyIdentifier'], study_subject_id,
                                                            config['studyEventOID'], 'def', '1980-01-01')
                # TODO: add errorhandling for fail of scheduling event
                # store the id/oid pair, but only for a plausible oid (SS_ prefix)
                if study_subject_oid.find('SS_') == 0:
                    conn_util.AddSubjectToDB(study_subject_oid, study_subject_id)
            # with id and oid available, compose the odm and import it
            study_subject_oid = conn_util.DLookup('study_subject_oid', 'study_subject_oc',
                                                  "study_subject_id='%s'" % (study_subject_id))
            complete_odm = compose_screening(study_subject_oid, odk_result)
            import_results = myDataWS.importData(complete_odm)
            my_report.append_to_report(study_subject_id + ': ' + import_results)
        # pause before deciding whether the allotted run time has passed
        time.sleep(int(config['sleep_this_long']))
        current_time = datetime.datetime.now()
        difference = current_time - start_time
        loop_this_long = config['loop_this_long']
        max_diff_list = loop_this_long.split(sep=':')
        max_difference = datetime.timedelta(hours=int(max_diff_list[0]),
                                            minutes=int(max_diff_list[1]),
                                            seconds=int(max_diff_list[2]))
        if difference > max_difference:
            break
    my_report.append_to_report('finished looping from %s till %s.' % (start_time, current_time))
    # close the report file
    my_report.close_file()