def migrate_users():
    """Migrate Mongolia User documents into Django Participant (DUser) rows.

    Pass 1: for every Mongolia user whose study was migrated (present in
    study_id_dict), build a DUser, validate it, and collect it for one
    bulk_create.  Users referencing a missing study are reported and skipped.

    Pass 2: look each Mongolia user back up in Django and record the mapping
    from Mongolia id to the new Django row in user_id_dict, for use by the
    later migrations that reference users.

    Relies on module-level state: error_handler, study_id_dict, user_id_dict.
    """
    m_user_list = MUserSet.iterator()
    d_user_list = []
    for m_user in m_user_list:
        with error_handler:
            # Get information about the Participant's Study
            m_study_id = m_user['study_id']
            try:
                d_study_info = study_id_dict[m_study_id]
            except KeyError:
                print('Study {} is referenced by a User but does not exist.'.format(m_study_id))
                continue
            # Django convention is to use the empty string rather than None in CharFields
            device_id = m_user['device_id'] or ''
            os_type = m_user['os_type'] or ''
            # Build a new Django Participant
            d_user = DUser(
                patient_id=m_user['_id'],
                device_id=device_id,
                os_type=os_type,
                study_id=d_study_info['pk'],
                password=m_user['password'],
                salt=m_user['salt'],
                deleted=d_study_info['deleted'],
            )
            # Validate the Participant and add it to the bulk_create list
            d_user.full_clean()
            d_user_list.append(d_user)
    # Bulk_create the Participants
    DUser.objects.bulk_create(d_user_list)
    for m_user in MUserSet.iterator():
        with error_handler:
            m_user_id = m_user['_id']
            try:
                # NOTE: .values('pk').get() yields a {'pk': ...} dict, not a bare
                # pk — downstream consumers of user_id_dict apparently expect that
                # shape (it mirrors study_id_dict); confirm before changing.
                d_user_id = DUser.objects.filter(patient_id=m_user['_id']).values('pk').get()
            except DUser.DoesNotExist:
                msg = 'User {} was not created.'.format(m_user_id)
                print(msg)
                # raise ObjectCreationException(msg)
                # BUGFIX: skip the mapping assignment; previously execution fell
                # through and stored a stale d_user_id from an earlier iteration
                # (or raised NameError if the very first lookup failed).
                continue
            user_id_dict[m_user_id] = d_user_id
def completely_purge_study(study_id, actually_delete=False): if not isinstance(study_id, ObjectId): study_id = ObjectId(study_id) study = Study(study_id) surveys = study["surveys"] device_settings = study["device_settings"] users = Users(study_id=study_id) chunks = ChunksRegistry(study_id=study_id) files_to_process = FilesToProcess(study_id=study_id) if not actually_delete: print "if you actually delete this you will not be able to decrypt anything " \ "from this study. Don't do it unless you know what you are doing." print study.name # print len(study) # print len(device_settings) print len(surveys) print len(users) print len(chunks) print len(files_to_process) else: StudyDeviceSettings(device_settings).remove() [Survey(s).remove() for s in surveys] [User(u).remove() for u in users] [ChunkRegistry(c).remove() for c in chunks] [FileToProcess(f).remove() for f in files_to_process] study.remove()
def get_users_in_study():
    """Return a JSON array of the string ids of every user in the requested study.

    The study id comes from the request parameters; an unparseable id falls
    back to None, which (like an unknown id) yields a 404.
    """
    try:
        study_id = ObjectId(request.values["study_id"])
    except InvalidId:
        study_id = None

    study_obj = Study(study_id)
    if not study_obj:
        return abort(404)

    # Admin-access validation helper; its return value is unused here.
    get_and_validate_admin(study_obj)

    user_ids = [str(user._id) for user in Users(study_id=study_id)]
    return json.dumps(user_ids)
def view_study(study_id=None):
    """Render the view_study page for one study, with its patients and surveys."""
    study = Study(study_id)
    # Gather both flavors of survey ids for the template.
    survey_ids = {
        kind: study.get_survey_ids_for_study(kind)
        for kind in ('tracking_survey', 'audio_survey')
    }
    return render_template(
        'view_study.html',
        study=study,
        patients=Users(study_id=study_id),
        audio_survey_ids=survey_ids['audio_survey'],
        tracking_survey_ids=survey_ids['tracking_survey'],
        study_name=study.name,
        allowed_studies=get_admins_allowed_studies(),
        system_admin=admin_is_system_admin(),
    )
def data_api_web_form_page():
    # TODO: Josh, provide access to this route via a link in the top navbar
    """Render the data API web form for the current admin's studies."""
    admin = Admin(session['admin_username'])
    warn_admin_if_hasnt_yet_generated_access_key(admin)
    allowed_studies = get_admins_allowed_studies()

    # dict of {study ids : list of user ids}
    users_by_study = {}
    for study in allowed_studies:
        study_key = str(study["_id"])
        users_by_study[study_key] = [
            user["_id"] for user in Users(study_id=study['_id'])
        ]

    return render_template(
        "data_api_web_form.html",
        allowed_studies=allowed_studies,
        users_by_study=users_by_study,
        ALL_DATA_STREAMS=ALL_DATA_STREAMS,
        system_admin=admin_is_system_admin(),
    )
def get_all_timings_files():
    """List every surveyTimings file path on S3 across all studies.

    Builds one "study_id/user_id/surveyTimings" prefix per user, lists S3
    under each prefix concurrently, and returns the utf8-decoded paths,
    filtered to exactly 5 path components (f.count('/') == 4) to purge the
    occasional pre-multistudy file.
    """
    # get users associated with studies
    study_users = {
        str(s._id): Users(study_id=s._id, field='_id') for s in Studies()
    }
    all_user_timings = []
    for sid, users in study_users.items():
        # construct prefixes
        all_user_timings.extend(
            sid + "/" + u + "/" + "surveyTimings" for u in users
        )

    # FIX: ThreadPool(0) raises ValueError, and one-thread-per-prefix is
    # unbounded; return early when empty and cap the worker count.
    if not all_user_timings:
        return []
    pool = ThreadPool(min(len(all_user_timings), 32))
    try:
        # (the old `except Exception: raise` was a no-op and is gone)
        files_lists = pool.map(s3_list_files, all_user_timings)
    finally:
        pool.close()
        pool.terminate()

    files_list = []
    for sublist in files_lists:
        files_list.extend(sublist)
    # we need to purge the occasional pre-multistudy file, and ensure it is
    # utf encoded.
    return [f.decode("utf8") for f in files_list if f.count('/') == 4]
# NOTE(review): these two statements read like the tail of an enclosing
# function (e.g. run_all_migrations) whose `def` is outside this view;
# indentation was lost in the source — confirm their true scope.
print "migrate_upload_trackers..."
migrate_upload_trackers()

if __name__ == '__main__':
    # Cross-migration state shared (as module globals) by the migrate_* steps.
    study_referents = {}
    study_id_dict = {}
    user_id_dict = {}
    survey_id_dict = {}
    orphaned_surveys = {}
    d_study_admin_list = []  # A list of study-researcher pairs
    d_study_survey_dict = {}  # A mapping of surveys to their associated studies
    d_study_settings_dict = {}  # A mapping of device settings to their associated studies
    CHUNK_SIZE = 10000
    # error_handler = ErrorHandler()
    error_handler = null_error_handler()
    # Pre-migration counts of every Mongolia collection, for eyeballing
    # against the Django counts printed after the run.
    print(MStudySet.count(), MSurveySet.count(), MSettingsSet.count(), MAdminSet.count(), MUserSet.count(), MChunkSet.count(), MUploadSet.count())
    with error_handler:
        run_all_migrations()
    # Post-migration counts of the corresponding Django tables.
    print(DStudy.objects.count(), DSurvey.objects.count(), DSettings.objects.count(), DAdmin.objects.count(), DUser.objects.count(), DChunk.objects.count(), DUpload.objects.count())
    print("end:", datetime.now())
    # Re-raise anything the error handler collected during the run.
    error_handler.raise_errors()