def get_current_membership(user_group_id):
    """Return the ids of all users currently in the given user group."""
    return pg_utils.execute_query(
        base_sql=sql_q.SQL_USER_IDS_IN_USER_GROUP,
        params=[user_group_id],
        jsonize_sql=False,
    )
def test_04_populate_user_group_for_follow_up_task_ok(self):
    """Populate a user group from the Qualtrics responses csv and check both
    the resulting database membership and the human-readable output message.
    """
    paramedics_user_group_id = "06533020-333c-4046-a1c9-40031e160a23"
    create_user_group = CreateUserGroup(
        csvfile_path=QUALTRICS_RESPONSES_CSV)
    create_user_group.set_or_create_user_group(
        ug_id=paramedics_user_group_id)
    create_user_group.populate_user_group()
    # Fetch membership straight from the database to compare against the
    # user ids CreateUserGroup reports it processed.
    user_ids_in_group = pg_utils.execute_query(
        sql_q.SQL_USER_IDS_IN_USER_GROUP,
        [paramedics_user_group_id],
        jsonize_sql=False,
    )
    self.assertEqual(16, len(create_user_group.user_ids))
    self.assertCountEqual(create_user_group.user_ids, user_ids_in_group)
    # check output message
    message = create_user_group.output_user_ids_str()
    # NOTE(review): the expected list contains repeated user ids — presumably
    # one entry per csv response row rather than per unique user; confirm.
    expected_message = (
        "User_ids for users in user group 06533020-333c-4046-a1c9-40031e160a23:\n\nd1070e81-557e-40eb-a7ba-b951ddb7ebdc;\n"
        "851f7b34-f76c-49de-a382-7e4089b744e2;\n851f7b34-f76c-49de-a382-7e4089b744e2;\n8518c7ed-1df4-45e9-8dc4-d49b57ae0663;\n"
        "8518c7ed-1df4-45e9-8dc4-d49b57ae0663;\n35224bd5-f8a8-41f6-8502-f96e12d6ddde;\n8518c7ed-1df4-45e9-8dc4-d49b57ae0663;\n"
        "35224bd5-f8a8-41f6-8502-f96e12d6ddde;\n1cbe9aad-b29f-46b5-920e-b4c496d42515;\nd1070e81-557e-40eb-a7ba-b951ddb7ebdc;\n"
        "8518c7ed-1df4-45e9-8dc4-d49b57ae0663;\n35224bd5-f8a8-41f6-8502-f96e12d6ddde;\n8518c7ed-1df4-45e9-8dc4-d49b57ae0663;\n"
        "1cbe9aad-b29f-46b5-920e-b4c496d42515;\n35224bd5-f8a8-41f6-8502-f96e12d6ddde;\n8518c7ed-1df4-45e9-8dc4-d49b57ae0663"
    )
    self.assertEqual(expected_message, message)
def __init__(self):
    """Interactively gather a project task and the list of users a reminder
    is about, resolving each anon_project_specific_user_id to a full user.

    Raises:
        ValueError: if any anon id cannot be resolved to a user.
    """
    project_task_id = input("Please enter the id of the project task "
                            "this reminder is about:")
    self.project_task = pg_utils.execute_query(
        base_sql=sql_q.TASK_REMINDER_SQL,
        params=(project_task_id, ))[0]
    csv_import = input("Would you like to import a csv file containing "
                       "anon_project_specific_user_ids? (y/N)")
    if csv_import in ["y", "Y"]:
        importer = CsvImporter(
            anon_project_specific_user_id_column=
            "anon_project_specific_user_id",
            csvfile_path=None,
        )
        anon_project_specific_user_ids = (
            importer.output_list_of_anon_project_specific_user_ids())
    else:
        anon_project_specific_user_ids = input(
            "Please paste a list of anon_project_specific_user_ids separated by commas:"
        )
    # Bug fix: the importer presumably returns a list (name says so — TODO
    # confirm), on which the original unconditional .split(",") would raise
    # AttributeError. Only split when we actually have a string.
    if isinstance(anon_project_specific_user_ids, str):
        anon_ids = anon_project_specific_user_ids.split(",")
    else:
        anon_ids = list(anon_project_specific_user_ids)
    self.anon_ids = [x.strip() for x in anon_ids]
    self.users = list()
    # Bug fix: iterate the *stripped* ids; the original looped over the raw
    # split results, so ids pasted with surrounding whitespace failed lookup.
    for anon_id in self.anon_ids:
        # NOTE(review): [0] raises IndexError if the lookup returns an empty
        # list, bypassing the ValueError below — confirm helper's contract.
        user = u.get_user_by_anon_project_specific_user_id(anon_id)[0]
        if user:
            self.users.append(user)
        else:
            raise ValueError(f"User {anon_id} could not be found")
    self.core_api_client = CoreApiClient()
def _get_task_from_user_group_id(self):
    """Resolve self.user_group_id to its single associated project task.

    Raises:
        ValueError: if the group maps to zero or several tasks.
    """
    matching_tasks = pg_utils.execute_query(
        base_sql=sql_q.TASKS_BY_USER_GROUP_ID_SQL,
        params=(self.user_group_id,)
    )
    if len(matching_tasks) == 1:
        self.project_task = matching_tasks[0]
    else:
        raise ValueError(
            f"User group {self.user_group_id} is associated with {len(matching_tasks)} tasks; expected 1"
        )
def main_2():
    """Write a csv summarising, for each project, how many users belong to
    its associated user groups.
    """
    projects = pg_utils.execute_query(sql_q.LIST_PROJECTS_SQL)
    # Fix: csv files must be opened with newline="" or the csv module emits
    # blank rows on Windows (per the csv module documentation).
    with open(output_file_name, "w", newline="") as output_file:
        writer = csv.writer(output_file)
        writer.writerow(["project_id", "project_short_name", "users"])
        for p in projects:
            project_id = p["id"]
            p_short_name = p["short_name"]
            users = get_users_in_project_groups(project_id)
            writer.writerow([project_id, p_short_name, len(users)])
def __init__(self, project_task_id=None):
    """Initialise from a project task id, prompting for one if not given."""
    if project_task_id is not None:
        self.project_task_id = project_task_id
    else:
        self.project_task_id = input("Please enter the project task id:")
    self.check_project_task_exists()
    rows = pg_utils.execute_query(
        sq.GET_PROJECT_BY_PROJECT_TASK_ID_SQL,
        [self.project_task_id],
        return_json=False,
    )
    # First column of the first (only) row is the project id.
    self.project_id = rows[0][0]
    self.users = list()
def get_anon_project_specific_user_ids(self):
    """Attach an anon_project_specific_user_id to every user in the task's
    groups, creating the user_project link where needed.

    Returns:
        list: the user dicts, each augmented with anon_project_specific_user_id.
    """
    users_in_task_groups = pg_utils.execute_query(
        sq.GROUP_USERS_FOR_PROJECT_TASK, [self.project_task_id])
    # Idiom fix: enumerate replaces the hand-rolled counter; len() is hoisted
    # out of the loop instead of being recomputed every iteration.
    total = len(users_in_task_groups)
    for counter, user in enumerate(users_in_task_groups, start=1):
        user_id = user["user_id"]
        print(f"Processing user {counter} of {total}")
        user_project = up.create_user_project_if_not_exists(
            user_id, self.project_id)
        user.update(anon_project_specific_user_id=user_project[
            "anon_project_specific_user_id"])
        self.users.append(user)
    return self.users
def test_user_group_membership_handler_ok(self):
    """Fire the membership event and verify the handler response plus the
    resulting row in projects_usergroupmembership.
    """
    pg_utils.truncate_table("public.projects_usergroupmembership")
    result = test_tools.test_eb_request(
        local_method=user_group_membership_handler,
        aws_eb_event=TEST_USER_GROUP_MEMBERSHIP_EVENT,
        aws_processing_delay=10,
    )
    # AWS runs return an SDK response; local runs return a Lambda-style dict.
    if test_tools.tests_running_on_aws():
        self.assertEqual(HTTPStatus.OK,
                         result["ResponseMetadata"]["HTTPStatusCode"])
    else:
        self.assertEqual(HTTPStatus.CREATED, result["statusCode"])
    clive_user_id = "8518c7ed-1df4-45e9-8dc4-d49b57ae0663"  # Clive
    g3_group_id = "d279e19b-0995-4239-9640-80de966b09a3"
    membership_rows = pg_utils.execute_query(
        base_sql=SQL_USER_GROUP_MEMBERSHIP,
        params=(clive_user_id, g3_group_id))
    self.assertEqual(1, len(membership_rows))
def main(user_group_id, project_id):
    """Export, as csv, the members of a user group who have not signed up to
    the given project; print a notice instead when there are none.
    """
    result = pg_utils.execute_query(
        GET_USERS_IN_GROUP_BUT_NOT_PROJECT,
        (user_group_id, project_id),
    )
    if not result:
        print(
            f"Nothing to output. All members of user group {user_group_id} "
            f"have signed up to project {project_id} (or the user group is empty)"
        )
    else:
        fieldnames = result[0].keys()
        # Output file is named after this script.
        with open(
            f"{os.path.splitext(os.path.basename(__file__))[0]}.csv",
            "w",
            newline="",  # fix: csv module requires newline="" (blank rows on Windows otherwise)
        ) as output_file:
            writer = csv.DictWriter(output_file, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(result)
def check_task_signups():
    """Cross-check task signups in the database against 'task-signup'
    notifications, printing each database signup with no matching notification.
    """
    sql = "select * from public.task_signups"
    signups_in_db = execute_query(sql)
    signups_notifications = get_notifications("type", ["task-signup"])
    print("Database:" + str(len(signups_in_db)) + ", notifications:" + str(len(signups_notifications)))
    # Perf/idiom fix: build the set of notified user_task ids once, replacing
    # the original O(n*m) nested scan with a flag variable.
    notified_user_task_ids = {
        notification["details"]["id"] for notification in signups_notifications
    }
    for db_signup in signups_in_db:
        if db_signup["user_task_id"] not in notified_user_task_ids:
            print("no match: " + db_signup["user_task_id"] + ", " + db_signup["email"] + ", " + db_signup["created"])
def main():
    """Print a JavaScript data series (start/end dates) for each project task,
    appending each started task's short name to the global js_categories list.
    """
    project_task_timestamps_sql = """
        SELECT
            p.short_name as project_short_name,
            pt.short_name as task_short_name,
            task_type_id,
            earliest_start_date,
            closing_date,
            p.demo
        FROM public.projects_projecttask pt
        JOIN public.projects_project p on pt.project_id = p.id
        ORDER BY earliest_start_date
        """
    tasks = pg_utils.execute_query(project_task_timestamps_sql)
    for task in tasks:
        task_name = task["task_short_name"]
        try:
            start = parser.parse(task["earliest_start_date"])
        except TypeError:
            # A NULL start date arrives as None; parser.parse(None) raises TypeError.
            print(f"Task {task_name} hasn't started yet. Skipped")
            continue
        js_categories.append(task_name)
        try:
            end = parser.parse(task["closing_date"])
        except TypeError:
            # Task still open — use today as the provisional end date.
            end = datetime.date.today()
        print(js_dataseries_str.format(
            year_0=start.year,
            month_0=start.month - 1,  # In JS, January is month 0
            day_0=start.day,
            year_1=end.year,
            month_1=end.month - 1,
            day_1=end.day,
        ))
def translate_ids(col_to_fetch):
    """Interactively translate pasted anon_project_specific_user_ids into the
    requested user columns, printing translated and unresolved ids.

    Args:
        col_to_fetch (iterable): column names to output for each resolved user.
    """
    logger = utils.get_logger()
    user_input = input(
        "Please paste list of anon_project_specific_user_ids separated by commas:"
    )
    anon_ids = split_string_into_list_of_strings(user_input)
    user_ids = list()
    unresolved_ids = list()
    for anon_project_specific_user_id in anon_ids:
        result = pg_utils.execute_query(
            sql_q.GET_USER_BY_ANON_PROJECT_SPECIFIC_USER_ID_SQL,
            [str(anon_project_specific_user_id)],
        )
        try:
            # result[0] raises IndexError when the id resolves to no user.
            user_row = result[0]
            user_ids.append(
                ", ".join(str(user_row[col]) for col in col_to_fetch))
        except IndexError:
            logger.warning(
                f"anon_project_specific_user_id {anon_project_specific_user_id} could not be found"
            )
            unresolved_ids.append(anon_project_specific_user_id)
    pg_utils.close_connection()
    if unresolved_ids:
        print("\nThe following anon_project_specific_user_ids could not be translated (%s):" % len(unresolved_ids))
        print(";\n".join(unresolved_ids))
    if user_ids:
        print("\nTranslated ids (%s):" % len(user_ids))
        print(";\n".join(user_ids))
def _common_assertions(self, groups, view_name, view_entity="p", testing_groups=None):
    """Assert that every row of a database view is accounted for by the
    expected (or testing) groups.

    Args:
        groups (dict or list of dicts): One or more dictionaries representing
            groups of users and visible projects
        view_name (str): Name of the view being tested
        view_entity (str): 'p' if each view row represents a project or 't'
            if they represent tasks
        testing_groups (dict or list of dicts): One or more dictionaries
            representing testing groups of users and visible projects

    Raises:
        NotImplementedError: if view_entity is not "p" or "t".
    """
    self.logger = utils.get_logger()
    if isinstance(groups, dict):
        groups = [groups]
    if isinstance(testing_groups, dict):
        testing_groups = [testing_groups]
    if view_entity == "p":
        entity_key = "projects"
        entity_name_column = "project_name"
    elif view_entity == "t":
        entity_key = "tasks"
        entity_name_column = "description"
    else:
        raise NotImplementedError(
            f'view_entity must be "p" or "t" (not {view_entity})')
    self.logger.info(f"Working on view {view_name}")
    view_result = pg_utils.execute_query(
        f"SELECT * FROM public.{view_name}")
    # Start by assuming every row is unexpected; prune the ones the group
    # definitions predict. Anything left over fails the assertion.
    unexpected_rows = view_result.copy()
    # Decomposition: the original duplicated this four-deep loop for normal
    # and testing groups, differing only in the expected `testing` flag.
    self._prune_expected_rows(groups, view_result, unexpected_rows,
                              entity_key, entity_name_column, testing=False)
    self._prune_expected_rows(testing_groups, view_result, unexpected_rows,
                              entity_key, entity_name_column, testing=True)
    self.assertFalse(unexpected_rows)

def _prune_expected_rows(self, groups, view_result, unexpected_rows,
                         entity_key, entity_name_column, testing):
    """Remove from unexpected_rows every view row predicted by `groups`,
    matching on email, entity name, group name and the `testing` flag.
    """
    if not groups:
        return
    for g in groups:
        for u in g["users"]:
            for e in g[entity_key]:
                for r in view_result:
                    # The default in r.get mirrors `testing` so a row with no
                    # "testing" column matches its own kind — this reproduces
                    # the original's `get("testing", False) is False` /
                    # `get("testing", True) is True` pair exactly.
                    if ((r["email"] == u)
                            and (r[entity_name_column] == e)
                            and (r["group_name"] == g["group_name"])
                            and (r.get("testing", testing) is testing)):
                        self.logger.debug(
                            f"Removing expected row from list of unexpected rows",
                            extra={"expected row": r},
                        )
                        unexpected_rows.remove(r)
def get_user_group_ids(self):
    """Fetch, cache and return the ids of non-test users in the groups
    associated with this project task.
    """
    membership_rows = pg_utils.execute_query(
        sql_q.NON_TEST_USERS_IN_PROJECT_TASK_GROUPS,
        [self.project_task_id])
    self.user_group_ids = [row["user_id"] for row in membership_rows]
    return self.user_group_ids
def get_test_group_ids(self):
    """Fetch, cache and return the ids of test-group users for this
    project task.
    """
    tester_rows = pg_utils.execute_query(
        sql_q.TEST_GROUP_USERS_FOR_PROJECT_TASK,
        [self.project_task_id])
    self.test_group_ids = [row["user_id"] for row in tester_rows]
    return self.test_group_ids
def get_users_in_project_groups(project_id):
    """Return every user belonging to a user group linked to the project."""
    query_params = (project_id,)
    return pg_utils.execute_query(GET_USERS_IN_PROJECT_GROUPS, query_params)
""" import csv import os import api.local.dev_config # sets env variables import api.local.secrets # sets env variables import api.endpoints.common.pg_utilities as pg_utils import api.endpoints.common.sql_queries as sql_q HOME = os.path.expanduser("~") DOWNLOADS = os.path.join(HOME, "Downloads") if __name__ == "__main__": project_task_id = input( "Please type the project task id you would like to query:").strip() users = pg_utils.execute_query(sql_q.NON_TEST_USERS_IN_PROJECT_TASK_GROUPS, [project_task_id]) pg_utils.close_connection() output_filename = f"{project_task_id}_users_in_associated_groups.csv" user_ids = list() output_path = os.path.join(DOWNLOADS, output_filename) try: csvfile = open(output_path, "w") except FileNotFoundError: output_path = os.path.join(HOME, output_filename) csvfile = open(output_path, "w") writer = csv.writer(csvfile) writer.writerow(["user_id", "email"]) for u in users: u_id = u["user_id"] writer.writerow([u_id, u["email"]])
def check_no_testers_in_data(self):
    """Classify every user referenced in the input csv by their membership of
    this project task's user group and test group.

    Each row is resolved to a user either by user_id or, when that column is
    absent, by anon_project_specific_user_id; invalid ids are logged and
    skipped. Resolved users are added to self.user_table.

    Returns:
        tuple: (users_in_both_groups, users_in_test_group, users_in_user_group,
            user_ids_in_input_file, this_users,
            number_of_rows_with_duplicate_user_ids)

    Raises:
        ValueError: if a resolved user is in neither group for this task.
    """
    users_in_both_groups = list()
    users_in_test_group = list()
    users_in_user_group = list()
    user_ids_in_input_file = list()
    unique_user_ids_in_input_file = set()
    this_users = list()
    with open(self.input_filename) as csv_f:
        reader = csv.DictReader(csv_f)
        for row in reader:
            user_id = row.get(self.user_id_column)
            # Bug fix: fetch the anon id for every row, not only when user_id
            # is missing. The original left this name unset on the user_id
            # path, so the ValueError message below raised NameError (or
            # reused a stale value from an earlier row).
            anon_project_specific_user_id = row.get(
                self.anon_project_specific_user_id_column)
            if user_id is None:
                try:
                    utils.validate_uuid(anon_project_specific_user_id)
                except utils.DetailedValueError:
                    self.logger.warning(
                        f'anon_project_specific_user_id "{anon_project_specific_user_id}" not valid; row skipped'
                    )
                    continue
                user = pg_utils.execute_query(
                    sql_q.GET_USER_BY_ANON_PROJECT_SPECIFIC_USER_ID_SQL,
                    [str(anon_project_specific_user_id)],
                )[0]
                user_id = user["id"]
            else:
                try:
                    utils.validate_uuid(user_id)
                except utils.DetailedValueError:
                    self.logger.warning(
                        f'User id "{user_id}" not valid; row skipped')
                    continue
                user = pg_utils.execute_query(
                    f"{sql_q.BASE_USER_SELECT_SQL} WHERE id = %s",
                    [str(user_id)],
                )[0]
            self.user_table.add_row([
                user["first_name"],
                user["last_name"],
                user["email"],
            ])
            user_ids_in_input_file.append(user_id)
            unique_user_ids_in_input_file.add(user_id)
            if (user_id in self.test_group_ids) and (user_id in self.user_group_ids):
                self.logger.warning(
                    f"User {user_id} is both in user group and in test group for this project task"
                )
                users_in_both_groups.append(user_id)
            elif user_id in self.test_group_ids:
                # NOTE(review): message wording looks garbled — confirm intent
                self.logger.error(
                    f"User {user_id} is a tester; they are not in user this project task"
                )
                users_in_test_group.append(user_id)
            elif user_id in self.user_group_ids:
                users_in_user_group.append(user_id)
                # Flag institute members found in participant data.
                user = u.get_user_by_id(user_id)[0]
                user_email = user["email"]
                if user_email:
                    if "@thisinstitute.cam.ac.uk" in user_email:
                        self.logger.error(
                            f"User {user_id} is a member of THIS")
                        this_users.append(user_id)
            else:
                # User belongs to neither group — dump both lists for
                # debugging before aborting.
                print("user_group_ids:")
                pprint(self.user_group_ids)
                print("\ntest_group_ids:")
                pprint(self.test_group_ids)
                err_message = (
                    f"User id {user_id} (anon_project_specific_user_id: {anon_project_specific_user_id}) could not be "
                    f"found on user group or test group of project task {self.project_task_id}. Are "
                    f"you sure you entered the correct project task id?")
                raise ValueError(err_message)
    number_of_rows_with_duplicate_user_ids = len(
        user_ids_in_input_file) - len(unique_user_ids_in_input_file)
    return (
        users_in_both_groups,
        users_in_test_group,
        users_in_user_group,
        user_ids_in_input_file,
        this_users,
        number_of_rows_with_duplicate_user_ids,
    )