def main(dry_run=False):
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]

    # fix graphs (raw strings used so \w is not treated as an invalid escape)
    highcharts_re = re.compile(r'<div id="highcharts-(?P<graph_id>[\w-]+)"')
    everviz_script_re = re.compile(
        r'<script src="https://app.everviz.com/inject/[\w-]+/" defer="defer"></script>'
    )
    target_script = '<script src="https://app.everviz.com/inject/{graph_id}/" defer="defer"></script>'
    updated_questions_counter = 0
    updated_questions_tags = list()
    # list of tuples: (question_id, question, graph_id, DataExportTag, everviz_script)
    graph_questions = list()
    for k, v in questions.items():
        question_text = v["QuestionText"]
        tag = v["DataExportTag"]
        if graph_m := highcharts_re.search(question_text):
            everviz_script = None
            if script_m := everviz_script_re.search(question_text):
                everviz_script = script_m.group()
            graph_questions.append(
                (k, v, graph_m.group("graph_id"), tag, everviz_script)
            )
def __init__(self, qualtrics_account_name="cambridge", survey_id=None, correlation_id=None):
    client = SurveyDefinitionsClient(
        qualtrics_account_name=qualtrics_account_name,
        survey_id=survey_id,
        correlation_id=correlation_id,
    )
    response = client.get_survey()
    assert (
        response["meta"]["httpStatus"] == "200 - OK"
    ), f"Call to Qualtrics API failed with response {response}"
    self.survey_id = survey_id
    self.definition = response["result"]
    self.flow = self.definition["SurveyFlow"]["Flow"]
    self.blocks = self.definition["Blocks"]
    self.questions = self.definition["Questions"]
    self.modified = self.definition["LastModified"]
    self.ddb_client = Dynamodb(stack_name=const.STACK_NAME)
    self.logger = utils.get_logger()
    self.logger.debug(
        "Initialised SurveyDefinition",
        extra={
            "__dict__": self.__dict__,
            "correlation_id": correlation_id,
        },
    )
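# A minimal usage sketch for the class this __init__ belongs to (the debug log
# above suggests it is called SurveyDefinition; the class name and the survey
# ID here are assumptions for illustration, not confirmed by this snippet):
#
#     definition = SurveyDefinition(survey_id="SV_XXXXXXXXXXX")
#     print(definition.modified)        # survey's "LastModified" timestamp
#     print(len(definition.questions))  # number of questions in the definition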
def main(dry_run):
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=conf.CURRENT_SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    updated_questions = list()
    for k, v in questions.items():
        updated_question = False
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]

        # add t1 question scores and graphs
        if export_tag in conf.current_delphi_round_question_config:
            survey_question = SurveyQuestion(
                v, conf.current_delphi_round_question_config[export_tag]
            )
            survey_question.inject_previous_round_delphi_score()

            # graphs
            # add question JS
            existing_question_js = v.get("QuestionJS")
            if not existing_question_js:
                existing_question_js = q_js.EMPTY_JS
            split_js = q_js.js_add_on_ready_re.split(existing_question_js, maxsplit=1)
            assert (
                n := len(split_js)
            ) == 2, f"Unexpected split of existing_question_js ({n}): {split_js}"
            t1_tag = conf.this_task_to_previous_task_mapping[export_tag]
            v["QuestionJS"] = (
                split_js[0]
                + "Qualtrics.SurveyEngine.addOnReady(function()\n{"
                + get_graph_js_for_qualtrics(t1_tag)
                + split_js[1]
            )

            # add graph container
            container_id = f"highcharts-{t1_tag}"
            replacements_1 = conf.chart_placeholder_re.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            total_replacements = replacements_1[1]
            assert (
                total_replacements == 1
            ), f"Unexpected number of replacements ({total_replacements}) in question_text: {question_text}"
            replacements = replacements_1
            v["QuestionText"] = replacements[0]
            updated_question = True

        if updated_question:
            updated_questions.append(export_tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
    print(
        f"Updated {len(updated_questions)} questions: {updated_questions} {dry_run_postfix}"
    )
def main(dry_run=False):
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]

    # # adjust choice order
    # adjusted_order_questions = adjust_choices_order(
    #     questions=questions,
    #     mapping=choices_mapping,
    #     question_target_template=mc_question_target,
    #     number_of_choices=7,
    # )
    # if not dry_run:
    #     for k, v in adjusted_order_questions.items():
    #         survey_client.update_question(question_id=k, data=v)
    # print(
    #     f"Choice order of {len(adjusted_order_questions)} questions was updated {dry_run_postfix}"
    # )
    # questions.update(adjusted_order_questions)
    # # logger.info("Questions", extra={"questions": questions})

    # replace modal prompt (raw strings used so \d is not treated as an invalid escape)
    modal_link_re = regex.compile(
        r'(<p><span class="info-icon">ⓘ</span> <a href="#modal-(\d+)" rel="modal:open">'
        r"Read more about this statement</a>.</p>){e<=3}",  # allow up to 3 errors
        regex.ENHANCEMATCH,
    )
    target_link = '<p><span class="info-icon">ⓘ</span> <a href="#modal-{}" rel="modal:open">Read more about this statement</a></p>'
    updated_questions_counter = 0
    updated_questions_tags = list()
    for k, v in questions.items():
        question_text = v["QuestionText"]
        tag = v["DataExportTag"]
        if m := modal_link_re.search(question_text):
            v["QuestionText"] = question_text.replace(
                m.group(1), target_link.format(m.group(2))
            )
            logger.debug(
                "Question text after replacement",
                extra={"question text": v["QuestionText"]},
            )
            updated_questions_counter += 1
            updated_questions_tags.append(tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
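# A minimal, self-contained sketch of the fuzzy matching used above. The
# third-party `regex` module (unlike the stdlib `re`) supports fuzzy
# quantifiers: `{e<=3}` after a group allows up to 3 errors (insertions,
# deletions or substitutions combined), and the ENHANCEMATCH flag asks the
# engine to improve the fit of a fuzzy match. The sample strings here are
# invented for illustration.
import regex

fuzzy_re = regex.compile(r"(Read more about this statement){e<=3}", regex.ENHANCEMATCH)

print(bool(fuzzy_re.search("Read more about this statement")))  # exact -> True
print(bool(fuzzy_re.search("Read more about ths statemnt")))    # 2 errors -> True
print(bool(fuzzy_re.search("Something else entirely")))         # too different -> False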
def main():
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    for k, v in questions.items():
        question_text = v["QuestionText"]
        updated_question = None
        # note: despite the _RE suffixes, these constants are used as plain
        # substring tests here, so they must be strings, not compiled patterns
        if IMPORTANT_QUESTION_RE in question_text:
            updated_question = parse_important_question(v)
        elif (EASY_QUESTION_RE_1 in question_text) or (
            EASY_QUESTION_RE_2 in question_text
        ):
            updated_question = parse_easy_question(v)
        else:
            print(f"Skipped question: {question_text}")
        if updated_question:
            survey_client.update_question(question_id=k, data=updated_question)
class SurveyEditor:
    """
    Use this class for minor updates to manually-created surveys
    """

    def __init__(self, survey_id, account_name="cambridge"):
        self.logger = get_logger()
        self.survey_client = SurveyDefinitionsClient(
            qualtrics_account_name=account_name, survey_id=survey_id
        )
        self.survey_definition = self.survey_client.get_survey()["result"]
        self.questions = self.survey_definition["Questions"]
        self.updated_questions = dict()

    def update_questions(self):
        for k, v in self.updated_questions.items():
            self.survey_client.update_question(question_id=k, data=v)
        print(
            f"Updated {len(self.updated_questions)} questions: {list(self.updated_questions.keys())}"
        )
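# A minimal usage sketch for SurveyEditor above; the survey ID and the text
# substitution are invented for illustration. Edited question definitions are
# staged in `updated_questions` (keyed by question ID), then pushed in bulk.
if __name__ == "__main__":
    editor = SurveyEditor(survey_id="SV_XXXXXXXXXXX")  # hypothetical survey ID
    for question_id, question in editor.questions.items():
        if "old wording" in question["QuestionText"]:  # hypothetical edit
            question["QuestionText"] = question["QuestionText"].replace(
                "old wording", "new wording"
            )
            editor.updated_questions[question_id] = question
    editor.update_questions()  # one update_question call per staged question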
def main():
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    t2_to_t1_extended_mapping = get_extended_mapping()
    for k, v in questions.items():
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]
        if export_tag in t2_to_t1_extended_mapping:
            if (m := PROMPT_QUESTION_RE.search(question_text)) or (
                m := SLIDER_QUESTION_RE.search(question_text)
            ):
                logger.debug(
                    "Expected substitution",
                    extra={
                        "before": m.group("question"),
                        "after": t2_to_t1_extended_mapping[export_tag],
                    },
                )
                v["QuestionText"] = question_text.replace(
                    m.group("question"), t2_to_t1_extended_mapping[export_tag]
                )
                survey_client.update_question(question_id=k, data=v)
def __init__(self, **kwargs):
    self.target_questions = kwargs["target_questions"]
    previous_round_survey_id = kwargs.get("previous_round_survey_id")
    self.target_levels = kwargs.get("target_levels")
    self.reverse_levels = kwargs.get("reverse_levels", False)
    self.preceding_questions = kwargs.get("preceding_questions")
    self.following_questions = kwargs.get("following_questions")
    previous_round_csv = kwargs.get("previous_round_csv")
    survey_id = kwargs.get("survey_id")
    survey_name = kwargs.get("survey_name")
    super(DelphiSurveyManager, self).__init__(
        survey_id=survey_id,
        survey_name=survey_name,
        input_dataset=previous_round_csv,
    )
    if previous_round_survey_id:
        previous_round_survey_client = SurveyDefinitionsClient(
            survey_id=previous_round_survey_id
        )
        previous_survey = previous_round_survey_client.get_survey()["result"]
        self.previous_round_questions = self.covert_key_from_id_to_tag(
            previous_survey["Questions"]
        )
    else:
        self.previous_round_questions = None
    self.chart_options = kwargs.get("chart_options")
    self.graph_generator = highcharts_utils.GraphGenerator(
        target_questions=kwargs["target_questions"],
        input_filename=previous_round_csv,
        study_group_column_heading=kwargs.get("study_group_column_heading"),
        study_groups_definition=kwargs.get("study_groups_definition"),
        target_levels=self.target_levels,
        chart_options=self.chart_options,
    )
    self.delphi_question_options = kwargs.get("delphi_question_options", dict())
class SurveyManager(CsvImporter):
    """
    Manages the process of creating/updating surveys from scratch
    """

    def __init__(self, survey_id=None, survey_name=None, input_dataset=None):
        super().__init__(csvfile_path=input_dataset)
        self.survey_client = SurveyDefinitionsClient(survey_id=survey_id)
        if survey_id is None:
            if survey_name == "Test survey":
                from random import randrange

                survey_name = f"Test survey {randrange(99999)}"
            self.survey_client.create_survey(survey_name)
            print(f"Created new survey: {survey_name}")
        self.survey = self.survey_client.get_survey()["result"]
        # pprint(self.survey)
        self.groups = dict()
        self.questions = dict()  # rendered questions indexed by export tag
        self.thrash_block_id = None
        self.blocks_to_update = dict()
        self.blocks_to_add = dict()
        self.blocks_to_delete = dict()
        self.blocks_not_to_touch = dict()
        self.questions_to_update = dict()
        self.questions_to_add = dict()
        self.questions_to_delete = dict()
        self.questions_not_to_touch = dict()

    # region question methods
    def add_questions(self):
        responses = dict()
        for k, q in self.questions_to_add.items():
            responses[k] = self.survey_client.create_question(data=q)
        return responses

    def update_questions(self):
        responses = dict()
        for k, q in self.questions_to_update.items():
            responses[k] = self.survey_client.update_question(q["QuestionID"], data=q)
        return responses

    def delete_questions(self):
        responses = dict()
        for k, q in self.questions_to_delete.items():
            responses[k] = self.survey_client.delete_question(q["QuestionID"])
        return responses
    # endregion

    @staticmethod
    def covert_key_from_id_to_tag(questions):
        return {v["DataExportTag"]: v for _, v in questions.items()}

    @staticmethod
    def is_question_text_identical(q1, q2):
        return q1["QuestionText"] == q2["QuestionText"]
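# A minimal usage sketch for SurveyManager above (the survey name, CSV path
# and question payload are invented for illustration). Changes are staged in
# the *_to_add / *_to_update / *_to_delete dicts, then applied in bulk:
#
#     manager = SurveyManager(survey_name="My survey", input_dataset="questions.csv")
#     manager.questions_to_add["tag1"] = rendered_question  # a full Qualtrics question payload
#     responses = manager.add_questions()  # one create_question call per staged question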
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# A copy of the GNU Affero General Public License is available in the
# docs folder of this project. It is also available at www.gnu.org/licenses/
#
import local.dev_config  # sets env variables TEST_ON_AWS and AWS_TEST_API
import local.secrets  # sets env variables THISCOVERY_AFS25_PROFILE and THISCOVERY_AMP205_PROFILE
from pprint import pprint

from thiscovery_lib.qualtrics import SurveyDefinitionsClient

from local.dev_config import SURVEY_ID, QUESTION_TAG, SURVEY_ACCOUNT

survey_client = SurveyDefinitionsClient(
    survey_id=SURVEY_ID, qualtrics_account_name=SURVEY_ACCOUNT
)
survey = survey_client.get_survey()["result"]
if QUESTION_TAG:
    questions = survey["Questions"]
    for _, v in questions.items():
        tag = v["DataExportTag"]
        if tag == QUESTION_TAG:
            with open("survey_definition.py", "w") as sd:
                pprint(v, sd)
            break
else:
    with open("survey_definition.py", "w") as sd:
        pprint(survey, sd)
#
# Thiscovery API - THIS Institute’s citizen science platform
# Copyright (C) 2019 THIS Institute
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# A copy of the GNU Affero General Public License is available in the
# docs folder of this project. It is also available at www.gnu.org/licenses/
#
import local.dev_config  # sets env variables TEST_ON_AWS and AWS_TEST_API
import local.secrets  # sets env variables THISCOVERY_AFS25_PROFILE and THISCOVERY_AMP205_PROFILE
from pprint import pprint

from thiscovery_lib.qualtrics import SurveyDefinitionsClient

SURVEY_ID = "XXXXXXXXXXX"

survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
survey = survey_client.get_survey()["result"]
print(survey["SurveyName"])
def main(dry_run):
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    updated_questions = list()
    for k, v in questions.items():
        updated_question = False
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]

        # replace consensus modals by tooltip
        no_consensus_replacements = no_consensus_re.subn(
            no_consensus_target, question_text
        )
        consensus_reached_replacements = consensus_reached_re.subn(
            consensus_reached_target, question_text
        )
        total_replacements = (
            no_consensus_replacements[1] + consensus_reached_replacements[1]
        )
        if total_replacements > 0:
            if no_consensus_replacements[1] > 0:
                div_name = "Consensus failed"
                question_text = no_consensus_replacements[0]
            elif consensus_reached_replacements[1] > 0:
                div_name = "Consensus reached"
                question_text = consensus_reached_replacements[0]
            logger.info(f"{div_name} div replaced in question {export_tag}", extra={})
            updated_question = True

        # add t1 question scores and graphs
        if export_tag in t2_to_t1_mapping:
            # question scores
            if m := previous_score_re.search(question_text):
                question_text = question_text.replace(
                    m.group("question"), t2_to_t1_mapping[export_tag]
                )
            else:
                logger.error(
                    f"No match for previous score regular expression found in question {export_tag}",
                    extra={"question_text": question_text},
                )

            # graphs
            # add question JS
            existing_question_js = v.get("QuestionJS")
            if not existing_question_js:
                existing_question_js = QUALTRICS_EMPTY_JS
            split_js = js_add_on_ready_re.split(existing_question_js, maxsplit=1)
            assert (
                n := len(split_js)
            ) == 2, f"Unexpected split of existing_question_js ({n}): {split_js}"
            t1_tag = t2_to_t1_mapping[export_tag]
            v["QuestionJS"] = (
                split_js[0]
                + "Qualtrics.SurveyEngine.addOnReady(function()\n{"
                + get_graph_js_for_qualtrics(t1_tag)
                + split_js[1]
            )

            # add graph container
            container_id = f"highcharts-{t1_tag}"
            replacements_1 = highcharts_placeholder_re_1.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            replacements_2 = highcharts_placeholder_re_2.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            total_replacements = replacements_1[1] + replacements_2[1]
            assert (
                total_replacements == 1
            ), f"Unexpected number of replacements ({total_replacements}) in question_text: {question_text}"
            replacements = replacements_1
            if replacements_2[1] > 0:
                replacements = replacements_2
            v["QuestionText"] = replacements[0]
            updated_question = True

        if updated_question:
            updated_questions.append(export_tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
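# A minimal, self-contained sketch of the QuestionJS injection used above.
# The regex and sample JS below are assumptions for illustration (the real
# pattern lives in the module as `js_add_on_ready_re`). The idea: split the
# existing JS once at the addOnReady opening marker, then rejoin with the
# extra statements placed just inside the handler body.
import re

js_add_on_ready_demo_re = re.compile(
    r"Qualtrics\.SurveyEngine\.addOnReady\(function\(\)\s*\{"
)
existing_js = (
    "Qualtrics.SurveyEngine.addOnload(function()\n{\n});\n"
    "Qualtrics.SurveyEngine.addOnReady(function()\n{\n});"
)
graph_js = "\nrenderGraph('highcharts-demo');"  # hypothetical injected code

head, tail = js_add_on_ready_demo_re.split(existing_js, maxsplit=1)
injected = head + "Qualtrics.SurveyEngine.addOnReady(function()\n{" + graph_js + tail
print(injected)  # addOnReady handler now starts with the injected graph code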