Example #1
 def __init__(self, logger=None, correlation_id=None):
     self.ddb_client = Dynamodb(stack_name=STACK_NAME)
     self.correlation_id = correlation_id
     self.logger = logger
     if logger is None:
         self.logger = utils.get_logger()
     # set the logger before this call, in case the method needs to log
     self.target_appointment_ids = self.get_appointments_to_be_deleted()
Example #2
 def __init__(self,
              qualtrics_account_name="cambridge",
              survey_id=None,
              correlation_id=None):
     client = SurveyDefinitionsClient(
         qualtrics_account_name=qualtrics_account_name,
         survey_id=survey_id,
         correlation_id=correlation_id,
     )
     response = client.get_survey()
     assert (response["meta"]["httpStatus"] == "200 - OK"
             ), f"Call to Qualtrics API failed with response {response}"
     self.survey_id = survey_id
     self.definition = response["result"]
     self.flow = self.definition["SurveyFlow"]["Flow"]
     self.blocks = self.definition["Blocks"]
     self.questions = self.definition["Questions"]
     self.modified = self.definition["LastModified"]
     self.ddb_client = Dynamodb(stack_name=const.STACK_NAME)
     self.logger = utils.get_logger()
     self.logger.debug(
         "Initialised SurveyDefinition",
         extra={
             "__dict__": self.__dict__,
             "correlation_id": correlation_id,
         },
     )
Example #3
def execute_non_query(sql, params, correlation_id=None):
    """
    Use this method to make changes that will be committed to the database (e.g. UPDATE, DELETE calls)
    """
    if correlation_id is None:
        # generate a fresh id per call; a default argument expression would be
        # evaluated only once, when the function is defined
        correlation_id = new_correlation_id()
    logger = get_logger()
    conn = _get_connection(correlation_id)
    sql = minimise_white_space(sql)
    param_str = str(params)
    logger.info(
        "postgres query",
        extra={
            "query": sql,
            "parameters": param_str,
            "correlation_id": correlation_id
        },
    )
    with conn.cursor() as cursor:
        try:
            cursor.execute(sql, params)
            rowcount = cursor.rowcount
            conn.commit()
        except psycopg2.IntegrityError as err:
            errorjson = {
                "error": err.args[0],
                "correlation_id": str(correlation_id)
            }
            raise DetailedIntegrityError("Database integrity error", errorjson)
    return rowcount
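A minimal usage sketch for execute_non_query; the table, columns and values below are hypothetical, for illustration only:

# hypothetical table and columns; execute_non_query commits the change
# and returns the number of affected rows
rows_affected = execute_non_query(
    sql="UPDATE projects SET status = %s WHERE id = %s",
    params=("closed", "42"),
)
print(f"Updated {rows_affected} rows")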
Example #4
    def __init__(self, correlation_id=None, ddb_client=None):
        self.id = None
        self.user_project = None
        self.project_task = None
        self.status = None
        self.consented = None
        self.progress_info = None
        self.ext_user_task_id = None
        self.anon_user_task_id = None
        self.user_task_url = None

        self._correlation_id = correlation_id
        self.user_id = None
        self.first_name = None
        self.last_name = None
        self.email = None
        # this is the same as project_task above
        self.project_task_id = None
        self.project_task_status = None
        self.project_task_url_params = None
        self.created = None
        self.project_id = None
        self.base_url = None
        self.task_provider_name = None
        self.external_task_id = None
        self.user_specific_url = None
        self.anonymise_url = None
        # this is the same as user_project above
        self.user_project_id = None
        self.anon_project_specific_user_id = None
        self.task_type_name = None

        self._ddb_client = ddb_client
        self._logger = utils.get_logger()
Example #5
    def __init__(self, appointment_id, logger=None, correlation_id=None):
        self.appointment_id = str(appointment_id)
        self.acuity_info = None
        self.calendar_id = None
        self.calendar_name = None
        self.participant_email = None
        self.participant_user_id = None
        self.appointment_type = AppointmentType()
        self.latest_participant_notification = (
            "0000-00-00 00:00:00+00:00"  # used as GSI sort key, so cannot be None
        )
        self.appointment_date = None
        self.anon_project_specific_user_id = None
        self.anon_user_task_id = None
        self.appointment_type_id = None

        self._logger = logger
        if self._logger is None:
            self._logger = utils.get_logger()
        self._correlation_id = correlation_id
        self._ddb_client = Dynamodb(stack_name=STACK_NAME)
        self._core_api_client = CoreApiClient(
            correlation_id=self._correlation_id)  # transactional emails
        self._acuity_client = AcuityClient(correlation_id=self._correlation_id)
        self.original_appointment = (
            None  # used to store appointment history if rescheduled
        )
Example #6
 def __init__(self, survey_id, account_name="cambridge"):
     self.logger = get_logger()
     self.survey_client = SurveyDefinitionsClient(
         qualtrics_account_name=account_name, survey_id=survey_id)
     self.survey_definition = self.survey_client.get_survey()["result"]
     self.questions = self.survey_definition["Questions"]
     self.updated_questions = dict()
Example #7
 def __init__(self, question_dict, question_config):
     self.question_dict = question_dict
     self.question_config = question_config
     self.export_tag = self.question_dict["DataExportTag"]
     self.question_text = self.question_dict["QuestionText"]
     self.question_js = self.question_dict.get("QuestionJS")
     self.logger = get_logger()
Example #8
    def __init__(self,
                 ddb_client=None,
                 acuity_client=None,
                 logger=None,
                 correlation_id=None):
        self.type_id = None
        self.name = None
        self.category = None
        self.has_link = None
        self.send_notifications = None
        self.templates = None
        self.modified = None  # flag used in ddb_load method to check if ddb data was already fetched
        self.project_task_id = None
        self.system = None  # Views, MyInterview, etc

        self._logger = logger
        self._correlation_id = correlation_id
        if logger is None:
            self._logger = utils.get_logger()
        self._ddb_client = ddb_client
        if ddb_client is None:
            self._ddb_client = Dynamodb(stack_name=STACK_NAME)
        self._acuity_client = acuity_client
        if acuity_client is None:
            self._acuity_client = AcuityClient(
                correlation_id=self._correlation_id)
Example #9
 def __init__(self, survey_consent_event):
     self.logger = survey_consent_event.get("logger", utils.get_logger())
     self.correlation_id = survey_consent_event.get(
         "correlation_id", utils.new_correlation_id())
     self.consent_dict = json.loads(survey_consent_event["body"])
     self.consent_info_url = self.consent_dict["consent_info_url"]
     del self.consent_dict["consent_info_url"]
     consent_embedded_data_fieldname = "consent_statements"
     self.consent_dict[consent_embedded_data_fieldname] = json.loads(
         self.consent_dict[consent_embedded_data_fieldname])
     self.to_recipient_email = self.consent_dict.get("to_recipient_email")
     try:
         self.template_name = self.consent_dict["template_name"]
     except KeyError:
         self.template_name = DEFAULT_CONSENT_EMAIL_TEMPLATE
     else:
         del self.consent_dict["template_name"]
     try:
         self.consent_dict[
             "consent_datetime"] = qualtrics2thiscovery_timestamp(
                 self.consent_dict["consent_datetime"])
     except KeyError:
         self.consent_dict["consent_datetime"] = str(utils.now_with_tz())
     self.core_api_client = CoreApiClient(
         correlation_id=self.correlation_id)
     self.consent = Consent(core_api_client=self.core_api_client,
                            correlation_id=self.correlation_id)
     self.consent.from_dict(consent_dict=self.consent_dict)
Example #10
 def __init__(
     self,
     response_id,
     event_time=None,
     anon_project_specific_user_id=None,
     anon_user_task_id=None,
     detail_type=None,
     detail=None,
     correlation_id=None,
     interview_task_id=None,
     account=None,
 ):
     super().__init__(
         response_id=response_id,
         event_time=event_time,
         anon_project_specific_user_id=anon_project_specific_user_id,
         anon_user_task_id=anon_user_task_id,
         detail_type=detail_type,
         detail=detail,
         correlation_id=correlation_id,
         account=account,
     )
     del self.account
     try:
         self._detail["event_type"] = "user_interview_task"
     except TypeError:
         self._detail = {"event_type": "user_interview_task"}
     self.interview_task_id = interview_task_id
     self.interview_task = None
     self._logger = utils.get_logger()
Example #11
def main(dry_run):
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=conf.CURRENT_SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    updated_questions = list()
    for k, v in questions.items():
        updated_question = False
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]

        # add t1 question scores and graphs
        if export_tag in conf.current_delphi_round_question_config:
            survey_question = SurveyQuestion(
                v, conf.current_delphi_round_question_config[export_tag]
            )
            survey_question.inject_previous_round_delphi_score()

            # graphs
            # add question JS
            existing_question_js = v.get("QuestionJS")
            if not existing_question_js:
                existing_question_js = q_js.EMPTY_JS
            split_js = q_js.js_add_on_ready_re.split(existing_question_js, maxsplit=1)
            assert (
                n := len(split_js)
            ) == 2, f"Unexpected split of existing_question_js ({n}): {split_js}"
            t1_tag = conf.this_task_to_previous_task_mapping[export_tag]
            v["QuestionJS"] = (
                split_js[0]
                + "Qualtrics.SurveyEngine.addOnReady(function()\n{"
                + get_graph_js_for_qualtrics(t1_tag)
                + split_js[1]
            )
            # add graph container
            container_id = f"highcharts-{t1_tag}"
            new_text, total_replacements = conf.chart_placeholder_re.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            assert (
                total_replacements == 1
            ), f"Unexpected number of replacements ({total_replacements}) in question_text: {question_text}"
            v["QuestionText"] = new_text
            updated_question = True

        if updated_question:
            updated_questions.append(export_tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
    print(
        f"Updated {len(updated_questions)} questions: {updated_questions} {dry_run_postfix}"
    )
Example #12
 def __init__(self, message_id, correlation_id=None):
     self.message_id = message_id
     self.correlation_id = correlation_id
     self.s3_client = S3Client()
     self.bucket = utils.get_secret("incoming-email-bucket")["name"]
     self.message = None
     self.message_obj_http_path = None
     self.logger = utils.get_logger()
Example #13
def list_users_by_project(project_id, logger=None, correlation_id=None):
    if logger is None:
        logger = utils.get_logger()
    users = execute_query(
        base_sql=sql_q.LIST_USERS_BY_PROJECT_SQL,
        params=(project_id, ),
        correlation_id=correlation_id,
    )
    return users
Example #14
def main():
    logger = utils.get_logger()
    ac = AcuityClient()
    for e in EVENTS:
        try:
            response = ac.post_webhooks(e)
            logger.info("Created webhook", extra={"response": response})
        except utils.DetailedValueError:
            pass
Example #15
def execute_non_query_multiple(sql_iterable,
                               params_iterable,
                               correlation_id=None):
    """
    Args:
        sql_iterable (tuple, list, etc.): iterable containing sql queries to be executed
        params_iterable (tuple, list, etc.): iterable containing params for sql queries in sql_iterable
        correlation_id:

    Returns:
        List of number of rows affected by each of the input sql queries
    """
    if correlation_id is None:
        correlation_id = new_correlation_id()  # a default argument would be evaluated only once
    logger = get_logger()
    conn = _get_connection(correlation_id)
    results = []
    with conn.cursor() as cursor:
        for (sql, params) in zip(sql_iterable, params_iterable):
            sql = minimise_white_space(sql)
            param_str = str(params)
            logger.info(
                "postgres query",
                extra={
                    "query": sql,
                    "parameters": param_str,
                    "correlation_id": correlation_id,
                },
            )

            try:
                cursor.execute(sql, params)
            except psycopg2.IntegrityError as err:
                errorjson = {
                    "error": err.args[0],
                    "correlation_id": str(correlation_id),
                }
                raise DetailedIntegrityError("Database integrity error",
                                             errorjson)

            rowcount = cursor.rowcount
            logger.info(
                f"postgres query updated {rowcount} rows",
                extra={
                    "query": sql,
                    "parameters": param_str,
                    "correlation_id": correlation_id,
                },
            )
            results.append(rowcount)
    conn.commit()
    return results
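A usage sketch for execute_non_query_multiple (queries and tables are made up): each query is paired with its params by position, and all changes are committed in a single transaction at the end.

# hypothetical queries; results holds the rowcount of each statement in order
results = execute_non_query_multiple(
    sql_iterable=(
        "UPDATE projects SET status = %s WHERE id = %s",
        "DELETE FROM project_tasks WHERE project_id = %s",
    ),
    params_iterable=(("closed", "42"), ("42",)),
)
print(results)  # e.g. [1, 3]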
Example #16
 def __init__(self, correlation_id=None):
     acuity_credentials = utils.get_secret("acuity-connection")
     self.session = requests.Session()
     self.session.auth = (
         acuity_credentials["user-id"],
         acuity_credentials["api-key"],
     )
     self.logger = utils.get_logger()
     self.calendars = None
     self.correlation_id = correlation_id
     self.app_endpoint = self.base_url + "appointments"
Example #17
def execute_query(
        base_sql,
        params=None,
        correlation_id=None,
        return_json=True,
        jsonize_sql=True,
):
    """
    Use this method to query the database (e.g. using SELECT). Changes will not be
    committed to the database, so don't use this method for UPDATE and DELETE calls.

    Args:
        base_sql:
        params (tuple or list): http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries
        correlation_id:
        return_json:
        jsonize_sql:

    Returns:

    """
    if correlation_id is None:
        correlation_id = new_correlation_id()  # a default argument would be evaluated only once
    logger = get_logger()
    # tell sql to create json if that's what's wanted
    if return_json and jsonize_sql:
        sql = _jsonize_sql(base_sql)
    else:
        sql = base_sql
    sql = minimise_white_space(sql)
    param_str = str(params)
    logger.info(
        "postgres query",
        extra={
            "query": sql,
            "parameters": param_str,
            "correlation_id": correlation_id
        },
    )
    conn = _get_connection(correlation_id)
    with conn.cursor() as cursor:
        cursor.execute(sql, params)
        records = cursor.fetchall()
    logger.info(
        "postgres result",
        extra={
            "rows returned": str(len(records)),
            "correlation_id": correlation_id
        },
    )

    if return_json:
        return _get_json_from_tuples(records)
    else:
        return records
Example #18
def cochrane_get(url):
    full_url = utils.get_secret("cochrane-connection")["base_url"] + url
    headers = {"Content-Type": "application/json"}
    logger = utils.get_logger()
    response = requests.get(full_url, headers=headers)
    if response.ok:
        data = response.json()
        logger.info("API response", extra={"body": data})
        return data
    else:
        raise utils.DetailedValueError("Cochrane API call failed",
                                       details={"response": response.content})
Example #19
def execute_query_multiple(
        base_sql_tuple,
        params_tuple=None,
        correlation_id=None,
        return_json=True,
        jsonize_sql=True,
):
    """
    Use this method to query the database (e.g. using SELECT). Changes will not be
    committed to the database, so don't use this method for UPDATE and DELETE calls.
    """
    if correlation_id is None:
        correlation_id = new_correlation_id()  # a default argument would be evaluated only once
    logger = get_logger()
    conn = _get_connection(correlation_id)
    if params_tuple is None:
        params_tuple = tuple([None] * len(base_sql_tuple))
    results = []
    with conn.cursor() as cursor:
        for (base_sql, params) in zip(base_sql_tuple, params_tuple):
            # tell sql to create json if that's what's wanted
            if return_json and jsonize_sql:
                sql = _jsonize_sql(base_sql)
            else:
                sql = base_sql
            sql = minimise_white_space(sql)
            param_str = str(params)
            logger.info(
                "postgres query",
                extra={
                    "query": sql,
                    "parameters": param_str,
                    "correlation_id": correlation_id,
                },
            )

            cursor.execute(sql, params)
            records = cursor.fetchall()
            logger.info(
                "postgres result",
                extra={
                    "rows returned": str(len(records)),
                    "correlation_id": correlation_id,
                },
            )

            if return_json:
                results.append(_get_json_from_tuples(records))
            else:
                results.append(records)
    logger.info("Returning multiple results", extra={"results": results})
    return results
Example #20
    def _assign_link_to_user(self, unassigned_links: list) -> str:
        """
        Assigns to user the unassigned link with the soonest expiration date.
        An existing anon_project_specific_user_id is checked at assignment type to protect against
        the unlikely but possible scenario where a concurrent invocation of the
        lambda has assigned the same link to a different user.

        Args:
            unassigned_links (list): All unassigned links for this account_survey_id in PersonalLinks table

        Returns:
            A url representing the personal link assigned to this user

        """
        # assign the soonest-to-expire link to the user
        unassigned_links.sort(key=lambda x: x["expires"])
        user_id_attr_name = "anon_project_specific_user_id"
        logger = utils.get_logger()
        for unassigned_link in unassigned_links:
            user_link = unassigned_link["url"]
            try:
                self.ddb_client.update_item(
                    table_name=const.PersonalLinksTable.NAME,
                    key=self.account_survey_id,
                    name_value_pairs={
                        "status": "assigned",
                        user_id_attr_name: self.anon_project_specific_user_id,
                    },
                    key_name="account_survey_id",
                    sort_key={"url": user_link},
                    ConditionExpression=Attr(user_id_attr_name).not_exists(),
                )
            except ClientError:
                logger.info(
                    "Link assignment failed; link is already assigned to another user",
                    extra={
                        "user_link": user_link,
                    },
                )
            else:
                return user_link

        logger.info(
            "Ran out of unassigned links; creating some more and retrying",
            extra={
                "unassigned_links": unassigned_links,
            },
        )
        unassigned_links = self._create_personal_links()
        return self._assign_link_to_user(unassigned_links)
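The concurrency protection above relies on DynamoDB conditional writes. A standalone sketch of that pattern with plain boto3; the table name, key values and user id below are illustrative, not the project's actual schema:

import boto3
from boto3.dynamodb.conditions import Attr
from botocore.exceptions import ClientError

table = boto3.resource("dynamodb").Table("PersonalLinks")
try:
    table.update_item(
        Key={"account_survey_id": "cambridge-SV_123",
             "url": "https://example.com/link-1"},
        UpdateExpression="SET #s = :s, anon_project_specific_user_id = :u",
        ExpressionAttributeNames={"#s": "status"},  # status is a reserved word
        ExpressionAttributeValues={":s": "assigned", ":u": "user-uuid"},
        # the write fails if a concurrent invocation already assigned this link
        ConditionExpression=Attr("anon_project_specific_user_id").not_exists(),
    )
except ClientError as err:
    if err.response["Error"]["Code"] != "ConditionalCheckFailedException":
        raise  # swallow only the expected condition failure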
Example #21
 def __init__(self, logger=None, correlation_id=None, test_event=False):
     """
     Args:
         logger:
         correlation_id:
         test_event: set to True when the event originates from unittests
     """
     self.logger = logger
     if logger is None:
         self.logger = utils.get_logger()
     self.core_api_client = CoreApiClient(correlation_id=correlation_id)
     self.correlation_id = correlation_id
     self.test_mode = test_event
     self.appointment = None
     self.original_booking = None  # used for determining if interviewer has changed when processing rescheduling events
Example #22
def main(webhook_ids):
    logger = utils.get_logger()
    ac = AcuityClient()
    webhooks = ac.get_webhooks()
    env_name = utils.get_environment_name()
    for wh in webhooks:
        if wh["id"] in webhook_ids:
            print("Acuity webhook details:")
            pprint(wh)
            confirmation = input(
                "Are you sure you want to delete the webhook above? (y/n)")
            if confirmation.lower() == "y":
                ac.delete_webhooks(wh["id"])
                logger.info("Deleted Acuity webhook", extra={"webhook": wh})
            print("")
Example #23
 def wrapper(*args, **kwargs):
     response = func(*args, **kwargs)
     if response.ok:
         try:
             return response.json()
         except JSONDecodeError:
             return response
     else:
         logger = utils.get_logger()
         logger.error(
             f"Acuity API call failed with response: {response}",
             extra={"response.content": response.content},
         )
         raise utils.DetailedValueError(
             f"Acuity API call failed with response: {response}",
             details={"response": response.content},
         )
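A wrapper like this only makes sense inside a decorator; a minimal sketch of the enclosing function (the decorator name is assumed, not taken from the source):

import functools

def process_acuity_response(func):
    # hypothetical decorator name; wraps an Acuity API call so callers get
    # parsed JSON on success and a logged DetailedValueError on failure
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        ...  # body as in Example #23
    return wrapper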
Example #24
def _get_connection(correlation_id=None):
    logger = get_logger()
    if config.conn is None:
        env_dict = get_secret("database-connection")
        if env_dict is None:
            raise ConnectionError(
                "Could not get database-connection secret from AWS")
        config.conn = psycopg2.connect(**env_dict)

        # using dsn obscures password
        logger.info(
            "created database connection",
            extra={
                "conn_string": config.conn.dsn,
                "correlation_id": correlation_id
            },
        )

    return config.conn
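Because the connection is cached on the module-level config object, repeated calls reuse a single connection, which matters across warm AWS Lambda invocations. A quick demonstration, assuming the secret and database are reachable:

conn_a = _get_connection()
conn_b = _get_connection()
assert conn_a is conn_b  # same cached psycopg2 connection, no reconnect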
Example #25
    def __init__(self, **kwargs):
        self._logger = utils.get_logger()
        self._correlation_id = kwargs.get("correlation_id")
        self.destination_survey_id = kwargs.get("survey_id")
        self.destination_response_id = kwargs.get("response_id")
        err_message = "Call to initialise_survey missing mandatory data: {}"
        assert self.destination_survey_id, err_message.format("survey_id")
        assert self.destination_survey_id, err_message.format("response_id")

        self.survey_init_config = SurveyInitConfig(
            destination_survey_id=self.destination_survey_id,
            correlation_id=self._correlation_id,
        )
        self.survey_init_config.get()
        try:
            self.survey_init_config.details
        except AttributeError:
            raise utils.ObjectDoesNotExistError(
                f"Initialisation config not found for survey {self.destination_survey_id}",
                details={},
            )

        self.destination_account = kwargs.get("account", "cambridge")
        self.anon_project_specific_user_id = kwargs.get(
            "anon_project_specific_user_id")
        assert self.anon_project_specific_user_id, err_message.format(
            "anon_project_specific_user_id")
        self._core_client = CoreApiClient(correlation_id=self._correlation_id)
        user = self._core_client.get_user_by_anon_project_specific_user_id(
            self.anon_project_specific_user_id)
        self.user_id = user["id"]
        self.ddb_client = Dynamodb(stack_name=const.STACK_NAME,
                                   correlation_id=self._correlation_id)
        self.cached_responses = dict()
        self.missing_responses = list()
        self.target_embedded_data = dict()
        self.responses_client = ResponsesClient(
            survey_id=self.destination_survey_id,
            qualtrics_account_name=self.destination_account,
            correlation_id=self._correlation_id,
        )
Example #26
def set_interview_url(appointment_id,
                      interview_url,
                      event_type,
                      logger=None,
                      correlation_id=None):
    """

    Args:
        appointment_id:
        interview_url:
        event_type (str): passed on to AppointmentNotifier ('booking', 'rescheduling' or 'cancellation')
        logger:
        correlation_id:

    Returns:
        update_result:
        notification_results (dict): Dictionary containing participant and researchers notification results

    """
    if logger is None:
        logger = utils.get_logger()
    appointment = AcuityAppointment(
        appointment_id=appointment_id,
        logger=logger,
        correlation_id=correlation_id,
    )
    appointment.ddb_load()
    update_result = appointment.update_link(link=interview_url)
    notification_results = {
        "participant": None,
        "researchers": list(),
    }
    if appointment.appointment_type.send_notifications is True:
        notifier = AppointmentNotifier(
            appointment=appointment,
            logger=logger,
            correlation_id=correlation_id,
        )
        notification_results = notifier.send_notifications(
            event_type=event_type)
    return update_result, notification_results
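A call sketch for set_interview_url; the appointment id and URL below are made up:

# hypothetical values; returns the update result plus notification outcomes
update_result, notification_results = set_interview_url(
    appointment_id="399999999",
    interview_url="https://meet.example.com/room-1",
    event_type="booking",
)
print(notification_results["participant"], notification_results["researchers"])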
Example #27
def process_notifications(event, context):
    logger = get_logger()
    notifications = c_notif.get_notifications_to_process(
        stack_name=const.STACK_NAME)
    logger.info("process_notifications",
                extra={"count": str(len(notifications))})

    # note that we need to process all registrations first, then do task signups (otherwise we might try to process a signup for someone not yet registered)
    signup_notifications = list()
    login_notifications = list()
    transactional_emails = list()
    for notification in notifications:
        notification_type = notification["type"]
        if notification_type == NotificationType.USER_REGISTRATION.value:
            pass
            # process_user_registration(notification)
        elif notification_type == NotificationType.TASK_SIGNUP.value:
            # add to list for later processing
            signup_notifications.append(notification)
        elif notification_type == NotificationType.USER_LOGIN.value:
            # add to list for later processing
            login_notifications.append(notification)
        elif notification_type == NotificationType.TRANSACTIONAL_EMAIL.value:
            transactional_emails.append(notification)
        else:
            error_message = (
                f"Processing of {notification_type} notifications not implemented yet"
            )
            logger.error(error_message)
            raise NotImplementedError(error_message)

    for signup_notification in signup_notifications:
        pass
        # process_task_signup(signup_notification)

    for login_notification in login_notifications:
        process_user_login(login_notification)

    for email in transactional_emails:
        pass  # processing of transactional emails is not implemented here
Example #28
def main():
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    t2_to_t1_extended_mapping = get_extended_mapping()
    for k, v in questions.items():
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]
        if export_tag in t2_to_t1_extended_mapping:
            if (m := PROMPT_QUESTION_RE.search(question_text)) or (
                    m := SLIDER_QUESTION_RE.search(question_text)):
                logger.debug(
                    "Expected substitution",
                    extra={
                        "before": m.group("question"),
                        "after": t2_to_t1_extended_mapping[export_tag],
                    },
                )
                v["QuestionText"] = question_text.replace(
                    m.group("question"), t2_to_t1_extended_mapping[export_tag])
                survey_client.update_question(question_id=k, data=v)
Example #29
def translate_ids(col_to_fetch):
    logger = utils.get_logger()
    user_input = input(
        "Please paste list of anon_project_specific_user_ids separated by commas:"
    )

    anon_ids = split_string_into_list_of_strings(user_input)

    user_ids = list()
    unresolved_ids = list()
    for anon_project_specific_user_id in anon_ids:
        result = pg_utils.execute_query(
            sql_q.GET_USER_BY_ANON_PROJECT_SPECIFIC_USER_ID_SQL,
            [str(anon_project_specific_user_id)],
        )
        try:
            data_as_list = []
            for col in col_to_fetch:
                data_as_list.append(str(result[0][col]))
            data_as_str = ", ".join(data_as_list)
            user_ids.append(data_as_str)
        except IndexError:
            logger.warning(
                f"anon_project_specific_user_id {anon_project_specific_user_id} could not be found"
            )
            unresolved_ids.append(anon_project_specific_user_id)
    pg_utils.close_connection()

    if unresolved_ids:
        print(f"\nThe following anon_project_specific_user_ids could not be translated ({len(unresolved_ids)}):")
        print(";\n".join(unresolved_ids))
    if user_ids:
        print(f"\nTranslated ids ({len(user_ids)}):")
        print(";\n".join(user_ids))
Example #30
def main(response_dataset, account, survey_id):
    logger = utils.get_logger()
    with open(response_dataset) as csvfile:
        reader = csv.DictReader(csvfile)
        row_counter = 0
        for row in reader:
            row_counter += 1
            logger.info(f"Working on row {row_counter}")
            try:
                response_id = row.pop("ResponseId")
                event_time = row.pop("RecordedDate")
                anon_project_specific_user_id = row.pop(
                    "anon_project_specific_user_id")
                anon_user_task_id = row.pop("anon_user_task_id")
            except KeyError as exc:
                raise utils.DetailedValueError(
                    f"Mandatory {exc} data not found in source event",
                    details={
                        "row": row,
                    },
                )
            try:
                utils.validate_uuid(anon_project_specific_user_id)
            except utils.DetailedValueError:
                logger.warning(
                    f"Could not process row because anon_project_specific_user_id is not valid: {anon_project_specific_user_id}"
                )
                continue
            emulated_event = user_task_completed_event_template.copy()
            emulated_event["time"] = event_time
            emulated_event["detail"] = {
                "anon_project_specific_user_id": anon_project_specific_user_id,
                "anon_user_task_id": anon_user_task_id,
                "response_id": f"{survey_id}-{response_id}",
                "account": account,
            }
            resp.put_task_response(emulated_event, {})