Example #1
def sanitize_competency_id(competency_id: str):
    """ This method is used to sanitize potentially invalid competency Ids coming from CASS and other clients that could
        be fixed, based on the format agreed for the 2018 study. Two fixes are considered, described by the following examples:

        Case 1. Replace 'http' by 'https' in a competency URL:

        Case 2. Remove CASS version number from the end of a competency URL:
            "https://insertCassUrl/api/data/insertCassSchemaUrl.0.3.Competency/c8fadde7-c51e-4cfe-9d52-4e88aaad7037/1526575157983"

            will become:

            "https://insertCassUrl/api/data/insertCassSchemaUrl.0.3.Competency/c8fadde7-c51e-4cfe-9d52-4e88aaad7037"

        Other wrong competency Ids will trigger a ValueError exception.
    """
    competency_id = competency_id.replace("http://", "https://", 1)

    pieces = competency_id.split('/')
    last_term = pieces[-1]

    try:
        int(last_term)  # Raises ValueError if last_term is not an integer.
        # If no exception was triggered, assume a version number was appended to the competency Id.
        print_with_time(
            "WARN: Competency Id ({0}) seems to have version number at the end, cutting it accordingly."
            .format(competency_id))
        competency_id = '/'.join(pieces[0:-1])
    except ValueError:
        pass  # No problem, as the last term SHOULD NOT be an int.

    return competency_id
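
A minimal usage sketch covering both cases, assuming print_with_time is a simple timestamped logger from the same codebase (stubbed here for illustration only):

def print_with_time(msg):  # stub for the sketch only
    print(msg)

base = "https://insertCassUrl/api/data/insertCassSchemaUrl.0.3.Competency/c8fadde7-c51e-4cfe-9d52-4e88aaad7037"

# Case 1: the scheme is upgraded to https.
assert sanitize_competency_id(base.replace("https://", "http://", 1)) == base

# Case 2: the trailing version number is stripped (with a WARN log).
assert sanitize_competency_id(base + "/1526575157983") == base
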
Example #2
def _determine_mastery_val(self, observations_count, mastery_probability, mastery_probability_time_decay):
    print_with_time('INFO: Determining mastery for mastery_probability = {0} and mastery_probability_time_decay = {1}'
                    .format(str(mastery_probability), str(mastery_probability_time_decay)))
    if observations_count == 0:
        return 'unknown'
    elif observations_count < self.MIN_REQUIRED_OBSERVATIONS:
        return 'indeterminate'
    elif mastery_probability >= self.MASTERY_PROB_THRESHOLD and \
         mastery_probability_time_decay < self.MASTERY_PROB_THRESHOLD:
        return 'forgotten'
    elif mastery_probability >= self.MASTERY_PROB_THRESHOLD:
        return 'held'
    else:
        return 'not held'
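
A small driver exercising every branch. It reuses the method above through a stub class; the two threshold constants are illustrative assumptions, not the project's real configuration, and print_with_time must be in scope (e.g. the stub from Example #1):

class MasteryEstimatorStub:
    MIN_REQUIRED_OBSERVATIONS = 3   # assumed value
    MASTERY_PROB_THRESHOLD = 0.85   # assumed value
    determine = _determine_mastery_val  # bind the function above as a method

est = MasteryEstimatorStub()
assert est.determine(0, 0.90, 0.90) == 'unknown'        # no observations yet
assert est.determine(2, 0.90, 0.90) == 'indeterminate'  # too few observations
assert est.determine(5, 0.90, 0.50) == 'forgotten'      # decayed below threshold
assert est.determine(5, 0.90, 0.90) == 'held'
assert est.determine(5, 0.50, 0.50) == 'not held'
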
Example #3
def _increase_competency_attempt_counter(learner_dict, mastery_prob):
    """This function updates counter but does not store changes, as learner is stored by the function calling this one."""
    # Create the counters list on first use.
    competency_counters = learner_dict.setdefault('competencyAttemptCounters', [])

    competency_id = mastery_prob['competencyId']
    competency_counter = next(
        (x for x in competency_counters if x['competencyId'] == competency_id),
        None)

    if competency_counter:
        updated_counter = competency_counter['attempts'] + 1
        competency_counter['attempts'] = updated_counter
        competency_counter['lastAttemptDateTime'] = mastery_prob['timestamp']
    else:
        updated_counter = 1
        competency_counter = {
            "@context": "tla-declarations.jsonld",
            "@type": "CompetencyAttemptCounter",
            "competencyId": competency_id,
            "attempts": updated_counter,
            "lastAttemptDateTime": mastery_prob['timestamp']
        }

        competency_counters.append(competency_counter)

    print_with_time(
        'INFO: Updated competency attempt counter to {0} for learner {1}, competency {2}'
        .format(str(updated_counter), learner_dict['identifier'],
                competency_id))
    log_data = {
        "learnerId": learner_dict['identifier'],
        "competencyId": competency_id,
        "attemptCounter": competency_counter['attempts']
    }
    statement = create_learner_inference_log_xapi(
        verb_id="https://w3id.org/xapi/dod-isd/verbs/saved",
        verb_en_name="saved",
        activity_id="insertIPAddr/save-competency-attempt-counter",
        activity_en_name="Save Competency Attempt Counter",
        obj_extensions={"insertIPAddr/learner-inferences/log-data": log_data},
        profile_id="https://w3id.org/xapi/dod-isd/v1.0")
    if Config.LOG_TO_LRS:
        log_to_lrs(statement)
    if Config.LOG_XAPI_TO_FILE:
        print_with_time("[XAPI LOG]: {}".format(json.dumps(statement)))
Example #4
def f_retry(*args, **kwargs):
    # Inner wrapper of a retry decorator: num_tries, func and exceptions come
    # from the enclosing decorator's scope.
    for i in range(num_tries):
        try:
            return func(*args, **kwargs)
        except exceptions as e:
            if i < (num_tries - 1):
                print_with_time(
                    'WARN: Retrying call to store client after connection error: {0}.'
                    .format(str(e)))
                continue
            else:
                print_with_time(
                    'ERROR: Maxed out retries after connection error: {0}.'
                    .format(str(e)))
                raise
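
For context, a sketch of the decorator this wrapper plausibly sits in; the decorator name, parameters and defaults are assumptions:

import functools

def retry_on(exceptions, num_tries=3):  # hypothetical name and defaults
    def decorator(func):
        @functools.wraps(func)
        def f_retry(*args, **kwargs):
            for i in range(num_tries):
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    if i < (num_tries - 1):
                        continue  # logging omitted; see the wrapper above
                    raise
        return f_retry
    return decorator

# Usage sketch:
# @retry_on((ConnectionError,), num_tries=5)
# def fetch_learner(learner_id):
#     ...
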
Example #5
def process_experiences():
    """Consume Experiences AMQP channel until KeyboardInterrupt detected."""
    def experience_callback_func(ch, method, properties, body):
        ch.basic_ack(delivery_tag=method.delivery_tag)
        statement = json.loads(body.decode('utf-8').replace('&46;', '.'))
        print_with_time('INFO: xAPI Statement received: {0}; verb: {1}'.format(
            statement['id'], statement['verb']['id']))
        process_experience(statement)

    while True:
        try:
            print_with_time('INFO: Connecting to Experiences AMQP channel...')
            connection = None
            input_channel = None  # Ensure the finally block below can run safely.
            credentials = pika.PlainCredentials(
                username=Config.AMQP.Experiences.AMQP_USR,
                password=Config.AMQP.Experiences.AMQP_PWD)
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(
                    host=Config.AMQP.Experiences.AMQP_HOST,
                    credentials=credentials,
                    connection_attempts=Config.AMQP.Experiences.
                    CONNECTION_ATTEMPTS))
            input_channel = connection.channel()
            input_channel.exchange_declare(
                exchange=Config.AMQP.Experiences.EXCHANGE_NAME,
                exchange_type='fanout')
            input_channel.queue_declare(
                queue=Config.AMQP.Experiences.QUEUE_NAME, durable=True)
            input_channel.queue_bind(
                exchange=Config.AMQP.Experiences.EXCHANGE_NAME,
                queue=Config.AMQP.Experiences.QUEUE_NAME)
            input_channel.basic_qos(prefetch_count=1)
            print_with_time('INFO: Connected to Experiences AMQP channel.')

            input_channel.basic_consume(
                experience_callback_func,
                queue=Config.AMQP.Experiences.QUEUE_NAME)
            print_with_time(
                'INFO: Started listening for messages on Experiences Queue...')
            input_channel.start_consuming()
        except KeyboardInterrupt:
            print_with_time(
                'WARN: KeyboardInterrupt detected while consuming Experiences. Stopping consumption...'
            )
            raise  # Should stop consuming messages at this point.
        except Exception as e:
            print_with_time(
                'ERROR: Exception when consuming Experiences AMQP channel: {0}'.format(e))
            print(traceback.format_exc())
        finally:
            # Release resources before terminating thread or reconnecting.
            if input_channel is not None:
                input_channel.stop_consuming()
            if connection is not None:
                connection.close()
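
A note on the '&46;' replacement in the callback: it restores dots that were escaped upstream before the JSON is parsed (46 is the decimal ASCII code for '.'); the reason for the escaping is an assumption here. A runnable illustration:

import json

body = b'{"id": "stmt-1", "verb": {"id": "https://w3id&46;org/xapi/verbs/saved"}}'
statement = json.loads(body.decode('utf-8').replace('&46;', '.'))
assert statement['verb']['id'] == 'https://w3id.org/xapi/verbs/saved'
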
Example #6
def inference_callback_func(ch, method, properties, body):
    # outputPublisher comes from the enclosing process_raw_inferences scope
    # (see Example #9 below).
    ch.basic_ack(delivery_tag=method.delivery_tag)

    raw_inference = json.loads(body.decode('utf-8').replace('&46;', '.'))

    print_with_time(
        'INFO: Started processing raw inference: {0}'.format(raw_inference))
    received_statement = create_learner_inference_log_xapi(
        verb_id="https://w3id.org/xapi/dod-isd/verbs/received",
        verb_en_name="received",
        activity_id="insertIPAddr/receive-mastery-probability",
        activity_en_name="Receive Mastery Probability",
        obj_extensions={
            "insertIPAddr/learner-inferences/log-data": raw_inference
        },
        profile_id="https://w3id.org/xapi/dod-isd/v1.0")
    if Config.LOG_TO_LRS:
        log_to_lrs(received_statement)
    if Config.LOG_XAPI_TO_FILE:
        print_with_time("[XAPI LOG]: {}".format(json.dumps(received_statement)))

    learner_inferences = process_raw_inference(raw_inference)

    print_with_time(
        'INFO: Result of processing raw inference: {0}'.format(learner_inferences))
    published_statement = create_learner_inference_log_xapi(
        verb_id="https://w3id.org/xapi/dod-isd/verbs/published",
        verb_en_name="published",
        activity_id="insertIPAddr/publish-learner-inference",
        activity_en_name="Publish Learner Inference",
        obj_extensions={
            "insertIPAddr/learner-inferences/log-data": learner_inferences
        },
        profile_id="https://w3id.org/xapi/dod-isd/v1.0")
    if Config.LOG_TO_LRS:
        log_to_lrs(published_statement)
    if Config.LOG_XAPI_TO_FILE:
        print_with_time("[XAPI LOG]: {}".format(json.dumps(published_statement)))

    if learner_inferences:  # Excludes empty dicts and None.
        outputPublisher.publish(json.dumps(learner_inferences))
Example #7
def init_daemon():
    with daemon.DaemonContext(stdout=sys.stdout,
                              stderr=sys.stdout,
                              working_directory=dir_path,
                              signal_map={
                                  signal.SIGTERM: shutdown,
                                  signal.SIGABRT: shutdown
                              },
                              pidfile=daemon.pidfile.PIDLockFile(dir_path +
                                                                 '/lock.pid')):

        experiences_threads = list()
        inferences_threads = list()
        try:
            # If issues arise when testing, look into:
            # https://stackoverflow.com/questions/24510310/consume-multiple-queues-in-python-pika
            experiences_threads = [
                threading.Thread(target=process_experiences)
                for _ in range(Config.EXPERIENCES_THREADS)
            ]
            for t in experiences_threads:
                t.start()

            inferences_threads = [
                threading.Thread(target=process_raw_inferences)
                for _ in range(Config.INFERENCES_THREADS)
            ]
            for t in inferences_threads:
                t.start()

        except KeyboardInterrupt:
            print_with_time('Closing on response to KeyboardInterrupt...')
            for t in experiences_threads:
                t.join(30)
            for t in inferences_threads:
                t.join(30)
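
The shutdown handler wired into signal_map is not included in these snippets; a plausible minimal sketch, entirely an assumption:

import sys

def shutdown(signum, frame):  # hypothetical reconstruction
    print_with_time('Received signal {0}, shutting down...'.format(signum))
    sys.exit(0)  # exiting triggers DaemonContext cleanup on the way out
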
Example #8
def experience_callback_func(ch, method, properties, body):
    ch.basic_ack(delivery_tag=method.delivery_tag)
    statement = json.loads(body.decode('utf-8').replace('&46;', '.'))
    print_with_time('INFO: xAPI Statement received: {0}; verb: {1}'.format(
        statement['id'], statement['verb']['id']))
    process_experience(statement)
Example #9
def process_raw_inferences():
    """Consume Raw Inferences AMQP channel until KeyboardInterrupt detected."""
    while True:
        try:
            print_with_time(
                'INFO: Connecting to Raw Inferences AMQP channel...')
            credentials = pika.PlainCredentials(
                username=Config.AMQP.Inferences.AMQP_USR,
                password=Config.AMQP.Inferences.AMQP_PWD)
            connection = None
            input_channel = None  # Ensure the finally block below can run safely.
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(
                    host=Config.AMQP.Inferences.AMQP_HOST,
                    port=Config.AMQP.Inferences.AMQP_PORT,
                    credentials=credentials,
                    connection_attempts=Config.AMQP.Inferences.
                    CONNECTION_ATTEMPTS))
            input_channel = connection.channel()

            input_channel.exchange_declare(
                exchange=Config.AMQP.Inferences.RAW_EXCHANGE_NAME,
                exchange_type='fanout')
            input_channel.queue_declare(
                queue=Config.AMQP.Inferences.RAW_QUEUE_NAME, durable=True)
            input_channel.queue_bind(
                exchange=Config.AMQP.Inferences.RAW_EXCHANGE_NAME,
                queue=Config.AMQP.Inferences.RAW_QUEUE_NAME)

            outputPublisher = FanoutPublisher(
                Config.AMQP.Inferences.AMQP_HOST,
                Config.AMQP.Inferences.AMQP_USR,
                Config.AMQP.Inferences.AMQP_PWD,
                Config.AMQP.Inferences.DECONFLICTED_EXCHANGE_NAME)

            print_with_time('INFO: Connected to Raw Inferences AMQP channel.')

            def inference_callback_func(ch, method, properties, body):
                ch.basic_ack(delivery_tag=method.delivery_tag)

                raw_inference = json.loads(
                    body.decode('utf-8').replace('&46;', '.'))

                print_with_time(
                    'INFO: Started processing raw inference: {0}'.format(
                        raw_inference))
                received_statement = create_learner_inference_log_xapi(
                    verb_id="https://w3id.org/xapi/dod-isd/verbs/received",
                    verb_en_name="received",
                    activity_id="insertIPAddr/receive-mastery-probability",
                    activity_en_name="Receive Mastery Probability",
                    obj_extensions={
                        "insertIPAddr/learner-inferences/log-data":
                        raw_inference
                    },
                    profile_id="https://w3id.org/xapi/dod-isd/v1.0")
                if Config.LOG_TO_LRS:
                    log_to_lrs(received_statement)
                if Config.LOG_XAPI_TO_FILE:
                    print_with_time("[XAPI LOG]: {}".format(
                        json.dumps(received_statement)))

                learner_inferences = process_raw_inference(raw_inference)

                print_with_time(
                    'INFO: Result of processing raw inference: {0}'.format(
                        learner_inferences))
                published_statement = create_learner_inference_log_xapi(
                    verb_id="https://w3id.org/xapi/dod-isd/verbs/published",
                    verb_en_name="published",
                    activity_id="insertIPAddr/publish-learner-inference",
                    activity_en_name="Publish Learner Inference",
                    obj_extensions={
                        "insertIPAddr/learner-inferences/log-data":
                        learner_inferences
                    },
                    profile_id="https://w3id.org/xapi/dod-isd/v1.0")
                if Config.LOG_TO_LRS:
                    log_to_lrs(published_statement)
                if Config.LOG_XAPI_TO_FILE:
                    print_with_time("[XAPI LOG]: {}".format(
                        json.dumps(published_statement)))

                if learner_inferences:  # Excludes empty dicts and None.
                    outputPublisher.publish(json.dumps(learner_inferences))

            input_channel.basic_qos(prefetch_count=1)
            input_channel.basic_consume(
                inference_callback_func,
                queue=Config.AMQP.Inferences.RAW_QUEUE_NAME)
            print_with_time(
                'INFO: Started listening for messages on Raw Inferences Queue...'
            )
            input_channel.start_consuming()
        except KeyboardInterrupt:
            print_with_time(
                'WARN: KeyboardInterrupt detected while consuming Raw Inferences. Stopping consumption...'
            )
            raise  # Should stop consuming messages at this point.
        except Exception as e:
            print_with_time(
                'ERROR: Exception when consuming Raw Inferences AMQP channel: {0}'
                .format(e))
            print(traceback.format_exc())
        finally:
            # Release resources before terminating thread or reconnecting.
            if input_channel is not None:
                input_channel.stop_consuming()
            if connection is not None:
                connection.close()
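
FanoutPublisher is not shown in these snippets; below is a hedged reconstruction of what it plausibly wraps, using pika's standard publish API. Only the constructor arguments and publish() are implied by the code above; everything else is an assumption:

import pika

class FanoutPublisher:  # hypothetical reconstruction
    def __init__(self, host, user, password, exchange_name):
        credentials = pika.PlainCredentials(username=user, password=password)
        self._connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=host, credentials=credentials))
        self._channel = self._connection.channel()
        self._channel.exchange_declare(exchange=exchange_name,
                                       exchange_type='fanout')
        self._exchange_name = exchange_name

    def publish(self, message):
        # Fanout exchanges ignore the routing key; every bound queue gets a copy.
        self._channel.basic_publish(exchange=self._exchange_name,
                                    routing_key='',
                                    body=message)
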
Example #10
                t.join(30)
            for t in inferences_threads:
                t.join(30)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Start the evidence mapper AMQP listener.')
    parser.add_argument('run_command',
                        type=str,
                        choices=['start', 'stop', 'restart'])
    options = parser.parse_args()

    if options.run_command == 'start':
        try:
            print_with_time('Starting service...')
            init_daemon()
        except lockfile.AlreadyLocked:
            print("Process already started. Use \'restart\' command instead.",
                  flush=True)
    elif options.run_command == 'stop':
        try:
            print_with_time('Stopping service...')
            os.kill(int(open(dir_path + '/lock.pid').read()), signal.SIGTERM)
        except FileNotFoundError:
            print_with_time(
                'File not found. Service is likely already stopped.')
        except ProcessLookupError:
            print_with_time(
                'Process was not found. The lock file still exists, but the service does not. Deleting lock file...'
            )
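
Usage, given the argparse choices above (the module file name is hypothetical):

# $ python learner_inference_service.py start     # daemonize and begin consuming
# $ python learner_inference_service.py stop      # SIGTERM via the pid stored in lock.pid
# $ python learner_inference_service.py restart
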
Example #11
def process_mastery_prob(mastery_prob):
    """ Process a new instance of MasteryProbability that is passed as parameter.

        This function loads user data from store, updates mastery information and save the learner back into store.
        If the new information triggered a change in mastery estimates, then this function outputs a dictionary
        that corresponds to type LearnerInference, which learner inference service will communicate in real time.
    """
    # Get learner from store
    # TODO: For performance, load only necessary fields.

    if not _useful_mastery_prob(mastery_prob):
        return {}

    mastery_prob['competencyId'] = sanitize_competency_id(
        mastery_prob['competencyId'])

    learner_id = get_id_from_learnerId(mastery_prob['learnerId'])
    store = Config.STORE_CLIENT(Config.STORE_HOST, Config.STORE_PORT,
                                Config.STORE_DB)

    learner_dict, _ = store.lock_learner(learner_id)

    # NOTE: this will be the case if we fail to lock the learner within the time limit
    if learner_dict is None:
        return {}

    try:
        if mastery_prob[
                'source'] == Config.MASTERY_ESTIMATOR_COMPETENCY_COUNTS:
            _increase_competency_attempt_counter(learner_dict, mastery_prob)

        if mastery_prob['source'] in Config.VALID_ESTIMATORS:
            if 'masteryProbabilities' not in learner_dict or \
                    learner_dict['masteryProbabilities'] is None:
                learner_dict['masteryProbabilities'] = []

            # TODO: Apparently the only reason for not doing learner_dict['masteryProbabilities'].append(mastery_prob)
            #       is a difference in the name of one field. That could be corrected later, to avoid having
            #       two separate data models just because of this difference.
            latest_mastery_prob = {
                '@context': mastery_prob.get('@context', None),
                '@type': mastery_prob['@type'],
                'competencyId': mastery_prob['competencyId'],
                'probability': mastery_prob['masteryProbability'],
                'timestamp': mastery_prob['timestamp'],
                'source': mastery_prob['source']
            }
            learner_dict['masteryProbabilities'].append(latest_mastery_prob)

            newEstimate, learner_dict = _get_mastery_estimate(
                learner_dict, latest_mastery_prob)

            # Save updates for mastery probabilities and competency counter.
            learner_dict = recursivelyConvertTimestampsInDict(learner_dict)
            store.update_learner(learner_id, learner_dict, lockedLearner=True)

            learnerInference = {
                '@context': mastery_prob.get('@context', None),
                '@type': Config.AMQP_LEARNER_INFERENCE_TYPE,
                'timestamp': mastery_prob['timestamp'],
                'learnerId': learner_id,
                'masteryEstimates': newEstimate
            }

            return learnerInference
    except Exception as e:
        print_with_time(
            "ERROR: Exception while processing mastery probability {0}: {1}"
            .format(str(mastery_prob), str(e)))
        raise
    finally:
        store.unlock_learner(learner_id)

    # No processing occurred.
    return {}
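
For reference, an illustrative MasteryProbability payload (the field names match what the code reads; the values are made up). Note how masteryProbability is renamed to probability when stored on the learner:

mastery_prob = {  # illustrative values only
    '@context': 'tla-declarations.jsonld',
    '@type': 'MasteryProbability',
    'learnerId': 'learner-123',
    'competencyId': 'https://insertCassUrl/api/data/insertCassSchemaUrl.0.3.Competency/c8fadde7-c51e-4cfe-9d52-4e88aaad7037',
    'masteryProbability': 0.91,        # stored on the learner as 'probability'
    'timestamp': '2018-05-17T16:39:17Z',
    'source': 'insert-estimator-name'  # must be in Config.VALID_ESTIMATORS to be processed
}
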
Example #12
def _increase_activity_attempt_counters(terminated_activities):
    """The input is a dictionary that contains fields 'learnerId','activityId', 'timestamp'."""
    store = Config.STORE_CLIENT(Config.STORE_HOST, Config.STORE_PORT,
                                Config.STORE_DB)
    for activity_attempt in terminated_activities:
        learner_id = activity_attempt['learnerId']
        activity_id = activity_attempt['activityId']

        try:
            attempt_counters = store.get_learner_property(
                learner_id, 'activityAttemptCounters')  # Might return None.
            for i, attempt in enumerate(attempt_counters or []):
                if isinstance(attempt['lastAttemptDateTime'], datetime.datetime):
                    attempt_counters[i]['lastAttemptDateTime'] = attempt[
                        'lastAttemptDateTime'].replace(
                            tzinfo=datetime.timezone.utc).isoformat()
        except ValueError:
            # Apparently the learner is not in the DB. That shouldn't happen, so we log this
            # as an error and move on to the next update.
            print_with_time(
                'ERROR: Attempted to update activityAttemptCounters for learner with Id {0} but could not retrieve learner data from DB.'
                .format(learner_id))
            continue

        if attempt_counters is not None:
            attempt_counter = next(
                (x
                 for x in attempt_counters if x['activityId'] == activity_id),
                None)
        else:
            attempt_counters = []
            attempt_counter = None

        if attempt_counter:
            attempt_counter['attempts'] += 1
            attempt_counter['lastAttemptDateTime'] = activity_attempt[
                'terminatedTimestamp']
        else:
            attempt_counter = {
                "@context": "tla-declarations.jsonld",
                "@type": "ActivityAttemptCounter",
                "activityId": activity_id,
                "attempts": 1,
                "lastAttemptDateTime": activity_attempt['terminatedTimestamp']
            }

            attempt_counters.append(attempt_counter)
        dikt = {'activityAttemptCounters': attempt_counters}
        dikt = recursivelyConvertTimestampsInDict(dikt)
        store.update_learner(learner_id, dikt, learnerEtag=False)

        print_with_time(
            'INFO: Set activity attempt counter to {0} for user {1}, activity {2}'
            .format(str(attempt_counter['attempts']), learner_id, activity_id))
        log_data = {
            "learnerId": learner_id,
            "activityId": activity_id,
            "attemptCounter": attempt_counter['attempts']
        }
        statement = create_learner_inference_log_xapi(
            verb_id="https://w3id.org/xapi/dod-isd/verbs/saved",
            verb_en_name="saved",
            activity_id="insertIPAddr/save-activity-attempt-counter",
            activity_en_name="Save Activity Attempt Counter",
            obj_extensions={
                "insertIPAddr/learner-inferences/log-data": log_data
            },
            profile_id="https://w3id.org/xapi/dod-isd/v1.0")
        if Config.LOG_TO_LRS:
            log_to_lrs(statement)
        if Config.LOG_XAPI_TO_FILE:
            print_with_time("[XAPI LOG]: {}".format(json.dumps(statement)))