예제 #1
0
def debug_log(text):
    '''
    Helper function to help us trace our code.

    We print a time stamp, a stack trace, and a /short/ summary of
    what's going on.

    This is not intended for programmatic debugging. We do change
    format regularly (and you should feel free to do so too -- for
    example, on narrower terminals, a `\n\t` can help)
    '''
    # Show the three nearest calling frames so the log line tells us
    # where the message originated (caller, and two levels above it).
    stack = inspect.stack()
    stack_trace = "{s1}/{s2}/{s3}".format(
        s1=stack[1].function,
        s2=stack[2].function,
        s3=stack[3].function,
    )

    message = "{time}: {st:60}\t{body}".format(
        time=datetime.datetime.utcnow().isoformat(), st=stack_trace, body=text)

    # Flip here to print / not print debug messages
    if DEBUG:
        print(message)

    # Flip here to save / not save debug messages
    # Ideally, we'd like to log these somewhere which won't cause cascading failures.
    # If we e.g. have errors every 100ms, we don't want to create millions of debug files.
    # There are services which handle this pretty well, I believe
    if DEBUG:
        # Context manager guarantees the handle is closed even if a
        # write raises (the previous version leaked the file on error).
        with open(paths.logs("debug.log"), "a") as debug_fp:
            debug_fp.write(message)
            debug_fp.write("\n")
예제 #2
0
    async def pipeline(parsed_message):
        '''
        And this is the pipeline itself. It takes messages, processes them,
        and informs consumers when there is new data.
        '''
        debug_log("Processing PubSub message {event} from {source}".format(
            event=parsed_message["client"]["event"], source=client_source
        ))

        # Try to run a message through all event processors.
        #
        # To do: Finer-grained exception handling. Right now, if we break, we don't run
        # through remaining processors.
        #
        # Initialize before the try: if a processor raises and we are NOT in
        # DEV mode (so the exception is swallowed), the name would otherwise
        # be unbound below and raise UnboundLocalError instead of taking the
        # "No updates" path.
        processed_analytics = None
        try:
            print(event_processors)
            processed_analytics = [await ep(parsed_message) for ep in event_processors]
        except Exception as e:
            traceback.print_exc()
            # Dump the offending message plus traceback to a uniquely-named
            # file so the failure can be diagnosed after the fact.
            filename = paths.logs("critical-error-{ts}-{rnd}.tb".format(
                ts=datetime.datetime.now().isoformat(),
                rnd=uuid.uuid4().hex
            ))
            with open(filename, "w") as fp:
                fp.write(json.dumps(parsed_message, sort_keys=True, indent=2))
                fp.write("\nTraceback:\n")
                fp.write(traceback.format_exc())
            # In development we want the crash to be loud; in production we
            # log and continue.
            if settings.RUN_MODE == settings.RUN_MODES.DEV:
                raise
        if processed_analytics is None:
            debug_log("No updates")
            return []
        # Transitional code.
        #
        # We'd eventually like to return only lists of outgoing
        # events. No event means we send back [] For now, our
        # modules return `None` to do nothing, events, or lists of
        # events.
        #
        # That's a major refactor away. We'd like to pass in lists /
        # iterators of incoming events so we can handle microbatches,
        # and generate lists of outgoing events too.
        if not isinstance(processed_analytics, list):
            print("FIXME: Should return list")
            processed_analytics = [processed_analytics]
        return processed_analytics
예제 #3
0
def log_ajax(url, resp_json, request):
    '''
    This is primarily used to log the responses of AJAX requests made
    TO Google and similar providers. This helps us understand the
    context of classroom activity, debug, and recover from failures
    '''
    # Bundle the request context together with the response body.
    record = {
        'user': request['user'],
        'url': url,
        'response': resp_json,
        'timestamp': datetime.datetime.utcnow().isoformat()
    }
    encoded_record = encode_json_block(record)
    # Hashing the payload keeps filenames unique even when two requests
    # land within the same timestamp.
    digest = secure_hash(encoded_record.encode('utf-8'))
    log_path = AJAX_FILENAME_TEMPLATE.format(
        directory=paths.logs("ajax"),
        time=datetime.datetime.utcnow().isoformat(),
        payload_hash=digest)
    with open(log_path, "w") as ajax_log_fp:
        ajax_log_fp.write(encoded_record)
예제 #4
0
def log_event(event, filename=None, preencoded=False, timestamp=False):
    '''
    This isn't done, but it's how we log events for now.

    event -- a JSON-serializable object, or (with preencoded=True) a
        string that is already JSON.
    filename -- basename of the log (without ".log"); None means the
        main log. Handles are cached in `files` and kept open.
    preencoded -- skip JSON encoding of `event`.
    timestamp -- if True, append a tab plus the UTC time to the line.
    '''
    if filename is None:
        log_file_fp = mainlog
    elif filename in files:
        log_file_fp = files[filename]
    else:
        # Unbuffered ("0") binary append, matching `mainlog`, so every
        # write goes straight to disk. (Removed a no-op `"" +` prefix
        # from the original filename construction.)
        log_file_fp = open(paths.logs(filename + ".log"), "ab", 0)
        files[filename] = log_file_fp

    if not preencoded:
        # sort_keys gives a stable, replicable encoding.
        event = json.dumps(event, sort_keys=True)
    log_file_fp.write(event.encode('utf-8'))
    if timestamp:
        log_file_fp.write("\t".encode('utf-8'))
        log_file_fp.write(
            datetime.datetime.utcnow().isoformat().encode('utf-8'))
    log_file_fp.write("\n".encode('utf-8'))
    log_file_fp.flush()
예제 #5
0
a dozen analyses, we'll want to know those happened and what those
were, but we might not keep terabytes of data around (just enough to
redo those analyses).
'''

import datetime
import inspect
import json
import hashlib

import learning_observer.filesystem_state

import learning_observer.paths as paths
import learning_observer.settings as settings

# Main event log: unbuffered ("0") binary append, so each completed
# write reaches the OS immediately rather than sitting in a buffer.
mainlog = open(paths.logs("main_log.json"), "ab", 0)
# Cache of open per-name log file handles, keyed by log filename
# (populated lazily by log_event).
files = {}

# Do we make files for exceptions? Do we print extra stuff on the console?
#
# On deployed systems, this can make a mess. On dev systems, this is super-helpful
DEBUG = settings.RUN_MODE == settings.RUN_MODES.DEV or 'logging' in settings.settings[
    'config']['debug']


def encode_json_line(line):
    '''
    For encoding short data, such as an event.

    We use a helper function so we have the same encoding
    everywhere. Our primary goal is replicability -- if
예제 #6
0
import hashlib
import os
import os.path
import shutil
import sys

import learning_observer.paths as paths

import learning_observer.module_loader as module_loader

# These are directories we'd like created on startup. At the moment,
# they're for different types of log files.
directories = {
    'logs': {
        'path': paths.logs()
    },
    'startup logs': {
        'path': paths.logs('startup')
    },
    'AJAX logs': {
        'path': paths.logs('ajax')
    }
}

# Create, on startup, any of the directories above that are missing.
for dirname, dirinfo in directories.items():
    path = dirinfo['path']
    if not os.path.exists(path):
        os.mkdir(path)
        print("Made {dirname} directory in {dirpath}".format(dirname=dirname,
                                                             dirpath=path))