Example #1
def save_attachments(msg, dir):
    #connect to log file for user
    logger = mylogger.attach_to_logfile(os.path.join(dir, 'log.txt'), level=log_level)

    def keep_older_versions_of_this_file( filename ):

        def change_name_version(newfilename,filename,changes=0):
            #use just the filename (without path) for logging:
            log_nfn = os.path.split(newfilename)[1]
            log_fn = os.path.split(filename)[1]

            logger.debug("Entering change_name_version %s <- %s " % (repr(log_nfn),repr(log_fn)))
            try:
                version = int(newfilename.split('.')[-1])
                root = ".".join(newfilename.split('.')[0:-1])
            except ValueError as msg:
                logger.error("problem with filename %s in change_name_version" % repr(filename))
                raise

            if os.path.exists(newfilename):
                logger.debug("file: %s exists -- recursive retry!" % repr(log_nfn))
                changes = change_name_version(root+'.'+str(version+1),root+'.'+str(version),changes)
            else:
                logger.debug("Found last file: %s" % repr(log_nfn))

            logger.debug( "About to rename %s to %s" % (repr(log_fn),repr(log_nfn)))
            os.rename(filename,newfilename)
            return changes+1

        changes = None
        if os.path.exists(filename):
            changes = change_name_version(filename + '.1', filename)
        return changes
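For context, the nested helpers above implement a rotating rename: the existing file becomes file.1, any existing file.1 is first pushed to file.2, and so on, so that a newly saved attachment does not overwrite an earlier one. A minimal sketch of the same idea without the per-student logging, using a hypothetical rotate_versions name:

import os

def rotate_versions(path):
    # Hypothetical, simplified equivalent of keep_older_versions_of_this_file:
    # rename path -> path.1, first bumping any existing path.N to path.(N+1).
    def bump(src, dst):
        if os.path.exists(dst):
            root, version = dst.rsplit('.', 1)
            bump(dst, root + '.' + str(int(version) + 1))
        os.rename(src, dst)

    if os.path.exists(path):
        bump(path, path + '.1')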
Example #2
def startup(log_level=logging.INFO):
    log_global = mylogger.attach_to_logfile(conf.subtest_logfile,
                                            level=log_level)
    process_emails.log_global = log_global  #let functions in that module use the same logger
    enqueue_outgoing_mails.log_global = log_global
    enqueue_outgoing_mails.conf = conf
    post_test_analysis.conf = conf

    for dirname in (conf.subtest_testcodedir,
                    conf.subtest_queue, conf.subtest_manual,
                    os.path.split(conf.subtest_logfile)[0]):
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        except:
            log_global.critical(
                "Unexpected error making directory {0}".format(dirname))
            raise

    log_global.debug(40 * "=")
    log_global.debug("Starting program up, module={}".format(Modulecode))
    log_global.debug("Configfile = {}".format(conf.__file__))
    log_global.debug(40 * "=")
    log_global.debug("============= Starting ===========")

    log_global.debug("Checking testing scripts are all in place:")
    #Check that testing codes are available
    for labname in conf.subtest_tests.keys():
        testfilename = conf.subtest_tests[labname]
        testfilepath = os.path.join(conf.subtest_testcodedir, labname,
                                    testfilename)
        log_global.debug("Checking labname={}, looking for {} at {}".format(
            labname, testfilename, testfilepath))
        assert os.path.exists(testfilepath), "Test file '%s' for submission '%s' is missing" \
               % (testfilepath,labname)

    # make sure PYTHONPATH is set to new modules and packages, such as
    # ctestlib
    try:
        import tetepy
    except ImportError as msg0:
        msg = "We can't import 'tetepy'. \n\nIt should be installed via\n" + \
            "sudo sh ~/bin/install-python-libs.sh\n" + \
            "Original error: " + str(msg0)
        print(msg)
        log_global.error(msg)
        raise ImportError(msg)
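Both startup variants create their working directories with the same try/except errno.EEXIST idiom. On Python 3 the equivalent is os.makedirs(dirname, exist_ok=True); a minimal sketch with a hypothetical ensure_dir helper:

import os

def ensure_dir(dirname):
    # Python 3 equivalent of the try/except errno.EEXIST idiom used above.
    os.makedirs(dirname, exist_ok=True)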
Example #3
def startup(log_level=logging.INFO):
    # Set up logging
    log_global = mylogger.attach_to_logfile(conf.outgoingmail_logfile, level=log_level)
    enqueue_outgoing_mails.log_global = log_global  #let functions in that module use the same logger
    enqueue_outgoing_mails.conf = conf

    # Ensure the existence of needed directories
    for dirname in (os.path.split(conf.outgoingmail_queue)[0],
                    os.path.split(conf.outgoingmail_logfile)[0],
                    os.path.split(conf.smtp_error_file)[0]):
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        except:
            log_global.critical("Unexpected error making directory {0}".format(dirname))
            raise

    # Set up SMTP error statistics handling.
    if not os.path.exists(conf.smtp_error_file):
        with open(conf.smtp_error_file,'w') as f:
            fcntl.flock(f.fileno(), fcntl.LOCK_EX)
            cPickle.dump({}, f)
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
        record_smtp_socket_success()

    # Log startup
    log_global.debug(40*"=")
    log_global.debug("Starting program up")
    log_global.debug(40*"=")
    log_global.debug("============= Starting ===========")

    return log_global
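The startup code above seeds the SMTP error statistics file with an empty dict written under an exclusive flock. Reading the statistics back would follow the same pattern; a minimal sketch assuming the same Python 2 cPickle/fcntl conventions and a hypothetical load_smtp_stats name:

import fcntl
import cPickle

def load_smtp_stats(path):
    # Hypothetical helper: read the statistics dict back under a shared lock,
    # mirroring the locked cPickle.dump performed at startup.
    with open(path, 'rb') as f:
        fcntl.flock(f.fileno(), fcntl.LOCK_SH)
        try:
            return cPickle.load(f)
        finally:
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)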
Example #4
def create_summary_text_and_marks(report_data, studentlabdir, logglobal):
    """
    Parameters
    ----------

      report_data : list of triplets
         Report data has the following structure: there is one triplet (i.e.
         a tuple with three entries) for every test_* function. In each triplet
         we have (A, B, C):
             A is a bool: True if the test has passed, False if it failed
             B is the name of the test, prefixed with the filename of the
               test*.py file (see example below)
             C is the stdout from running py.test.
        
      studentlabdir : str
         Path to the directory in which the student's files are located.
         
      logglobal : logging.Logger
         global logger object
         
        
    Returns
    -------

       summary_text : str
          Major chunk of the feedback email, comprising
          - overview of passed exercises and marks
          - total mark for assignment
          - more detailed feedback for each exercise
          
       passfailtotal : triplet of floats (A, B, C)

          - the obtained mark A out of the available total C;
            B is the failed fraction. Historically these were
            test counts and integers, but fractions are now also
            allowed, and C could in principle be normalised to 100%.
            One of the three values is redundant, again for
            historical reasons.
            
        
    Example
    -------

    Here is an example for report_data::
    
        [(True, 'test_training3_c.py::test_hello_world_compilation', None), 
         (False, 'test_training3_c.py::test_output_exit_code_is_zero', 
          "def test_output_exit_code_is_zero():\n        
          ct.attempt_compilation_and_execution(cache)\n    \n        
          #Did the program compile?\n>       
          assert cache['compilation-success'] == True\nE       
          assert False == True\n\ntest_training3_c.py:58: AssertionError\n"), 
          (False, 'test_training3_c.py::test_output_contains_hello_world', 
           "def test_output_contains_hello_world():\n    \n        
           # attempt to compile and execute code, store results in cache\n        
           ct.attempt_compilation_and_execution(cache)\n    \n        
           # Did the program compile?\n>       
           assert cache['compilation-success'] == True\nE       
           assert False == True\n\ntest_training3_c.py:70: AssertionError\n")]
    """

    r = report_data
    assert len(r) > 0, "Didn't get any data to summarise"

    # create local logger
    log = mylogger.attach_to_logfile(os.path.join(studentlabdir, 'log.txt'),
                                     level=logging.DEBUG)

    # Extract name of file with tests
    testfilename = r[0][1].split('::')[0]

    log.info("Processing {} tests from file {} in {}".format(
        len(r), testfilename, studentlabdir))

    # Data structure to complete as we parse the test results
    reports = []

    # Gather what tests we need to analyse
    for line in r:
        d = {
            'name': None,           # name of the test function,
                                    #   e.g. test_hello_world_compilation
            'summary-line': None,   # one-line summary of performance
            'feedback-item': None,  # more detailed feedback
            'mark': 0.,             # actual mark
            'mark-max': 0.,         # maximum mark available
            'pass': None,           # test passed? [not sure we need this]
        }

        # store all the data we have
        try:
            d['name'] = line[1].split('::')[1]  # name of the test function
        except IndexError as msg:
            msg2 = "This error typically means that the testing script " + \
                "has failed to import the student data:\nreport-data = {}".format(report_data) + \
                "\nstudentlabdir = {}\n\nOriginal={}".format(studentlabdir, msg) + \
                "\nline = '{}'".format(line)
            logglobal.info(msg2)
            print(msg2)
            # common reasons are syntax errors or semantic errors that
            # are discovered / triggered when the system imports the file
            d['name'] = testfilename
            d['summary-line'] = "import failed"

        d['pass'] = line[0]
        d['raw-feedback'] = line[2]
        d['testfilename'] = testfilename
        d['studentlabdir'] = studentlabdir
        d['lab-logger'] = log

        # can be analysed further later
        reports.append(d)
Example #5
def submission_reply_report(student_dir, attachments, lab_name):
    """ Returns a tuple (valid_attachments, reply) where:

    valid_attachments = True if all the files marked 'mandatory' are
    present in the attachments passed in, False otherwise.

    reply is a string containing a report that gives details of which
    files were saved from the student submission and which files were
    already stored in student_dir."""

    valid_attachments = False
    report = []

    log_local = mylogger.attach_to_logfile(os.path.join(student_dir, 'log.txt'), level=logging.DEBUG)

    log_global.debug("Attachment keys: %s" % repr(attachments.keys()))

    files_by_type = analyze_filenames(assignment_file_map(lab_name), attachments.keys(), log_global)

    log_global.debug("files_by_type: %s" % repr(files_by_type))

    (valid_attachments, missing_required_files) = check_required_files(assignment_file_map(lab_name), attachments.keys())
    nr_files = sum(len(files) for files in files_by_type.values())

    log_global.info("attachments: %s files_by_type: %s"%(repr(attachments.keys()),repr(files_by_type)))

    # All required files were extracted.
    if nr_files > 0 and valid_attachments:
        report.append("### Files found in this submission for assessment '%s' ###\n\n" % lab_name)

        for ftype in sorted(files_by_type.keys()):
            files = files_by_type[ftype]
            fstr = "\n   ".join(files)
            report.append(" %-12s:\n   %s\n" % (ftype, fstr))

        report.append("\n")

    # Some files were extracted but not all the required ones (or
    # files not correctly named, ...)
    elif len(missing_required_files) > 0:
        report.append("### WARNING: this submission will not be tested.\n\n")
        report.append("### Not all the required files were extracted from your email.\n")
        report.append("### Please check that you have attached all the required files.\n\n")

        report.append("### Files found in this submission for assessment '%s':\n\n" % lab_name)

        for ftype in sorted(files_by_type.keys()):
            files = files_by_type[ftype]
            fstr = "\n   ".join(files)
            report.append(" %-12s:\n   %s\n" % (ftype, fstr))

        report.append("\n")

        report.append("### Required files not found in this submission:\n\n")
        for filename in missing_required_files:
            report.append("   %-12s\n" % filename)
        report.append("\n\n")

    # No files extracted.
    else:
        report.append("WARNING: no files have been extracted from your email.\n")
        report.append("         (Maybe you have forgotten to attach them?)\n\n")

    #get list of files in student_dir
    submitted_files = os.listdir(os.path.join(student_dir,lab_name))

    #remove log files from these and separate into known and unknown files
    submitted_by_type = analyze_filenames(assignment_file_map(lab_name), submitted_files, log_global)

    report.append("-----------------------------------------------------\n\n")
    report.append("In summary, you have submitted these files for assessment '%s'.\n" % lab_name)
    report.append("Please note that the files listed below may be from previous submission\n")
    report.append("attempts, where these are allowed:\n\n")

    report.append(submitted_files_report(lab_name, submitted_by_type))

    report.append("\n\n == IMPORTANT: Keep this email as a receipt of your submission ==\n")

    return (valid_attachments, "".join(report))
Example #6
            row = [student, student_name, student_group] + marks + [late_text]
        else:
            row = [student, student_name, student_group] + marks

        print(row)
        table.append(row)

    with open(out_filename, 'w') as f:
        for row in table:
            f.write(",".join(map(str, row)) + '\n')


if __name__ == "__main__":
    # Set up logging
    log_global = mylogger.attach_to_logfile(conf.report_marks_logfile,
                                            level=conf.log_level)
    process_emails.log_global = log_global  #let functions in that module use the same logger
    log_global.info("report_marks.py starting up.")

    # Read student list, returns dictionary:
    # {'stud1@domain1':('stud1 name','stud1_group'), ...}
    students = csvio.readcsv_group(conf.Studentlistcsvfile)

    # Groups defined in config file.  Structure is a dictionary:
    # {'group1': {'lab3': '20 Nov 2009 09:00', 'lab4': '27 Nov 2009 09:00'},
    #  'group2': {'lab3': '22 Nov 2009 09:00', 'lab4': '29 Nov 2009 09:00'}}
    groups = conf.deadline_groups

    # Email admin if students are found assigned to deadline groups
    # that are not defined in the config.  ignored_students lists
    # those that had invalid groups and were removed.
Example #7
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import csv
import logging, mylogger, os

try:
    import pwd
except ImportError:
    raise ImportError("Couldn't import pwd -- only available on Unix systems")
Modulecode = pwd.getpwuid(os.getuid()).pw_name.upper()

conf = __import__('config_' + Modulecode.lower())

log_global = mylogger.attach_to_logfile(conf.Logfile, level=conf.log_level)


def readcsv(filename):
    """returns dictionary with student emails (without @soton.ac.uk)
    as keys and real names as value

    """
    students = {}
    stream = {}  # dictionary data for stream
    f = open(filename, 'rb')
    csvdata = csv.reader(f)
    for item in csvdata:
        try:
            (name, email, stream) = item
        except ValueError: