Esempio n. 1
0
 def do_run(self):
     """Score the CSV data by input-class coverage.

     Each record parsed from the CSV is fed to every check class; the
     score is the percentage of check classes covered by at least one
     record.  A per-checker COVERED / NOT COVERED report is built into
     ``self.detail``.

     Raises :class:`ScorerFailure` if the CSV data does not match the
     schema.
     """
     try:
         self.detail = []
         covered = set()
         for obj in CsvSchema.LoadCSV(self.schema, self.csvdata):
             # each record should be sent to all check classes, to see
             # what classes it covered
             for i, c in enumerate(self.check_classes):
                 if c(obj):
                     covered.add(i)
         # total up score by len(covered) / total_classes
         # NOTE(review): assumes check_classes is non-empty — an empty
         # list would raise ZeroDivisionError here; confirm with callers.
         self.score = 100.0 * len(covered) / len(self.check_classes)
         self.brief = lazy_gettext(
             '%(rate).2f%% rules (%(cover)s out of %(total)s) covered',
             cover=len(covered), total=len(self.check_classes),
             rate=self.score
         )
         # build more detailed report
         for i, c in enumerate(self.check_classes):
             if i in covered:
                 self.detail.append(lazy_gettext(
                     'COVERED: %(checker)s',
                     checker=self.getDescription(c)
                 ))
             else:
                 self.detail.append(lazy_gettext(
                     'NOT COVERED: %(checker)s',
                     checker=self.getDescription(c)
                 ))
     except KeyError as ex:
         # `except X as e` is valid on Python 2.6+ and Python 3, unlike
         # the legacy `except X, e` form used before.
         raise ScorerFailure(
             brief=lazy_gettext('CSV data does not match schema.'),
             detail=[ex.args[0]]
         )
Esempio n. 2
0
    def compile(self):
        """Validate the user submitted url address at compile stage.

        The url address will be tested with the configured regex patterns
        loaded from :attr:`BaseHost.compiler_params`.
        Refer to :ref:`hwnetapi` for more details about the rules.
        """
        # URL rule: the full remote address must match the configured regex.
        if self.config['urlrule']:
            p = re.compile(self.config['urlrule'])
            if not p.match(self.config['remote_addr']):
                raise NetApiAddressRejected(compile_error=lazy_gettext(
                    'Address "%(url)s" does not match pattern "%(rule)s"',
                    url=self.config['remote_addr'],
                    rule=self.config['urlrule']))
        # IP rule: strip scheme, then host part, then port, leaving only
        # the bare domain (Python 2 `urllib` split helpers).
        if self.config['iprule']:
            domain = urllib.splitport(
                urllib.splithost(
                    urllib.splittype(self.config['remote_addr'])[1])[0])[0]
            # get ip from domain
            try:
                ipaddr = socket.gethostbyname(domain)
            except Exception:
                # Resolution failure is logged, and the sentinel value below
                # will then fail the regex match and reject the address.
                logger.exception('Could not get ip address for domain "%s".' %
                                 domain)
                ipaddr = '<invalid>'
            # ip not match, skip
            p = re.compile(self.config['iprule'])
            if not p.match(ipaddr):
                raise NetApiAddressRejected(compile_error=lazy_gettext(
                    'IP address "%(ip)s" does not match pattern "%(rule)s"',
                    ip=ipaddr,
                    rule=self.config['iprule']))
Esempio n. 3
0
 def do_run(self):
     """Run the object-structure check and fill score/brief/detail."""
     try:
         collector = SchemaResultCollector()
         self.schema.check(collector)
         passed = collector.total - collector.error
         # score is the fraction of check points that passed
         self.score = 100.0 * passed / float(collector.total)
         self.brief = lazy_gettext(
             '%(rate).2f%% check points (%(success)d out of %(total)d) '
             'passed',
             rate=self.score, total=collector.total, success=passed,
         )
         self.detail = collector.errors
     except:
         # Deliberately catch *all* exceptions (not just Exception):
         # this scorer is considered non-harmful and its error messages
         # may be shown to students.  A student submission could raise
         # an arbitrary object to leak the evaluation script, so every
         # raise is converted into an opaque ScorerFailure.
         raise ScorerFailure(
             brief=lazy_gettext('Object Structure Scorer exited with error.'),
             detail=[])
Esempio n. 4
0
 def do_run(self):
     """Check the object structure and record the resulting score."""
     try:
         result = SchemaResultCollector()
         self.schema.check(result)
         success_count = result.total - result.error
         self.score = 100.0 * success_count / float(result.total)
         self.brief = lazy_gettext(
             '%(rate).2f%% check points (%(success)d out of %(total)d) '
             'passed',
             rate=self.score,
             total=result.total,
             success=success_count,
         )
         self.detail = result.errors
     except:
         # Bare except is intentional: this scorer's messages may be
         # shown to students, and a submission could `raise` an object
         # that is not derived from Exception in order to reveal the
         # evaluation script.  Swallow everything and report opaquely.
         raise ScorerFailure(
             brief=lazy_gettext(
                 'Object Structure Scorer exited with error.'),
             detail=[]
         )
Esempio n. 5
0
    def compile(self):
        """Validate the user submitted url address at compile stage.

        The url address will be tested with the configured regex patterns
        loaded from :attr:`BaseHost.compiler_params`.
        Refer to :ref:`hwnetapi` for more details about the rules.
        """
        # Reject the address when it does not match the configured URL regex.
        if self.config['urlrule']:
            p = re.compile(self.config['urlrule'])
            if not p.match(self.config['remote_addr']):
                raise NetApiAddressRejected(compile_error=lazy_gettext(
                    'Address "%(url)s" does not match pattern "%(rule)s"',
                    url=self.config['remote_addr'], rule=self.config['urlrule']
                ))
        if self.config['iprule']:
            # Peel scheme -> host -> port off the address with the Python 2
            # `urllib` split helpers; what remains is the bare domain.
            domain = urllib.splitport(
                urllib.splithost(
                    urllib.splittype(self.config['remote_addr'])[1]
                )[0]
            )[0]
            # get ip from domain
            try:
                ipaddr = socket.gethostbyname(domain)
            except Exception:
                # If resolution fails, log it and use a sentinel that will
                # not match the IP regex, rejecting the address below.
                logger.exception(
                    'Could not get ip address for domain "%s".' % domain)
                ipaddr = '<invalid>'
            # ip not match, skip
            p = re.compile(self.config['iprule'])
            if not p.match(ipaddr):
                raise NetApiAddressRejected(compile_error=lazy_gettext(
                    'IP address "%(ip)s" does not match pattern "%(rule)s"',
                    ip=ipaddr, rule=self.config['iprule']
                ))
Esempio n. 6
0
    def __init__(self, score):
        """Create a functionality scorer carrying a pre-computed score.

        The brief and detail messages always report full rule coverage
        (1 out of 1); only the numeric score varies.
        """
        super(JavaScore, self).__init__(lazy_gettext("Functionality Scorer"))
        self.score = score
        self.brief = lazy_gettext(
            "%(rate).2f%% rules (%(cover)s out of %(total)s) covered",
            cover=1, total=1, rate=100)
        self.detail = [
            lazy_gettext(
                "%(rate).2f%% rules (%(cover)s out of %(total)s) covered",
                cover=1, total=1, rate=100),
        ]
Esempio n. 7
0
    def do_run(self):
        """Run the code style check and fill score/brief/detail.

        Two input modes:

        * ``self.filelist`` is a string: it is treated as the raw output of
          an external style checker; ``[WARN]`` lines are counted and kept
          as the detail report.
        * otherwise: the files are checked with :mod:`pep8`.

        Each problem costs ``self.errcost`` points, clamped at zero.
        """
        if isinstance(self.filelist, str):
            # raw checker output mode (presumably the Google Java style
            # checker — TODO confirm with the caller)
            ph_out = self.filelist
            self.detail = []
            warning = 0
            for line in ph_out.split("\n"):
                if line.startswith("[WARN]"):
                    warning += 1
                    self.detail.append(line)
            # each warning consumes `errcost` points, clamped at zero
            self.score = max(100.0 - warning * self.errcost, 0.0)
            total_file = 1
            if warning > 0:
                self.brief = lazy_gettext(
                    "%(trouble)d problem(s) found in %(file)d file(s)",
                    trouble=warning, file=total_file
                )
            else:
                self.brief = lazy_gettext("All files passed Google code style check")
            if self.logs is not None:
                self.logs.saveCodeStyle(
                    str(self.score) + "\n" + str(self.brief) + "\n" +
                    GetTextStringList(self.detail)
                )
            return

        guide = pep8.StyleGuide()
        guide.options.show_source = True
        guide.options.report = Pep8DetailReport(guide.options)
        result = guide.check_files(self.filelist)

        # Each error consumes `errcost` points, clamped at zero.
        errcount = result.count_errors()
        self.score = max(100.0 - errcount * self.errcost, 0.0)

        # format the brief report
        total_file = len(self.filelist)
        if errcount > 0:
            self.brief = lazy_gettext(
                "%(trouble)d problem(s) found in %(file)d file(s)",
                trouble=errcount, file=total_file
            )
        else:
            self.brief = lazy_gettext("All files passed PEP8 code style check")

        # format detailed reports
        self.detail = result.build_report()

        if self.logs is not None:
            self.logs.saveCodeStyle(
                str(self.score) + "\n" + str(self.brief) + "\n" +
                GetTextStringList(self.detail))
Esempio n. 8
0
 def addError(self, test, err):
     """Record a test that raised an error in the detailed report."""
     super(UnitTestScorerDetailResult, self).addError(test, err)
     message = lazy_gettext(
         'ERROR: %(test)s.\n%(error)s',
         test=self.getDescription(test),
         error=self._exc_info_to_string(err, test))
     self.details.append(message)
Esempio n. 9
0
 def do_run(self):
     """Load and validate the CSV data against the schema.

     Raises :class:`ScorerFailure` with the offending field message when
     the CSV data does not match the schema.
     """
     # First, load the data, and format the error message if schema
     # is not matched.  `loaded_data` is presumably consumed further
     # down in the full source — TODO confirm.
     try:
         loaded_data = list(CsvSchema.LoadCSV(self.schema, self.csvdata))
     except KeyError as ex:
         # `as` form works on Python 2.6+ and Python 3, unlike the
         # legacy `except KeyError, ex` spelling.
         raise ScorerFailure(
             brief=lazy_gettext("CSV data does not match schema."),
             detail=[ex.args[0]])
Esempio n. 10
0
 def __init__(self, schema, csvdata, check_classes=None):
     """Create an input-class scorer over the given CSV data."""
     kwargs = dict(
         name=lazy_gettext('InputClass Scorer'),
         schema=schema,
         csvdata=csvdata,
         check_classes=check_classes,
     )
     super(InputClassScorer, self).__init__(**kwargs)
Esempio n. 11
0
 def __init__(self, schema, csvdata, check_classes=None):
     """Create a boundary-value scorer over the given CSV data."""
     super(BoundaryValueScorer, self).__init__(
         name=lazy_gettext('BoundaryValue Scorer'), schema=schema,
         csvdata=csvdata, check_classes=check_classes)
Esempio n. 12
0
 def __init__(self, schema, csvdata, check_classes=None):
     """Create an input-class scorer over the given CSV data."""
     super(InputClassScorer, self).__init__(
         name=lazy_gettext('InputClass Scorer'), schema=schema,
         csvdata=csvdata, check_classes=check_classes)
Esempio n. 13
0
 def addSkip(self, test, reason):
     """Record a skipped test, with its reason, in the detailed report."""
     super(UnitTestScorerDetailResult, self).addSkip(test, reason)
     message = lazy_gettext(
         'SKIP: %(test)s: %(reason)s.',
         test=self.getDescription(test), reason=reason)
     self.details.append(message)
Esempio n. 14
0
 def addExpectedFailure(self, test, err):
     """Record an expected failure in the detailed report."""
     super(UnitTestScorerDetailResult, self).addExpectedFailure(test, err)
     message = lazy_gettext(
         'EXPECTED FAIL: %(test)s.\n%(error)s',
         test=self.getDescription(test),
         error=self._exc_info_to_string(err, test))
     self.details.append(message)
Esempio n. 15
0
 def __init__(self, schema, csvdata, check_classes=None):
     """Create a boundary-value scorer over the given CSV data."""
     kwargs = dict(
         name=lazy_gettext('BoundaryValue Scorer'),
         schema=schema,
         csvdata=csvdata,
         check_classes=check_classes,
     )
     super(BoundaryValueScorer, self).__init__(**kwargs)
Esempio n. 16
0
    def do_run(self):
        """Run the object-structure check and fill score/brief/detail.

        Two input modes:

        * ``self.schema`` is a string: it is treated as raw checker output,
          with the detail lines first and the total / error counts on the
          last two lines.
        * otherwise: ``self.schema.check()`` is run with a
          :class:`SchemaResultCollector`.

        Raises :class:`ScorerFailure` on any error in the second mode.
        """
        if isinstance(self.schema, str):
            lines = self.schema.split("\n")
            # last two lines carry the totals; everything before is detail
            self.detail = lines[:-2]
            total = int(lines[-2])
            error = int(lines[-1])
            self.score = 100.0 * (total - error) / float(total)
            self.brief = lazy_gettext(
                "%(rate).2f%% check points (%(success)d out of %(total)d) " "passed",
                rate=self.score,
                total=total,
                success=total - error,
            )

            if self.logs is not None:
                self.logs.saveScheme(
                    str(self.score) + "\n" + str(self.brief) + "\n" +
                    GetTextStringList(self.detail))

            return

        try:
            collector = SchemaResultCollector()
            self.schema.check(collector)
            self.score = 100.0 * (collector.total - collector.error) / float(collector.total)
            self.brief = lazy_gettext(
                "%(rate).2f%% check points (%(success)d out of %(total)d) " "passed",
                rate=self.score,
                total=collector.total,
                success=collector.total - collector.error,
            )
            self.detail = collector.errors

            if self.logs is not None:
                self.logs.saveScheme(
                    str(self.score) + "\n" + str(self.brief) + "\n" +
                    GetTextStringList(self.detail))

        except:
            # Bare except is deliberate: this scorer is considered
            # non-harmful and its messages may be shown to students, so a
            # submission could `raise` a non-Exception object to leak the
            # evaluation script.  Catch everything and report opaquely.
            if self.logs is not None:
                self.logs.saveScheme(lazy_gettext("Object Structure Scorer exited with error."))
            raise ScorerFailure(brief=lazy_gettext("Object Structure Scorer exited with error."), detail=[])
Esempio n. 17
0
    def __init__(self, suite, filelist, stmt_weight=0.5, branch_weight=0.5):
        """Create a coverage scorer for `suite` over `filelist`.

        The final score is a weighted mix of statement coverage
        (`stmt_weight`) and branch coverage (`branch_weight`).
        """
        super(CoverageScorer, self).__init__(lazy_gettext('Coverage Scorer'))
        self.suite = suite
        self.brief = []
        self.filelist = list(filelist)
        self.stmt_weight = stmt_weight
        self.branch_weight = branch_weight
Esempio n. 18
0
 def parseString(self, value):
     """Parse `value` via :meth:`fromString`, raising ValueError on failure."""
     try:
         parsed = self.fromString(value)
     except Exception:
         # wrap any conversion failure in a uniform, translatable error
         message = lazy_gettext(
             'Cannot convert "%(value)s" to %(type)s.',
             value=value, type=self.__class__.__name__)
         raise ValueError(message)
     return parsed
Esempio n. 19
0
 def check_require(self, collector):
     """Check whether this schema is marked as `REQUIRE` but the object
     does not exist.  All the children schema will also be notified.
     """
     required_but_missing = (
         self.exist_rule == SchemaExistRule.REQUIRE and not self.exist)
     denied_but_present = (
         self.exist_rule == SchemaExistRule.DENY and self.exist)
     if required_but_missing:
         collector.addError(lazy_gettext(
             '%(schema)s is required but the object does not exist or '
             'could not be loaded.',
             schema=self.get_description()))
     elif denied_but_present:
         collector.addError(lazy_gettext(
             '%(schema)s is denied but the object exists.',
             schema=self.get_description()))
     else:
         collector.addSuccess()
Esempio n. 20
0
    def __init__(self, filelist, skipfile=None, errcost=10.0):
        """Create a code-style scorer.

        Only `.py` files not rejected by `skipfile` are kept; each style
        problem later costs `errcost` points.
        """
        super(CodeStyleScorer, self).__init__(lazy_gettext('CodeStyle Scorer'))
        skip = skipfile or (lambda path: False)
        self.filelist = [
            p for p in filelist
            if not skip(p) and p[-3:].lower() == '.py'
        ]
        self.errcost = errcost
Esempio n. 21
0
    def __init__(self, suite, filelist, stmt_weight=0.5, branch_weight=0.5):
        """Initialize the coverage scorer.

        `stmt_weight` and `branch_weight` weight the statement and branch
        coverage rates in the final score.
        """
        super(CoverageScorer, self).__init__(lazy_gettext('Coverage Scorer'))
        self.suite = suite
        self.brief = []
        self.filelist = list(filelist)
        self.stmt_weight = stmt_weight
        self.branch_weight = branch_weight
Esempio n. 22
0
def api_handin_proclog(uuid):
    """Store the process outputs for a given submission.

    This api view is usually requested after the reports of the corresponding
    submission has been stored, so it would not change either the score or
    the state of the submission.

    If the submission state is still `Pending` or `Running`, indicating that
    the reports have not been stored (probably the process exited abnormally
    before report the score), the state will be updated to `Rejected`.

    This view will compare `uuid` in POST object to the `uuid` argument.
    If they are not equal, the operation will be rejected, since it is
    likely to be an attack.

    :route: /api/handin/proclog/<uuid>/
    :payload:

    .. code-block:: python

        {"uuid": uuid of submission,
         "exitcode": The exitcode of the process,
         "stdout": The standard output of the process,
         "stderr": The standard error output of the process}

    :param uuid: The uuid of submission.
    :type uuid: :class:`str`
    :return: ``OK`` if succeeded, error messages otherwise.
    """
    obj = request.payload

    # check uuid, so that we can prevent replay attack
    if obj['uuid'] != uuid:
        return 'uuid mismatch, do not attack'

    # load the handin object, and report error if not exist
    handin = Handin.query.filter(Handin.uuid == uuid).first()
    if not handin:
        return 'requested submission not found'

    # any state other than Accepted/Rejected means the process exited
    # without reporting the score, so mark such handin as "Rejected"
    if handin.state not in ('Accepted', 'Rejected'):
        handin.state = 'Rejected'
        handin.result = lazy_gettext('Process exited before reporting score.')
        handin.partials = []

    try:
        handin.exitcode = obj['exitcode']
        handin.stdout = obj['stdout']
        handin.stderr = obj['stderr']
        db.session.commit()
    except Exception:
        # typo fix: "proccess" -> "process"
        app.logger.exception('Cannot log process of submission(%s).' % uuid)
        return 'update database failed'

    return 'OK'
Esempio n. 23
0
File: api.py Progetto: lzz12/railgun
def api_handin_proclog(uuid):
    """Store the process outputs for a given submission.

    This api view is usually requested after the reports of the corresponding
    submission has been stored, so it would not change either the score or
    the state of the submission.

    If the submission state is still `Pending` or `Running`, indicating that
    the reports have not been stored (probably the process exited abnormally
    before report the score), the state will be updated to `Rejected`.

    This view will compare `uuid` in POST object to the `uuid` argument.
    If they are not equal, the operation will be rejected, since it is
    likely to be an attack.

    :route: /api/handin/proclog/<uuid>/
    :payload:

    .. code-block:: python

        {"uuid": uuid of submission,
         "exitcode": The exitcode of the process,
         "stdout": The standard output of the process,
         "stderr": The standard error output of the process}

    :param uuid: The uuid of submission.
    :type uuid: :class:`str`
    :return: ``OK`` if succeeded, error messages otherwise.
    """
    obj = request.payload

    # check uuid, so that we can prevent replay attack
    if obj['uuid'] != uuid:
        return 'uuid mismatch, do not attack'

    # load the handin object, and report error if not exist
    handin = Handin.query.filter(Handin.uuid == uuid).first()
    if not handin:
        return 'requested submission not found'

    # any state other than Accepted/Rejected means the process exited
    # without reporting the score, so mark such handin as "Rejected"
    if handin.state not in ('Accepted', 'Rejected'):
        handin.state = 'Rejected'
        handin.result = lazy_gettext('Process exited before reporting score.')
        handin.partials = []

    try:
        handin.exitcode = obj['exitcode']
        handin.stdout = obj['stdout']
        handin.stderr = obj['stderr']
        db.session.commit()
    except Exception:
        # typo fix: "proccess" -> "process"
        app.logger.exception('Cannot log process of submission(%s).' % uuid)
        return 'update database failed'

    return 'OK'
Esempio n. 24
0
 def do_run(self):
     """Load and validate the CSV data against the schema.

     Raises :class:`ScorerFailure` with the offending field message when
     the CSV data does not match the schema.
     """
     # First, load the data, and format the error message if schema
     # is not matched.  `loaded_data` is presumably consumed further
     # down in the full source — TODO confirm.
     try:
         loaded_data = list(CsvSchema.LoadCSV(self.schema, self.csvdata))
     except KeyError as ex:
         # `as` form works on Python 2.6+ and Python 3, unlike the
         # legacy `except KeyError, ex` spelling.
         raise ScorerFailure(
             brief=lazy_gettext('CSV data does not match schema.'),
             detail=[ex.args[0]])
Esempio n. 25
0
    def __init__(self, suite):
        """Create a functionality scorer around a unittest suite.

        `suite` may be a :class:`~unittest.suite.TestSuite` or a callable
        returning one; keeping it lazy here prevents the scorer from
        exploits, so it is stored untouched.
        """
        super(UnitTestScorer, self).__init__(
            lazy_gettext('Functionality Scorer'))
        self.suite = suite
Esempio n. 26
0
    def __init__(self, suite):
        """Initialize the functionality scorer.

        The testing suite is stored as-is: if it is a callable rather
        than a :class:`~unittest.suite.TestSuite`, it is executed later
        to obtain the real suite.  Deferring this keeps the scorer safe
        from exploits.
        """
        super(UnitTestScorer, self).__init__(
            lazy_gettext('Functionality Scorer'))
        self.suite = suite
Esempio n. 27
0
    def __init__(self, filelist, skipfile=None, errcost=10.0):
        """Build a code-style scorer over the `.py` files in `filelist`.

        Files rejected by `skipfile` are excluded; each style problem
        later costs `errcost` points.
        """
        super(CodeStyleScorer, self).__init__(lazy_gettext('CodeStyle Scorer'))
        skip = skipfile or (lambda path: False)
        selected = []
        for p in filelist:
            if not skip(p) and p[-3:].lower() == '.py':
                selected.append(p)
        self.filelist = selected
        self.errcost = errcost
Esempio n. 28
0
    def __init__(self, filelist, skipfile=None, errcost=10.0, logs=None):
        """Create a code-style scorer.

        `filelist` is either a list of paths (filtered down to `.py`
        files not rejected by `skipfile`) or a raw string of external
        checker output, which is stored verbatim for :meth:`do_run`.
        Each style problem later costs `errcost` points.
        """
        super(CodeStyleScorer, self).__init__(lazy_gettext("CodeStyle Scorer"), logs)
        skipfile = skipfile or (lambda path: False)
        if isinstance(filelist, str):
            # raw checker output mode: keep the text untouched
            self.filelist = filelist
        else:
            self.filelist = [p for p in filelist
                             if not skipfile(p) and p[-3:].lower() == ".py"]
        self.errcost = errcost
Esempio n. 29
0
    def do_run(self):
        """Run the PEP8 check over the file list and fill score/brief/detail."""
        guide = pep8.StyleGuide()
        guide.options.show_source = True
        guide.options.report = Pep8DetailReport(guide.options)
        result = guide.check_files(self.filelist)

        # each error costs `errcost` points, clamped at zero
        errcount = result.count_errors()
        score = 100.0 - errcount * self.errcost
        self.score = score if score >= 0.0 else 0.0

        # brief report: problem count, or an all-clear message
        total_file = len(self.filelist)
        if errcount > 0:
            self.brief = lazy_gettext(
                '%(trouble)d problem(s) found in %(file)d file(s)',
                trouble=errcount, file=total_file
            )
        else:
            self.brief = lazy_gettext('All files passed PEP8 code style check')

        # detailed report comes straight from the pep8 report object
        self.detail = result.build_report()
Esempio n. 30
0
    def do_run(self):
        """Check the files with pep8 and record score, brief and detail."""
        guide = pep8.StyleGuide()
        guide.options.show_source = True
        guide.options.report = Pep8DetailReport(guide.options)
        result = guide.check_files(self.filelist)

        # every error deducts `errcost` points; never go below zero
        errcount = result.count_errors()
        self.score = 100.0 - errcount * self.errcost
        if self.score < 0.0:
            self.score = 0.0

        # brief report
        if errcount > 0:
            self.brief = lazy_gettext(
                '%(trouble)d problem(s) found in %(file)d file(s)',
                trouble=errcount,
                file=len(self.filelist))
        else:
            self.brief = lazy_gettext('All files passed PEP8 code style check')

        # detailed report
        self.detail = result.build_report()
Esempio n. 31
0
 def do_run(self):
     """Execute the unittest suite and convert the outcome into a score."""
     self.suite = load_suite(self.suite)
     # collect the unittest outcome in a detailed result object
     result = UnitTestScorerDetailResult()
     self.suite.run(result)
     # derive statistics from the result
     total = self.suite.countTestCases()
     errors = len(result.errors)
     failures = len(result.failures)
     success = total - (errors + failures)
     # an empty suite counts as a full pass
     self.score = (100.0 * success / total) if total > 0 else 100.0
     # brief report
     self.brief = lazy_gettext(
         '%(rate).2f%% tests (%(success)d out of %(total)d) passed',
         rate=self.score, total=total, time=self.time, success=success
     )
     # detailed report
     self.detail = result.details
Esempio n. 32
0
 def do_run(self):
     """Run the test suite and score by the fraction of passing tests."""
     self.suite = load_suite(self.suite)
     # run the suite into a detailed result collector
     result = UnitTestScorerDetailResult()
     self.suite.run(result)
     total = self.suite.countTestCases()
     failed = len(result.errors) + len(result.failures)
     success = total - failed
     if total > 0:
         self.score = 100.0 * success / total
     else:
         # no test cases at all: treat as a full pass
         self.score = 100.0
     self.brief = lazy_gettext(
         '%(rate).2f%% tests (%(success)d out of %(total)d) passed',
         rate=self.score,
         total=total,
         time=self.time,
         success=success)
     self.detail = result.details
Esempio n. 33
0
    def LoadCSV(cls, iterable):
        """Get iterable objects from given line `iterable` object."""
        rdr = csv.reader(iterable)

        # parse the header line
        headers = {k: i for i, k in enumerate(next(rdr))}
        field_getter = {}

        # Build one getter per CsvField declared on the class (Python 2
        # `iteritems`).  Default arguments on the lambdas below pin the
        # loop variables at definition time — without them, every getter
        # would see only the *last* field (late-binding closure pitfall).
        for k, v in cls.__dict__.iteritems():
            if isinstance(v, CsvField):
                field_name = v.name if v.name else k
                if field_name in headers:
                    # set the getter to fetch Nth column of a row
                    # where N = headers[k]
                    field_getter[k] = (
                        lambda row, key=field_name, col=v: (
                            col.parseString(row[headers[key]])
                        )
                    )
                elif v.has_default:
                    # not exist in CSV, if has default, use default value
                    field_getter[k] = lambda row, val=v: val.default
                else:
                    # not exist, no default, raise KeyError
                    raise KeyError(lazy_gettext(
                        'Field "%(field)s" not found in CSV data.',
                        field=field_name
                    ))

        # Yield object from CSV one by one (blank rows are skipped)
        for row in rdr:
            if not row:
                continue
            obj = cls()
            for f, g in field_getter.iteritems():
                setattr(obj, f, g(row))
            yield obj
Esempio n. 34
0
 def __init__(self, **kwargs):
     """Signal that the runtime files could not be copied."""
     message = lazy_gettext(
         "Couldn't copy runtime files, please contact TA.")
     super(RuntimeFileCopyFailure, self).__init__(message, **kwargs)
Esempio n. 35
0
                brief=lazy_gettext('CSV data does not match schema.'),
                detail=[ex.args[0]])
        # Next, check the loaded data
        self.detail = []
        covered = set()
        for obj in loaded_data:
            # each record should be sent to all check classes, to see
            # what classes it covered
            for i, c in enumerate(self.check_classes):
                if c(obj):
                    covered.add(i)
        # total up score by len(covered) / total_classes
        self.score = 100.0 * len(covered) / len(self.check_classes)
        self.brief = lazy_gettext(
            '%(rate).2f%% rules (%(cover)s out of %(total)s) covered',
            cover=len(covered),
            total=len(self.check_classes),
            rate=self.score)
        # build more detailed report
        for i, c in enumerate(self.check_classes):
            if i in covered:
                self.detail.append(
                    lazy_gettext('COVERED: %(checker)s',
                                 checker=self.getDescription(c)))
            else:
                self.detail.append(
                    lazy_gettext('NOT COVERED: %(checker)s',
                                 checker=self.getDescription(c)))


class InputClassScorer(InputDataScorer):
Esempio n. 36
0
 def addUnexpectedSuccess(self, test):
     """Record a test that passed although it was expected to fail."""
     super(UnitTestScorerDetailResult, self).addUnexpectedSuccess(test)
     entry = lazy_gettext('UNEXPECTED SUCCESS: %(test)s.',
                          test=self.getDescription(test))
     self.details.append(entry)
Esempio n. 37
0
 def __init__(self, schema):
     """Create a scorer that checks submitted objects against `schema`."""
     name = lazy_gettext('Object Structure Scorer')
     super(ObjSchemaScorer, self).__init__(name)
     # Keep the schema so `run` can validate submissions later on.
     self.schema = schema
Esempio n. 38
0
 def addSuccess(self, test):
     """Record a passed test into the detail list."""
     super(UnitTestScorerDetailResult, self).addSuccess(test)
     entry = lazy_gettext('PASSED: %(test)s.',
                          test=self.getDescription(test))
     self.details.append(entry)
Esempio n. 39
0
 def __init__(self, **kwargs):
     """Signal that the submitted archive file could not be extracted."""
     message = lazy_gettext("Couldn't extract your archive file.")
     super(ExtractFileFailure, self).__init__(message, **kwargs)
Esempio n. 40
0
 def __init__(self, **kwargs):
     """Signal that the runner's file permissions are mis-configured."""
     msg = lazy_gettext('File permissions of the runner is wrong.')
     super(RunnerPermissionError, self).__init__(msg, **kwargs)
Esempio n. 41
0
 def __init__(self, **kwargs):
     """Signal that the submitted program could not be started."""
     message = lazy_gettext("Couldn't start submitted program.")
     super(SpawnProcessFailure, self).__init__(message, **kwargs)
Esempio n. 42
0
    def do_run(self):
        """Run the unit tests under coverage and score by coverage rates.

        Executes :attr:`suite` with branch coverage enabled, then for every
        file in :attr:`filelist` builds a CSV-style summary row plus two
        annotated source listings (statement and branch coverage) in
        :attr:`detail`, and finally combines statement / full-branch /
        partial-branch rates into :attr:`score` weighted by
        :attr:`stmt_weight` and :attr:`branch_weight`.
        """
        def safe_divide(a, b, default=1.0):
            # Division that tolerates b == 0 by returning `default`
            # (default 1.0 treats "nothing to cover" as fully covered).
            if b > 0:
                return float(a) / float(b)
            return default

        cov = coverage(branch=True)
        cov.start()
        self.suite = load_suite(self.suite)

        # Run the test suite
        # the `result` is now ignored, but we can get use of it if necessary
        result = UnitTestScorerDetailResult()
        self.suite.run(result)
        cov.stop()

        # the 1st part: total view of the coverage stats
        # detail[0] is a placeholder: the total report is filled in at the
        # very end, once all per-file stats have been accumulated.
        self.detail = ['']
        total_cov = []

        # statement coverage rate
        total_exec = total_miss = 0
        total_branch = total_taken = total_partial = total_notaken = 0
        for filename in self.filelist:
            # get the analysis on given filename
            # NOTE(review): `_analyze` is a private coverage.py API and may
            # change between coverage versions — confirm the pinned version.
            ana = cov._analyze(filename)
            # gather statement coverage on this file
            exec_stmt = set(ana.statements)
            miss_stmt = set(ana.missing)
            total_exec += len(exec_stmt)
            total_miss += len(miss_stmt)
            # gather branch coverage on this file
            # branch: {lineno: (total_exit, taken_exit)}
            branch = ana.branch_stats()
            file_branch = len(branch)
            # fully taken: every exit of the branch line was exercised
            file_taken = len([b for b in branch.itervalues() if b[0] == b[1]])
            # not taken at all: zero exits exercised
            file_notaken = len(
                [b for b in branch.itervalues() if b[0] != b[1] and b[1] == 0])
            # partially taken: some but not all exits exercised
            file_partial = file_branch - file_taken - file_notaken
            # add the file stats to total coverage results
            total_cov.append(
                '%(file)s, %(stmt)d, %(stmt_taken)d, %(stmt_cov).2f%%, '
                '%(branch)d, %(branch_taken)d, %(branch_partial)d, '
                '%(branch_cov).2f%%, %(branch_partial_cov).2f%%' % {
                    'file':
                    filename,
                    'stmt':
                    len(exec_stmt),
                    'stmt_taken':
                    len(exec_stmt) - len(miss_stmt),
                    'stmt_cov':
                    100.0 * safe_divide(
                        len(exec_stmt) - len(miss_stmt), len(exec_stmt)),
                    'branch':
                    file_branch,
                    'branch_taken':
                    file_taken,
                    'branch_partial':
                    file_partial,
                    'branch_cov':
                    100.0 * safe_divide(file_taken, file_branch),
                    'branch_partial_cov':
                    100.0 * safe_divide(file_partial, file_branch, default=0.0)
                })
            # apply file branch to global
            total_branch += file_branch
            total_taken += file_taken
            total_partial += file_partial
            total_notaken += file_notaken

            # gather all source lines into detail report
            # prefix legend: '-' missed, '+' covered, '*' partial, '  ' n/a
            stmt_text = []
            branch_text = []
            with open(filename, 'rb') as fsrc:
                for i, s in enumerate(fsrc, 1):
                    # first, format statement cover report
                    if i in miss_stmt:
                        stmt_text.append('- %s' % s.rstrip())
                    elif i in exec_stmt:
                        stmt_text.append('+ %s' % s.rstrip())
                    else:
                        stmt_text.append('  %s' % s.rstrip())
                    # next, format branch cover report
                    branch_exec = branch.get(i, None)
                    if not branch_exec:
                        branch_text.append('  %s' % s.rstrip())
                    elif branch_exec[1] == branch_exec[0]:
                        # branch taken
                        branch_text.append('+ %s' % s.rstrip())
                    elif branch_exec[1] == 0:
                        # branch not taken
                        branch_text.append('- %s' % s.rstrip())
                    else:
                        # branch partial taken
                        branch_text.append('* %s' % s.rstrip())
            # compose final detail
            stmt_text = '\n'.join(stmt_text)
            branch_text = '\n'.join(branch_text)

            # the statement coverage
            self.detail.append(
                lazy_gettext(
                    '%(filename)s: %(miss)d statement(s) not covered.\n'
                    '%(sep)s\n'
                    '%(source)s',
                    filename=filename,
                    sep='-' * 70,
                    miss=len(miss_stmt),
                    source=stmt_text))

            # the branch coverage
            # NOTE(review): `miss` below is not referenced by this format
            # string; it is passed but unused.
            self.detail.append(
                lazy_gettext(
                    '%(filename)s: '
                    '%(partial)d branch(es) partially taken and '
                    '%(notaken)d branch(es) not taken.\n'
                    '%(sep)s\n'
                    '%(source)s',
                    filename=filename,
                    sep='-' * 70,
                    miss=len(miss_stmt),
                    source=branch_text,
                    taken=file_taken,
                    notaken=file_notaken,
                    partial=file_partial))

        # Aggregate percentages over all files.
        self.stmt_cover = 100.0 - 100.0 * safe_divide(total_miss, total_exec)
        self.branch_cover = 100.0 * safe_divide(total_taken, total_branch)
        self.branch_partial = 100.0 * safe_divide(
            total_partial, total_branch, default=0.0)

        # Add final total report
        self.detail[0] = lazy_gettext(
            'Coverage Results:\n'
            '%(delim1)s\n'
            'file, stmts, taken, covered, branches, taken, partially taken, '
            'covered, partially covered\n'
            '%(delim2)s\n'
            '%(detail)s\n'
            '%(delim2)s\n'
            'total, %(stmt)d, %(stmt_taken)d, %(stmt_cov).2f%%, '
            '%(branch)d, %(branch_taken)d, %(branch_partial)d, '
            '%(branch_cov).2f%%, %(branch_partial_cov).2f%%',
            delim1='=' * 70,
            delim2='-' * 70,
            detail='\n'.join(total_cov),
            stmt=total_exec,
            stmt_taken=total_exec - total_miss,
            stmt_cov=self.stmt_cover,
            branch=total_branch,
            branch_taken=total_taken,
            branch_partial=total_partial,
            branch_cov=self.branch_cover,
            branch_partial_cov=self.branch_partial,
        )

        # final score
        # partially-taken branches earn half the branch weight
        stmt_score = self.stmt_cover * self.stmt_weight
        full_branch_score = self.branch_cover * self.branch_weight
        partial_branch_score = self.branch_partial * self.branch_weight * 0.5

        self.score = stmt_score + full_branch_score + partial_branch_score
        self.brief = lazy_gettext(
            '%(stmt).2f%% statements covered (%(stmt_score).2f pts), '
            '%(branch).2f%% branches fully covered (%(branch_score).2f pts) '
            'and '
            '%(partial).2f%% partially covered (%(partial_score).2f pts).',
            stmt=self.stmt_cover,
            branch=self.branch_cover,
            partial=self.branch_partial,
            stmt_score=stmt_score,
            branch_score=full_branch_score,
            partial_score=partial_branch_score,
        )
Esempio n. 43
0
 def addUnexpectedSuccess(self, test):
     """Record a test that succeeded despite an expected failure."""
     super(UnitTestScorerDetailResult, self).addUnexpectedSuccess(test)
     description = self.getDescription(test)
     self.details.append(lazy_gettext(
         'UNEXPECTED SUCCESS: %(test)s.', test=description))
Esempio n. 44
0
 def __init__(self, **kwargs):
     """Raised when the submitted program fails to start."""
     super(SpawnProcessFailure, self).__init__(lazy_gettext(
         "Couldn't start submitted program."), **kwargs)
Esempio n. 45
0
def run_handin(handler, handid, hwid):
    """Common pattern to run a submission.  Its main function is to
    glue :class:`~railgun.runner.handin.BaseHandin`,
    :class:`~railgun.runner.host.BaseHost` and
    :class:`~railgun.runner.apiclient.ApiClient` together.

    It is guaranteed that all errors are handled and logged correctly in this
    method.

    :param handler: A factory to create a
        :class:`~railgun.runner.handin.BaseHandin` handler object.
    :param handid: The uuid of this submission.
    :type handid: :class:`str`
    :param hwid: The uuid of the homework.
    :type hwid: :class:`str`
    """
    # Create the api client, we may use it once or twice
    api = ApiClient(runconfig.WEBSITE_API_BASEURL)
    # Immediately report error if permcheck has error
    if permcheck.checker.has_error():
        report_error(handid, RunnerPermissionError())
        return
    try:
        report_start(handid)
        # create and launch this handler
        if callable(handler):
            handler = handler()
        exitcode, stdout, stderr = handler.execute()
        # try to convert stdout & stderr to unicode in UTF-8 encoding
        # if not success, report the client has produced non UTF-8 output
        try:
            stdout = unicode(stdout, 'utf-8')
            stderr = unicode(stderr, 'utf-8')
        except UnicodeError:
            # This routine will terminate the try-catch structure so that
            # we must report the exitcode earlier as well.
            api.proclog(handid, exitcode, None, None)
            raise NonUTF8OutputError()
        # log the handin execution
        if exitcode != 0:
            logger.warning(
                'Submission[%(handid)s] of hw[%(hwid)s]: Error.\n'
                '  stdout: %(stdout)s\n'
                '  stderr: %(stderr)s' % {
                    'handid': handid,
                    'hwid': hwid,
                    'stdout': repr(stdout),
                    'stderr': repr(stderr)
                })
        # Report failure if exitcode != 0. In this case the host itself may
        # not have the chance to report handin scores
        if exitcode != 0:
            # We do not raise RunnerError here, because under this situation,
            # we must have logged such exception, and do not want to log
            # again.
            score = HwScore(
                False,
                lazy_gettext('Exitcode %(exitcode)s != 0.', exitcode=exitcode))
            api.report(handid, score)
        # Update exitcode, stdout and stderr here, which cannot be set in
        # the host itself.
        #
        # This process may also change Handin.state, if previous process
        # exit with code 0 before it reported the score. See website/api.py
        # for more details.
        api.proclog(handid, exitcode, stdout, stderr)
        # Log that we've succesfully done this job.
        logger.info('Submission[%(handid)s] of hw[%(hwid)s]: OK.' % {
            'handid': handid,
            'hwid': hwid
        })
    except RunnerError, ex:
        # RunnerError is logically OK and sent to client only.
        # So we just log the message of this exception, not exception detail.
        logger.warning('Submission[%(handid)s] of hw[%(hwid)s]: %(message)s.' %
                       {
                           'handid': handid,
                           'hwid': hwid,
                           'message': ex.message
                       })
        report_error(handid, ex)
Esempio n. 46
0
 def addExpectedFailure(self, test, err):
     """Record a test that failed in the expected manner."""
     super(UnitTestScorerDetailResult, self).addExpectedFailure(test, err)
     description = self.getDescription(test)
     error_text = self._exc_info_to_string(err, test)
     self.details.append(lazy_gettext(
         'EXPECTED FAIL: %(test)s.\n%(error)s',
         test=description, error=error_text))
Esempio n. 47
0
 def __init__(self, **kwargs):
     """Raised when runtime files cannot be copied for the submission."""
     super(RuntimeFileCopyFailure, self).__init__(lazy_gettext(
         "Couldn't copy runtime files, please contact TA."), **kwargs)
Esempio n. 48
0
 def __init__(self, **kwargs):
     """Raised when the submitted archive holds more files than allowed."""
     message = lazy_gettext("Archive contains too many files.")
     super(ArchiveContainTooManyFileError, self).__init__(message, **kwargs)
Esempio n. 49
0
    def do_run(self):
        """Run the unit tests under coverage and score by coverage rates.

        Executes :attr:`suite` with branch coverage enabled, then for every
        file in :attr:`filelist` builds a CSV-style summary row plus two
        annotated source listings (statement and branch coverage) in
        :attr:`detail`, and finally combines statement / full-branch /
        partial-branch rates into :attr:`score` weighted by
        :attr:`stmt_weight` and :attr:`branch_weight`.
        """
        def safe_divide(a, b, default=1.0):
            # Division that tolerates b == 0 by returning `default`
            # (default 1.0 treats "nothing to cover" as fully covered).
            if b > 0:
                return float(a) / float(b)
            return default

        cov = coverage(branch=True)
        cov.start()
        self.suite = load_suite(self.suite)

        # Run the test suite
        # the `result` is now ignored, but we can get use of it if necessary
        result = UnitTestScorerDetailResult()
        self.suite.run(result)
        cov.stop()

        # the 1st part: total view of the coverage stats
        # detail[0] is a placeholder: the total report is filled in at the
        # very end, once all per-file stats have been accumulated.
        self.detail = ['']
        total_cov = []

        # statement coverage rate
        total_exec = total_miss = 0
        total_branch = total_taken = total_partial = total_notaken = 0
        for filename in self.filelist:
            # get the analysis on given filename
            # NOTE(review): `_analyze` is a private coverage.py API and may
            # change between coverage versions — confirm the pinned version.
            ana = cov._analyze(filename)
            # gather statement coverage on this file
            exec_stmt = set(ana.statements)
            miss_stmt = set(ana.missing)
            total_exec += len(exec_stmt)
            total_miss += len(miss_stmt)
            # gather branch coverage on this file
            # branch: {lineno: (total_exit, taken_exit)}
            branch = ana.branch_stats()
            file_branch = len(branch)
            # fully taken: every exit of the branch line was exercised
            file_taken = len([b for b in branch.itervalues() if b[0] == b[1]])
            # not taken at all: zero exits exercised
            file_notaken = len([b for b in branch.itervalues()
                                if b[0] != b[1] and b[1] == 0])
            # partially taken: some but not all exits exercised
            file_partial = file_branch - file_taken - file_notaken
            # add the file stats to total coverage results
            total_cov.append(
                '%(file)s, %(stmt)d, %(stmt_taken)d, %(stmt_cov).2f%%, '
                '%(branch)d, %(branch_taken)d, %(branch_partial)d, '
                '%(branch_cov).2f%%, %(branch_partial_cov).2f%%' % {
                    'file': filename,
                    'stmt': len(exec_stmt),
                    'stmt_taken': len(exec_stmt) - len(miss_stmt),
                    'stmt_cov': 100.0 * safe_divide(
                        len(exec_stmt) - len(miss_stmt), len(exec_stmt)),
                    'branch': file_branch,
                    'branch_taken': file_taken,
                    'branch_partial': file_partial,
                    'branch_cov': 100.0 * safe_divide(file_taken, file_branch),
                    'branch_partial_cov': 100.0 * safe_divide(
                        file_partial, file_branch, default=0.0)
                }
            )
            # apply file branch to global
            total_branch += file_branch
            total_taken += file_taken
            total_partial += file_partial
            total_notaken += file_notaken

            # gather all source lines into detail report
            # prefix legend: '-' missed, '+' covered, '*' partial, '  ' n/a
            stmt_text = []
            branch_text = []
            with open(filename, 'rb') as fsrc:
                for i, s in enumerate(fsrc, 1):
                    # first, format statement cover report
                    if i in miss_stmt:
                        stmt_text.append('- %s' % s.rstrip())
                    elif i in exec_stmt:
                        stmt_text.append('+ %s' % s.rstrip())
                    else:
                        stmt_text.append('  %s' % s.rstrip())
                    # next, format branch cover report
                    branch_exec = branch.get(i, None)
                    if not branch_exec:
                        branch_text.append('  %s' % s.rstrip())
                    elif branch_exec[1] == branch_exec[0]:
                        # branch taken
                        branch_text.append('+ %s' % s.rstrip())
                    elif branch_exec[1] == 0:
                        # branch not taken
                        branch_text.append('- %s' % s.rstrip())
                    else:
                        # branch partial taken
                        branch_text.append('* %s' % s.rstrip())
            # compose final detail
            stmt_text = '\n'.join(stmt_text)
            branch_text = '\n'.join(branch_text)

            # the statement coverage
            self.detail.append(lazy_gettext(
                '%(filename)s: %(miss)d statement(s) not covered.\n'
                '%(sep)s\n'
                '%(source)s',
                filename=filename, sep='-' * 70, miss=len(miss_stmt),
                source=stmt_text
            ))

            # the branch coverage
            # NOTE(review): `miss` below is not referenced by this format
            # string; it is passed but unused.
            self.detail.append(lazy_gettext(
                '%(filename)s: '
                '%(partial)d branch(es) partially taken and '
                '%(notaken)d branch(es) not taken.\n'
                '%(sep)s\n'
                '%(source)s',
                filename=filename, sep='-' * 70, miss=len(miss_stmt),
                source=branch_text, taken=file_taken, notaken=file_notaken,
                partial=file_partial
            ))

        # Aggregate percentages over all files.
        self.stmt_cover = 100.0 - 100.0 * safe_divide(total_miss, total_exec)
        self.branch_cover = 100.0 * safe_divide(total_taken, total_branch)
        self.branch_partial = 100.0 * safe_divide(
            total_partial, total_branch, default=0.0)

        # Add final total report
        self.detail[0] = lazy_gettext(
            'Coverage Results:\n'
            '%(delim1)s\n'
            'file, stmts, taken, covered, branches, taken, partially taken, '
            'covered, partially covered\n'
            '%(delim2)s\n'
            '%(detail)s\n'
            '%(delim2)s\n'
            'total, %(stmt)d, %(stmt_taken)d, %(stmt_cov).2f%%, '
            '%(branch)d, %(branch_taken)d, %(branch_partial)d, '
            '%(branch_cov).2f%%, %(branch_partial_cov).2f%%',
            delim1='=' * 70,
            delim2='-' * 70,
            detail='\n'.join(total_cov),
            stmt=total_exec,
            stmt_taken=total_exec - total_miss,
            stmt_cov=self.stmt_cover,
            branch=total_branch,
            branch_taken=total_taken,
            branch_partial=total_partial,
            branch_cov=self.branch_cover,
            branch_partial_cov=self.branch_partial,
        )

        # final score
        # partially-taken branches earn half the branch weight
        stmt_score = self.stmt_cover * self.stmt_weight
        full_branch_score = self.branch_cover * self.branch_weight
        partial_branch_score = self.branch_partial * self.branch_weight * 0.5

        self.score = stmt_score + full_branch_score + partial_branch_score
        self.brief = lazy_gettext(
            '%(stmt).2f%% statements covered (%(stmt_score).2f pts), '
            '%(branch).2f%% branches fully covered (%(branch_score).2f pts) '
            'and '
            '%(partial).2f%% partially covered (%(partial_score).2f pts).',
            stmt=self.stmt_cover,
            branch=self.branch_cover,
            partial=self.branch_partial,
            stmt_score=stmt_score,
            branch_score=full_branch_score,
            partial_score=partial_branch_score,
        )
Esempio n. 50
0
class InputDataScorer(Scorer):
    """The base class for input data scorers.

    Input data scorers are mainly used in BlackBox testing homework.
    The students may provide some structured data in CSV format,
    and we want to check whether the data covers all the equivalent
    classes and boundary values.

    To achieve that goal, I introduced :class:`InputDataScorer`.
    It can takes a set of methods as `condition` validators, where
    each methods only returns :data:`True` on a certain class of
    input data or boundary value.

    Then the student submitted data will be checked by all the
    `condition` validators one row after another.  The scorer will
    count the number of validators who have ever reported :data:`True`,
    and give the score according to that portion.

    :param name: The name of this scorer, should be set by derived classes.
    :type name: :class:`~railgun.common.lazy_i18n.GetTestString`
    :param schema: The schema for this scorer to parse csv data.
    :type schema: :class:`~railgun.common.csvdata.CsvSchema`
    :param csvdata: Iterable object over :class:`str`, each representing a
        row in the csv data.  Usually a :class:`file` object.
    :type csvdata: :class:`object`
    :param check_classes: The initial list of input data validators.
    :type check_classes: :class:`list` of :func:`callable` objects
    """
    def __init__(self, name, schema, csvdata, check_classes=None):
        """Construct a new `InputClassScorer` on given `csvdata`, checked by
        rules defined in `check_classes`."""

        super(InputDataScorer, self).__init__(name)

        #: Store the :class:`~railgun.common.csvdata.CsvSchema`.
        self.schema = schema
        #: Rows of csv data.
        self.csvdata = csvdata
        #: The input data validators.
        self.check_classes = check_classes or []

    def empty(self):
        """Whether or not this scorer has no input data validator?"""
        return not self.check_classes

    def getDescription(self, check_class):
        """Get the description for given validator.

        The `description` attribute of the given validator, or `__name__`
        if `description` doesn't exist, or the string representation of the
        given validator if `__name__` doesn't exist either.

        :param check_class: The :func:`callable` data validator.
        :return: The description of given validator.
        """

        if hasattr(check_class, 'description'):
            return getattr(check_class, 'description')
        if hasattr(check_class, '__name__'):
            return getattr(check_class, '__name__')
        return str(check_class)

    def rule(self, description):
        """Make a decorator to :func:`callable` objects which will add
        `description` attribute to the given method, and will add that
        method to :attr:`check_classes`.

        Usage::

            @scorer.rule('a >= 1 and b <= 2')
            def a_must_not_less_than_1_and_b_must_not_greater_than_2(a, b):
                return a >= 1 and b <= 2

        .. note::

            If the check method raises any exception, this rule will be
            regarded as `not matched`.  This design purpose is to ease
            the rules like `matching a string that can be converted into
            int`.

        :return: The decorator.
        """
        def outer(method):
            @wraps(method)
            def inner(*args, **kwargs):
                try:
                    return method(*args, **kwargs)
                except Exception:
                    return False

            setattr(inner, 'description', description)
            self.check_classes.append(inner)
            return method

        return outer

    def do_run(self):
        # First, load the data, and format the error message if schema
        # is not matched.
        try:
            loaded_data = list(CsvSchema.LoadCSV(self.schema, self.csvdata))
        except KeyError, ex:
            raise ScorerFailure(
                brief=lazy_gettext('CSV data does not match schema.'),
                detail=[ex.args[0]])
        except ValueError, ex:
            raise ScorerFailure(
                brief=lazy_gettext('CSV data does not match schema.'),
                detail=[ex.args[0]])
Esempio n. 51
0
 def addSkip(self, test, reason):
     """Record a skipped test along with the reason it was skipped."""
     super(UnitTestScorerDetailResult, self).addSkip(test, reason)
     description = self.getDescription(test)
     self.details.append(lazy_gettext(
         'SKIP: %(test)s: %(reason)s.', test=description, reason=reason))
Esempio n. 52
0
 def __init__(self, **kwargs):
     """Raised when the student's archive file cannot be unpacked."""
     super(ExtractFileFailure, self).__init__(lazy_gettext(
         "Couldn't extract your archive file."), **kwargs)
Esempio n. 53
0
 def addError(self, test, err):
     """Record a test that raised an unexpected error."""
     super(UnitTestScorerDetailResult, self).addError(test, err)
     description = self.getDescription(test)
     error_text = self._exc_info_to_string(err, test)
     self.details.append(lazy_gettext(
         'ERROR: %(test)s.\n%(error)s', test=description, error=error_text))
Esempio n. 54
0
 def __init__(self, schema):
     """Build the object-structure scorer for the given `schema`."""
     super(ObjSchemaScorer, self).__init__(
         lazy_gettext('Object Structure Scorer'))
     # The schema that submitted objects will be validated against.
     self.schema = schema
Esempio n. 55
0
 def addSuccess(self, test):
     """Record a passed test."""
     super(UnitTestScorerDetailResult, self).addSuccess(test)
     description = self.getDescription(test)
     self.details.append(
         lazy_gettext('PASSED: %(test)s.', test=description))
Esempio n. 56
0
File: api.py Progetto: lzz12/railgun
def api_handin_report(uuid):
    """Store the final score and detailed reports of given submission.

    This view will compare `uuid` in POST object to the `uuid` argument.
    If they are not equal, the operation will be rejected, since it is
    likely to be an attack.

    If the submission state is neither `Running` nor `Pending`, the operation
    will be rejected, since it is likely to be a programmatic bug.

    If the reported score is 0.0, but the state is `Accepted`, then it
    will be modified to `Rejected`, since it is wired for a zero-score
    submission to be `Accepted`.

    If the reported brief result message is empty, it will be set to
    a translated version of `"Your submission is accepted."` or
    `"Your submission is rejected."`, depending on the reported state.

    The :class:`~railgun.website.models.FinalScore` table records will
    also be updated in this view.

    :route: /api/handin/report/<uuid>/
    :payload: A serialized :class:`~railgun.common.hw.HwScore` object.
    :param uuid: The uuid of submission.
    :type uuid: :class:`str`
    :return: ``OK`` if succeeded, error messages otherwise.
    """
    obj = request.payload

    # check uuid, so that we can prevent replay attack
    if obj['uuid'] != uuid:
        return 'uuid mismatch, do not attack'

    # construct HwScore object from payload
    try:
        score = HwScore.from_plain(obj)
    except Exception:
        return 'not valid score object'

    # load the handin object, and report error if not exist
    handin = Handin.query.filter(Handin.uuid == uuid).first()
    if not handin:
        return 'requested handin not found'

    # if handin.state not in ['Running', 'Pending'], it must already have a
    # score. reject the API call.
    if handin.state not in ('Running', 'Pending'):
        return 'score already reported'

    # Special hack: unittest will catch all exceptions.
    #
    # Such submissions may result 0.0 base score but marked as 'Accepted'.
    # I decide to treat these submissions 'Rejected', because no one
    # would accept a totally bad submission.
    handin.score = score.get_score()
    if handin.score < 1e-5 and score.accepted:
        score.accepted = False
        score.result = lazy_gettext('No test passed, submission rejected.')

    # update result of handin
    handin.state = 'Accepted' if score.accepted else 'Rejected'
    if score.accepted:
        handin.result = lazy_gettext('Your submission is accepted.')
    elif unicode(score.result):
        handin.result = score.result
    else:
        handin.result = lazy_gettext('Your submission is rejected.')
    handin.compile_error = score.compile_error
    handin.partials = score.partials

    # update hwscore table and set the final score of this homework
    if handin.is_accepted():
        final_score = handin.score * handin.scale
        hwscore = (FinalScore.query.filter(
            FinalScore.hwid == handin.hwid).filter(
                FinalScore.user_id == handin.user_id)).first()
        if not hwscore:
            hwscore = FinalScore(user_id=handin.user_id,
                                 hwid=handin.hwid,
                                 score=final_score)
            db.session.add(hwscore)
        elif final_score > hwscore.score:
            # only keep the best final score per (user, homework)
            hwscore.score = final_score

    try:
        db.session.commit()
    except Exception:
        # Roll back so the session is left in a usable state after the
        # failed commit; otherwise later requests on this session break.
        db.session.rollback()
        app.logger.exception('Cannot update result of submission(%s).' % uuid)
        return 'update database failed'

    return 'OK'
Esempio n. 57
0
 def __init__(self, **kwargs):
     """Raised when the runner's own file permissions are wrong."""
     super(RunnerPermissionError, self).__init__(lazy_gettext(
         'File permissions of the runner is wrong.'), **kwargs)