Beispiel #1
0
def do_sync(tasks):
    """Run the synchronization step for *tasks* when it is enabled.

    Honors the 'sync' entry of the global configs; a Ctrl-C during the
    sync logs a message and exits cleanly with status 0.
    """
    try:
        sync_enabled = configs['sync'] == 'yes'
        if sync_enabled:
            rsync_tool_runner(tasks)
    except KeyboardInterrupt:
        satt_log('Stopping...')
        sys.exit(0)
Beispiel #2
0
    def __init__(self, conffile = None):
        """Open the database connection and verify that it works.

        conffile -- optional path to a configuration file, forwarded to
        DatabaseConnection.
        """
        self._db = DatabaseConnection(conffile)

        # sanity check: a trivial query also tells us the server version
        version = self._db.query('SELECT VERSION()')[0][0]
        satt_log('Connected to database: MySQL version {0}'.format(version))

        self._rating_methods = RatingMethod(self._db.query)
Beispiel #3
0
def err(msg):
    """Report an error message and abort the whole program.

    The message is mirrored into the satt log (when logging has already
    been initialized) and always written to stderr in red; exits with
    status 1.
    """
    full_msg = 'ERR: {0}\n'.format(msg)

    from log import satt_log_inited
    if satt_log_inited():
        from log import satt_log
        # stderr gets the message below, so skip stdout in the log call
        satt_log(full_msg, stdout=False)

    sys.stderr.write(colored(full_msg, 'red'))
    sys.exit(1)
Beispiel #4
0
def rsync_tool_runner(tasks):
    """Synchronize the tool to the remote machines for the given tasks.

    Skips with a debug message when no 'sync-cmd' is configured;
    otherwise queues the tasks and runs the sync dispatcher.
    """
    satt_log('Synchronizing...')

    # dict.has_key() was removed in Python 3 -- use the `in` operator,
    # which behaves identically and works on Python 2 as well
    if 'sync-cmd' not in configs:
        dbg('No sync command')
        return

    add_tasks(tasks)

    d = SyncDispatcher(tasks)
    d.run()
Beispiel #5
0
    def _monitorTasks(self):
        """Poll the running benchmarks and dispatch their events.

        Loops while any benchmark is running and reacts to poll() flags:
        POLLERR aborts the run, POLLIN forwards benchmark output to the
        report, POLLHUP finishes the benchmark (re-queueing it when the
        report rejects the result) and starts the next one.
        """
        assert self._is_running()

        while self._is_running():
            for fd, flags in self._poll_wait():
                if flags & select.POLLERR:
                    self._killTasks()
                    err('Waiting for benchmark failed')

                if flags & select.POLLIN:
                    bench = self._getBenchmark(fd)
                    try:
                        data = bench.readOutput()
                        while data:
                            self._report.report(data, bench)
                            data = bench.readOutput()
                    # the read loop above can be too fast and the read
                    # may raise EBUSY -- just retry on the next poll round
                    except IOError:
                        continue

                # is benchmark done?
                if flags & select.POLLHUP:
                    # remove the old benchmark
                    bench = self._unregisterFd(fd)
                    if not self._report.done(bench):
                        # something went wrong - queue this one again
                        satt_log('Running benchmark again');
                        # compensate the increment below: this benchmark
                        # did not really finish
                        self._benchmarks_done -= 1

                        # XXX we do not track how many times a benchmark
                        # has run, so in principle it could be re-queued
                        # indefinitely.  That has not been a problem in
                        # practice, so we ignore it for now.
                        bench.task.readd(bench)

                    self._benchmarks_done += 1
                    # set progress (percentage of finished benchmarks)
                    if self._benchmarks_done != 0:
                        prgs = float(self._benchmarks_done) / self._benchmarks_count
                        self._report.progress(int(prgs * 100))

                    # run new benchmark
                    self._runBenchmark(bench.task)
Beispiel #6
0
    def __init__(self):
        """Set up reporting of benchmark results into the MySQL database.

        Creates a helper stdout reporter, records a run id (current unix
        time) and the escaped tool parameters, then opens the database
        connection and verifies it with a version query.
        """
        BenchmarkReport.__init__(self)

        # use this to print out what is happening
        self._stdout = StdoutReporter()
        self.run_id = int(time())

        # the parameters end up inside SQL string literals, so escape
        # the apostrophes up front
        params = '{0}'.format(configs.configs['params'])
        self.tool_params = params.replace('\'', '\\\'')

        from database import DatabaseConnection
        self._db = DatabaseConnection()

        version = self._db.query('SELECT VERSION()')[0][0]
        satt_log('Connected to database: MySQL version {0}'.format(version))

        self._rating_methods = RatingMethod(self._db.query)
Beispiel #7
0
    def __init__(self):
        """Initialize database-backed reporting for one benchmark run."""
        BenchmarkReport.__init__(self)

        # helper reporter that mirrors progress to stdout
        self._stdout = StdoutReporter()

        # identify this run by its start time (unix seconds)
        self.run_id = int(time())

        # escape apostrophes: the parameters are later embedded into
        # SQL string literals
        self.tool_params = '{0}'.format(configs.configs['params'])
        self.tool_params = self.tool_params.replace('\'', '\\\'')

        from database import DatabaseConnection
        self._db = DatabaseConnection()

        # self check: a trivial query proves the connection works
        db_version = self._db.query('SELECT VERSION()')[0][0]
        satt_log('Connected to database: MySQL version {0}'.format(db_version))

        self._rating_methods = RatingMethod(self._db.query)
Beispiel #8
0
        def _exception_handler(args, data):
            # Invoked by the db layer when the INSERT fails; args[1] is
            # the server's error message, data carries the query context.
            q, tool_id, task_id = data

            if not args[1].startswith('Duplicate entry'):
                err('Failed querying db: {0}\n\n{1}'.format(args[1], q))
            elif configs.configs['ignore-duplicates'] == 'yes':
                satt_log('Already has this result for this tool, ignoring.')
            else:
                err('Already has result of this benchmark for this tool.\n'
                    'It is only supported to have one result for each '
                    'benchmark and particular tool\n'
                    'If want ignore this behaviour use --ignore-duplicates.\n'
                    '(tool + version + params). You can delete the old result:\n'
                    '  $ ./db-cli \'DELETE from task_results WHERE tool_id={0}'
                    ' and task_id={1}\'\n'
                    'or you can delete all results for this tool:\n'
                    '  $ ./db-cli \'DELETE from tools WHERE id={0}\'\n'
                    .format(tool_id, task_id, tool_id))
Beispiel #9
0
    def run(self):
        """Spawn benchmarks for every task and monitor them to completion.

        On Ctrl-C the running tasks are killed and results are not sent.
        """
        dbg('[local] Started dispatching benchmarks')

        # Start up to getParallel() benchmarks per task; afterwards,
        # whenever one finishes, exactly one replacement is spawned by
        # the monitor loop.
        try:
            for task in self._tasks:
                for _ in range(task.getParallel()):
                    if self._runBenchmark(task) is None:
                        break

            # monitor the tasks
            self._monitorTasks()
        except KeyboardInterrupt:
            self._dontSendResults = True
            self._killTasks()
            satt_log('Stopping...')
Beispiel #10
0
        def _exception_handler(args, data):
            # Handler passed to the db layer: decides what to do when
            # inserting a result fails.
            q, tool_id, task_id = data
            errmsg = args[1]

            if errmsg.startswith('Duplicate entry'):
                if configs.configs['ignore-duplicates'] == 'yes':
                    satt_log('Already has this result for this tool, ignoring.')
                else:
                    err('Already has result of this benchmark for this tool.\n'
                        'It is only supported to have one result for each '
                        'benchmark and particular tool\n'
                        'If want ignore this behaviour use --ignore-duplicates.\n'
                        '(tool + version + params). You can delete the old result:\n'
                        '  $ ./db-cli \'DELETE from task_results WHERE tool_id={0}'
                        ' and task_id={1}\'\n'
                        'or you can delete all results for this tool:\n'
                        '  $ ./db-cli \'DELETE from tools WHERE id={0}\'\n'
                        .format(tool_id, task_id, tool_id))
            else:
                err('Failed querying db: {0}\n\n{1}'.format(errmsg, q))
Beispiel #11
0
    def save_task(self, rb, cat_id):
        """Insert a previously unknown task and return its database row."""
        name = get_name(rb.name)
        correct = self._get_correct_result(name, rb)
        if correct is None:
            # could not classify -- record that fact in the result itself
            msg = 'Couldn\'t infer if the result is correct or not, setting unkown'
            satt_log(msg)
            rb.output += msg
            rb.result = 'unknown ({0})'.format(rb.result)

        # create the new task record
        # NOTE(review): values are interpolated straight into SQL --
        # safe only as long as task names are trusted input
        insert_q = """
        INSERT INTO tasks
          (name, category_id, correct_result, property)
          VALUES('{0}', '{1}', '{2}', '{3}');
        """.format(name, cat_id, correct, None)
        self._db.query(insert_q)

        select_q = """
        SELECT id, correct_result FROM tasks
        WHERE name = '{0}' and category_id = '{1}';
        """.format(name, cat_id)
        return self._db.query(select_q)
Beispiel #12
0
    def save_task(self, rb, cat_id):
        """Store an unknown task in the database and return its fresh row."""
        task_name = get_name(rb.name)
        expected = self._get_correct_result(task_name, rb)
        if expected is None:
            # classification failed; make that visible in the result
            msg = 'Couldn\'t infer if the result is correct or not, setting unkown'
            satt_log(msg)
            rb.output += msg
            rb.result = 'unknown ({0})'.format(rb.result)

        # create new task
        # NOTE(review): direct string interpolation into SQL -- assumes
        # task names contain no quoting characters; verify upstream
        q = """
        INSERT INTO tasks
          (name, category_id, correct_result, property)
          VALUES('{0}', '{1}', '{2}', '{3}');
        """.format(task_name, cat_id, expected, None)
        self._db.query(q)

        # read the row back so the caller gets (id, correct_result)
        q = """
        SELECT id, correct_result FROM tasks
        WHERE name = '{0}' and category_id = '{1}';
        """.format(task_name, cat_id)
        return self._db.query(q)
Beispiel #13
0
    def done(self, rb):
        """Log the outcome of one finished sync job; drop its task on failure.

        Always returns True -- sync jobs are never re-queued.
        """
        machine = rb.task.getMachine()
        name = rb.name

        # refresh returncode before inspecting it
        rb.proc.poll()
        if rb.proc.returncode != 0:
            msg = '{0} {1} - {2}: FAILED (removing)'.format(rb.category, machine, name)
            satt_log(colored('{0}'.format(msg), 'red'))
            self.tasks.remove(rb.task)
        else:
            satt_log('{0} {1} - {2}: Done'.format(rb.category, machine, name))

        if rb.output:
            # any output from the sync command is most likely an error,
            # so show it in red
            satt_log(colored(rb.output, 'red'))

        sys.stdout.flush()

        return True
Beispiel #14
0
File: sync.py  Project: ufwt/satt
    def done(self, rb):
        """Report one finished sync job and prune its task on failure.

        Always returns True -- failed sync jobs are removed, not retried.
        """
        mach = rb.task.getMachine()
        bench_name = rb.name

        # poll() updates returncode for the finished process
        rb.proc.poll()
        if rb.proc.returncode == 0:
            satt_log('{0} {1} - {2}: Done'.format(rb.category, mach, bench_name))
        else:
            msg = '{0} {1} - {2}: FAILED (removing)'.format(
                rb.category, mach, bench_name)
            satt_log(colored('{0}'.format(msg), 'red'))
            self.tasks.remove(rb.task)

        if rb.output:
            # output from the sync command usually signals trouble -> red
            satt_log(colored(rb.output, 'red'))

        sys.stdout.flush()

        return True
Beispiel #15
0
    def done(self, rb):
        """Persist one finished benchmark result into the database.

        Returns False when the stdout reporter decided the benchmark
        must run again; otherwise stores the result (or dumps it to a
        file when its category/task is unknown) and returns True.
        """
        # print it after saving
        if not self._stdout.done(rb):
            # if there is a problem, the benchmark will run again, so do not
            # proceed further
            return False

        tool_id, year_id = self._updateDb(rb)

        # look up the category row for this year
        # NOTE(review): values are interpolated straight into SQL here
        # and below -- safe only while category/task names are trusted
        q = """
        SELECT id, name FROM categories
        WHERE
            year_id = '{0}' and name = '{1}';
        """.format(year_id, rb.category)
        res = self._db.query(q)
        if not res:
            if configs.configs['save-new-tasks'] == 'yes':
                res = self.update_category(year_id, rb.category)
            else:
                rb.dumpToFile('Do not have given category')
                satt_log('^^ dumped to file (unknown category)')
                return True

        assert len(res) == 1
        if not len(res[0]) == 2:
            print(res[0])
        assert len(res[0]) == 2
        cat_id = res[0][0]
        cat_name = res[0][1]

        # find the task record for this benchmark
        q = """
        SELECT id, correct_result FROM tasks
        WHERE name = '{0}' and category_id = '{1}';
        """.format(get_name(rb.name), cat_id)
        res = self._db.query(q)

        # we do not have such a task??
        if not res:
            if configs.configs['save-new-tasks'] == 'yes':
                res = self.save_task(rb, cat_id)
            else:
                rb.dumpToFile('Do not have given task')
                satt_log('^^ dumped to file (unknown task)')
                return True

        assert len(res) == 1
        task_id = res[0][0]
        correct_result = res[0][1]

        # replace ' by \' in output
        rb.output = rb.output.replace('\'', '\\\'')
        ic = is_correct(correct_result, rb.result)

        result = rb.result.lower()
        if rb.witness != '':
            wtns = rb.witness.strip()

            # replace ' even in witness, because it can contain
            # arbitrary text
            wtns = wtns.replace('\'', '\\\'')
        else:
            wtns = None

        if rb.witness_output != '':
            # FIXME we should limit the wintess_output size, otherwise we
            # get get some performance issues
            rb.witness_output = rb.witness_output.strip()
            rb.witness_output = rb.witness_output.replace('\'', '\\\'')

        # build the INSERT for the result row
        q = """
        INSERT INTO task_results
        (tool_id, task_id, result, witness, is_correct, points, cpu_time,
         memory_usage, output, witness_output, run_id)
        VALUES('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', {8}, '{9}', '{10}')
        """.format(tool_id, task_id, result, wtns, ic,
                   self._rating_methods.points(ic, rb.result, wtns, cat_name),
                   None2Zero(rb.time), None2Zero(rb.memory),
                   Empty2Null(rb.output), rb.witness_output, self.run_id)

        def _exception_handler(args, data):
            # called by the db layer on query failure; args[1] is the
            # server error message, data carries the query context
            q, tool_id, task_id = data

            if (args[1].startswith('Duplicate entry')):

                if configs.configs['ignore-duplicates'] == 'yes':
                    satt_log(
                        'Already has this result for this tool, ignoring.')
                else:
                    err('Already has result of this benchmark for this tool.\n'
                        'It is only supported to have one result for each '
                        'benchmark and particular tool\n'
                        'If want ignore this behaviour use --ignore-duplicates.\n'
                        '(tool + version + params). You can delete the old result:\n'
                        '  $ ./db-cli \'DELETE from task_results WHERE tool_id={0}'
                        ' and task_id={1}\'\n'
                        'or you can delete all results for this tool:\n'
                        '  $ ./db-cli \'DELETE from tools WHERE id={0}\'\n'.
                        format(tool_id, task_id, tool_id))
            else:
                err('Failed querying db: {0}\n\n{1}'.format(args[1], q))

        self._db.query_with_exception_handler(q, _exception_handler,
                                              (q, tool_id, task_id))

        self._commit()

        return True
Beispiel #16
0
    def done(self, rb):
        """Store one finished benchmark result in the database.

        Returns False when the stdout reporter rejected the result (the
        benchmark will run again); otherwise saves the result -- or
        dumps it to a file for unknown categories/tasks -- and returns
        True.
        """
        # print it after saving
        if not self._stdout.done(rb):
            # if there is a problem, the benchmark will run again, so do not
            # proceed further
            return False

        tool_id, year_id = self._updateDb(rb)

        # resolve the category row for this year
        # NOTE(review): values are interpolated directly into SQL in the
        # queries below; assumes trusted category/task names
        q = """
        SELECT id, name FROM categories
        WHERE
            year_id = '{0}' and name = '{1}';
        """.format(year_id, rb.category)
        res = self._db.query(q)
        if not res:
            if configs.configs['save-new-tasks'] == 'yes':
                res = self.update_category(year_id, rb.category)
            else:
                rb.dumpToFile('Do not have given category')
                satt_log('^^ dumped to file (unknown category)')
                return True

        assert len(res) == 1
        if not len(res[0]) == 2:
            print(res[0])
        assert len(res[0]) == 2
        cat_id = res[0][0]
        cat_name = res[0][1]

        # resolve the task record for this benchmark
        q = """
        SELECT id, correct_result FROM tasks
        WHERE name = '{0}' and category_id = '{1}';
        """.format(get_name(rb.name), cat_id)
        res = self._db.query(q)

        # we do not have such a task??
        if not res:
            if configs.configs['save-new-tasks'] == 'yes':
                res = self.save_task(rb, cat_id)
            else:
                rb.dumpToFile('Do not have given task')
                satt_log('^^ dumped to file (unknown task)')
                return True

        assert len(res) == 1
        task_id = res[0][0]
        correct_result = res[0][1]

        # replace ' by \' in output
        rb.output = rb.output.replace('\'', '\\\'')
        ic = is_correct(correct_result, rb.result)

        result= rb.result.lower()
        if rb.witness != '':
            wtns = rb.witness.strip()

            # replace ' even in witness, because it can contain
            # arbitrary text
            wtns = wtns.replace('\'', '\\\'')
        else:
            wtns = None

        if rb.witness_output != '':
            # FIXME we should limit the wintess_output size, otherwise we
            # get get some performance issues
            rb.witness_output = rb.witness_output.strip()
            rb.witness_output = rb.witness_output.replace('\'', '\\\'')

        # compose the INSERT for the new result row
        q = """
        INSERT INTO task_results
        (tool_id, task_id, result, witness, is_correct, points, cpu_time,
         memory_usage, output, witness_output, run_id)
        VALUES('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', {8}, '{9}', '{10}')
        """.format(tool_id, task_id, result, wtns, ic,
                   self._rating_methods.points(ic, rb.result, wtns, cat_name), None2Zero(rb.time),
                   None2Zero(rb.memory), Empty2Null(rb.output), rb.witness_output, self.run_id)

        def _exception_handler(args, data):
            # db-layer failure callback; args[1] is the server error
            # message, data carries the query and its ids
            q, tool_id, task_id = data

            if (args[1].startswith('Duplicate entry')):

                if configs.configs['ignore-duplicates'] == 'yes':
                    satt_log('Already has this result for this tool, ignoring.')
                else:
                    err('Already has result of this benchmark for this tool.\n'
                        'It is only supported to have one result for each '
                        'benchmark and particular tool\n'
                        'If want ignore this behaviour use --ignore-duplicates.\n'
                        '(tool + version + params). You can delete the old result:\n'
                        '  $ ./db-cli \'DELETE from task_results WHERE tool_id={0}'
                        ' and task_id={1}\'\n'
                        'or you can delete all results for this tool:\n'
                        '  $ ./db-cli \'DELETE from tools WHERE id={0}\'\n'
                        .format(tool_id, task_id, tool_id))
            else:
                err('Failed querying db: {0}\n\n{1}'.format(args[1], q))

        self._db.query_with_exception_handler(q, _exception_handler,
                                              (q, tool_id, task_id))

        self._commit()

        return True
Beispiel #17
0
    def summary(self):
        """Print the final per-class statistics for the whole run."""
        incorrect_results_num = self._incorrect_false_results_num + self._incorrect_true_results_num

        separator = "-----------------------------------------------------------------------"
        satt_log(separator)
        satt_log(" -| Ran in total {0} benchmarks of which {1} were answered incorrectly"
                 .format(self._total_num, incorrect_results_num))
        satt_log(" -|")

        # one line per result class, in a fixed order
        counts = (
            (" -| Correct   TRUE  results: {0}", self._correct_true_results_num),
            (" -| Correct   FALSE results: {0}", self._correct_false_results_num),
            (" -| Incorrect TRUE  results: {0}", self._incorrect_true_results_num),
            (" -| Incorrect FALSE results: {0}", self._incorrect_false_results_num),
            (" -| UNKNOWN         results: {0}", self._unknown_results_num),
            (" -| ERROR           results: {0}", self._error_results_num),
        )
        for fmt, value in counts:
            satt_log(fmt.format(value))
        satt_log(" -|")

        if incorrect_results_num > 0:
            satt_log(" -| Incorrect results:")
            for b in self._incorrect_results:
                satt_log("   -- {0}".format(b))
        satt_log(separator)
Beispiel #18
0
    def done(self, rb):
        """Classify and print the result of one finished benchmark.

        Updates the per-class counters, picks a color for the log line
        and prints it with a timestamp/progress prefix.  Returns False
        when the benchmark produced no result at all (so it will be
        re-run), True otherwise.
        """
        self._total_num += 1
        mach = rb.task.getMachine()
        name = rb.name

        color = None
        result = rb.result
        # expected verdict inferred from the benchmark's file name
        correct = self._get_correct_result(os.path.basename(name), rb)

        if rb.result == 'ERROR' or rb.result is None:
            color = 'red_bg'
            self._error_results_num += 1
        elif rb.result == 'UNKNOWN':
            color = 'yellow'
            self._unknown_results_num += 1
        elif correct is None:
            # no expected verdict available -> cannot judge this result
            color = 'red_bg'
            rb.result = 'cannot_decide ({0})'.format(rb.result)
        elif rb.result == 'TRUE':
            # if there is not true or false is before true
            # in the name
            if correct != 'TRUE':
                color = 'red'
                self._incorrect_true_results_num += 1
                self._incorrect_results.append(rb.name)
            else:
                self._correct_true_results_num += 1
                color = 'green'
        elif rb.result == 'FALSE':
            if correct != 'FALSE':
                color = 'red'
                self._incorrect_false_results_num += 1
                self._incorrect_results.append(rb.name)
            else:
                color = 'green'
                self._correct_false_results_num += 1

            # a correct FALSE may carry a witness: show its status and
            # highlight witnesses that were not confirmed
            if color == 'green' and rb.witness != '':
                wtns = rb.witness.strip()
                result = '{0} ({1})'.format(rb.result, wtns)

                if wtns != 'confirmed':
                    color = 'green_yellow_bg'

        prefix = '[{0} | {1}%]  '.format(strftime('%H:%M:%S'), self._progress)
        satt_log('{0} - {1}: {2}'.format(rb.category,
                                         os.path.basename(name), result),
                                         color, prefix = prefix)

        if rb.result is None or rb.result == 'ERROR':
            # show the tool's output to help diagnose the failure
            satt_log('--- output <<{0}>>'.format(mach))
            out = rb.output.strip()

            if out:
                satt_log(out)
                satt_log('---')

        if rb.result is None:
            return False

        return True
Beispiel #19
0
    def done(self, rb):
        """Classify one finished benchmark result and log it to stdout.

        Bumps the matching counter, chooses the display color and prints
        a timestamped, progress-prefixed line.  Returns False only when
        the benchmark yielded no result (it will then run again).
        """
        self._total_num += 1
        mach = rb.task.getMachine()
        name = rb.name

        color = None
        result = rb.result
        # expected verdict derived from the benchmark file name
        correct = self._get_correct_result(os.path.basename(name), rb)

        if rb.result == 'ERROR' or rb.result is None:
            color = 'red_bg'
            self._error_results_num += 1
        elif rb.result == 'UNKNOWN':
            color = 'yellow'
            self._unknown_results_num += 1
        elif correct is None:
            # could not derive the expected verdict -> undecidable
            color = 'red_bg'
            rb.result = 'cannot_decide ({0})'.format(rb.result)
        elif rb.result == 'TRUE':
            # if there is not true or false is before true
            # in the name
            if correct != 'TRUE':
                color = 'red'
                self._incorrect_true_results_num += 1
                self._incorrect_results.append(rb.name)
            else:
                self._correct_true_results_num += 1
                color = 'green'
        elif rb.result == 'FALSE':
            if correct != 'FALSE':
                color = 'red'
                self._incorrect_false_results_num += 1
                self._incorrect_results.append(rb.name)
            else:
                color = 'green'
                self._correct_false_results_num += 1

            # for a correct FALSE, append the witness status and mark
            # unconfirmed witnesses with a different color
            if color == 'green' and rb.witness != '':
                wtns = rb.witness.strip()
                result = '{0} ({1})'.format(rb.result, wtns)

                if wtns != 'confirmed':
                    color = 'green_yellow_bg'

        prefix = '[{0} | {1}%]  '.format(strftime('%H:%M:%S'), self._progress)
        satt_log('{0} - {1}: {2}'.format(rb.category, os.path.basename(name),
                                         result),
                 color,
                 prefix=prefix)

        if rb.result is None or rb.result == 'ERROR':
            # dump the tool's output to aid debugging the failure
            satt_log('--- output <<{0}>>'.format(mach))
            out = rb.output.strip()

            if out:
                satt_log(out)
                satt_log('---')

        if rb.result is None:
            return False

        return True
Beispiel #20
0
    def summary(self):
        """Log the final statistics for the whole benchmark run."""
        incorrect_results_num = self._incorrect_false_results_num + self._incorrect_true_results_num

        rule = "-----------------------------------------------------------------------"
        satt_log(rule)
        satt_log(" -| Ran in total {0} benchmarks of which {1} were answered incorrectly"
                 .format(self._total_num, incorrect_results_num))
        satt_log(" -|")

        # fixed-order breakdown by result class
        for template, count in [
            (" -| Correct   TRUE  results: {0}", self._correct_true_results_num),
            (" -| Correct   FALSE results: {0}", self._correct_false_results_num),
            (" -| Incorrect TRUE  results: {0}", self._incorrect_true_results_num),
            (" -| Incorrect FALSE results: {0}", self._incorrect_false_results_num),
            (" -| UNKNOWN         results: {0}", self._unknown_results_num),
            (" -| ERROR           results: {0}", self._error_results_num),
        ]:
            satt_log(template.format(count))
        satt_log(" -|")

        if incorrect_results_num > 0:
            satt_log(" -| Incorrect results:")
            for b in self._incorrect_results:
                satt_log("   -- {0}".format(b))
        satt_log(rule)