Example 1
    def deserialize(f, nm='nm', objdump='objdump', propagateExceptions=False):
        f = f.name
        
        if os.path.getsize(f) == 0:
            # Empty file - exit early.
            return None

        try:
            data = cPerf.importPerf(f, nm, objdump)

            # Go through the data and convert counter values to percentages.
            for f in data['functions'].values():
                fc = f['counters']
                for l in f['data']:
                    for k,v in l[0].items():
                        l[0][k] = 100.0 * float(v) / fc[k]
                for k,v in fc.items():
                    fc[k] = 100.0 * v / data['counters'][k]

            return ProfileV1(data)

        except:
            if propagateExceptions:
                raise
            warning(traceback.format_exc())
            return None
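For orientation, a minimal usage sketch; the reader class name LinuxPerfProfile below is an assumption (the excerpt only shows the staticmethod), and the file name is a placeholder. The method expects an open file object, since it immediately reads f.name.

    # Hypothetical caller of deserialize(); names are illustrative only.
    with open('perf.data', 'rb') as fobj:
        profile = LinuxPerfProfile.deserialize(fobj, nm='nm', objdump='objdump')
    if profile is None:
        # Either the file was empty or a parse error was swallowed
        # (propagateExceptions defaults to False).
        print("no profile data")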
Example 2
    def submit_helper(self, parsed_args):
        """Submit the report to the server.  If no server
        was specified, use a local mock server.
        """

        result = None
        if parsed_args.submit_url:
            from lnt.util import ServerUtil
            for server in parsed_args.submit_url:
                self.log("submitting result to %r" % (server,))
                try:
                    result = ServerUtil.submitFile(
                        server, parsed_args.report_path, parsed_args.commit,
                        parsed_args.verbose)
                except (urllib2.HTTPError, urllib2.URLError) as e:
                    warning("submitting to {} failed with {}".format(server,
                                                                     e))
        else:
            # Simulate a submission to retrieve the results report.
            # Construct a temporary database and import the result.
            self.log("submitting result to dummy instance")

            import lnt.server.db.v4db
            import lnt.server.config
            db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                         lnt.server.config.Config.dummyInstance())
            result = lnt.util.ImportData.import_and_report(
                None, None, db, parsed_args.report_path, 'json', True)

        if result is None:
            fatal("results were not obtained from submission.")

        return result
Example 3
def async_wrapper(job, ts_args, func_args):
    """Setup test-suite in this subprocess and run something.
    
    Because of multipocessing, capture excptions and log messages,
    and return them.
    """
    global clean_db
    try:
        start_time = time.time()
        
        if not clean_db:
            lnt.server.db.v4db.V4DB.close_all_engines()
            clean_db = True
        
        note("Running async wrapper: {} ".format(job.__name__)+ str(os.getpid()))

        _v4db = current_app.old_config.get_database(ts_args['db'])
        # with contextlib.closing(_v4db) as db:
        ts = _v4db.testsuite[ts_args['tsname']]
        nothing = job(ts, **func_args)
        assert nothing is None
        end_time = time.time()
        delta = end_time - start_time
        msg = "Finished: {name} in {time:.2f}s ".format(
            name=job.__name__, time=delta)
        if delta < 100:
            note(msg)
        else:
            warning(msg)
    except Exception:
        # Put all exception text into an exception and raise that for our
        # parent process.
        error("Subprocess failed with:" + "".join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)
    sys.exit(0)
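A sketch of how such a wrapper is typically launched from the parent process, assuming the standard multiprocessing API and an application context that provides current_app.old_config; the job function and its arguments below are placeholders.

    import multiprocessing

    def my_job(ts, **kwargs):
        # Placeholder job; it must return None to satisfy the assert above.
        pass

    p = multiprocessing.Process(target=async_wrapper,
                                args=(my_job,
                                      {'db': 'default', 'tsname': 'nts'},
                                      {}))
    p.start()
    p.join()
    # p.exitcode is 0 on success and 1 if the wrapper logged a failure.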
Example 4
def load_rules():
    """
    Load available rules scripts from a directory.

    Rules are organized as:

    <current dir>/rules/
    <current dir>/rules/rule_.*.py
    ...
    """

    rule_script_rex = re.compile(r'^rule_(.*)\.py$')
    rule_scripts = {}

    rules_path = os.path.join(os.path.dirname(__file__), 'rules')
    for item in os.listdir(rules_path):
        # Ignore certain known non-scripts.
        if item in ('README.txt', '__init__.py') or item.endswith('.pyc'):
            continue

        # Ignore non-matching files.
        m = rule_script_rex.match(item)
        if m is None:
            warning("ignoring item {} in rule  directory: {}".format(
                item, rules_path))
            continue

        name = m.groups()[0]
        # Allow rules to be disabled by name
        if name.endswith("disabled"):
            continue

        rule_scripts[name] = os.path.join(rules_path, item)

    return rule_scripts
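A small illustration of consuming the returned mapping; the printing loop is only an assumed caller, while the rules/rule_*.py layout is the one described in the docstring.

    # Hypothetical caller: list the discovered rule scripts by name.
    rule_scripts = load_rules()
    for rule_name, script_path in sorted(rule_scripts.items()):
        print("rule %s -> %s" % (rule_name, script_path))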
Example 5
def get_source_version(path):
    """get_source_version(path) -> str or None

    Given the path to a revision controlled source tree, return a revision
    number, hash, etc. which identifies the source version.
    """

    if os.path.exists(os.path.join(path, ".svn")):
        return commands.capture(['/bin/sh', '-c',
                                 'cd "%s" && svnversion' % path]).strip()
    elif os.path.exists(os.path.join(path, ".git", "svn")):
        # git-svn is pitifully slow, extract the revision manually.
        res = commands.capture(['/bin/sh', '-c',
                                ('cd "%s" && '
                                 'git log -1') % path]
                               ).strip()
        last_line = res.split("\n")[-1]
        m = _git_svn_id_re.match(last_line)
        if not m:
            commands.warning("unable to understand git svn log: %r" % res)
            return
        return m.group(1)
    elif os.path.exists(os.path.join(path, ".git")):
        return commands.capture(['/bin/sh', '-c',
                                 ('cd "%s" && '
                                  'git log -1 --pretty=format:%%H') % path]
                                ).strip()
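A usage sketch; the checkout path is a placeholder, and the possible return values (svn revision, git-svn revision, git SHA, or None for an unrecognized tree) follow directly from the branches above.

    # Hypothetical caller: identify the revision of a source checkout.
    rev = get_source_version('/path/to/llvm')
    if rev is None:
        print("not a recognized svn/git-svn/git checkout")
    else:
        print("source version: " + rev)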
Example 6
File: rcs.py Project: cg14823/cbnt
def get_source_version(path):
    """get_source_version(path) -> str or None

    Given the path to a revision controlled source tree, return a revision
    number, hash, etc. which identifies the source version.
    """

    if os.path.exists(os.path.join(path, ".svn")):
        return commands.capture(
            ['/bin/sh', '-c', 'cd "%s" && svnversion' % path]).strip()
    elif os.path.exists(os.path.join(path, ".git", "svn")):
        # git-svn is pitifully slow, extract the revision manually.
        res = commands.capture(
            ['/bin/sh', '-c', ('cd "%s" && '
                               'git log -1') % path]).strip()
        last_line = res.split("\n")[-1]
        m = _git_svn_id_re.match(last_line)
        if not m:
            commands.warning("unable to understand git svn log: %r" % res)
            return
        return m.group(1)
    elif os.path.exists(os.path.join(path, ".git")):
        return commands.capture([
            '/bin/sh', '-c', ('cd "%s" && '
                              'git log -1 --pretty=format:%%H') % path
        ]).strip()
Example 7
File: perf.py Project: cg14823/cbnt
    def deserialize(f, nm='nm', objdump='objdump', propagateExceptions=False):
        f = f.name

        if os.path.getsize(f) == 0:
            # Empty file - exit early.
            return None

        try:
            data = cPerf.importPerf(f, nm, objdump)

            # Go through the data and convert counter values to percentages.
            for f in data['functions'].values():
                fc = f['counters']
                for l in f['data']:
                    for k, v in l[0].items():
                        l[0][k] = 100.0 * float(v) / fc[k]
                for k, v in fc.items():
                    fc[k] = 100.0 * v / data['counters'][k]

            return ProfileV1(data)

        except:
            if propagateExceptions:
                raise
            warning(traceback.format_exc())
            return None
Example 8
def start_browser(url, debug=False):
    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        note('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        warning('unable to detect that server started')
                
    if debug:
        note('opening webbrowser...')
    webbrowser.open(url)
Example 9
File: main.py Project: efcs/lnt
def action_runtest(name, args):
    """run a builtin test application"""

    # Runtest accepting options is deprecated, but let's not break the
    # world, so collect them anyway and pass them on.
    parser = OptionParser("%s test-name [options]" % name)
    parser.disable_interspersed_args()
    parser.add_option("", "--submit", dest="submit", type=str, default=None)
    parser.add_option("", "--commit", dest="commit", type=str, default=None)
    parser.add_option("", "--output", dest="output", type=str, default=None)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true")

    (deprecated_opts, args) = parser.parse_args(args)
    if len(args) < 1:
        parser.error("incorrect number of argments")

    test_name, args = args[0], args[1:]
    # Rebuild the deprecated arguments.
    for key, val in vars(deprecated_opts).iteritems():
        if val is not None:
            if isinstance(val, str):
                args.insert(0, val)
            args.insert(0, "--" + key)

            warning("--{} should be passed directly to the"
                        " test suite.".format(key))

    import lnt.tests
    try:
        test_instance = lnt.tests.get_test_instance(test_name)
    except KeyError:
        parser.error('invalid test name %r' % test_name)

    test_instance.run_test('%s %s' % (name, test_name), args)
Example 10
File: main.py Project: efcs/lnt
def action_submit(name, args):
    """submit a test report to the server"""

    parser = OptionParser("%s [options] <url> <file>+" % name)
    parser.add_option("", "--commit", dest="commit", type=int,
                      help=("whether the result should be committed "
                            "[%default]"),
                      default=True)
    parser.add_option("-v", "--verbose", dest="verbose",
                      help="show verbose test results",
                      action="store_true", default=False)

    (opts, args) = parser.parse_args(args)
    if len(args) < 2:
        parser.error("incorrect number of argments")

    if not opts.commit:
        warning("submit called with --commit=0, your results will not be saved"
                " at the server.")

    from lnt.util import ServerUtil
    files = ServerUtil.submitFiles(args[0], args[1:],
                                   opts.commit, opts.verbose)
    if opts.verbose:
        for f in files:
            lnt.util.ImportData.print_report_result(f, sys.stdout,
                                                    sys.stderr, True)
Example 11
def action_submit(name, args):
    """submit a test report to the server"""

    parser = OptionParser("%s [options] <url> <file>+" % name)
    parser.add_option("",
                      "--commit",
                      dest="commit",
                      type=int,
                      help=("whether the result should be committed "
                            "[%default]"),
                      default=True)
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      help="show verbose test results",
                      action="store_true",
                      default=False)

    (opts, args) = parser.parse_args(args)
    if len(args) < 2:
        parser.error("incorrect number of argments")

    if not opts.commit:
        warning("submit called with --commit=0, your results will not be saved"
                " at the server.")

    from lnt.util import ServerUtil
    files = ServerUtil.submitFiles(args[0], args[1:], opts.commit,
                                   opts.verbose)
    if opts.verbose:
        for f in files:
            lnt.util.ImportData.print_report_result(f, sys.stdout, sys.stderr,
                                                    True)
Example 12
def start_browser(url, debug=False):
    def url_is_up(url):
        try:
            o = urllib.urlopen(url)
        except IOError:
            return False
        o.close()
        return True

    # Wait for server to start...
    if debug:
        note('waiting for server to start...')
    for i in range(10000):
        if url_is_up(url):
            break
        if debug:
            sys.stderr.write('.')
            sys.stderr.flush()
        time.sleep(.01)
    else:
        warning('unable to detect that server started')
                
    if debug:
        note('opening webbrowser...')
    webbrowser.open(url)
Example 13
def async_wrapper(job, ts_args, func_args):
    """Setup test-suite in this subprocess and run something.
    
    Because of multipocessing, capture excptions and log messages,
    and return them.
    """
    global clean_db
    try:
        start_time = time.time()
        
        if not clean_db:
            lnt.server.db.v4db.V4DB.close_all_engines()
            clean_db = True
        
        note("Running async wrapper: {} ".format(job.__name__)+ str(os.getpid()))

        _v4db = current_app.old_config.get_database(ts_args['db'])
        #with contextlib.closing(_v4db) as db:
        ts = _v4db.testsuite[ts_args['tsname']]
        nothing = job(ts, **func_args)
        assert nothing is None
        end_time = time.time()
        delta = end_time - start_time
        msg = "Finished: {name} in {time:.2f}s ".format(
            name=job.__name__, time=delta)
        if delta < 100:
            note(msg)
        else:
            warning(msg)
    except:
        # Put all exception text into an exception and raise that for our
        # parent process.
        error("Subprocess failed with:" + "".join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)
    sys.exit(0)
Example 14
    def submit_helper(self, parsed_args):
        """Submit the report to the server.  If no server
        was specified, use a local mock server.
        """

        result = None
        if parsed_args.submit_url:
            from lnt.util import ServerUtil
            for server in parsed_args.submit_url:
                self.log("submitting result to %r" % (server, ))
                try:
                    result = ServerUtil.submitFile(server,
                                                   parsed_args.report_path,
                                                   parsed_args.commit,
                                                   parsed_args.verbose)
                except (urllib2.HTTPError, urllib2.URLError) as e:
                    warning("submitting to {} failed with {}".format(
                        server, e))
        else:
            # Simulate a submission to retrieve the results report.
            # Construct a temporary database and import the result.
            self.log("submitting result to dummy instance")

            import lnt.server.db.v4db
            import lnt.server.config
            db = lnt.server.db.v4db.V4DB(
                "sqlite:///:memory:", lnt.server.config.Config.dummyInstance())
            result = lnt.util.ImportData.import_and_report(
                None, None, db, parsed_args.report_path, 'json', True)

        if result is None:
            fatal("results were not obtained from submission.")

        return result
Example 15
File: app.py Project: cg14823/cbnt
 def close(self):
     t = self.elapsed_time()
     if t > 10:
         warning("Request {} took {}s".format(self.url, t))
     db = getattr(self, 'db', None)
     if db is not None:
         db.close()
     return super(Request, self).close()
Example 16
def _importProfile(name_filename):
    name, filename = name_filename

    if not os.path.exists(filename):
        warning('Profile %s does not exist' % filename)
        return None

    pf = lnt.testing.profile.profile.Profile.fromFile(filename)
    if not pf:
        return None

    pf.upgrade()
    profilefile = pf.render()
    return lnt.testing.TestSamples(name + '.profile', [profilefile], {}, str)
Example 17
def populate_blacklist():
    global ignored
    ignored = []
    try:
        path = current_app.old_config.blacklist
    except RuntimeError:
        path = os.path.join(os.path.dirname(sys.argv[0]), "blacklist")

    if path and os.path.isfile(path):
        note("Loading blacklist file: {}".format(path))
        with open(path, 'r') as f:
            for l in f.readlines():
                ignored.append(re.compile(l.strip()))
    else:
        warning("Ignoring blacklist file: {}".format(path))
Example 18
 def _run_test(self):
     for iteration in xrange(self.iterations):
         for output_file in self.output_files:
             try:
                 os.remove(output_file)
             except (IOError, OSError):
                 pass
         try:
             subprocess.check_call(self.command, cwd=os.getcwd(),
                                   shell=True)
         except subprocess.CalledProcessError:
             warning("failed to run command: '{}'".format(self.command))
         else:
             self.output.extend([xmltodict.parse(open(output_file, 'r'))
                                 for output_file in self.output_files])
Example 19
    def _generate_run_info(self, tag, result_type, run_order, parent_commit):
        env_vars = {
            'Build Number': 'BUILD_NUMBER',
            'Owner': 'GERRIT_CHANGE_OWNER_NAME',
            'Gerrit URL': 'GERRIT_CHANGE_URL',
            'Jenkins URL': 'BUILD_URL'
        }

        run_info = {
            key: os.getenv(env_var)
            for key, env_var in env_vars.iteritems() if os.getenv(env_var)
        }

        try:
            commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
            if commit_message:
                commit_message = base64.b64decode(commit_message)
        except Exception:
            warning('Unable to decode commit message "{}", skipping'.format(
                commit_message))
        else:
            run_info['Commit Message'] = commit_message

        git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
        if not git_sha:
            fatal("unable to determine git SHA for result, exiting.")

        if run_order:
            run_info['run_order'] = str(run_order)
        else:
            note("run order not provided, will use server-side auto-generated "
                 "run order")

        run_info.update({
            'git_sha': git_sha,
            't': str(calendar.timegm(time.gmtime())),
            'tag': tag
        })

        if result_type == 'cv':
            if not parent_commit:
                parent_commit = self._get_parent_commit()

            run_info.update({'parent_commit': parent_commit})

        return run_info
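The run info is assembled entirely from Jenkins/Gerrit environment variables, so a caller mainly needs the right environment. A minimal sketch with placeholder values; the method call itself is shown commented out because it needs the enclosing test object.

    import base64
    import os

    os.environ.update({
        'BUILD_NUMBER': '1234',
        'GERRIT_CHANGE_OWNER_NAME': 'Jane Doe',
        'GERRIT_CHANGE_URL': 'https://gerrit.example.com/c/42',
        'BUILD_URL': 'https://jenkins.example.com/job/perf/1234/',
        # Required: without it the method calls fatal() and exits.
        'GERRIT_PATCHSET_REVISION': '0123456789abcdef0123456789abcdef01234567',
        # The commit message is expected to be base64-encoded.
        'GERRIT_CHANGE_COMMIT_MESSAGE': base64.b64encode('Fix the frobnicator'),
    })
    # run_info = self._generate_run_info('nts', 'cv', run_order=None,
    #                                    parent_commit=None)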
Example 20
def _importProfile(name_filename):
    name, filename = name_filename

    if not os.path.exists(filename):
        warning('Profile %s does not exist' % filename)
        return None
    
    pf = lnt.testing.profile.profile.Profile.fromFile(filename)
    if not pf:
        return None

    pf.upgrade()
    profilefile = pf.render()
    return lnt.testing.TestSamples(name + '.profile',
                                   [profilefile],
                                   {},
                                   str)
Example 21
def action_runtest(name, args):
    """run a builtin test application"""

    # Runtest accepting options is deprecated, but let's not break the
    # world, so collect them anyway and pass them on.
    parser = OptionParser("%s test-name [options]" % name)
    parser.disable_interspersed_args()
    parser.add_option("", "--submit", dest="submit", type=str, default=None)
    parser.add_option("", "--commit", dest="commit", type=str, default=None)
    parser.add_option("", "--output", dest="output", type=str, default=None)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true")

    (deprecated_opts, args) = parser.parse_args(args)
    if len(args) < 1:
        parser.error("incorrect number of argments")

    test_name, args = args[0], args[1:]
    # Rebuild the deprecated arguments.
    for key, val in vars(deprecated_opts).iteritems():
        if val is not None:
            if isinstance(val, str):
                args.insert(0, val)
            args.insert(0, "--" + key)

            warning("--{} should be passed directly to the"
                        " test suite.".format(key))

    logger = logging.getLogger(LOGGER_NAME)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
    import lnt.tests
    try:
        test_instance = lnt.tests.get_test_instance(test_name)
    except KeyError:
        parser.error('invalid test name %r' % test_name)

    server_results = test_instance.run_test('%s %s' % (name, test_name), args)
    if server_results.get('result_url'):
        print "Results available at:", server_results['result_url']
    else:
        print "Results available at: no URL available"
Example 22
def action_runtest(name, args):
    """run a builtin test application"""

    # Runtest accepting options is deprecated, but let's not break the
    # world, so collect them anyway and pass them on.
    parser = OptionParser("%s test-name [options]" % name)
    parser.disable_interspersed_args()
    parser.add_option("", "--submit", dest="submit", type=str, default=None)
    parser.add_option("", "--commit", dest="commit", type=str, default=None)
    parser.add_option("", "--output", dest="output", type=str, default=None)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true")

    (deprecated_opts, args) = parser.parse_args(args)
    if len(args) < 1:
        parser.error("incorrect number of argments")

    test_name, args = args[0], args[1:]
    # Rebuild the deprecated arguments.
    for key, val in vars(deprecated_opts).iteritems():
        if val is not None:
            if isinstance(val, str):
                args.insert(0, val)
            args.insert(0, "--" + key)

            warning("--{} should be passed directly to the"
                    " test suite.".format(key))

    logger = logging.getLogger(LOGGER_NAME)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s: %(message)s',
                          datefmt='%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
    import lnt.tests
    try:
        test_instance = lnt.tests.get_test_instance(test_name)
    except KeyError:
        parser.error('invalid test name %r' % test_name)

    server_results = test_instance.run_test('%s %s' % (name, test_name), args)
    if server_results.get('result_url'):
        print "Results available at:", server_results['result_url']
    else:
        print "Results available at: no URL available"
Example 23
 def _run_test(self):
     for iteration in xrange(self.iterations):
         for output_file in self.output_files:
             try:
                 os.remove(output_file)
             except (IOError, OSError):
                 pass
         try:
             subprocess.check_call(self.command,
                                   cwd=os.getcwd(),
                                   shell=True)
         except subprocess.CalledProcessError:
             warning("failed to run command: '{}'".format(self.command))
         else:
             self.output.extend([
                 xmltodict.parse(open(output_file, 'r'))
                 for output_file in self.output_files
             ])
Example 24
    def _generate_run_info(self, tag, result_type, run_order, parent_commit):
        env_vars = {'Build Number': 'BUILD_NUMBER',
                    'Owner': 'GERRIT_CHANGE_OWNER_NAME',
                    'Gerrit URL': 'GERRIT_CHANGE_URL',
                    'Jenkins URL': 'BUILD_URL'}

        run_info = {key: os.getenv(env_var)
                    for key, env_var in env_vars.iteritems()
                    if os.getenv(env_var)}

        try:
            commit_message = os.getenv('GERRIT_CHANGE_COMMIT_MESSAGE')
            if commit_message:
                commit_message = base64.b64decode(commit_message)
        except Exception:
            warning('Unable to decode commit message "{}", skipping'.format(
                commit_message))
        else:
            run_info['Commit Message'] = commit_message

        git_sha = os.getenv('GERRIT_PATCHSET_REVISION')
        if not git_sha:
            fatal("unable to determine git SHA for result, exiting.")

        if run_order:
            run_info['run_order'] = str(run_order)
        else:
            note("run order not provided, will use server-side auto-generated "
                 "run order")

        run_info.update({'git_sha': git_sha,
                         't': str(calendar.timegm(time.gmtime())),
                         'tag': tag})

        if result_type == 'cv':
            if not parent_commit:
                parent_commit = self._get_parent_commit()

            run_info.update({'parent_commit': parent_commit})

        return run_info
Example 25
def load_rules():
    """
    Load available rules scripts from a directory.

    Rules are organized as:

    <current dir>/rules/
    <current dir>/rules/rule_.*.py
    ...
    """

    rule_script_rex = re.compile(
        r'^rule_(.*)\.py$')
    rule_scripts = {}

    rules_path = os.path.join(os.path.dirname(__file__),
                                              'rules')
    for item in os.listdir(rules_path):
        # Ignore certain known non-scripts.
        if item in ('README.txt', '__init__.py') or item.endswith('.pyc'):
            continue

        # Ignore non-matching files.
        m = rule_script_rex.match(item)
        if m is None:
            warning("ignoring item {} in rule  directory: {}".format(item, rules_path))
            continue

        name = m.groups()[0]
        # Allow rules to be disabled by name
        if name.endswith("disabled"):
            continue
            
        rule_scripts[name] = os.path.join(rules_path, item)

    return rule_scripts
Example 26
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }
        
        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
            
        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k,v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k,v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append( (name, v) )
                        continue
                    
                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(name + '.' + LIT_METRIC_TO_LNT[k],
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [self._get_lnt_code(test_data['code'])],
                                            test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                warning('Profiles had not completed importing after %s seconds.'
                        % TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts'
        }
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order
        
        machine_info = {
        }
        
        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
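One detail worth calling out: the --single-result-predicate string is evaluated with eval() against a small environment containing 'status' plus each lit metric under both its raw name and its LNT alias. A hedged illustration of what that evaluation amounts to; the metric values and the predicate string are invented.

    # Hypothetical predicate evaluation for a single test result.
    env = {'status': True,
           'exec_time': 1.75, 'exec': 1.75,        # raw lit name and LNT alias
           'compile_time': 0.40, 'compile': 0.40}
    predicate = "status and exec_time < 2.0"       # example --single-result-predicate
    print(eval(predicate, {}, env))                # -> True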
Example 27
def regenerate_fieldchanges_for_run(ts, run_id):
    """Regenerate the set of FieldChange objects for the given run.
    """
    # Allow for potentially a few different runs, previous_runs, next_runs
    # all with the same order_id which we will aggregate together to make
    # our comparison result.
    run = ts.getRun(run_id)
    runs = ts.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
    regressions = ts.query(ts.Regression).all()[::-1]
    previous_runs = ts.get_previous_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
    next_runs = ts.get_next_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    if next_runs != []:
        end_order = next_runs[-1].order
    else:
        end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    # When the same rev is submitted many times, the database accesses here
    # can be huge, and it is almost always an error to have the same rev
    # be used in so many runs.
    run_size = len(runs_to_load)
    if run_size > 50:
        warning("Generating field changes for {} runs."
                "That will be very slow.".format(run_size))
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)

    # Only store fieldchanges for "metric" samples like execution time;
    # not for fields with other data, e.g. hash of a binary
    for field in list(ts.Sample.get_metric_fields()):
        for test_id in runinfo.test_ids:
            f = None
            result = runinfo.get_comparison_result(
                runs, previous_runs, test_id, field,
                ts.Sample.get_hash_of_binary_field())
            # Try and find a matching FC and update, else create one.
            try:
                f = ts.query(ts.FieldChange) \
                    .filter(ts.FieldChange.start_order == start_order) \
                    .filter(ts.FieldChange.end_order == end_order) \
                    .filter(ts.FieldChange.test_id == test_id) \
                    .filter(ts.FieldChange.machine == run.machine) \
                    .filter(ts.FieldChange.field == field) \
                    .one()
            except sqlalchemy.orm.exc.NoResultFound:
                f = None

            if not result.is_result_performance_change() and f:
                # With more data, it's not a regression. Kill it!
                note("Removing field change: {}".format(f.id))
                delete_fieldchange(ts, f)
                continue

            if result.is_result_performance_change() and not f:
                test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
                f = ts.FieldChange(start_order=start_order,
                                   end_order=run.order,
                                   machine=run.machine,
                                   test=test,
                                   field=field)
                ts.add(f)
                ts.commit()
                found, new_reg = identify_related_changes(ts, regressions, f)
                if found:
                    regressions.append(new_reg)
                    note("Found field change: {}".format(run.machine))

            # Always update FCs with new values.
            if f:
                f.old_value = result.previous
                f.new_value = result.current
                f.run = run
    ts.commit()
    rules.post_submission_hooks(ts, run_id)
Example 28
    def fromfile(file, path):
        # I love valgrind, but this is really a horribly lame data format. Oh
        # well.

        it = iter(file)

        # Read the header.
        description_lines = []
        command = None
        events = None
        positions = initial_positions = ['line']
        for ln in it:
            # If there is no colon in the line, we have reached the end of the
            # header.
            if ':' not in ln:
                break

            key, value = ln.split(':', 1)
            if key == 'desc':
                description_lines.append(value.strip())
            elif key == 'cmd':
                if command is not None:
                    warning("unexpected multiple 'cmd' keys in %r" % (path, ))
                command = value.strip()
            elif key == 'events':
                if events is not None:
                    warning("unexpected multiple 'events' keys in %r" %
                            (path, ))
                events = value.split()
            elif key == 'positions':
                if positions is not initial_positions:
                    warning("unexpected multiple 'positions' keys in %r" %
                            (path, ))
                positions = value.split()
            else:
                warning("found unknown key %r in %r" % (key, path))

        # Validate that required fields were present.
        if events is None:
            raise CalltreeParseError("missing required 'events' key in header")

        # Construct an instance.
        data = CalltreeData(events, "\n".join(description_lines), command)

        # Read the file data.
        num_samples = len(positions) + len(events)
        current_file = None
        current_function = None
        summary_samples = None
        for ln in it:
            # Check if this is the closing summary line.
            if ln.startswith('summary'):
                key, value = ln.split(':', 1)
                summary_samples = map(int, value.split())
                break

            # Check if this is an update to the current file or function.
            if ln.startswith('fl='):
                current_file = ln[3:-1]
            elif ln.startswith('fn='):
                current_function = ln[3:-1]
            else:
                # Otherwise, this is a data record.
                samples = map(int, ln.split())
                if len(samples) != num_samples:
                    raise CalltreeParseError(
                        "invalid record line, unexpected sample count")
                data.records.append((current_file, current_function, samples))

        # Validate that there are no more remaining records.
        for ln in it:
            raise CalltreeParseError("unexpected line in footer: %r" % (ln, ))

        # Validate that the summary line was present.
        if summary_samples is None:
            raise CalltreeParseError(
                "missing required 'summary' key in footer")

        data.summary = summary_samples

        return data
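For orientation, a sketch of the callgrind-style input this parser accepts, reconstructed only from the code above: 'key: value' header lines ending at the first line with no colon, then fl=/fn= markers and whitespace-separated sample records, closed by a 'summary:' line. The concrete numbers and the CalltreeData.fromfile attribution in the comment are assumptions.

    # Hypothetical input; each record carries len(positions) + len(events) integers.
    sample = iter([
        "desc: demo profile\n",
        "cmd: ./a.out\n",
        "positions: line\n",
        "events: Ir Dr\n",
        "\n",                       # first line without ':' ends the header
        "fl=main.c\n",
        "fn=main\n",
        "12 100 40\n",              # line, Ir, Dr
        "summary: 100 40\n",
    ])
    # data = CalltreeData.fromfile(sample, 'demo.out')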
Example 29
def regenerate_fieldchanges_for_run(ts, run_id):
    """Regenerate the set of FieldChange objects for the given run.
    """
    # Allow for potentially a few different runs, previous_runs, next_runs
    # all with the same order_id which we will aggregate together to make
    # our comparison result.
    run = ts.getRun(run_id)
    runs = ts.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
    regressions = ts.query(ts.Regression).all()[::-1]
    previous_runs = ts.get_previous_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
    next_runs = ts.get_next_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)

    # Find our start/end order.
    if previous_runs != []:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    if next_runs != []:
        end_order = next_runs[-1].order
    else:
        end_order = run.order

    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]

    # When the same rev is submitted many times, the database accesses here
    # can be huge, and it is almost always an error to have the same rev
    # be used in so many runs.
    run_size = len(runs_to_load)
    if run_size > 50:
        warning("Generating field changes for {} runs."
                "That will be very slow.".format(run_size))
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)

    # Only store fieldchanges for "metric" samples like execution time;
    # not for fields with other data, e.g. hash of a binary
    for field in list(ts.Sample.get_metric_fields()):
        for test_id in runinfo.test_ids:
            f = None
            result = runinfo.get_comparison_result(
                runs, previous_runs, test_id, field,
                ts.Sample.get_hash_of_binary_field())
            # Try and find a matching FC and update, else create one.
            try:
                f = ts.query(ts.FieldChange) \
                    .filter(ts.FieldChange.start_order == start_order) \
                    .filter(ts.FieldChange.end_order == end_order) \
                    .filter(ts.FieldChange.test_id == test_id) \
                    .filter(ts.FieldChange.machine == run.machine) \
                    .filter(ts.FieldChange.field == field) \
                    .one()
            except sqlalchemy.orm.exc.NoResultFound:
                f = None

            if not result.is_result_performance_change() and f:
                # With more data, it's not a regression. Kill it!
                note("Removing field change: {}".format(f.id))
                delete_fieldchange(ts, f)
                continue

            if result.is_result_performance_change() and not f:
                test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
                f = ts.FieldChange(start_order=start_order,
                                   end_order=run.order,
                                   machine=run.machine,
                                   test=test,
                                   field=field)
                # Check the rules to see if this change matters.
                if rules.is_useful_change(ts, f):
                    ts.add(f)
                    ts.commit()
                    try:
                        found, new_reg = identify_related_changes(
                            ts, regressions, f)
                    except ObjectDeletedError:
                        # This can happen from time to time.
                        # So, lets retry once.
                        regressions = ts.query(ts.Regression).all()[::-1]
                        found, new_reg = identify_related_changes(
                            ts, regressions, f)

                    if found:
                        regressions.append(new_reg)
                        note("Found field change: {}".format(run.machine))

            # Always update FCs with new values.
            if f:
                f.old_value = result.previous
                f.new_value = result.current
                f.run = run
    ts.commit()
    rules.post_submission_hooks(ts, regressions)
Example 30
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report contains information that is useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        note(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now lets do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        note(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        note(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/time-report.txt")

        # Now lets do -llvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        note(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        note(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/stats-report.txt")

        #  Collect Profile:
        if "Darwin" in platform.platform():
            # For testing and power users, lets allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            if " " in sudo:
                sudo = sudo.split(" ")
            if not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print ' '.join(cmd_iprofiler)

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            warning("Tests may fail because of iprofiler's output.")
            # The dtps file will be saved as root, make it so
            # that we can read it.
            chmod = sudo + ["chown", "-R", getpass.getuser(),
                            short_name + ".dtps"]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            note(profile + "-->" + report_path)
        else:
            warning("Skipping execution profiling because this is not Darwin.")
        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):

            def get(self, url):
                return report_path

            def __getitem__(self, key):
                return report_path

        return DontSubmitResults()
Example 31
    def fromfile(file, path):
        # I love valgrind, but this is really a horribly lame data format. Oh
        # well.

        it = iter(file)

        # Read the header.
        description_lines = []
        command = None
        events = None
        positions = initial_positions = ['line']
        for ln in it:
            # If there is no colon in the line, we have reached the end of the
            # header.
            if ':' not in ln:
                break

            key,value = ln.split(':', 1)
            if key == 'desc':
                description_lines.append(value.strip())
            elif key == 'cmd':
                if command is not None:
                    warning("unexpected multiple 'cmd' keys in %r" % (path,))
                command = value.strip()
            elif key == 'events':
                if events is not None:
                    warning("unexpected multiple 'events' keys in %r" % (path,))
                events = value.split()
            elif key == 'positions':
                if positions is not initial_positions:
                    warning("unexpected multiple 'positions' keys in %r" % (
                            path,))
                positions = value.split()
            else:
                warning("found unknown key %r in %r" % (key, path))

        # Validate that required fields were present.
        if events is None:
            raise CalltreeParseError("missing required 'events' key in header")

        # Construct an instance.
        data = CalltreeData(events, "\n".join(description_lines), command)

        # Read the file data.
        num_samples = len(positions) + len(events)
        current_file = None
        current_function = None
        summary_samples = None
        for ln in it:
            # Check if this is the closing summary line.
            if ln.startswith('summary'):
                key,value = ln.split(':', 1)
                summary_samples = map(int, value.split())
                break

            # Check if this is an update to the current file or function.
            if ln.startswith('fl='):
                current_file = ln[3:-1]
            elif ln.startswith('fn='):
                current_function = ln[3:-1]
            else:
                # Otherwise, this is a data record.
                samples = map(int, ln.split())
                if len(samples) != num_samples:
                    raise CalltreeParseError(
                        "invalid record line, unexpected sample count")
                data.records.append((current_file,
                                     current_function,
                                     samples))

        # Validate that there are no more remaining records.
        for ln in it:
            raise CalltreeParseError("unexpected line in footer: %r" % (ln,))

        # Validate that the summary line was present.
        if summary_samples is None:
            raise CalltreeParseError("missing required 'summary' key in footer")

        data.summary = summary_samples

        return data
Example 32
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []

        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[
                            k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(
                            name + '.' + LIT_METRIC_TO_LNT[k], [v], test_info,
                            LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(
                        name + '.exec.status',
                        [self._get_lnt_code(test_data['code'])], test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                warning(
                    'Profiles had not completed importing after %s seconds.' %
                    TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {'tag': 'nts'}
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
Example 33
def action_updatedb(name, args):
    """modify a database"""

    from optparse import OptionParser, OptionGroup

    parser = OptionParser("%s [options] <instance> <file>+" % name)
    parser.add_option("",
                      "--database",
                      dest="database",
                      default="default",
                      help="database to modify [%default]")
    parser.add_option("",
                      "--testsuite",
                      dest="testsuite",
                      help="testsuite to modify")
    parser.add_option("", "--commit", dest="commit", type=int, default=False)
    parser.add_option("",
                      "--show-sql",
                      dest="show_sql",
                      action="store_true",
                      default=False)
    parser.add_option("",
                      "--delete-machine",
                      dest="delete_machines",
                      action="append",
                      default=[])
    parser.add_option("",
                      "--delete-run",
                      dest="delete_runs",
                      action="append",
                      default=[],
                      type=int)
    (opts, args) = parser.parse_args(args)

    if len(args) != 1:
        parser.error("invalid number of arguments")

    if opts.testsuite is None:
        parser.error("--testsuite is required")

    path, = args

    # Load the instance.
    instance = lnt.server.instance.Instance.frompath(path)

    # Get the database and test suite.
    with contextlib.closing(
            instance.get_database(opts.database, echo=opts.show_sql)) as db:
        ts = db.testsuite[opts.testsuite]

        # Compute a list of all the runs to delete.
        runs_to_delete = list(opts.delete_runs)
        if opts.delete_machines:
            runs_to_delete.extend(
                id
                for id, in ts.query(ts.Run.id).\
                    join(ts.Machine).\
                    filter(ts.Machine.name.in_(opts.delete_machines)))

        # Delete all samples associated with those runs.
        ts.query(ts.Sample).\
            filter(ts.Sample.run_id.in_(runs_to_delete)).\
            delete(synchronize_session=False)

        # Delete all those runs.
        ts.query(ts.Run).\
            filter(ts.Run.id.in_(runs_to_delete)).\
            delete(synchronize_session=False)

        # Delete the machines.
        for name in opts.delete_machines:
            # Delete all FieldChanges associated with this machine.
            ids = ts.query(ts.FieldChange.id).\
                join(ts.Machine).filter(ts.Machine.name == name).all()
            for i in ids:
                ts.query(ts.FieldChange).filter(ts.FieldChange.id == i[0]).\
                    delete()

            num_deletes = ts.query(ts.Machine).filter_by(name=name).delete()
            if num_deletes == 0:
                warning("unable to find machine named: %r" % name)

        if opts.commit:
            db.commit()
        else:
            db.rollback()
Example 34
def action_updatedb(name, args):
    """modify a database"""

    from optparse import OptionParser, OptionGroup

    parser = OptionParser("%s [options] <instance> <file>+"%name)
    parser.add_option("", "--database", dest="database", default="default",
                      help="database to modify [%default]")
    parser.add_option("", "--testsuite", dest="testsuite",
                      help="testsuite to modify")
    parser.add_option("", "--commit", dest="commit", type=int,
                      default=False)
    parser.add_option("", "--show-sql", dest="show_sql", action="store_true",
                      default=False)
    parser.add_option("", "--delete-machine", dest="delete_machines",
                      action="append", default=[])
    parser.add_option("", "--delete-run", dest="delete_runs",
                      action="append", default=[], type=int)
    (opts, args) = parser.parse_args(args)

    if len(args) != 1:
        parser.error("invalid number of arguments")

    if opts.testsuite is None:
        parser.error("--testsuite is required")

    path, = args

    # Load the instance.
    instance = lnt.server.instance.Instance.frompath(path)

    # Get the database and test suite.
    with contextlib.closing(instance.get_database(opts.database,
                                                  echo=opts.show_sql)) as db:
        ts = db.testsuite[opts.testsuite]

        # Compute a list of all the runs to delete.
        runs_to_delete = list(opts.delete_runs)
        if opts.delete_machines:
            runs_to_delete.extend(
                id
                for id, in ts.query(ts.Run.id).\
                    join(ts.Machine).\
                    filter(ts.Machine.name.in_(opts.delete_machines)))

        # Delete all samples associated with those runs.
        ts.query(ts.Sample).\
            filter(ts.Sample.run_id.in_(runs_to_delete)).\
            delete(synchronize_session=False)

        # Delete all those runs.
        ts.query(ts.Run).\
            filter(ts.Run.id.in_(runs_to_delete)).\
            delete(synchronize_session=False)

        # Delete the machines.
        for name in opts.delete_machines:
            # Delete all FieldChanges associated with this machine.
            ids = ts.query(ts.FieldChange.id).\
                join(ts.Machine).filter(ts.Machine.name == name).all()
            for i in ids:
                ts.query(ts.FieldChange).filter(ts.FieldChange.id == i[0]).\
                    delete()

            num_deletes = ts.query(ts.Machine).filter_by(name=name).delete()
            if num_deletes == 0:
                warning("unable to find machine named: %r" % name)

        if opts.commit:
            db.commit()
        else:
            db.rollback()
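A hedged sketch of driving this action programmatically, based only on the option parser above; the instance path and machine name are placeholders.

    # Hypothetical invocation: drop one machine's runs from the 'nts' suite.
    action_updatedb('updatedb',
                    ['--testsuite', 'nts',
                     '--delete-machine', 'old-buildbot',
                     '--commit', '1',
                     '/path/to/lnt/instance'])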