Example #1
def update_schema(engine, versions, available_migrations, schema_name):
    schema_migrations = available_migrations[schema_name]

    # Get the current schema version.
    db_version = versions.get(schema_name, None)
    current_version = schema_migrations['current_version']

    # If there was no previous version, initialize the version.
    if db_version is None:
        logger.info("assigning initial version for schema %r", schema_name)
        _set_schema_version(engine, schema_name, 0)
        db_version = 0

    # If we are up-to-date, do nothing.
    if db_version == current_version:
        return False

    # Otherwise, update the database.
    if db_version > current_version:
        logger.error("invalid schema %r version %r (greater than current)",
                     schema_name, db_version)
        return False

    logger.info("updating schema %r from version %r to current version %r",
                schema_name, db_version, current_version)
    while db_version < current_version:
        # Look up the upgrade script for this version and load its
        # upgrade() entry point.
        upgrade_script = schema_migrations[db_version]

        upgrade_globals = {}
        with open(upgrade_script) as f:
            exec(compile(f.read(), upgrade_script, 'exec'), upgrade_globals)
        upgrade_method = upgrade_globals['upgrade']

        # Execute the upgrade.
        #
        # FIXME: Backup the database here.
        #
        # FIXME: Execute this inside a transaction?
        logger.info("applying upgrade for version %d to %d" %
                    (db_version, db_version + 1))
        upgrade_method(engine)

        # Update the schema version.
        db_version += 1
        _set_schema_version(engine, schema_name, db_version)

    return True
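update_schema() loads each migration script with exec() and expects it to define an upgrade(engine) entry point. Below is a minimal sketch of such a script under the naming scheme described in example #5; the table and column it alters are hypothetical, and a SQLAlchemy engine is assumed.

# migrations/upgrade_0_to_1.py -- hypothetical migration script
import sqlalchemy


def upgrade(engine):
    # Illustrative only: add a column to an assumed existing table. Real
    # migration scripts perform whatever schema change the new version needs.
    with engine.begin() as conn:
        conn.execute(sqlalchemy.text(
            "ALTER TABLE example_table ADD COLUMN notes VARCHAR(256)"))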
Example #2
def post_submission_hooks(session, ts, run_id):
    """Run all the post submission hooks on the submitted run."""
    if not HOOKS_LOADED:
        logger.error("Running Hooks without loading them first.")
    for func in HOOKS['post_submission_hook']:
        func(session, ts, run_id)
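Each entry in HOOKS['post_submission_hook'] is called with (session, ts, run_id). A minimal sketch of what a hook and its registration might look like, assuming HOOKS maps hook names to lists of callables (the loader that sets HOOKS_LOADED is not shown here):

def log_submission(session, ts, run_id):
    # Hypothetical hook: record which run was just submitted.
    logger.info("post-submission hook saw run %r", run_id)


# Assumed registration step; in the real code this would happen while the
# hooks are being loaded, before HOOKS_LOADED is checked.
HOOKS['post_submission_hook'].append(log_submission)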
Example #3
File: app.py Project: quangvd3/lnt
def log_exception(self, exc_info):
    # We need to stringify the traceback, since logs are sent via
    # pickle.
    logger.error("Exception: " + traceback.format_exc())
Example #4
def action_send_run_comparison(instance_path, run_a_id, run_b_id, database,
                               testsuite, host, from_address, to_address,
                               subject_prefix, dry_run):
    """send a run-vs-run comparison email"""
    import contextlib
    import email.mime.multipart
    import email.mime.text
    import lnt.server.reporting.dailyreport
    import lnt.server.reporting.runs
    import smtplib

    init_logger(logging.ERROR)

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(instance_path)
    config = instance.config

    # Get the database.
    with contextlib.closing(config.get_database(database)) as db:
        session = db.make_session()

        # Get the testsuite.
        ts = db.testsuite[testsuite]

        # Look up the two runs.
        run_a_id = int(run_a_id)
        run_b_id = int(run_b_id)
        run_a = session.query(ts.Run).\
            filter_by(id=run_a_id).first()
        run_b = session.query(ts.Run).\
            filter_by(id=run_b_id).first()
        if run_a is None:
            logger.error("invalid run ID %r (not in database)" % (run_a_id, ))
        if run_b is None:
            logger.error("invalid run ID %r (not in database)" % (run_b_id, ))

        # Generate the report.
        data = lnt.server.reporting.runs.generate_run_data(
            session,
            run_b,
            baseurl=config.zorgURL,
            result=None,
            compare_to=run_a,
            baseline=None,
            aggregation_fn=min)

        env = lnt.server.ui.app.create_jinja_environment()
        text_template = env.get_template('reporting/run_report.txt')
        text_report = text_template.render(data)
        html_template = env.get_template('reporting/run_report.html')
        html_report = html_template.render(data)

        subject = data['subject']
        if subject_prefix is not None:
            subject = "%s %s" % (subject_prefix, subject)

        # Form the multipart email message.
        msg = email.mime.multipart.MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_address
        msg['To'] = to_address
        msg.attach(email.mime.text.MIMEText(text_report, 'plain'))
        msg.attach(email.mime.text.MIMEText(html_report, 'html'))

        # Send the report.
        if not dry_run:
            mail_client = smtplib.SMTP(host)
            mail_client.sendmail(from_address, [to_address], msg.as_string())
            mail_client.quit()
        else:
            out = sys.stdout
            out.write("From: %s\n" % from_address)
            out.write("To: %s\n" % to_address)
            out.write("Subject: %s\n" % subject)
            out.write("=== text/plain report\n")
            out.write(text_report + "\n")
            out.write("=== html report\n")
            out.write(html_report + "\n")
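A usage sketch: calling the function in dry-run mode writes the comparison report to stdout instead of mailing it. Every argument below is a placeholder value, not one taken from the source.

# Hypothetical invocation; paths, IDs, and addresses are placeholders.
action_send_run_comparison(
    instance_path='/path/to/lnt/instance',
    run_a_id='1', run_b_id='2',            # the function int()s the run IDs
    database='default', testsuite='nts',   # placeholder database/suite names
    host='localhost',
    from_address='lnt@example.com', to_address='dev@example.com',
    subject_prefix='[LNT]',
    dry_run=True)                          # print the report, do not send mail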
Example #5
def _load_migrations():
    """
    Load available migration scripts from a directory.

    Migrations are organized as:

    <current dir>/migrations/
    <current dir>/migrations/upgrade_<N>_to_<N+1>.py
    ...
    """

    upgrade_script_rex = re.compile(
        r'^upgrade_(0|[1-9][0-9]*)_to_([1-9][0-9]*)\.py$')
    migrations = {}

    # Currently, we only load migrations for the '__core__' schema, and only
    # from the migrations directory. If we eventually need to support
    # migrations for the per-testsuite tables, one idea is to add
    # subdirectories keyed on the testsuite.
    for schema_name in ('__core__', ):
        schema_migrations_path = os.path.join(os.path.dirname(__file__),
                                              'migrations')
        schema_migrations = {}
        for item in os.listdir(schema_migrations_path):
            # Ignore certain known non-scripts.
            if item in ('README.txt', '__init__.py', 'new_suite.py',
                        'util.py') or item.endswith('.pyc'):
                continue

            # Ignore non-matching files.
            m = upgrade_script_rex.match(item)
            if m is None:
                logger.warning(
                    "ignoring item %r in schema migration directory: %r", item,
                    schema_migrations_path)
                continue

            # Check the version numbers for validity.
            version, next_version = map(int, m.groups())
            if next_version != version + 1:
                logger.error(
                    "invalid script name %r in schema migration directory: %r",
                    item, schema_migrations_path)
                continue

            schema_migrations[version] = os.path.join(schema_migrations_path,
                                                      item)

        # Ignore directories with no migrations.
        if not schema_migrations:
            logger.warning("ignoring empty migrations directory: %r",
                           schema_migrations_path)
            continue

        # Check the provided versions for sanity.
        current_version = max(schema_migrations) + 1
        for i in range(current_version):
            if i not in schema_migrations:
                logger.error("schema %r is missing migration for version: %r",
                             schema_name, i)

        # Store the current version as another item in the per-schema migration
        # dictionary.
        schema_migrations['current_version'] = current_version

        # Store the schema migrations.
        migrations[schema_name] = schema_migrations

    return migrations
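_load_migrations() returns a dictionary shaped like {'__core__': {0: <path to upgrade_0_to_1.py>, ..., 'current_version': N}}, which is exactly what update_schema() in example #1 consumes. A minimal sketch of wiring the two together, assuming it runs in the same module as those functions; the engine URL and the versions mapping are stubbed-out placeholders.

# Hypothetical driver; the real code reads the per-schema versions back
# from the database rather than starting from an empty dict.
import sqlalchemy

engine = sqlalchemy.create_engine('sqlite:///lnt.db')
available_migrations = _load_migrations()
versions = {}
for schema_name in available_migrations:
    if update_schema(engine, versions, available_migrations, schema_name):
        logger.info("schema %r was upgraded", schema_name)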
Example #6
File: app.py Project: llvm-mirror/lnt
def log_exception(self, exc_info):
    # We need to stringify the traceback, since logs are sent via
    # pickle.
    logger.error("Exception: " + traceback.format_exc())
Example #7
File: compilers.py Project: TUBOSS/lnt
def get_cc_info(path, cc_flags=[]):
    """get_cc_info(path) -> { ... }

    Extract various information on the given compiler and return a dictionary
    of the results."""

    cc = path

    # Interrogate the compiler.
    cc_version = capture([cc, '-v', '-E'] + cc_flags +
                         ['-x', 'c', '/dev/null', '-###'],
                         include_stderr=True).strip()

    # Determine the assembler version, as found by the compiler.
    cc_as_version = capture([cc, "-c", '-Wa,-v', '-o', '/dev/null'] +
                            cc_flags + ['-x', 'assembler', '/dev/null'],
                            include_stderr=True).strip()

    if "clang: error: unsupported argument '-v'" in cc_as_version:
        cc_as_version = "Clang built in."

    # Determine the linker version, as found by the compiler.
    cc_ld_version = capture(([cc, "-Wl,-v", "-dynamiclib"]),
                            include_stderr=True).strip()

    # Extract the default target .ll (or assembly, for non-LLVM compilers).
    cc_target_assembly = capture([cc, '-S', '-flto', '-o', '-'] + cc_flags +
                                 ['-x', 'c', '/dev/null'],
                                 include_stderr=True).strip()

    # Extract the compiler's response to -dumpmachine and use it as the
    # default target; it may be overridden below for LLVM-capable compilers.
    cc_target = cc_dumpmachine = capture([cc, '-dumpmachine']).strip()

    # Parse out the compiler's version line and the path to the "cc1" binary.
    cc1_binary = None
    version_ln = None
    cc_name = cc_version_num = cc_build_string = cc_extra = ""
    for ln in cc_version.split('\n'):
        if ' version ' in ln:
            version_ln = ln
        elif 'cc1' in ln or 'clang-cc' in ln:
            m = re.match(r' "?([^"]*)"?.*"?-E"?.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
        elif "-_Amachine" in ln:
            m = re.match(r'([^ ]*) *-.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
    if cc1_binary is None:
        logger.error("unable to find compiler cc1 binary: %r: %r" %
                     (cc, cc_version))
    if version_ln is None:
        logger.error("unable to find compiler version: %r: %r" %
                     (cc, cc_version))
    else:
        m = re.match(r'(.*) version ([^ ]*) +(\([^(]*\))(.*)', version_ln)
        if m is not None:
            cc_name, cc_version_num, cc_build_string, cc_extra = m.groups()
        else:
            # If that didn't match, try a more basic pattern.
            m = re.match(r'(.*) version ([^ ]*)', version_ln)
            if m is not None:
                cc_name, cc_version_num = m.groups()
            else:
                logger.error("unable to determine compiler version: %r: %r" %
                             (cc, version_ln))
                cc_name = "unknown"

    # Compute normalized compiler name and type. We try to grab source
    # revisions, branches, and tags when possible.
    cc_norm_name = None
    cc_build = None
    cc_src_branch = cc_alt_src_branch = None
    cc_src_revision = cc_alt_src_revision = None
    cc_src_tag = None
    llvm_capable = False
    cc_extra = cc_extra.strip()
    if cc_name == 'icc':
        cc_norm_name = 'icc'
        cc_build = 'PROD'
        cc_src_tag = cc_version_num

    elif cc_name == 'gcc' and (cc_extra == ''
                               or re.match(r' \(dot [0-9]+\)', cc_extra)):
        cc_norm_name = 'gcc'
        m = re.match(r'\(Apple Inc. build ([0-9]*)\)', cc_build_string)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()
        else:
            logger.error('unable to determine gcc build version: %r' %
                         cc_build_string)
    elif (cc_name
          in ('clang', 'LLVM', 'Debian clang', 'Apple clang', 'Apple LLVM')
          and (cc_extra == '' or 'based on LLVM' in cc_extra or
               (cc_extra.startswith('(') and cc_extra.endswith(')')))):
        llvm_capable = True
        if cc_name == 'Apple clang' or cc_name == 'Apple LLVM':
            cc_norm_name = 'apple_clang'
        else:
            cc_norm_name = 'clang'

        m = re.match(r'\(([^ ]*)( ([0-9]+))?\)', cc_build_string)
        if m:
            cc_src_branch, _, cc_src_revision = m.groups()

            # With a CMake build, the branch is not emitted.
            if cc_src_branch and not cc_src_revision and \
                    cc_src_branch.isdigit():
                cc_src_revision = cc_src_branch
                cc_src_branch = ""

            # These show up with git-svn.
            if cc_src_branch == '$URL$':
                cc_src_branch = ""
        else:
            # Otherwise, see if we can match a branch and a tag name. That
            # could be a git hash.
            m = re.match(r'\((.+) ([^ ]+)\)', cc_build_string)
            if m:
                cc_src_branch, cc_src_revision = m.groups()
            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra), ))
                cc_src_branch = ""

        m = re.search('clang-([0-9.]*)', cc_src_branch)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()

            # We sometimes use a tag of 9999 to indicate a dev build.
            if cc_src_tag == '9999':
                cc_build = 'DEV'
        else:
            cc_build = 'DEV'

        # Newer versions of Clang can report separate versions for LLVM and
        # Clang. Parse the cc_extra text so we can get the maximum SVN version.
        if cc_extra.startswith('(') and cc_extra.endswith(')'):
            m = re.match(r'\((.+) ([^ ]+)\)', cc_extra)
            if m:
                cc_alt_src_branch, cc_alt_src_revision = m.groups()

                # With a CMake build, the branch is not emitted.
                if cc_alt_src_branch and not cc_alt_src_revision and \
                        cc_alt_src_branch.isdigit():
                    cc_alt_src_revision = cc_alt_src_branch
                    cc_alt_src_branch = ""

            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra), ))

    elif cc_name == 'gcc' and 'LLVM build' in cc_extra:
        llvm_capable = True
        cc_norm_name = 'llvm-gcc'
        m = re.match(r' \(LLVM build ([0-9.]+)\)', cc_extra)
        if m:
            llvm_build, = m.groups()
            if llvm_build:
                cc_src_tag = llvm_build.strip()
            cc_build = 'PROD'
        else:
            cc_build = 'DEV'
    else:
        logger.error("unable to determine compiler name: %r" %
                     ((cc_name, cc_build_string), ))

    if cc_build is None:
        logger.error("unable to determine compiler build: %r" % cc_version)

    # If LLVM capable, fetch the llvm target instead.
    if llvm_capable:
        m = re.search('target triple = "(.*)"', cc_target_assembly)
        if m:
            cc_target, = m.groups()
        else:
            logger.error("unable to determine LLVM compiler target: %r: %r" %
                         (cc, cc_target_assembly))

    cc_exec_hash = hashlib.sha1()
    with open(cc, 'rb') as f:
        cc_exec_hash.update(f.read())

    info = {
        'cc_build': cc_build,
        'cc_name': cc_norm_name,
        'cc_version_number': cc_version_num,
        'cc_dumpmachine': cc_dumpmachine,
        'cc_target': cc_target,
        'cc_version': cc_version,
        'cc_exec_hash': cc_exec_hash.hexdigest(),
        'cc_as_version': cc_as_version,
        'cc_ld_version': cc_ld_version,
        'cc_target_assembly': cc_target_assembly,
    }
    if cc1_binary is not None and os.path.exists(cc1_binary):
        cc1_exec_hash = hashlib.sha1()
        with open(cc1_binary, 'rb') as f:
            cc1_exec_hash.update(f.read())
        info['cc1_exec_hash'] = cc1_exec_hash.hexdigest()
    if cc_src_tag is not None:
        info['cc_src_tag'] = cc_src_tag
    if cc_src_revision is not None:
        info['cc_src_revision'] = cc_src_revision
    if cc_src_branch:
        info['cc_src_branch'] = cc_src_branch
    if cc_alt_src_revision is not None:
        info['cc_alt_src_revision'] = cc_alt_src_revision
    if cc_alt_src_branch is not None:
        info['cc_alt_src_branch'] = cc_alt_src_branch

    # Infer the run order from the other things we have computed.
    info['inferred_run_order'] = get_inferred_run_order(info)

    return info
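A short usage sketch; the compiler path and flags are placeholders, and the keys read back are the ones populated in the info dictionary above.

# Hypothetical usage; '/usr/bin/clang' and the flags are placeholders.
info = get_cc_info('/usr/bin/clang', cc_flags=['-O2'])
print(info['cc_name'], info['cc_version_number'])
print(info['cc_target'])
print(info['inferred_run_order'])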
Example #8
File: main.py Project: llvm-mirror/lnt
def action_send_run_comparison(instance_path, run_a_id, run_b_id, database,
                               testsuite, host, from_address, to_address,
                               subject_prefix, dry_run):
    """send a run-vs-run comparison email"""
    import contextlib
    import email.mime.multipart
    import email.mime.text
    import lnt.server.reporting.dailyreport
    import lnt.server.reporting.runs
    import smtplib

    init_logger(logging.ERROR)

    # Load the LNT instance.
    instance = lnt.server.instance.Instance.frompath(instance_path)
    config = instance.config

    # Get the database.
    with contextlib.closing(config.get_database(database)) as db:
        session = db.make_session()

        # Get the testsuite.
        ts = db.testsuite[testsuite]

        # Look up the two runs.
        run_a_id = int(run_a_id)
        run_b_id = int(run_b_id)
        run_a = session.query(ts.Run).\
            filter_by(id=run_a_id).first()
        run_b = session.query(ts.Run).\
            filter_by(id=run_b_id).first()
        if run_a is None:
            logger.error("invalid run ID %r (not in database)" % (run_a_id,))
        if run_b is None:
            logger.error("invalid run ID %r (not in database)" % (run_b_id,))

        # Generate the report.
        data = lnt.server.reporting.runs.generate_run_data(
            session, run_b, baseurl=config.zorgURL, result=None,
            compare_to=run_a, baseline=None, aggregation_fn=min)

        env = lnt.server.ui.app.create_jinja_environment()
        text_template = env.get_template('reporting/run_report.txt')
        text_report = text_template.render(data).encode('utf-8')
        html_template = env.get_template('reporting/run_report.html')
        html_report = html_template.render(data).encode('utf-8')

        subject = data['subject']
        if subject_prefix is not None:
            subject = "%s %s" % (subject_prefix, subject)

        # Form the multipart email message.
        msg = email.mime.multipart.MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_address
        msg['To'] = to_address
        msg.attach(email.mime.text.MIMEText(text_report, 'plain', 'utf-8'))
        msg.attach(email.mime.text.MIMEText(html_report, 'html', 'utf-8'))

        # Send the report.
        if not dry_run:
            mail_client = smtplib.SMTP(host)
            mail_client.sendmail(
                from_address,
                [to_address],
                msg.as_string())
            mail_client.quit()
        else:
            out = sys.stdout
            out.write("From: %s\n" % from_address)
            out.write("To: %s\n" % to_address)
            out.write("Subject: %s\n" % subject)
            out.write("=== text/plain report\n")
            out.write(text_report + "\n")
            out.write("=== html report\n")
            out.write(html_report + "\n")