Example #1
def main(args):
    global CONFIG

    CONFIG = configuration.get_config(args)

    test_case_definition_dir = CONFIG.get("testDir") + "/" + TEST_DIR_BASE_NAME
    test_plan_definition_dir = CONFIG.get("testDir") + "/" + PLAN_DIR_BASE_NAME
    test_case_definition_file_list = common.find_all_test_case(
        CONFIG.get("plan"), test_case_definition_dir, test_plan_definition_dir)

    test_case_definition_url = "https://github.com/qinshulei/ci-test-cases"
    if CONFIG.get("testUrl") is not None and CONFIG.get("testUrl") != "":
        test_case_definition_url = CONFIG.get("testUrl")

    setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % CONFIG.get("url")
    distro = CONFIG.get("distro")
    if distro is None:
        distro = "Ubuntu"

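    # Pick the job-creation path according to the kernel tree under test.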
    if CONFIG.get("tree") == "open-estuary":
        walk_url(CONFIG.get("url"), CONFIG.get("url"), CONFIG.get("plans"),
                 CONFIG.get("arch"), CONFIG.get("targets"),
                 CONFIG.get("priority"), distro, CONFIG.get("scope"),
                 CONFIG.get("level"), test_case_definition_dir,
                 test_case_definition_file_list, test_case_definition_url)
    elif CONFIG.get("tree") == "linaro":
        create_jobs2(CONFIG.get("plans"), "D05", CONFIG.get("targets"),
                     CONFIG.get("priority"), distro, CONFIG.get("scope"),
                     CONFIG.get("level"), test_case_definition_dir,
                     test_case_definition_file_list, test_case_definition_url)
    print 'Done scanning for kernel information'
    print 'Done creating YAML jobs'
    exit(0)
Example #2
def main(args):
    config = configuration.get_config(args)
    token = config.get('token')
    api = config.get('api')
    storage = config.get('storage')
    builds_json = config.get('builds')

    print("Working on kernel {}/{}".format(
        config.get('tree'), config.get('branch')))

    if not storage:
        raise Exception("No KernelCI storage URL provided")

    if builds_json:
        print("Getting builds from {}".format(builds_json))
        with open(builds_json) as json_file:
            builds = json.load(json_file)
    else:
        print("Getting builds from KernelCI API")
        if not token:
            raise Exception("No KernelCI API token provided")
        if not api:
            raise Exception("No KernelCI API URL provided")
        builds = get_builds(api, token, config)

    print("Number of builds: {}".format(len(builds)))

    jobs = get_jobs_from_builds(config, builds)
    print("Number of jobs: {}".format(len(jobs)))

    write_jobs(config, jobs)
Example #3
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))
    load_jobs()
    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

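    # Optionally poll the submitted jobs, fail fast on a FAIL result when
    # bisecting, and write the collected results out as JSON.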
    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if 'result' in jobs[job_id]:
                    if jobs[job_id]['result'] == 'FAIL':
                        exit(1)
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #4
def main(args):
    config = configuration.get_config(args)
    token = config.get('token')
    api = config.get('api')
    storage = config.get('storage')
    builds_json = config.get('builds')

    print("Working on kernel {}/{}".format(config.get('tree'),
                                           config.get('branch')))

    if not storage:
        raise Exception("No KernelCI storage URL provided")

    if builds_json:
        print("Getting builds from {}".format(builds_json))
        with open(builds_json) as json_file:
            builds = json.load(json_file)
    else:
        print("Getting builds from KernelCI API")
        if not token:
            raise Exception("No KernelCI API token provided")
        if not api:
            raise Exception("No KernelCI API URL provided")
        builds = get_builds(api, token, config)

    print("Number of builds: {}".format(len(builds)))

    jobs = get_jobs_from_builds(config, builds)
    print("Number of jobs: {}".format(len(jobs)))

    write_jobs(config, jobs)
Example #5
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))
    load_jobs()
    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_yaml(config.get("poll"), results_directory, jobs)
    exit(0)
Example #6
def airflow_task_success(context):
    section = get_section(context['task'].task_id)
    dag_id = context['dag'].dag_id
    from lib.configuration import get_config
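    # Granted-patent DAGs load the granted_patent configuration; every other
    # DAG is treated as an application DAG.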
    if dag_id in [
            '01_update_granted_patent', '02_gi_post_manual',
            '03_disambiguation_support', '04_disambiguation_postprocessing',
            '99_daily_checks'
    ]:
        type = 'granted_patent'
    else:
        type = 'application'
    config = get_config(type)
    message = 'AIRFLOW TASK Success:\n' \
              'DAG:    {dag_id}\n' \
              'TASKS:  {task_id}\n' \
              'Duration:  {duration}\n' \
        .format(dag_id=context['task_instance'].dag_id, task_id=context['task_instance'].task_id,
                duration=context['task_instance'].duration)
    report_message = get_report_message(context['task'].task_id, config)
    send_slack_notification(report_message,
                            config,
                            section=section,
                            level='success')
    send_slack_notification(message, config, section=section, level='success')
Example #7
def clean_data():
    yield
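    # Teardown: point the config at the test database and truncate the tables
    # populated during the test.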
    config = get_config()
    config["DATABASE"]["NEW_DB"] = 'sarvo_test_db'
    from sqlalchemy import create_engine
    engine = create_engine(get_connection_string(config, "NEW_DB"))
    engine.execute("TRUNCATE TABLE inventor;")
    engine.execute("TRUNCATE TABLE disambiguated_inventor_ids;")
Example #8
def main(args):
    config = configuration.get_config(args)

    setup_job_dir(os.getcwd() + "/jobs")
    print "Scanning %s for kernel information..." % config.get("url")
    walk_url(config.get("url"), config.get("plans"), config.get("arch"), config.get("targets"), config.get("priority"))
    print "Done scanning for kernel information"
    print "Done creating JSON jobs"
    exit(0)
Example #9
def main(args):
    config = configuration.get_config(args)

    if config.get("boot"):
        boot_report(config)
        generate_email_test_report()
        generate_current_test_report()
        generate_history_test_report()

    exit(0)
Example #10
def main(args):
    config = configuration.get_config(args)

    setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % config.get("url")
    walk_url(config.get("url"), config.get("url"), config.get("plans"),
             config.get("arch"), config.get("targets"), config.get("priority"))
    print 'Done scanning for kernel information'
    print 'Done creating JSON jobs'
    exit(0)
Example #11
def main(args):
    global directory
    config = configuration.get_config(args)
    if config.get("jobs"):
        directory = setup_job_dir(config.get("jobs"))
    else:
        directory = setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % config.get("url")
    walk_url(config.get("url"), config.get("plans"), config.get("arch"), config.get("targets"), config.get("priority"))
    print 'Done scanning for kernel information'
    print 'Done creating JSON jobs'
    exit(0)
Example #12
def main(args):
    config = configuration.get_config(args)

    global TEST_CASE_DEFINITION_DIR
    TEST_CASE_DEFINITION_DIR = config.get("testDir")

    if config.get("boot"):
        boot_report(config)
        generate_current_test_report()
        generate_email_test_report()
        generate_history_test_report()

    exit(0)
Example #13
def airflow_daily_check_failure(context):
    section = get_section(context['task'].task_id)
    from lib.configuration import get_config
    config = get_config()
    message = 'AIRFLOW TASK FAILURE:\n' \
              'DAG:    {dag_id}\n' \
              'TASKS:  {task_id}\n' \
              'Duration:  {duration}\n' \
              'Reason: {exception}\n' \
        .format(dag_id=context['task_instance'].dag_id, task_id=context['task_instance'].task_id,
                duration=context['task_instance'].duration, exception=context['exception'])
    config["SLACK"]["CHANNEL"] = "pv_server_status"
    send_slack_notification(message, config, section=section, level='error')
Example #14
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
    connection = utils.connect(url)
    load_jobs()
    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    create_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    exit(0)
Example #15
def main(args):
    global directory
    config = configuration.get_config(args)
    if config.get("jobs"):
        directory = setup_job_dir(config.get("jobs"))
    else:
        directory = setup_job_dir(os.getcwd() + '/jobs')
    url = config.get('url')
    arch = config.get('arch')
    if arch:
        url += arch + "/"
    print 'Scanning %s for kernel information...' % config.get("url")
    walk_url(url, config.get("plans"), arch, config.get("targets"),
             config.get("priority"))
    print 'Done scanning for kernel information'
    print 'Done creating JSON jobs'
    exit(0)
Example #16
def airflow_daily_check_success(context):
    section = get_section(context['task'].task_id)
    from lib.configuration import get_config
    config = get_config()
    message = 'AIRFLOW TASK Success:\n' \
              'DAG:    {dag_id}\n' \
              'TASKS:  {task_id}\n' \
              'Duration:  {duration}\n' \
        .format(dag_id=context['task_instance'].dag_id, task_id=context['task_instance'].task_id,
                duration=context['task_instance'].duration)
    report_message = get_report_message(context['task'].task_id, config)
    config["SLACK"]["CHANNEL"] = "pv_server_status"
    send_slack_notification(report_message,
                            config,
                            section=section,
                            level='success')
    send_slack_notification(message, config, section=section, level='success')
Example #17
def main(args):
    config = configuration.get_config(args)
    lava_connection = LavaConnection(config)

    lava_job = LavaRunJob(lava_connection,
                          config.get('job'),
                          2)
    lava_job.connect()

    if config.get("curses"):
        output_handler = CursesOutput(lava_job, config.get("interval"))
    else:
        output_handler = FileOutputHandler(sys.stdout, lava_job, config.get("interval"))

    output_handler.run()

    exit(0)
Example #18
def main(args):
    config = configuration.get_config(args)

    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    if job_map:
        url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
        connection = utils.connect(url)
        submit_jobs(connection)
    exit(0)
Example #19
def main(args):
    config = configuration.get_config(args)

    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    jobs = config.get("jobs")
    print("Loading jobs from {}".format(jobs))
    load_jobs(jobs)

    if JOBS:
        start_time = time.time()
        url = utils.validate_input(config.get("username"), config.get("token"),
                                   config.get("server"))
        connection = utils.connect(url)
        submit_jobs(connection)
    exit(0)
Example #20
def main(args):
    config = configuration.get_config(args)
    jenkins_build_url = os.getenv('BUILD_URL', '')
    global TEST_CASE_DEFINITION_DIR
    TEST_CASE_DEFINITION_DIR = config.get("testDir")
    distro = config.get("distro")

    if config.get("boot"):
        boot_report(config)
        module_dict = generate_module_dict(job_result_dict,
                                           TEST_CASE_DEFINITION_DIR)
        generate_scope_test_report(TEST_CASE_DEFINITION_DIR, module_dict,
                                   jenkins_build_url, distro)
        generate_current_test_report()
        generate_email_test_report(distro, module_dict, jenkins_build_url)
        generate_history_test_report()

    exit(0)
Example #21
def main(args):
    global test_kind
    config = configuration.get_config(args)

    setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % config.get("url")
    distro = config.get("distro")
    if distro is None:
        distro = "Ubuntu"
    test_kind = config.get("testClassify")
    if test_kind is None:
        test_kind = "BOTH"
    walk_url(config.get("url"), config.get("url"), config.get("plans"),
             config.get("arch"), config.get("targets"), config.get("priority"),
             distro, config.get("SasFlag"))
    print 'Done scanning for kernel information'
    print 'Done creating JSON jobs'
    exit(0)
Example #22
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)

    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    submit_jobs(connection)
    exit(0)
Example #23
def main(args):
    global test_kind
    config = configuration.get_config(args)

    setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % config.get("url")
    distro = config.get("distro")
    if distro is None:
        distro = "Ubuntu"
    test_kind = config.get("testClassify")
    if test_kind is None:
        test_kind = "BOTH"
    walk_url(config.get("url"), config.get("url"), config.get("plans"),
             config.get("arch"), config.get("targets"), config.get("priority"),
             distro, config.get("SasFlag"))
    print 'Done scanning for kernel information'
    print 'Done creating JSON jobs'
    exit(0)
Example #24
def main(args):
    conf = configuration.get_config(args)
    api, token = (conf.get(x) for x in ['api', 'token'])

    if not token:
        raise Exception("No token provided")
    if not api:
        raise Exception("No KernelCI API URL provided")

    with open(args['submitted']) as jobs_file:
        jobs = json.load(jobs_file)

    lab = jobs['lab']
    all_passed = True
    boots = {}

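    # Poll the boot result of every submitted job and collect the reports by ID.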
    for job_def, job_id in jobs['jobs'].iteritems():
        with open(job_def) as job_yaml:
            job = yaml.safe_load(job_yaml)
        meta = job['metadata']
        boot = poll_boot(token, api, meta, lab, args)
        if not boot:
            print("Boot not found, skipping")
            all_passed = False
            continue

        keys = [
            'job', 'git_branch', 'kernel', 'arch', 'defconfig_full',
            'lab_name', 'board'
        ]
        name = '-'.join(boot[k] for k in keys)
        print("  id: {}, result: {}".format(boot['_id']['$oid'],
                                            boot['status']))
        print("  {}".format(name))
        if boot['status'] != 'PASS':
            all_passed = False
        boots[boot['_id']['$oid']] = boot

    output_path = args['output']
    print("Saving boot results in {}".format(output_path))
    with open(output_path, 'w') as output:
        json.dump(boots, output)

    sys.exit(0 if all_passed is True else 2)
Example #25
def main(args):
    config = configuration.get_config(args)

    jobs_submitted = config.get('submitted')
    lab = config.get('lab')
    if jobs_submitted:
        if not lab:
            raise Exception("Lab name required when saving submitted jobs")
        if os.path.exists(jobs_submitted):
            os.unlink(jobs_submitted)

    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    jobs = config.get("jobs")
    print("Loading jobs from {}".format(jobs))
    load_jobs(jobs)

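    # Submit the loaded jobs through the lab's LAVA API (read from labs.ini) and
    # optionally save the submission metadata to the 'submitted' file.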
    if not JOBS:
        print("No jobs to submit")
        result = False
    else:
        start_time = time.time()
        labs_config = configparser.ConfigParser()
        labs_config.read('labs.ini')
        lava_api = labs_config[config.get("lab")]['api']
        print("LAVA API: {}".format(lava_api))
        url = utils.validate_input(config.get("username"), config.get("token"),
                                   lava_api)
        connection = utils.connect(url)
        result = submit_jobs(connection)
        if jobs_submitted and SUBMITTED:
            print("Saving submitted jobs data in {}".format(jobs_submitted))
            data = {
                'start_time': start_time,
                'lab': config.get('lab'),
                'jobs': {k: v
                         for k, v in SUBMITTED.iteritems() if v},
            }
            with open(jobs_submitted, 'w') as json_file:
                json.dump(data, json_file)

    exit(0 if result is True else 1)
Example #26
def main(args):
    config = configuration.get_config(args)

    jobs_submitted = config.get('submitted')
    lab = config.get('lab')
    if jobs_submitted:
        if not lab:
            raise Exception("Lab name required when saving submitted jobs")
        if os.path.exists(jobs_submitted):
            os.unlink(jobs_submitted)

    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    jobs = config.get("jobs")
    print("Loading jobs from {}".format(jobs))
    load_jobs(jobs)

    if not JOBS:
        print("No jobs to submit")
        result = False
    else:
        start_time = time.time()
        labs_config = configparser.ConfigParser()
        labs_config.read('labs.ini')
        lava_api = labs_config[config.get("lab")]['api']
        print("LAVA API: {}".format(lava_api))
        url = utils.validate_input(config.get("username"),
                                   config.get("token"),
                                   lava_api)
        connection = utils.connect(url)
        result = submit_jobs(connection)
        if jobs_submitted and SUBMITTED:
            print("Saving submitted jobs data in {}".format(jobs_submitted))
            data = {
                'start_time': start_time,
                'lab': config.get('lab'),
                'jobs': {k: v for k, v in SUBMITTED.iteritems() if v},
            }
            with open(jobs_submitted, 'w') as json_file:
                json.dump(data, json_file)

    exit(0 if result is True else 1)
Example #27
def airflow_task_failure(context):
    section = get_section(context['task'].task_id)
    from lib.configuration import get_config
    dag_id = context['dag'].dag_id
    if dag_id in [
            '01_update_granted_patent', '02_gi_post_manual',
            '03_disambiguation_support', '04_disambiguation_postprocessing'
    ]:
        type = 'granted_patent'
    else:
        type = 'application'
    config = get_config(type)
    message = 'AIRFLOW TASK FAILURE:\n' \
              'DAG:    {dag_id}\n' \
              'TASKS:  {task_id}\n' \
              'Duration:  {duration}\n' \
              'Reason: {exception}\n' \
        .format(dag_id=context['task_instance'].dag_id, task_id=context['task_instance'].task_id,
                duration=context['task_instance'].duration, exception=context['exception'])
    send_slack_notification(message, config, section=section, level='error')
Example #28
def main(args):
    config = configuration.get_config(args)
    token = config.get('token')
    api = config.get('api')
    storage = config.get('storage')
    builds_json = config.get('builds')

    print("Working on kernel {}/{}".format(config.get('tree'),
                                           config.get('branch')))

    if not storage:
        raise Exception("No KernelCI storage URL provided")

    if builds_json:
        print("Getting builds from {}".format(builds_json))
        with open(builds_json) as json_file:
            builds = json.load(json_file)
    else:
        print("Getting builds from KernelCI API")
        if not token:
            raise Exception("No KernelCI API token provided")
        if not api:
            raise Exception("No KernelCI API URL provided")
        builds = get_builds(api, token, config)

    print("Number of builds: {}".format(len(builds)))

    config_data = kernelci.config.test.from_yaml(config.get('test_configs'))

    plan_configs = config_data['test_plans']
    base_plans = config.get('plans')
    plans = list(plan_config for plan_config in plan_configs.values()
                 if plan_config.base_name in base_plans)

    tests = config_data['test_configs']
    print("Number of test configs: {}".format(len(tests)))

    jobs = get_jobs_from_builds(config, builds, tests, plans)
    print("Number of jobs: {}".format(len(jobs)))

    write_jobs(config, jobs)
Example #29
def main(args):
    global test_kind

    global TEST_CASE_DEFINITION_DIR
    global TEST_CASE_DEFINITION_FILE_LIST
    global TEST_CASE_DEFINITION_URL

    global CONFIG

    CONFIG = configuration.get_config(args)

    TEST_CASE_DEFINITION_DIR = CONFIG.get("testDir")
    TEST_CASE_DEFINITION_FILE_LIST = find_all_test_case(
        TEST_CASE_DEFINITION_DIR)

    TEST_CASE_DEFINITION_URL = "https://github.com/qinshulei/ci-test-cases"
    if CONFIG.get("testUrl") is not None and CONFIG.get("testUrl") != "":
        TEST_CASE_DEFINITION_URL = CONFIG.get("testUrl")

    setup_job_dir(os.getcwd() + '/jobs')
    print 'Scanning %s for kernel information...' % CONFIG.get("url")
    distro = CONFIG.get("distro")
    if distro is None:
        distro = "Ubuntu"
    test_kind = CONFIG.get("testClassify")
    if test_kind is None:
        test_kind = "BOTH"

    if CONFIG.get("tree") == "open-estuary":
        walk_url(CONFIG.get("url"), CONFIG.get("url"), CONFIG.get("plans"),
                 CONFIG.get("arch"), CONFIG.get("targets"),
                 CONFIG.get("priority"), distro, CONFIG.get("scope"),
                 CONFIG.get("level"))
    elif CONFIG.get("tree") == "linaro":
        create_new_jobs(CONFIG.get("plans"), "D05", CONFIG.get("targets"),
                        CONFIG.get("priority"), distro, CONFIG.get("scope"),
                        CONFIG.get("level"))
    print 'Done scanning for kernel information'
    print 'Done creating YAML jobs'
    exit(0)
Example #30
def main(args):
    config = configuration.get_config(args)
    token = config.get('token')
    api = config.get('api')

    if not token:
        raise Exception("No KernelCI API token provided")
    if not api:
        raise Exception("No KernelCI API URL provided")

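    # Upload the bisection log under a path built from the job parameters, then
    # send the results and the report email through the API.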
    upload_path = '/'.join(args[k] for k in [
        'tree', 'branch', 'kernel', 'arch', 'defconfig', 'lab'])
    log_file_name = 'bisect-{}.json'.format(args['target'])

    print("Uploading bisect log: {}".format(upload_path))
    upload_log(args, upload_path, log_file_name, token, api)

    print("Sending bisection results")
    send_result(args, log_file_name, token, api)

    print("Sending bisection report email")
    send_report(args, log_file_name, token, api)
Example #31
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if 'result' in jobs[job_id]:
                    if jobs[job_id]['result'] == 'FAIL':
                        exit(1)
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #32
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if "result" in jobs[job_id]:
                    if jobs[job_id]["result"] == "FAIL":
                        exit(1)
        jobs["duration"] = end_time - start_time
        jobs["username"] = config.get("username")
        jobs["token"] = config.get("token")
        jobs["server"] = config.get("server")
        results_directory = os.getcwd() + "/results"
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #33
def main(args):
    config = configuration.get_config(args)

    if config.get("boot"):
        boot_report(config)
    exit(0)
Example #34
def main(args):
    config = configuration.get_config(args)
    plans = config.get("plans")
    targets = config.get("targets")
    if config.get("jobs"):
        job_dir = setup_job_dir(config.get("jobs"))
    else:
        job_dir = setup_job_dir(os.getcwd() + '/jobs')

    arch = args.get('arch')
    plans = args.get('plans')
    branch = args.get('branch')
    tree = args.get('tree')
    kernel = tree
    api = args.get('api')
    headers = {
        "Authorization": config.get('token')
    }

    print "Working on kernel %s/%s" % (tree, branch)
    url = urlparse.urljoin(api, ("/build?job=%s&kernel=%s&status=PASS&arch=%s" % (tree, branch, arch)))
    print "Calling KernelCI API: %s" % url
    response = requests.get(url, headers=headers)
    print response
    data = json.loads(response.content)
    builds = data['result']
    print len(builds)
    jobs = []
    cwd = os.getcwd()
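    # For every passing build, match its DTBs against the device map and emit
    # one LAVA job per compatible device, plan and template.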
    for build in builds:
        defconfig = build['defconfig_full']
        arch_defconfig = ("%s-%s" % (arch, defconfig))
        print "Working on build %s" % arch_defconfig
        test_suite = None
        test_set = None
        test_desc = None
        test_type = None
        plan_defconfigs = []
        modules = build['modules']
        for plan in plans:
            if plan != 'boot':
                config = ConfigParser.ConfigParser()
                try:
                    config.read(cwd + '/templates/' + plan + '/' + plan + '.ini')
                    test_suite = config.get(plan, 'suite')
                    test_set = config.get(plan, 'set')
                    test_desc = config.get(plan, 'description')
                    test_type = config.get(plan, 'type')
                    plan_defconfigs = config.get(plan, 'defconfigs').split(',')
                except:
                    print "Unable to load test configuration"
                    exit(1)
            if build['kernel_image']:
                # handle devices without a DTB, hacky :/
                if build['kernel_image'] == 'bzImage' and arch == 'x86':
                    build['dtb_dir_data'].extend(LEGACY_X86_PLATFORMS)
                if arch == 'arm64' and 'arm64-defconfig' in arch_defconfig:
                    build['dtb_dir_data'].extend(LEGACY_ARM64_PLATFORMS)
                for dtb in build['dtb_dir_data']:
                    # hack for arm64 dtbs in subfolders
                    dtb_full = dtb
                    if arch == 'arm64':
                        dtb = str(dtb).split('/')[-1]
                    if dtb in device_map:
                        # print "device %s was in the device_map" % dtb
                        for device in device_map[dtb]:
                            # print "working on device %s" % dtb
                            lpae = device['lpae']
                            device_type = device['device_type']
                            fastboot = str(device['fastboot']).lower()
                            blacklist = False
                            nfs_blacklist = False
                            if arch_defconfig in device['defconfig_blacklist']:
                                print "defconfig %s is blacklisted for device %s" % (defconfig, device['device_type'])
                                continue
                            elif "BIG_ENDIAN" in defconfig and plan != 'boot-be':
                                print "BIG_ENDIAN is not supported on %s" % device_type
                                continue
                            elif "LPAE" in defconfig and not lpae:
                                print "LPAE is not support on %s" % device_type
                                continue
                            elif any([x for x in device['kernel_blacklist'] if x in kernel]):
                                print "kernel %s is blacklisted for device %s" % (kernel, device_type)
                                continue
                            elif any([x for x in device['nfs_blacklist'] if x in kernel]) \
                                    and plan in ['boot-nfs', 'boot-nfs-mp']:
                                print "kernel %s is blacklisted for NFS on device %s" % (kernel, device_type)
                                continue
                            elif 'be_blacklist' in device \
                                    and any([x for x in device['be_blacklist'] if x in kernel]) \
                                    and plan in ['boot-be']:
                                print "kernel %s is blacklisted for BE on device %s" % (kernel, device_type)
                                continue
                            elif (arch_defconfig not in plan_defconfigs) and (plan != "boot"):
                                print "defconfig %s not in test plan %s" % (arch_defconfig, plan)
                                continue
                            elif targets is not None and device_type not in targets:
                                print "device_type %s is not in targets %s" % (device_type, targets)
                            else:
                                for template in device['templates']:
                                    short_template_file = plan + '/' + str(template)
                                    template_file = cwd + '/templates/' + short_template_file
                                    if os.path.exists(template_file) and template_file.endswith('.jinja2'):
                                        job_name = tree + '-' + branch + '-' + arch + "-" + defconfig[:100] + '-' + dtb + '-' + device_type + '-' + plan
                                        base_url = "https://storage.kernelci.org/%s/%s/%s/" % (build['job'], build['kernel'], arch_defconfig)
                                        dtb_url = base_url + "dtbs/" + dtb_full
                                        kernel_url = urlparse.urljoin(base_url, build['kernel_image'])
                                        endian = 'little'
                                        if 'BIG_ENDIAN' in defconfig and plan == 'boot-be':
                                            endian = 'big'
                                        if build['modules']:
                                            modules_url = urlparse.urljoin(base_url, build['modules'])
                                        else:
                                            modules_url = None
                                        if device['device_type'].startswith('qemu'):
                                            device['device_type'] = 'qemu'
                                        job = {'name': job_name, 'dtb_url': dtb_url, 'platform': dtb, 'kernel_url': kernel_url, 'image_type': 'kernel-ci', 'image_url': base_url,
                                               'modules_url': modules_url, 'plan': plan, 'kernel': branch, 'tree': tree, 'defconfig': defconfig, 'fastboot': fastboot,
                                               'priority': args.get('priority'), 'device': device, 'template_file': template_file, 'base_url': base_url, 'endian': endian,
                                               'test_suite': test_suite, 'test_set': test_set, 'test_desc': test_desc, 'test_type': test_type, 'short_template_file': short_template_file,
                                               'arch': arch, 'arch_defconfig': arch_defconfig}
                                        jobs.append(job)
                                        # print job
            else:
                print "no kernel_image for %s" % build['defconfig_full']

    for job in jobs:
        job_file = job_dir + '/' + job['name'] + '.yaml'
        with open(job_file, 'w') as f:
            f.write(jinja_render(job))
        print "Job written: %s" % job_file
Example #35
        con=engine)
    assignee_org_data = assignee_organization_with_count.sort_values(
        "org_count",
        ascending=False).groupby("assignee_id").head(1).reset_index(drop=True)
    assignee_org_data = assignee_org_data.drop("org_count",
                                               axis=1).assign(name_first=None,
                                                              name_last=None)
    assignee_org_data.rename({
        "assignee_id": "id"
    }, axis=1).to_sql(name='assignee',
                      con=engine,
                      if_exists='append',
                      index=False)


def post_process_assignee(config):
    upload_disambig_results(config)
    update_rawassignee(config)
    create_assignee(config)


def post_process_qc(config):
    qc = AssigneePostProcessingQC(config)
    qc.runTests()


if __name__ == '__main__':
    config = get_config()
    # post_process_assignee(config)
    post_process_qc(config)
Example #36
                xml_helpers.process_patent_numbers(line.strip('\n')))
    withdrawn_patents_frame = pd.DataFrame(withdrawn_patents)
    withdrawn_patents_frame.columns = ['patent_id']
    withdrawn_patents_frame.to_sql(con=engine,
                                   name="temp_withdrawn_patent",
                                   index=False)


def update_withdrawn(update_config):
    update_query = "UPDATE patent p join temp_withdrawn_patent twp on twp.patent_id = p.id set p.withdrawn = 1; "
    engine = create_engine(get_connection_string(update_config, "NEW_DB"))
    engine.execute(update_query)


def process_withdrawn(config):
    withdrawn_folder = '{}/withdrawn'.format(
        config['FOLDERS']['WORKING_FOLDER'])
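    # Download the list of withdrawn patent numbers, stage it in a temporary
    # table, then flag the matching rows in the patent table.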
    download_withdrawn_patent_numbers(withdrawn_folder)
    load_withdrawn(update_config=config)
    update_withdrawn(update_config=config)


def post_withdrawn(config):
    qc = WithdrawnTest(config)
    qc.runTests()


if __name__ == '__main__':
    config = get_config(type="granted_patent")
    #process_withdrawn(config)
    post_withdrawn(config)
Example #37
    def test_yearly_count(self, table, strict=True):
        start_date = datetime.datetime.strptime(
            self.config['DATES']['START_DATE'], '%Y%m%d')
        end_date = datetime.datetime.strptime(self.config['DATES']['END_DATE'],
                                              '%Y%m%d')
        start_date_string = start_date.strftime('%Y-%m-%d')
        end_date_string = end_date.strftime('%Y-%m-%d')
        if table in ['cpc_current', 'wipo']:
            if not self.connection.open:
                self.connection.connect()

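            # Check that the table gained rows for patents dated within the
            # configured update window before running the generic yearly count.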
            with self.connection.cursor() as count_cursor:
                in_between_query = "SELECT count(1) as new_count from {table} t join patent p on p.id =t.patent_id and p.date  between '{start_dt}' and '{end_dt}'".format(
                    table=table,
                    start_dt=start_date_string,
                    end_dt=end_date_string)
                count_cursor.execute(in_between_query)
                count_value = count_cursor.fetchall()[0][0]
                if count_value < 1:
                    raise AssertionError(
                        "Table doesn't not have new data : {table}, date range '{start_dt}' to '{end_dt}' "
                        .format(table=table,
                                start_dt=start_date_string,
                                end_dt=end_date_string))
            super().test_yearly_count(table)


if __name__ == '__main__':
    qc = CPCTest(get_config())
    qc.runTests()
Example #38
def main(args):
    config = configuration.get_config(args)
    plans = config.get("plans")
    targets = config.get("targets")
    lab_name = config.get('lab')
    job_dir = setup_job_dir(config.get('jobs') or lab_name)
    token = config.get('token')
    api = config.get('api')
    storage = config.get('storage')

    if not token:
        raise Exception("No token provided")
    if not api:
        raise Exception("No KernelCI API URL provided")
    if not storage:
        raise Exception("No KernelCI storage URL provided")

    arch = args.get('arch')
    plans = args.get('plans')
    branch = args.get('branch')
    git_describe = args.get('describe')
    tree = args.get('tree')
    expected = int(args.get('defconfigs'))
    headers = {
        "Authorization": token,
    }

    print "Working on kernel %s/%s" % (tree, branch)
    url_params = urllib.urlencode({
        'job': tree,
        'kernel': git_describe,
        'git_branch': branch,
        'arch': arch,
    })
    url = urlparse.urljoin(api, 'build?{}'.format(url_params))
    print "Calling KernelCI API: %s" % url
    builds = []
    loops = 10
    retry_time = 30
    for loop in range(loops):
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        data = json.loads(response.content)
        builds = data['result']
        if len(builds) >= expected:
            break
        print "Got less builds (%s) than expected (%s), retry in %s seconds" % (
            len(builds), expected, retry_time)
        time.sleep(retry_time)

    print("Number of builds: {}".format(len(builds)))
    jobs = []
    cwd = os.getcwd()
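    # Walk every successful build and, for each DTB/device/plan combination that
    # passes the blacklist and whitelist checks, render a LAVA job definition.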
    for build in builds:
        defconfig = build['defconfig_full']
        arch_defconfig = ("%s-%s" % (arch, defconfig))
        print "Working on build %s %s %s %s %s" % (tree, branch, git_describe,
                                                   arch, defconfig)
        test_suite = None
        test_set = None
        test_desc = None
        test_type = None
        plan_defconfigs = []
        modules = build['modules']
        if build['kernel_image']:
            if build['kernel_image'] == 'bzImage' and arch == 'x86':
                build['dtb_dir_data'].extend(LEGACY_X86_PLATFORMS)
        else:
            continue
        if 'PASS' not in build.get('status', ''):
            continue
        if arch in ['arm', 'arm64', 'x86'] and 'defconfig' in defconfig:
            build['dtb_dir_data'].append('qemu')
        for plan in plans:
            if plan != 'boot':
                config = ConfigParser.ConfigParser()
                try:
                    config.read(cwd + '/templates/' + plan + '/' + plan +
                                '.ini')
                    test_suite = config.get(plan, 'suite')
                    test_set = config.get(plan, 'set')
                    test_desc = config.get(plan, 'description')
                    test_type = config.get(plan, 'type')
                    plan_defconfigs = config.get(plan, 'defconfigs').split(',')
                except:
                    print "Unable to load test configuration"
                    exit(1)
            if build['kernel_image']:
                for dtb in build['dtb_dir_data']:
                    # hack for arm64 dtbs in subfolders
                    dtb_full = dtb
                    if arch == 'arm64':
                        dtb = str(dtb).split('/')[-1]
                    if dtb in device_map:
                        # print "device %s was in the device_map" % dtb
                        for device in device_map[dtb]:
                            # print "working on device %s" % dtb
                            lpae = device['lpae']
                            device_type = device['device_type']
                            mach = device['mach']
                            fastboot = str(device['fastboot']).lower()
                            blacklist = False
                            nfs_blacklist = False
                            if defconfig in device['defconfig_blacklist']:
                                print "defconfig %s is blacklisted for device %s" % (
                                    defconfig, device['device_type'])
                                continue
                            elif device.has_key('defconfig_whitelist'
                                                ) and defconfig not in device[
                                                    'defconfig_whitelist']:
                                print "defconfig %s is not in whitelist for device %s" % (
                                    defconfig, device['device_type'])
                                continue
                            elif device.has_key(
                                    'arch_blacklist'
                            ) and arch in device['arch_blacklist']:
                                print "arch %s is blacklisted for device %s" % (
                                    arch, device['device_type'])
                                continue
                            elif device.has_key(
                                    'lab_blacklist'
                            ) and lab_name in device['lab_blacklist']:
                                print "device %s is blacklisted for lab %s" % (
                                    device['device_type'], lab_name)
                                continue
                            elif "BIG_ENDIAN" in defconfig and plan != 'boot-be':
                                print "BIG_ENDIAN is not supported on %s" % device_type
                                continue
                            elif "LPAE" in defconfig and not lpae:
                                print "LPAE is not support on %s" % device_type
                                continue
                            elif any([
                                    x for x in device['kernel_blacklist']
                                    if x in git_describe
                            ]):
                                print "git_describe %s is blacklisted for device %s" % (
                                    git_describe, device_type)
                                continue
                            elif any([x for x in device['nfs_blacklist'] if x in git_describe]) \
                                    and plan in ['boot-nfs', 'boot-nfs-mp']:
                                print "git_describe %s is blacklisted for NFS on device %s" % (
                                    git_describe, device_type)
                                continue
                            elif 'be_blacklist' in device \
                                    and any([x for x in device['be_blacklist'] if x in git_describe]) \
                                    and plan in ['boot-be']:
                                print "git_describe %s is blacklisted for BE on device %s" % (
                                    git_describe, device_type)
                                continue
                            elif (arch_defconfig
                                  not in plan_defconfigs) and (plan != "boot"):
                                print "defconfig %s not in test plan %s" % (
                                    arch_defconfig, plan)
                                continue
                            elif targets is not None and device_type not in targets:
                                print "device_type %s is not in targets %s" % (
                                    device_type, targets)
                            elif arch == 'x86' and dtb == 'x86-32' and 'i386' not in arch_defconfig:
                                print "%s is not a 32-bit x86 build, skipping for 32-bit device %s" % (
                                    defconfig, device_type)
                            elif 'kselftest' in defconfig and plan != 'kselftest':
                                print "Skipping kselftest defconfig because plan was not kselftest"
                            else:
                                for template in device['templates']:
                                    short_template_file = plan + '/' + str(
                                        template)
                                    template_file = cwd + '/templates/' + short_template_file
                                    if os.path.exists(
                                            template_file
                                    ) and template_file.endswith('.jinja2'):
                                        job_name = (tree + '-' + branch + '-' +
                                                    git_describe + '-' + arch + '-' +
                                                    defconfig[:100] + '-' + dtb + '-' +
                                                    device_type + '-' + plan)
                                        base_url = "%s/%s/%s/%s/%s/%s/" % (
                                            storage, build['job'],
                                            build['git_branch'],
                                            build['kernel'], arch, defconfig)
                                        nfsrootfs_url = None
                                        initrd_url = None
                                        callback_name = 'lava/boot' if plan == 'boot' else 'lava/test'
                                        context = device[
                                            'context'] if 'context' in device else None
                                        if dtb_full.endswith('.dtb'):
                                            dtb_url = base_url + "dtbs/" + dtb_full
                                            platform = dtb[:-4]
                                        else:
                                            dtb_url = None
                                            platform = device_type
                                        kernel_url = urlparse.urljoin(
                                            base_url, build['kernel_image'])
                                        defconfig_base = ''.join(
                                            defconfig.split('+')[:1])
                                        endian = 'little'
                                        if 'BIG_ENDIAN' in defconfig and plan == 'boot-be':
                                            endian = 'big'
                                        initrd_arch = arch
                                        if arch == 'arm64' and endian == 'big':
                                            initrd_arch = 'arm64be'
                                        if arch == 'arm':
                                            if endian == 'big':
                                                initrd_arch = 'armeb'
                                            else:
                                                initrd_arch = 'armel'
                                        if 'kselftest' in plan:
                                            initrd_url = KSELFTEST_INITRD_URL.format(
                                                initrd_arch)
                                        else:
                                            initrd_url = INITRD_URL.format(
                                                initrd_arch)
                                        if 'nfs' in plan:
                                            nfsrootfs_url = NFSROOTFS_URL.format(
                                                initrd_arch)
                                            initrd_url = None
                                            platform = platform + "_rootfs:nfs"
                                        if build['modules']:
                                            modules_url = urlparse.urljoin(
                                                base_url, build['modules'])
                                        else:
                                            modules_url = None
                                        device_type = device['device_type']
                                        if device_type.startswith(
                                                'qemu'
                                        ) or device_type == 'kvm':
                                            device_type = 'qemu'
                                        job = {
                                            'name': job_name,
                                            'dtb_url': dtb_url,
                                            'dtb_full': dtb_full,
                                            'platform': platform,
                                            'mach': mach,
                                            'kernel_url': kernel_url,
                                            'image_type': 'kernel-ci',
                                            'image_url': base_url,
                                            'modules_url': modules_url,
                                            'plan': plan,
                                            'kernel': git_describe,
                                            'tree': tree,
                                            'defconfig': defconfig,
                                            'fastboot': fastboot,
                                            'priority': args.get('priority'),
                                            'device_type': device_type,
                                            'template_file': template_file,
                                            'base_url': base_url,
                                            'endian': endian,
                                            'test_suite': test_suite,
                                            'test_set': test_set,
                                            'test_desc': test_desc,
                                            'test_type': test_type,
                                            'short_template_file':
                                            short_template_file,
                                            'arch': arch,
                                            'arch_defconfig': arch_defconfig,
                                            'git_branch': branch,
                                            'git_commit': build['git_commit'],
                                            'git_describe': git_describe,
                                            'git_url': build['git_url'],
                                            'defconfig_base': defconfig_base,
                                            'initrd_url': initrd_url,
                                            'kernel_image':
                                            build['kernel_image'],
                                            'dtb_short': dtb,
                                            'nfsrootfs_url': nfsrootfs_url,
                                            'callback': args.get('callback'),
                                            'api': api,
                                            'lab_name': lab_name,
                                            'callback_name': callback_name,
                                            'context': context,
                                        }
                                        jobs.append(job)
            else:
                print "no kernel_image for %s" % build['defconfig_full']

    for job in jobs:
        job_file = job_dir + '/' + job['name'] + '.yaml'
        with open(job_file, 'w') as f:
            f.write(jinja_render(job))
        print "Job written: %s" % job_file
Example #39
    detail_desc_length(config)
    consolidate_uspc(config)
    pct_data_doc_type(config)
    consolidate_claim(config)
    consolidate_usreldoc(config)
    yearly_claim(config)
    yearly_brf_sum_text(config)
    yearly_draw_desc_text(config)
    yearly_detail_desc_text(config)


def post_upload_database(**kwargs):
    config = update_config_date(**kwargs)
    qc = AppUploadTest(config)
    qc.runTests()


if __name__ == "__main__":
    config = get_config('application')
    consolidate_rawlocation(config)
    consolidate_cpc(config)
    detail_desc_length(config)
    consolidate_uspc(config)
    pct_data_doc_type(config)
    consolidate_claim(config)
    consolidate_usreldoc(config)
    yearly_claim(config)
    yearly_brf_sum_text(config)
    yearly_draw_desc_text(config)
    yearly_detail_desc_text(config)
Example #40
def main(args):
    config = configuration.get_config(args)

    if config.get("boot"):
        boot_report(config)
    exit(0)
Example #41
def main(args):
    config = configuration.get_config(args)
    plans = config.get("plans")
    targets = config.get("targets")
    if config.get("jobs"):
        job_dir = setup_job_dir(config.get("jobs"))
    else:
        job_dir = setup_job_dir(os.getcwd() + '/jobs')

    arch = args.get('arch')
    plans = args.get('plans')
    branch = args.get('branch')
    tree = args.get('tree')
    kernel = tree
    api = args.get('api')
    headers = {"Authorization": config.get('token')}

    print "Working on kernel %s/%s" % (tree, branch)
    url = urlparse.urljoin(api,
                           ("/build?job=%s&kernel=%s&status=PASS&arch=%s" %
                            (tree, branch, arch)))
    print "Calling KernelCI API: %s" % url
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print "Error calling KernelCI API"
        print response
        sys.exit(1)
    data = json.loads(response.content)
    builds = data['result']
    print len(builds)
    jobs = []
    cwd = os.getcwd()
    for build in builds:
        defconfig = build['defconfig_full']
        arch_defconfig = ("%s-%s" % (arch, defconfig))
        print "Working on build %s" % arch_defconfig
        test_suite = None
        test_set = None
        test_desc = None
        test_type = None
        plan_defconfigs = []
        modules = build['modules']
        for plan in plans:
            if plan != 'boot':
                config = ConfigParser.ConfigParser()
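                # The plan .ini read below is expected to look like (layout
                # inferred from the config.get() calls; illustrative only):
                #   [<plan>]
                #   suite = <test suite name>
                #   set = <test set name>
                #   description = <human-readable description>
                #   type = <test type>
                #   defconfigs = arm-multi_v7_defconfig,arm64-defconfig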
                try:
                    config.read(cwd + '/templates/' + plan + '/' + plan +
                                '.ini')
                    test_suite = config.get(plan, 'suite')
                    test_set = config.get(plan, 'set')
                    test_desc = config.get(plan, 'description')
                    test_type = config.get(plan, 'type')
                    plan_defconfigs = config.get(plan, 'defconfigs').split(',')
                except:
                    print "Unable to load test configuration"
                    exit(1)
            if build['kernel_image']:
                # handle devices without a DTB, hacky :/
                if build['kernel_image'] == 'bzImage' and arch == 'x86':
                    build['dtb_dir_data'].extend(LEGACY_X86_PLATFORMS)
                if arch == 'arm64' and 'arm64-defconfig' in arch_defconfig:
                    build['dtb_dir_data'].extend(LEGACY_ARM64_PLATFORMS)
                for dtb in build['dtb_dir_data']:
                    # hack for arm64 dtbs in subfolders
                    dtb_full = dtb
                    if arch == 'arm64':
                        dtb = str(dtb).split('/')[-1]
                    if dtb in device_map:
                        # print "device %s was in the device_map" % dtb
                        for device in device_map[dtb]:
                            # print "working on device %s" % dtb
                            lpae = device['lpae']
                            device_type = device['device_type']
                            fastboot = str(device['fastboot']).lower()
                            blacklist = False
                            nfs_blacklist = False
                            if arch_defconfig in device['defconfig_blacklist']:
                                print "defconfig %s is blacklisted for device %s" % (
                                    defconfig, device['device_type'])
                                continue
                            elif "BIG_ENDIAN" in defconfig and plan != 'boot-be':
                                print "BIG_ENDIAN is not supported on %s" % device_type
                                continue
                            elif "LPAE" in defconfig and not lpae:
                                print "LPAE is not support on %s" % device_type
                                continue
                            elif any([
                                    x for x in device['kernel_blacklist']
                                    if x in kernel
                            ]):
                                print "kernel %s is blacklisted for device %s" % (
                                    kernel, device_type)
                                continue
                            elif any([x for x in device['nfs_blacklist'] if x in kernel]) \
                                    and plan in ['boot-nfs', 'boot-nfs-mp']:
                                print "kernel %s is blacklisted for NFS on device %s" % (
                                    kernel, device_type)
                                continue
                            elif 'be_blacklist' in device \
                                    and any([x for x in device['be_blacklist'] if x in kernel]) \
                                    and plan in ['boot-be']:
                                print "kernel %s is blacklisted for BE on device %s" % (
                                    kernel, device_type)
                                continue
                            elif (arch_defconfig
                                  not in plan_defconfigs) and (plan != "boot"):
                                print "defconfig %s not in test plan %s" % (
                                    arch_defconfig, plan)
                                continue
                            elif targets is not None and device_type not in targets:
                                print "device_type %s is not in targets %s" % (
                                    device_type, targets)
                            else:
                                for template in device['templates']:
                                    short_template_file = plan + '/' + str(
                                        template)
                                    template_file = cwd + '/templates/' + short_template_file
                                    if os.path.exists(
                                            template_file
                                    ) and template_file.endswith('.jinja2'):
                                        job_name = (tree + '-' + branch + '-' + arch + '-' +
                                                    defconfig[:100] + '-' + dtb + '-' +
                                                    device_type + '-' + plan)
                                        base_url = "https://storage.kernelci.org/%s/%s/%s/" % (
                                            build['job'], build['kernel'],
                                            arch_defconfig)
                                        dtb_url = base_url + "dtbs/" + dtb_full
                                        kernel_url = urlparse.urljoin(
                                            base_url, build['kernel_image'])
                                        defconfig_base = ''.join(
                                            defconfig.split('+')[:1])
                                        endian = 'little'
                                        if 'BIG_ENDIAN' in defconfig and plan == 'boot-be':
                                            endian = 'big'
                                        initrd_arch = arch
                                        if arch not in INITRD_URLS.keys():
                                            if arch == 'arm64' and endian == 'big':
                                                initrd_arch = 'arm64be'
                                            if arch == 'arm':
                                                if endian == 'big':
                                                    initrd_arch = 'armeb'
                                                else:
                                                    initrd_arch = 'armel'
                                        initrd_url = INITRD_URLS[initrd_arch]
                                        if build['modules']:
                                            modules_url = urlparse.urljoin(
                                                base_url, build['modules'])
                                        else:
                                            modules_url = None
                                        if device['device_type'].startswith(
                                                'qemu'
                                        ) or device['device_type'] == 'kvm':
                                            device['device_type'] = 'qemu'
                                        job = {
                                            'name': job_name,
                                            'dtb_url': dtb_url,
                                            'platform': dtb_full,
                                            'kernel_url': kernel_url,
                                            'image_type': 'kernel-ci',
                                            'image_url': base_url,
                                            'modules_url': modules_url,
                                            'plan': plan,
                                            'kernel': branch,
                                            'tree': tree,
                                            'defconfig': defconfig,
                                            'fastboot': fastboot,
                                            'priority': args.get('priority'),
                                            'device': device,
                                            'template_file': template_file,
                                            'base_url': base_url,
                                            'endian': endian,
                                            'test_suite': test_suite,
                                            'test_set': test_set,
                                            'test_desc': test_desc,
                                            'test_type': test_type,
                                            'short_template_file':
                                            short_template_file,
                                            'arch': arch,
                                            'arch_defconfig': arch_defconfig,
                                            'git_branch': build['git_branch'],
                                            'git_commit': build['git_commit'],
                                            'git_describe':
                                            build['git_describe'],
                                            'defconfig_base': defconfig_base,
                                            'initrd_url': initrd_url,
                                            'kernel_image':
                                            build['kernel_image'],
                                            'dtb_short': dtb
                                        }
                                        jobs.append(job)
            else:
                print "no kernel_image for %s" % build['defconfig_full']

    for job in jobs:
        job_file = job_dir + '/' + job['name'] + '.yaml'
        with open(job_file, 'w') as f:
            f.write(jinja_render(job))
        print "Job written: %s" % job_file