Example #1
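# Helpers such as check_project_root, WorkflowCache, load_infrabox_file,
# get_job_list, build_and_run and logger are assumed to come from the
# CLI's own modules.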
def run(args):
    check_project_root(args)

    # Init workflow cache
    cache = WorkflowCache(args)

    # validate infrabox.json
    data = load_infrabox_file(args.infrabox_file_path)
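    # Override per-job resource limits from the CLI flags below.
    # Note that int() truncates rather than rounds a non-integer value.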
    if args.memory:
        logger.warn(
            'WARNING: only int resource limits are supported right now. Using rounded int instead of provided value.'
        )
        for job in data['jobs']:
            job['resources']['limits']['memory'] = int(args.memory)
    if args.cpu:
        logger.warn(
            'WARNING: only int resource limits are supported right now. Using rounded int instead of provided value.'
        )
        for job in data['jobs']:
            job['resources']['limits']['cpu'] = int(args.cpu)

    jobs = get_job_list(data, args, infrabox_context=args.project_root)

    if not args.job_name:
        # We run all jobs, so clear all cached jobs
        cache.clear()

    # Cache all jobs
    cache.add_jobs(jobs)

    for j in cache.get_jobs(job_name=args.job_name, children=args.children):
        build_and_run(args, j, cache)
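
A minimal sketch of how run() might be invoked. argparse.Namespace stands in
for the CLI's real parser output; every value here is illustrative, and any
other attributes the helpers expect are omitted.

import argparse

args = argparse.Namespace(
    project_root='.',                      # directory containing the project
    infrabox_file_path='infrabox.json',    # definition file to load
    memory=None,                           # e.g. 2048 to override memory limits
    cpu=None,                              # e.g. 2 to override CPU limits
    job_name=None,                         # None runs all jobs (cache is cleared)
    children=True,                         # also build dependent child jobs
)
run(args)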
Example #2
def check_if_supported(job):
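    # Jobs may carry Kubernetes-specific resource settings; they are not
    # supported here, so emit a warning if present.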
    if 'resources' not in job:
        return

    res_k8s = job['resources'].get('kubernetes', None)

    if res_k8s:
        logger.warn('Using kubernetes resources is not supported')
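
A quick illustration of the guard above; both job dictionaries are made up:

check_if_supported({'name': 'compile'})   # no 'resources' key: returns silently
check_if_supported({
    'name': 'test',
    'resources': {'kubernetes': {'limits': {'memory': '512Mi'}}},
})                                         # logs the "not supported" warning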
Example #3
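# Standard-library imports used by build_and_run; project helpers such as
# logger, get_parent_job, track_as_parent, load_infrabox_file, get_job_list
# and the build_and_run_* functions are assumed to come from the CLI itself.
import copy
import os
import sys
import traceback
from datetime import datetime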
def build_and_run(args, job, cache):
    # check if dependency conditions are met
    for dep in job.get("depends_on", []):
        on = dep['on']
        parent = get_parent_job(dep['job'])

        if not parent:
            continue

        if parent['state'] not in on:
            logger.info('Skipping job %s' % job['name'])
            track_as_parent(job, 'skipped')
            return

    job_type = job['type']
    start_date = datetime.now()

    logger.info("Starting job %s" % job['name'])

    state = 'finished'

    try:
        if job_type == "docker-compose":
            build_and_run_docker_compose(args, job)
        elif job_type == "docker":
            build_and_run_docker(args, job)
        elif job_type == "docker-image":
            run_docker_image(args, job)
        elif job_type == "wait":
            # do nothing
            pass
        else:
            logger.error("Unknown job type")
            sys.exit(1)
    except Exception as e:
        state = 'failure'
        traceback.print_exc(file=sys.stdout)
        logger.warn("Job failed: %s" % e)
        sys.exit(1)

    if not job.get('directories', None):
        return

    # Dynamic child jobs
    infrabox_file = os.path.join(job['directories']['output'], 'infrabox.json')
    if not os.path.exists(infrabox_file):
        infrabox_file = os.path.join(job['directories']['output'],
                                     'infrabox.yaml')

    jobs = []
    if os.path.exists(infrabox_file):
        logger.info("Loading generated jobs")

        data = load_infrabox_file(infrabox_file)
        jobs = get_job_list(data,
                            args,
                            infrabox_context=os.path.join(
                                args.project_root, '.infrabox', 'output'))

    end_date = datetime.now()

    track_as_parent(job, state, start_date, end_date)
    logger.info("Finished job %s" % job['name'])

    for j in jobs:
        # Prefix name with parent
        j['name'] = job['name'] + '/' + j['name']

        # Add dependencies to all root jobs
        if not j.get('depends_on', None):
            j['depends_on'] = [{"on": ["finished"], "job": job['name']}]
        else:
            dependencies = copy.deepcopy(j['depends_on'])

            for d in dependencies:
                d['job'] = job['name'] + '/' + d['job']

            j['depends_on'] = dependencies

        cache.add_job(j)

    if args.children:
        for j in jobs:
            build_and_run(args, j, cache)
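
To make the dependency rewriting concrete: if a parent job named 'build'
generates a child 'test' that depends on a sibling 'lint', the loop above
produces (names illustrative):

# Before: generated child as loaded from the dynamic infrabox file.
j = {'name': 'test', 'depends_on': [{'on': ['finished'], 'job': 'lint'}]}

# After prefixing with the parent name 'build':
#   j['name']       == 'build/test'
#   j['depends_on'] == [{'on': ['finished'], 'job': 'build/lint'}]
# A child with no depends_on instead gets the parent itself:
#   [{'on': ['finished'], 'job': 'build'}]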