Code example #1
def graph(args):
    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, base_path=args.project_root)

    d = 'digraph "%s" {\n' % args.project_name

    for j in jobs:
        parents = j['parents']

        if not parents:
            continue

        parent_name = get_parent_name(parents)

        # Open one nested cluster per level of the parent chain
        for i in range(len(parents)):
            d += ' subgraph "cluster_%s" {\n' % get_parent_name(
                parents[0:i + 1])

        d += '   label="%s";\n' % parent_name.split('/')[-1]
        d += '   "%s";\n' % j['name']

        # Close the nested clusters again
        for _ in parents:
            d += ' }\n'

    # One edge per declared dependency
    for j in jobs:
        for dep in j.get('depends_on', []):
            d += '"%s" -> "%s";\n' % (dep, j['name'])

    d += '}'

    with open(args.output, 'w') as out:
        out.write(d)
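
To try this graph() variant outside the CLI, args only needs the attributes the function actually reads. A minimal sketch; the SimpleNamespace below and the Graphviz render step are illustrative additions, not part of the original code, and get_job_list may read further attributes than those listed:

import subprocess
from types import SimpleNamespace

# Hypothetical invocation; attribute names match what graph() reads above.
args = SimpleNamespace(
    infrabox_json='infrabox.json',  # path handed to load_infrabox_json
    project_root='.',               # base_path for get_job_list
    project_name='demo',            # name of the emitted digraph
    output='jobs.dot',              # where the DOT text is written
)
graph(args)

# Render the DOT file with Graphviz (the dot binary must be installed).
subprocess.run(['dot', '-Tpng', 'jobs.dot', '-o', 'jobs.png'], check=True)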
Code example #2
File: run.py Project: meranos/InfraBox-cli
def run(args):
    check_project_root(args)

    # Init workflow cache
    cache = WorkflowCache(args)

    # Load and validate the infrabox file
    data = load_infrabox_file(args.infrabox_file_path)
    if args.memory:
        logger.warn(
            'WARNING: only int resource limits are supported right now. Using rounded int instead of provided value.'
        )
        for job in data['jobs']:
            job['resources']['limits']['memory'] = int(args.memory)
    if args.cpu:
        logger.warn(
            'WARNING: only int resource limits are supported right now. Using rounded int instead of provided value.'
        )
        for job in data['jobs']:
            job['resources']['limits']['cpu'] = int(args.cpu)

    jobs = get_job_list(data, args, infrabox_context=args.project_root)

    if not args.job_name:
        # We run all jobs, so clear all cached jobs
        cache.clear()

    # Cache all jobs
    cache.add_jobs(jobs)

    for j in cache.get_jobs(job_name=args.job_name, children=args.children):
        build_and_run(args, j, cache)
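
One detail worth flagging: the warning speaks of a "rounded int", but int() truncates toward zero rather than rounding. A quick, standalone illustration (assuming args.memory/args.cpu arrive as floats; with a plain string like '1.9', int() would raise ValueError instead):

print(int(1.9))    # 1, not 2
print(int(2.999))  # 2
# so an override such as --memory 2.9 is recorded as 2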
Code example #3
File: list_jobs.py Project: metti/cli
def list_jobs(args):
    args.project_root = os.path.abspath(args.project_root)
    infrabox_json_path = os.path.join(args.project_root, 'infrabox.json')
    if not os.path.isfile(infrabox_json_path):
        logger.error('%s does not exist' % infrabox_json_path)
        sys.exit(1)

    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, base_path=args.project_root)

    # Print all job names in alphabetical order
    jobs.sort(key=lambda x: x['name'])
    for j in jobs:
        print(j['name'])
Code example #4
File: list_jobs.py Project: steffenschroeder/cli
def list_jobs(args):
    args.project_root = os.path.abspath(args.project_root)
    infrabox_json_path = os.path.join(args.project_root, 'infrabox.json')
    if not os.path.isfile(infrabox_json_path):
        logger.error('%s does not exist' % infrabox_json_path)
        sys.exit(1)

    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, base_path=args.project_root)

    # Load the jobs into the workflow cache and print them as a tree
    cache = WorkflowCache(args)
    cache.add_jobs(jobs)
    cache.print_tree()
Code example #5
def graph(args):
    check_project_root(args)
    args.project_root = os.path.abspath(args.project_root)
    infrabox_file_path = args.infrabox_file_path
    if not os.path.isfile(infrabox_file_path):
        logger.error('%s does not exist' % infrabox_file_path)
        sys.exit(1)

    data = load_infrabox_file(infrabox_file_path)
    jobs = get_job_list(data, args, infrabox_context=args.project_root)

    # Let the workflow cache render the dependency graph
    cache = WorkflowCache(args)
    cache.add_jobs(jobs)
    cache.print_graph()
Code example #6
File: run.py Project: AlienCookie/cli
def run(args):
    # Init workflow cache
    cache = WorkflowCache(args)

    # validate infrabox.json
    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, infrabox_context=args.project_root)

    if not args.job_name:
        # We run all jobs, so clear all cached jobs
        cache.clear()

    # Cache all jobs
    cache.add_jobs(jobs)

    for j in cache.get_jobs(job_name=args.job_name, children=args.children):
        build_and_run(args, j, cache)
Code example #7
File: run.py Project: metti/cli
def run(args):
    # validate infrabox.json
    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, base_path=args.project_root)

    # check if job name exists
    job = None
    if args.job_name:
        for j in jobs:
            if j['name'] == args.job_name:
                job = j
                break

        if not job:
            logger.error("job %s not found in infrabox.json" % args.job_name)
            sys.exit(1)

    if job:
        build_and_run(args, job)
    else:
        for j in jobs:
            build_and_run(args, j)
Code example #8
File: run.py Project: steffenschroeder/cli
def run(args):
    # Init workflow cache
    cache = WorkflowCache(args)

    # validate infrabox.json
    data = load_infrabox_json(args.infrabox_json)
    jobs = get_job_list(data, args, base_path=args.project_root)

    if args.job_name:
        cache.add_jobs(jobs)
        job = cache.get_job(args.job_name)

        if not job:
            logger.error("job %s not found in infrabox.json" % args.job_name)
            sys.exit(1)

        build_and_run(args, job, cache)
    else:
        cache.clear()
        cache.add_jobs(jobs)
        for j in jobs:
            build_and_run(args, j, cache)
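
All of the run() and list_jobs() variants above touch WorkflowCache only through a small surface: clear(), add_job()/add_jobs(), get_job()/get_jobs(), and the print_tree()/print_graph() renderers. The real class in InfraBox-cli persists its state to disk; what follows is only an in-memory sketch inferred from the call sites in these examples (the 'parent/child' name prefix it relies on comes from example #9), useful for experimenting with the snippets but not the project's actual implementation:

class WorkflowCache(object):
    """Illustrative in-memory stand-in; the real class persists to disk."""

    def __init__(self, args):
        self.args = args
        self.jobs = {}  # job name -> job dict

    def clear(self):
        self.jobs = {}

    def add_job(self, job):
        self.jobs[job['name']] = job

    def add_jobs(self, jobs):
        for job in jobs:
            self.add_job(job)

    def get_job(self, job_name):
        return self.jobs.get(job_name)

    def get_jobs(self, job_name=None, children=False):
        if not job_name:
            return list(self.jobs.values())
        # Child jobs are namespaced 'parent/child' (see example #9)
        return [j for name, j in sorted(self.jobs.items())
                if name == job_name
                or (children and name.startswith(job_name + '/'))]

    def print_tree(self):
        # The real implementation draws an indented tree; a flat sorted
        # listing is enough to show the namespacing.
        for name in sorted(self.jobs):
            print(name)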
Code example #9
File: run.py Project: meranos/InfraBox-cli
def build_and_run(args, job, cache):
    # check if dependency conditions are met
    for dep in job.get('depends_on', []):
        on = dep['on']
        parent = get_parent_job(dep['job'])

        if not parent:
            continue

        if parent['state'] not in on:
            logger.info('Skipping job %s' % job['name'])
            track_as_parent(job, 'skipped')
            return

    job_type = job['type']
    start_date = datetime.now()

    logger.info("Starting job %s" % job['name'])

    state = 'finished'

    try:
        if job_type == "docker-compose":
            build_and_run_docker_compose(args, job)
        elif job_type == "docker":
            build_and_run_docker(args, job)
        elif job_type == "docker-image":
            run_docker_image(args, job)
        elif job_type == "wait":
            # do nothing
            pass
        else:
            logger.error("Unknown job type: %s" % job_type)
            sys.exit(1)
    except Exception as e:
        state = 'failure'
        traceback.print_exc(file=sys.stdout)
        logger.warn("Job failed: %s" % e)
        sys.exit(1)

    if not job.get('directories', None):
        return

    # Dynamic child jobs
    infrabox_file = os.path.join(job['directories']['output'], 'infrabox.json')
    if not os.path.exists(infrabox_file):
        infrabox_file = os.path.join(job['directories']['output'],
                                     'infrabox.yaml')

    jobs = []
    if os.path.exists(infrabox_file):
        logger.info("Loading generated jobs")

        data = load_infrabox_file(infrabox_file)
        jobs = get_job_list(data,
                            args,
                            infrabox_context=os.path.join(
                                args.project_root, '.infrabox', 'output'))

    end_date = datetime.now()

    track_as_parent(job, state, start_date, end_date)
    logger.info("Finished job %s" % job['name'])

    for j in jobs:
        # Prefix name with parent
        j['name'] = job['name'] + '/' + j['name']

        # Add dependencies to all root jobs
        if not j.get('depends_on', None):
            j['depends_on'] = [{"on": ["finished"], "job": job['name']}]
        else:
            dependencies = copy.deepcopy(j['depends_on'])

            for d in dependencies:
                d['job'] = job['name'] + '/' + d['job']

            j['depends_on'] = dependencies

        cache.add_job(j)

    if args.children:
        for j in jobs:
            build_and_run(args, j, cache)
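
The tail of build_and_run is the subtle part: jobs generated at runtime are re-namespaced under their parent and wired to depend on it. A standalone sketch of just that transformation, using made-up job dicts to show the before/after:

import copy

parent = {'name': 'generator'}
children = [
    {'name': 'compile'},  # no depends_on: becomes a root child
    {'name': 'test', 'depends_on': [{'on': ['finished'], 'job': 'compile'}]},
]

for j in children:
    # Prefix the child's name with its parent's name
    j['name'] = parent['name'] + '/' + j['name']

    if not j.get('depends_on', None):
        # Root children wait for the parent job itself
        j['depends_on'] = [{'on': ['finished'], 'job': parent['name']}]
    else:
        # Dependencies among the generated jobs are re-namespaced too
        dependencies = copy.deepcopy(j['depends_on'])
        for d in dependencies:
            d['job'] = parent['name'] + '/' + d['job']
        j['depends_on'] = dependencies

print(children[0]['depends_on'])  # [{'on': ['finished'], 'job': 'generator'}]
print(children[1]['depends_on'])  # [{'on': ['finished'], 'job': 'generator/compile'}]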