Example #1
def main():  # pragma: no cover
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_DASHBOARD_URL')

    conn = connect_db()
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    logger.info("Connected to database")

    elect_leader(conn, "github-review")

    curs = conn.cursor()
    curs.execute("LISTEN job_update;")

    logger.info("Waiting for job updates")

    while True:
        if select.select([conn], [], [], 5) != ([], [], []):
            conn.poll()
            while conn.notifies:
                notify = conn.notifies.pop(0)
                handle_job_update(conn, json.loads(notify.payload))
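
All of these snippets call get_env (from pyinfraboxutils) without showing it. A minimal sketch of what such a helper plausibly looks like, assuming it aborts the process when a required variable is unset; this is an assumption for illustration, not the verified implementation:

# Hedged sketch of get_env: assumes the helper exits when the variable is
# missing, which matches how the examples use it as a startup check.
import os
import sys

def get_env(name):
    value = os.environ.get(name)
    if value is None:
        # hypothetical failure handling; the real helper may log instead
        sys.stderr.write("%s not set\n" % name)
        sys.exit(1)
    return value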
Example #2
def main():
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_GITHUB_WEBHOOK_SECRET')

    connect_db()  # Wait until DB is ready

    wsgi.server(eventlet.listen(('0.0.0.0', 8080)), app)
Example #3
def __init__(self, conn, args):
    self.conn = conn
    self.args = args
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("scheduler")
Example #4
def main():
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')

    conn = connect_db()
    migrate_db(conn)
    conn.close()
Example #5
def __init__(self, args, resource):
    self.args = args
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("controller")
    self.resource = resource
Example #6
def main():
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_ROOT_URL')
    get_env('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES')
    get_env('INFRABOX_LOCAL_CACHE_ENABLED')
    get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE')
    get_env('INFRABOX_JOB_API_URL')
    get_env('INFRABOX_JOB_GIT_URL')
    get_env('INFRABOX_JOB_MOUNT_DOCKER_SOCKET')
    console = ApiConsole()

    j = None
    try:
        j = RunJob(console)
        j.main()
        j.console.flush()
        j.console.header('Finished', show=True)
        j.update_status('finished', message='Successfully finished')
    except Failure as e:
        j.console.header('Failure', show=True)
        j.console.collect(e.message, show=True)
        j.console.flush()
        j.update_status('failure', message=e.message)
    except:
        print_stackdriver()
        if j:
            j.console.header('An error occurred', show=True)
            msg = traceback.format_exc()
            j.console.collect(msg, show=True)
            j.console.flush()
            j.update_status('error', message='An error occurred')
Example #7
def main():  # pragma: no cover
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')

    cluster_name = get_env('INFRABOX_CLUSTER_NAME')
    conn = connect_db()
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    logger.info("Connected to database")

    elect_leader(conn, 'github-review', cluster_name)

    curs = conn.cursor()
    curs.execute("LISTEN job_update;")

    logger.info("Waiting for job updates")

    while True:
        if select.select([conn], [], [], 5) != ([], [], []):
            conn.poll()
            while conn.notifies:
                notify = conn.notifies.pop(0)
                if not is_leader(
                        conn, 'github-review', cluster_name, exit=False):
                    continue
                handle_job_update(conn, json.loads(notify.payload))
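
Examples #1 and #7 depend on elect_leader and is_leader, which are not shown (note the signatures differ between the two versions). One plausible way to implement leadership on the existing Postgres connection is an advisory lock; this is an illustrative sketch, not InfraBox's actual implementation:

# Illustrative only: leader election via Postgres advisory locks, keyed on
# service and cluster name. The real elect_leader/is_leader may differ.
import zlib

def _lock_key(service, cluster_name):
    # stable 32-bit key derived from the names
    return zlib.crc32(('%s/%s' % (service, cluster_name)).encode('utf-8'))

def elect_leader(conn, service, cluster_name):
    curs = conn.cursor()
    # blocks until this connection holds the lock, i.e. becomes leader
    curs.execute('SELECT pg_advisory_lock(%s)', [_lock_key(service, cluster_name)])
    curs.close()

def is_leader(conn, service, cluster_name, exit=False):  # exit unused in this sketch
    curs = conn.cursor()
    # non-blocking: True only while this session can (re)acquire the lock
    curs.execute('SELECT pg_try_advisory_lock(%s)', [_lock_key(service, cluster_name)])
    leader = curs.fetchone()[0]
    curs.close()
    return leader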
Example #8
def main():
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_ADMIN_PASSWORD')
    get_env('INFRABOX_ADMIN_EMAIL')

    conn = connect_db()
    migrate_db(conn)
    configure_admin(conn)
    conn.close()
Example #9
#pylint: disable=too-few-public-methods
import os
import uuid

import boto3
from google.cloud import storage as gcs
from flask import after_this_request

from pyinfraboxutils import get_env

USE_S3 = get_env('INFRABOX_STORAGE_S3_ENABLED') == 'true'
USE_GCS = get_env('INFRABOX_STORAGE_GCS_ENABLED') == 'true'
storage = None

class S3(object):
    def __init__(self):
        url = ''

        if get_env('INFRABOX_STORAGE_S3_SECURE') == 'true':
            url = 'https://'
        else:
            url = 'http://'
        url += get_env('INFRABOX_STORAGE_S3_ENDPOINT')
        url += ':'
        url += get_env('INFRABOX_STORAGE_S3_PORT')
        self.url = url

        self.upload_bucket = get_env('INFRABOX_STORAGE_S3_PROJECT_UPLOAD_BUCKET')
        self.cache_bucket = get_env('INFRABOX_STORAGE_S3_CONTAINER_CONTENT_CACHE_BUCKET')
        self.output_bucket = get_env('INFRABOX_STORAGE_S3_CONTAINER_OUTPUT_BUCKET')
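
The S3 snippet ends after collecting the endpoint and bucket settings. A plausible continuation that turns them into a boto3 client; the INFRABOX_STORAGE_S3_ACCESS_KEY and INFRABOX_STORAGE_S3_SECRET_KEY names are hypothetical, chosen only for illustration:

    # Hypothetical continuation: the credential variable names below are
    # assumptions for illustration, not confirmed InfraBox settings.
    def _get_client(self):
        return boto3.client(
            's3',
            endpoint_url=self.url,
            aws_access_key_id=get_env('INFRABOX_STORAGE_S3_ACCESS_KEY'),
            aws_secret_access_key=get_env('INFRABOX_STORAGE_S3_SECRET_KEY'))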
Example #10
def _get_client(self):
    client = BlockBlobService(
        account_name=get_env('INFRABOX_STORAGE_AZURE_ACCOUNT_NAME'),
        account_key=get_env('INFRABOX_STORAGE_AZURE_ACCOUNT_KEY'))
    return client
Example #11
from urlparse import urlparse

from flask import g, request, abort, redirect, make_response

from flask_restplus import Resource

from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.utils import OneLogin_Saml2_Utils

from pyinfraboxutils import get_logger, get_root_url, get_env
from pyinfraboxutils.ibrestplus import api
from pyinfraboxutils.token import encode_user_token

logger = get_logger("saml")

get_env("INFRABOX_ACCOUNT_SAML_SETTINGS_PATH")
get_env("INFRABOX_ACCOUNT_SAML_EMAIL_FORMAT")
get_env("INFRABOX_ACCOUNT_SAML_NAME_FORMAT")
get_env("INFRABOX_ACCOUNT_SAML_USERNAME_FORMAT")


def init_saml_auth():
    parsed_url = urlparse(request.url)
    request_data = {
        "https": "on" if request.scheme == "https" else "off",
        "http_host": request.host,
        "server_port": parsed_url.port,
        "script_name": request.path,
        "get_data": request.args.copy(),
        "post_data": request.form.copy(),
        "query_string": request.query_string
Example #12
def __init__(self):
    self.bucket = get_env('INFRABOX_STORAGE_GCS_BUCKET')
Example #13
def handle_job_update(conn, event):
    job_id = event['job_id']

    jobs = execute_sql(
        conn, '''
        SELECT id, state, name, project_id, build_id
        FROM job
        WHERE id = %s
    ''', [job_id])

    if not jobs:
        return False

    job = jobs[0]

    project_id = job['project_id']
    build_id = job['build_id']

    projects = execute_sql(
        conn, '''
        SELECT id, name, type
        FROM project
        WHERE id = %s
    ''', [project_id])

    if not projects:
        return False

    project = projects[0]

    if project['type'] != 'github':
        return False

    builds = execute_sql(
        conn, '''
        SELECT id, build_number, restart_counter, commit_id
        FROM build
        WHERE id = %s
    ''', [build_id])

    if not builds:
        return False

    build = builds[0]

    project_name = project['name']
    job_state = job['state']
    job_name = job['name']
    commit_sha = build['commit_id']
    build_id = build['id']
    build_number = build['build_number']
    build_restart_counter = build['restart_counter']
    dashboard_url = get_env('INFRABOX_ROOT_URL')

    # determine github commit state
    state = 'success'
    if job_state in ('scheduled', 'running', 'queued'):
        state = 'pending'

    if job_state in ('failure', 'skipped', 'killed'):
        state = 'failure'

    if job_state == 'error':
        state = 'error'

    logger.info("")
    logger.info("Handle job %s", job_id)
    logger.info("Setting state to %s", state)

    token = execute_sql(
        conn, '''
        SELECT github_api_token FROM "user" u
        INNER JOIN collaborator co
            ON co.owner = true
            AND co.project_id = %s
            AND co.user_id = u.id
    ''', [project_id])

    if not token:
        logger.info("No API token, not updating status")
        return False

    github_api_token = token[0]['github_api_token']

    github_status_url = execute_sql(
        conn, '''
        SELECT github_status_url
        FROM "commit"
        WHERE id = %s
        AND project_id = %s
    ''', [commit_sha, project_id])[0]['github_status_url']

    payload = {
        "state": state,
        "target_url": '%s/dashboard/#/project/%s/build/%s/%s/job/%s' %
                      (dashboard_url, project_name, build_number,
                       build_restart_counter,
                       urllib.quote_plus(job_name).replace('+', '%20')),
        "description": "InfraBox",
        "context": "Job: %s" % job_name
    }

    headers = {
        "Authorization": "token " + github_api_token,
        "User-Agent": "InfraBox"
    }

    # TODO(ib-steffen): support ca bundles
    try:
        r = requests.post(github_status_url,
                          data=json.dumps(payload),
                          headers=headers,
                          timeout=10,
                          verify=False)

        if r.status_code != 201:
            logger.warn("Failed to update github status: %s", r.text)
        else:
            logger.info("Successfully updated github status")
    except Exception as e:
        logger.warn("Failed to update github status: %s", e)
        return False

    return True
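
execute_sql is used throughout Examples #13 and #14 but never defined. Since rows are read by column name (job['state'], project['type']), it presumably wraps a psycopg2 DictCursor, the same cursor factory Example #30 uses directly; a minimal sketch under that assumption:

# Sketch only: assumes execute_sql wraps a DictCursor, which would explain
# the by-name row access above; the real helper may differ.
import psycopg2.extras

def execute_sql(conn, stmt, params=None):
    curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    curs.execute(stmt, params)
    rows = curs.fetchall()
    curs.close()
    return rows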
Example #14
def handle_job_update(conn, update):
    if update['data']['project']['type'] != 'github':
        return

    project_id = update['data']['project']['id']
    job_state = update['data']['job']['state']
    job_id = update['data']['job']['id']
    job_name = update['data']['job']['name']
    commit_sha = update['data']['commit']['id']
    build_id = update['data']['build']['id']
    dashboard_url = get_env('INFRABOX_DASHBOARD_URL')

    # determine github commit state
    state = 'success'
    if job_state in ('scheduled', 'running', 'queued'):
        state = 'pending'

    if job_state in ('failure', 'skipped', 'killed'):
        state = 'failure'

    if job_state == 'error':
        state = 'error'

    logger.info("")
    logger.info("Handle job %s", job_id)
    logger.info("Setting state to %s", state)

    token = execute_sql(
        conn, '''
        SELECT github_api_token FROM "user" u
        INNER JOIN collaborator co
            ON co.owner = true
            AND co.project_id = %s
            AND co.user_id = u.id
    ''', [project_id])

    if not token:
        logger.info("No API token, not updating status")
        return

    github_api_token = token[0][0]

    github_status_url = execute_sql(
        conn, '''
        SELECT github_status_url
        FROM "commit"
        WHERE id = %s
        AND project_id = %s
    ''', [commit_sha, project_id])[0][0]

    payload = {
        "state": state,
        "target_url":  dashboard_url + \
                       '/dashboard/project/' + \
                       project_id + '/build/' + \
                       build_id + '/job/' + job_id,
        "description": "InfraBox",
        "context": "Job: %s" % job_name
    }

    headers = {
        "Authorization": "token " + github_api_token,
        "User-Agent": "InfraBox"
    }

    # TODO(ib-steffen): support ca bundles
    r = requests.post(github_status_url,
                      data=json.dumps(payload),
                      headers=headers,
                      timeout=5,
                      verify=False)

    if r.status_code != 201:
        logger.warn("Failed to update github status: %s", r.text)
    else:
        logger.info("Successfully updated github status")
Example #15
def _upload(self, stream, bucket, key):
    client = gcs.Client(project=get_env('INFRABOX_STORAGE_GCS_PROJECT_ID'))
    bucket = client.get_bucket(bucket)
    blob = bucket.blob(key)
    blob.upload_from_file(stream)
Example #16
def upload_project(self, stream, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_PROJECT_UPLOAD_BUCKET')
    self._upload(stream, bucket, key)
Example #17
def main():  # pragma: no cover
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_DATABASE_DB')

    get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE')
    get_env('INFRABOX_JOB_SECURITY_CONTEXT_CAPABILITIES_ENABLED')

    if get_env('INFRABOX_STORAGE_GCS_ENABLED') == 'true':
        get_env('GOOGLE_APPLICATION_CREDENTIALS')
        get_env('INFRABOX_STORAGE_GCS_BUCKET')

    if get_env('INFRABOX_STORAGE_S3_ENABLED') == 'true':
        get_env('INFRABOX_STORAGE_S3_BUCKET')
        get_env('INFRABOX_STORAGE_S3_REGION')

    app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 1024 * 4
    client_manager = ClientManager()
    sio = flask_socketio.SocketIO(app,
                                  path='/api/v1/socket.io',
                                  async_mode='eventlet',
                                  client_manager=client_manager)

    @sio.on('listen:jobs')
    def __listen_jobs(project_id):
        logger.debug('listen:jobs for %s', project_id)

        if not project_id:
            logger.debug('project_id not set')
            return flask_socketio.disconnect()

        try:
            uuid.UUID(project_id)
        except:
            logger.debug('project_id not a uuid')
            return flask_socketio.disconnect()

        conn = dbpool.get()
        try:
            p = conn.execute_one_dict(
                '''
                    SELECT public
                    FROM project
                    WHERE id = %s
                ''', [project_id])

            if not p['public']:
                token = get_token()
                if token['type'] == 'user':
                    user_id = token['user']['id']
                    collaborator = is_collaborator(user_id,
                                                   project_id,
                                                   db=conn)

                    if not collaborator:
                        logger.warn('not a collaborator')
                        return flask_socketio.disconnect()
                else:
                    logger.debug('only user token allowed')
                    return flask_socketio.disconnect()

        finally:
            dbpool.put(conn)

        flask_socketio.join_room(project_id)

    @sio.on('listen:build')
    def __listen_build(build_id):
        logger.debug('listen:build for %s', build_id)
        token = get_token()

        if not build_id:
            logger.debug('build_id not set')
            return flask_socketio.disconnect()

        try:
            uuid.UUID(build_id)
        except:
            logger.debug('build_id not a uuid')
            return flask_socketio.disconnect()

        conn = dbpool.get()
        try:
            if token['type'] not in ('project', 'project-token'):
                logger.debug('only project token allowed')
                return flask_socketio.disconnect()

            project_id = token['project']['id']

            build = conn.execute_one(
                '''
                SELECT id
                FROM build
                WHERE project_id = %s AND id = %s
            ''', [project_id, build_id])

            if not build:
                logger.debug('build does not belong to project')
                return flask_socketio.disconnect()
        finally:
            dbpool.put(conn)

        flask_socketio.join_room(build_id)

    @sio.on('listen:console')
    def __listen_console(job_id):
        logger.debug('listen:console for %s', job_id)
        token = get_token()

        if not job_id:
            logger.debug('job_id not set')
            return flask_socketio.disconnect()

        try:
            uuid.UUID(job_id)
        except:
            logger.debug('job_id not a uuid')
            return flask_socketio.disconnect()

        conn = dbpool.get()
        try:
            if token['type'] not in ('project', 'project-token'):
                logger.debug('only project token allowed')
                return flask_socketio.disconnect()

            project_id = token['project']['id']

            build = conn.execute_one(
                '''
                SELECT id
                FROM job
                WHERE project_id = %s AND id = %s
            ''', [project_id, job_id])

            if not build:
                logger.debug('job does not belong to project')
                return flask_socketio.disconnect()
        finally:
            dbpool.put(conn)

        flask_socketio.join_room(job_id)

    @sio.on('listen:dashboard-console')
    def __listen_dashboard_console(job_id):
        logger.debug('listen:console for %s', job_id)

        if not job_id:
            logger.debug('job_id not set')
            return flask_socketio.disconnect()

        try:
            uuid.UUID(job_id)
        except:
            logger.debug('job_id not a uuid')
            return flask_socketio.disconnect()

        conn = dbpool.get()
        try:
            u = conn.execute_one_dict(
                '''
                SELECT p.public, j.project_id
                FROM project p
                INNER JOIN job j
                    ON j.project_id = p.id
                    AND j.id = %s
            ''', [job_id])

            if not u:
                logger.warn('job not found')
                return flask_socketio.disconnect()

            if not u['public']:
                token = get_token()
                if token['type'] == 'user':
                    user_id = token['user']['id']
                    collaborator = is_collaborator(user_id,
                                                   u['project_id'],
                                                   db=conn)

                    if not collaborator:
                        logger.warn('not a collaborator')
                        return flask_socketio.disconnect()
                else:
                    logger.debug('only user token allowed')
                    return flask_socketio.disconnect()
        finally:
            dbpool.put(conn)

        flask_socketio.join_room(job_id)

    logger.info('Starting DB listeners')
    sio.start_background_task(listeners.job.listen, sio)
    sio.start_background_task(listeners.console.listen, sio, client_manager)

    port = int(os.environ.get('INFRABOX_PORT', 8080))
    logger.info('Starting Server on port %s', port)
    sio.run(app, host='0.0.0.0', port=port)
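
A hedged client-side sketch for the listeners above, using the python-socketio client package (an assumption; any Socket.IO client would do). The host and project id are placeholders:

# Hypothetical client; host, port and the UUID are placeholders.
import socketio

client = socketio.Client()
client.connect('http://localhost:8080', socketio_path='/api/v1/socket.io')
# subscribe to job updates for a (placeholder) public project
client.emit('listen:jobs', '00000000-0000-0000-0000-000000000000')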
Example #18
def upload_cache(self, stream, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_CONTAINER_CONTENT_CACHE_BUCKET')
    self._upload(stream, bucket, key)
Example #19
import json
import requests

import threading
import time

from pyinfraboxutils import get_logger, get_env
from pyinfraboxutils import dbpool

logger = get_logger('OPA')

OPA_AUTH_URL = "http://%s:%s/v1/data/infrabox/authz" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))
COLLABORATOR_DATA_DEST_URL = "http://%s:%s/v1/data/infrabox/collaborators" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))
PROJECT_DATA_DEST_URL = "http://%s:%s/v1/data/infrabox/projects" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))

exit_flag = 0


def opa_do_auth(input_dict):
    # Send request to Open Policy Agent and evaluate response
    payload = json.dumps(input_dict)
    logger.debug("Sending OPA Request: %s", payload)
    rsp = requests.post(OPA_AUTH_URL, data=payload)
    rsp_dict = rsp.json()
    logger.debug("OPA Response: %s", rsp.content)

    return "result" in rsp_dict and rsp_dict["result"] is True
Example #20
def upload_output(self, stream, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_CONTAINER_OUTPUT_BUCKET')
    self._upload(stream, bucket, key)
Example #21
def main():
    # Arguments
    parser = argparse.ArgumentParser(prog="scheduler.py")
    parser.add_argument("--docker-registry",
                        required=True,
                        type=str,
                        help="Host for the registry to use")
    parser.add_argument("--loglevel",
                        choices=['debug', 'info', 'warning'],
                        help="Log level")
    parser.add_argument("--tag",
                        required=True,
                        type=str,
                        help="Image tag to use for internal images")

    args = parser.parse_args()

    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_ROOT_URL')
    get_env('INFRABOX_DOCKER_REGISTRY_URL')
    get_env('INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES')
    get_env('INFRABOX_GENERAL_WORKER_NAMESPACE')
    get_env('INFRABOX_JOB_MAX_OUTPUT_SIZE')
    get_env('INFRABOX_JOB_MOUNT_DOCKER_SOCKET')
    get_env('INFRABOX_JOB_SECURITY_CONTEXT_CAPABILITIES_ENABLED')

    if get_env('INFRABOX_GERRIT_ENABLED') == 'true':
        get_env('INFRABOX_GERRIT_USERNAME')
        get_env('INFRABOX_GERRIT_HOSTNAME')
        get_env('INFRABOX_GERRIT_PORT')

    # try to read from filesystem
    with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as f:
        args.token = str(f.read()).strip()

    args.api_server = "https://" + get_env('INFRABOX_KUBERNETES_MASTER_HOST') \
                                 + ":" + get_env('INFRABOX_KUBERNETES_MASTER_PORT')

    os.environ['REQUESTS_CA_BUNDLE'] = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'

    conn = connect_db()
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

    start_http_server(8000)

    scheduler = Scheduler(conn, args)
    scheduler.run()
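
With the service-account token and REQUESTS_CA_BUNDLE in place, calls to the Kubernetes API server can be authenticated with a bearer header. An illustrative sketch (the endpoint is an example; the snippet does not show which resources the scheduler actually queries):

# Illustrative: list pods in the worker namespace via the API server set
# up above; REQUESTS_CA_BUNDLE makes requests verify the cluster CA.
import requests

def list_pods(args, namespace):
    url = '%s/api/v1/namespaces/%s/pods' % (args.api_server, namespace)
    headers = {'Authorization': 'Bearer %s' % args.token}
    return requests.get(url, headers=headers, timeout=10).json()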
Example #22
def download_source(self, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_PROJECT_UPLOAD_BUCKET')
    return self._download(bucket, key)
Example #23
def main():
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_GITHUB_WEBHOOK_SECRET')

    connect_db()  # Wait until DB is ready

    install(InfraBoxPostgresPlugin())
    run(host='0.0.0.0', port=8080)
Example #24
def download_output(self, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_CONTAINER_OUTPUT_BUCKET')
    return self._download(bucket, key)
Example #25
def __init__(self, args):
    self.args = args
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("scheduler")
    self.function_controller = FunctionInvocationController(args)
    self.pipeline_controller = PipelineInvocationController(args)
Example #26
def delete_cache(self, key):
    bucket = get_env('INFRABOX_STORAGE_GCS_CONTAINER_CONTENT_CACHE_BUCKET')
    return self._delete(bucket, key)
Example #27
def main():  # pragma: no cover
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_DATABASE_DB')

    from gevent.wsgi import WSGIServer
    http_server = WSGIServer(('', 8081), app, log=logger)
    http_server.serve_forever()
Example #28
def _delete(self, bucket, key):
    client = gcs.Client(project=get_env('INFRABOX_STORAGE_GCS_PROJECT_ID'))
    bucket = client.get_bucket(bucket)
    blob = bucket.blob(key)
    blob.delete()
Example #29
def main():
    # Arguments
    parser = argparse.ArgumentParser(prog="scheduler.py")
    args = parser.parse_args()

    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_CLUSTER_NAME')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_ROOT_URL')
    get_env('INFRABOX_GENERAL_WORKER_NAMESPACE')

    # try to read from filesystem
    with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as f:
        args.token = str(f.read()).strip()

    args.api_server = "https://" + get_env('INFRABOX_KUBERNETES_MASTER_HOST') \
                                 + ":" + get_env('INFRABOX_KUBERNETES_MASTER_PORT')

    os.environ['REQUESTS_CA_BUNDLE'] = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'

    conn = connect_db()
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

    scheduler = Scheduler(conn, args)
    scheduler.run()
Example #30
def handle_job_update(conn, event):
    if event['type'] != 'UPDATE':
        return

    job_id = event['job_id']

    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute(
        '''
        SELECT id, state, name, project_id, build_id
        FROM job
        WHERE id = %s
    ''', [job_id])

    job = c.fetchone()
    c.close()

    if not job:
        return

    project_id = job['project_id']
    build_id = job['build_id']

    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute(
        '''
        SELECT id, name, type
        FROM project
        WHERE id = %s
    ''', [project_id])
    project = c.fetchone()
    c.close()

    if not project:
        return

    if project['type'] != 'gerrit':
        return

    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute(
        '''
        SELECT id, build_number, restart_counter, commit_id
        FROM build
        WHERE id = %s
    ''', [build_id])
    build = c.fetchone()
    c.close()

    project_name = project['name']
    project_id = project['id']
    job_state = job['state']
    job_name = job['name']
    commit_sha = build['commit_id']
    build_id = build['id']
    build_number = build['build_number']
    build_restart_counter = build['restart_counter']

    if job_state in ('queued', 'scheduled', 'running'):
        return

    gerrit_port = int(get_env('INFRABOX_GERRIT_PORT'))
    gerrit_hostname = get_env('INFRABOX_GERRIT_HOSTNAME')
    gerrit_username = get_env('INFRABOX_GERRIT_USERNAME')
    gerrit_key_filename = get_env('INFRABOX_GERRIT_KEY_FILENAME')

    c = conn.cursor()
    c.execute('''
        SELECT root_url
        FROM cluster
        WHERE name = 'master'
    ''')
    dashboard_url = c.fetchone()[0]
    c.close()

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(username=gerrit_username,
                   hostname=gerrit_hostname,
                   port=gerrit_port,
                   key_filename=gerrit_key_filename)
    client.get_transport().set_keepalive(60)

    project_name = urllib.quote_plus(project_name).replace('+', '%20')
    build_url = "%s/dashboard/#/project/%s/build/%s/%s" % (
        dashboard_url, project_name, build_number, build_restart_counter)

    c = conn.cursor()
    c.execute(
        '''
        SELECT count(*)
        FROM build
        WHERE build_number = %s
        AND restart_counter > %s
        AND project_id = %s''',
        [build_number, build_restart_counter, project_id])
    newer_builds = c.fetchone()[0]
    c.close()

    update_vote = True
    if newer_builds > 0:
        update_vote = False

    c = conn.cursor()
    c.execute(
        '''SELECT state, count(*) FROM job WHERE build_id = %s GROUP BY state''',
        [build_id])
    states = c.fetchall()
    c.close()

    vote = None
    if len(states) == 1 and states[0][0] == 'finished':
        # all finished
        vote = "+1"
        message = "Build finished: %s" % build_url
    else:
        for s in states:
            if s[0] in ('running', 'scheduled', 'queued'):
                # still some running
                vote = "0"
                message = "Build running: %s" % build_url
                break
            elif s[0] != 'finished':
                # not successful
                vote = "-1"
                message = "Build failed: %s" % build_url

    if (job_name == 'Create Jobs' and vote == '0') or vote in ('-1', '+1'):
        logger.info('Setting InfraBox=%s for sha=%s', vote, commit_sha)
        cmd = 'gerrit review --project %s -m "%s" ' % (project_name, message)

        if update_vote:
            cmd += '--label InfraBox=%s ' % vote

        cmd += commit_sha
        execute_ssh_cmd(client, cmd)

    client.close()
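
The vote derivation in Example #30 reads as a small pure function over the (state, count) rows: a single 'finished' row gives +1, any pending state anywhere gives 0, and otherwise any non-finished state gives -1. A behavior-equivalent sketch, extracted only for readability:

# Same semantics as the loop above; `states` is the list of (state, count)
# rows from the GROUP BY query.
def derive_vote(states, build_url):
    if len(states) == 1 and states[0][0] == 'finished':
        return '+1', 'Build finished: %s' % build_url
    vote, message = None, None
    for state, _count in states:
        if state in ('running', 'scheduled', 'queued'):
            # a pending state wins: the build is still in progress
            return '0', 'Build running: %s' % build_url
        elif state != 'finished':
            vote, message = '-1', 'Build failed: %s' % build_url
    return vote, message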