def __init__(self, conn, args):
    """Scheduler setup: store connection/args and build the two controllers.

    :param conn: open database connection used by the scheduler
    :param args: parsed command line arguments, forwarded to both controllers
    """
    self.conn = conn
    self.args = args
    # Kubernetes namespace the worker resources live in (from environment)
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("scheduler")
    # Controllers for the two invocation kinds handled by this scheduler
    self.function_controller = FunctionInvocationController(args)
    self.pipeline_controller = PipelineInvocationController(args)
def __init__(self, conn, args):
    """Scheduler setup: store connection/args and set logger verbosity.

    :param conn: open database connection used by the scheduler
    :param args: parsed CLI arguments; ``args.loglevel`` selects verbosity
    """
    self.conn = conn
    self.args = args
    # Kubernetes namespace the worker resources live in (from environment)
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("scheduler")
    # 'debug' enables DEBUG; any other value (including 'info') means INFO.
    # The original had a redundant `elif loglevel == 'info'` branch that set
    # the same level as the final else -- collapsed into a single default.
    if self.args.loglevel == 'debug':
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
def __init__(self, conn, args):
    """HA checker setup: read cluster/HA configuration from the environment.

    :param conn: open database connection
    :param args: parsed command line arguments (stored for later use)
    """
    self.conn = conn
    self.args = args
    self.namespace = get_env("INFRABOX_GENERAL_SYSTEM_NAMESPACE")
    # Interval between health checks (numeric; unit not visible here --
    # presumably seconds, TODO confirm against the check loop)
    self.check_interval = int(get_env('INFRABOX_HA_CHECK_INTERVAL'))
    # NOTE(review): kept as a string, unlike check_interval which is cast
    # to int -- confirm downstream consumers expect a string here.
    self.active_timeout = get_env('INFRABOX_HA_ACTIVE_TIMEOUT')
    self.cluster_name = get_env('INFRABOX_CLUSTER_NAME')
    self.logger = get_logger("checker")
    self.root_url = get_env("INFRABOX_ROOT_URL")
    # Check state: start optimistic; allow up to max_retry_times retries
    # before a failed check is treated as definitive.
    self.is_active = True
    self.check_result = True
    self.retry_times = 0
    self.max_retry_times = 3
def main():
    """Entry point for the HA checker service.

    Validates required environment variables, reads the in-cluster service
    account token, connects to the database in autocommit mode, optionally
    starts a Prometheus metrics endpoint, then runs the Checker loop.
    """
    parser = argparse.ArgumentParser(prog="checker.py")
    args = parser.parse_args()

    # Validate that the required env vars are set (get_env is expected to
    # fail loudly on a missing variable) before doing any real work.
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_CLUSTER_NAME')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    get_env('INFRABOX_ROOT_URL')
    get_env('INFRABOX_HA_CHECK_INTERVAL')
    get_env('INFRABOX_HA_ACTIVE_TIMEOUT')

    urllib3.disable_warnings()

    logger = get_logger("checker_main")

    # Read the in-cluster Kubernetes service account token from the
    # well-known filesystem path.
    with open('/var/run/secrets/kubernetes.io/serviceaccount/token', 'r') as f:
        args.token = str(f.read()).strip()

    kube_apiserver_host = get_env('INFRABOX_KUBERNETES_MASTER_HOST')
    kube_apiserver_port = get_env('INFRABOX_KUBERNETES_MASTER_PORT')
    args.api_server = "https://" + kube_apiserver_host + ":" + kube_apiserver_port

    conn = connect_db()
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

    is_monitoring_enabled = get_env("INFRABOX_MONITORING_ENABLED") == 'true'
    if is_monitoring_enabled:
        logger.info("Monitoring enabled. Starting HTTP server for metrics")
        # BUG FIX: os.environ.get returns a *string* when INFRABOX_PORT is
        # set; start_http_server needs an int port, so cast explicitly.
        server_port = int(os.environ.get('INFRABOX_PORT', 8080))
        start_http_server(server_port)

    checker = Checker(conn, args)
    checker.run()
def __init__(self, conn, args):
    """Checker setup: read HA/monitoring config and create error counters.

    :param conn: open database connection
    :param args: parsed command line arguments (stored for later use)
    """
    self.conn = conn
    self.args = args
    self.ha_enabled = get_env("INFRABOX_HA_ENABLED")
    self.monitoring_enabled = get_env("INFRABOX_MONITORING_ENABLED")
    self.namespace = get_env("INFRABOX_GENERAL_SYSTEM_NAMESPACE")
    self.check_interval = int(get_env('INFRABOX_HA_CHECK_INTERVAL'))
    # NOTE(review): kept as a string, unlike check_interval which is cast
    # to int -- confirm downstream consumers expect a string here.
    self.active_timeout = get_env('INFRABOX_HA_ACTIVE_TIMEOUT')
    self.cluster_name = get_env('INFRABOX_CLUSTER_NAME')
    self.logger = get_logger("checker")
    self.root_url = get_env("INFRABOX_ROOT_URL")
    # Health state: start healthy; allow up to max_retry_times retries
    # before a failed check is treated as definitive.
    self.is_cluster_healthy = True
    self.retry_times = 0
    self.max_retry_times = 3
    # Prometheus error counters. Help-text typos fixed:
    # 'uploding' -> 'uploading', 'acessing' -> 'accessing'.
    self.infrabox_api_call_errors = Counter(
        'infrabox_api_errors_total',
        'Errors in requests to InfraBox API')
    self.storage_checker_errors = Counter(
        'storage_checker_errors_total',
        'Errors uploading/downloading files to/from storage')
    self.infrabox_dashboard_access_errors = Counter(
        'infrabox_dashboard_access_errors_total',
        'Errors accessing dashboard')
import base64
from functools import wraps
from flask import Flask, g, jsonify, request, abort
from pyinfraboxutils import get_logger
from pyinfraboxutils.db import DB, connect_db
from pyinfraboxutils.token import decode

app = Flask(__name__)
app.url_map.strict_slashes = False
logger = get_logger('ibflask')

def get_token():
    # The token may arrive either as an Authorization header or as a
    # 'token' cookie; the header is inspected first.
    auth = dict(request.headers).get('Authorization', None)
    cookie = request.cookies.get('token', None)
    if auth:
        if auth.startswith("Basic "):
            auth = auth.split(" ")[1]
            try:
                decoded = base64.b64decode(auth)
            except:
                # NOTE(review): bare `except:` swallows every exception
                # (including KeyboardInterrupt); also `logger.warn` is the
                # deprecated alias of `logger.warning`.
                logger.warn('could not base64 decode auth header')
                abort(401, 'Unauthorized')
            # NOTE(review): on Python 3, b64decode returns *bytes*, so
            # splitting on the str 'infrabox:' would raise TypeError --
            # confirm this service runs on Python 2 or decode first.
            s = decoded.split('infrabox:')
import os from pyinfraboxutils import get_logger, get_env, print_stackdriver from pyinfraboxutils.db import connect_db logger = get_logger("migrate") def get_sql_files(current_schema_version): dir_path = os.path.dirname(os.path.realpath(__file__)) migration_path = os.path.join(dir_path, 'migrations') files = [ f for f in os.listdir(migration_path) if os.path.isfile(os.path.join(migration_path, f)) ] files.sort(key=lambda f: int(f[:-4])) files = files[current_schema_version:] return [(os.path.join(migration_path, f), int(f[:-4])) for f in files] def apply_migration(conn, migration): logger.info("Starting to apply migration %s", migration[1]) with open(migration[0]) as sql_file: sql = sql_file.read().strip() cur = conn.cursor() if sql: cur.execute(sql)
import time from pyinfraboxutils import get_logger, get_env from pyinfraboxutils import dbpool from pyinfraboxutils.storage import storage logger = get_logger("gc") class GC(object): def run(self): # TODO: Delete storage objects: uploads, outputs # TODO: Delete images from registry while True: db = dbpool.get() try: logger.info('Starting next GC run') self._gc(db) logger.info('Finished GC run') logger.info('') except Exception as e: logger.exception(e) finally: dbpool.put(db) time.sleep(3600) def _gc(self, db): self._gc_job_console_output(db) self._gc_job_output(db)
import os import time import sys from pyinfraboxutils import get_logger logger = get_logger('infrabox') def _is_leader(conn, service_name, cluster_name=None): if os.environ.get('INFRABOX_DISABLE_LEADER_ELECTION', 'false') == 'true': return True conn.rollback() c = conn.cursor() c.execute( """ INSERT INTO leader_election (service_name, cluster_name, last_seen_active) VALUES (%s, %s, now()) ON CONFLICT (service_name) DO UPDATE SET cluster_name = CASE WHEN leader_election.last_seen_active < now() - interval '30 second' THEN EXCLUDED.cluster_name ELSE leader_election.cluster_name END, last_seen_active = CASE WHEN leader_election.cluster_name = EXCLUDED.cluster_name THEN EXCLUDED.last_seen_active ELSE leader_election.last_seen_active END RETURNING service_name, cluster_name; """, [service_name, cluster_name]) r = c.fetchone()
import uuid import os import requests from flask import g, request, abort, redirect from flask_restplus import Resource from pyinfraboxutils import get_logger, get_root_url from pyinfraboxutils.ibrestplus import api from pyinfraboxutils.token import encode_user_token logger = get_logger('github') GITHUB_CLIENT_ID = os.environ['INFRABOX_GITHUB_CLIENT_ID'] GITHUB_CLIENT_SECRET = os.environ['INFRABOX_GITHUB_CLIENT_SECRET'] GITHUB_AUTHORIZATION_URL = os.environ[ 'INFRABOX_GITHUB_LOGIN_URL'] + "/oauth/authorize" GITHUB_TOKEN_URL = os.environ[ 'INFRABOX_GITHUB_LOGIN_URL'] + "/oauth/access_token" GITHUB_USER_PROFILE_URL = os.environ['INFRABOX_GITHUB_API_URL'] + "/user" GITHUB_CALLBACK_URL = get_root_url('global') + "/github/auth/callback" # TODO(ib-steffen): move into DB states = {} def get_next_page(r): link = r.headers.get('Link', None) if not link:
import time import os import psycopg2 from eventlet.db_pool import ConnectionPool from pyinfraboxutils.db import DB from pyinfraboxutils import get_logger logger = get_logger('dbpool') POOL = ConnectionPool(psycopg2, dbname=os.environ['INFRABOX_DATABASE_DB'], user=os.environ['INFRABOX_DATABASE_USER'], password=os.environ['INFRABOX_DATABASE_PASSWORD'], host=os.environ['INFRABOX_DATABASE_HOST'], port=os.environ['INFRABOX_DATABASE_PORT'], min_size=0, max_size=10) def get(): conn = POOL.get() return DB(conn) def put(db): try: db.rollback() except Exception as e: logger.exception(e)
import json
import requests
import threading
import time

from pyinfraboxutils import get_logger, get_env
from pyinfraboxutils import dbpool

logger = get_logger('OPA')

# Open Policy Agent endpoints, built once from the environment
OPA_AUTH_URL = "http://%s:%s/v1/data/infrabox/allow" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))
COLLABORATOR_DATA_DEST_URL = "http://%s:%s/v1/data/infrabox/collaborators" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))
PROJECT_DATA_DEST_URL = "http://%s:%s/v1/data/infrabox/projects" % (
    get_env('INFRABOX_OPA_HOST'), get_env('INFRABOX_OPA_PORT'))

# Flag used to signal background push loops to stop
exit_flag = 0

def opa_do_auth(input_dict):
    """Ask Open Policy Agent whether the request described by input_dict
    is allowed.

    :param input_dict: OPA "input" document (JSON-serializable dict)
    :return: True iff the OPA response contains ``"result": true``
    """
    # Send request to Open Policy Agent and evaluate response.
    payload = json.dumps(input_dict)
    logger.debug("Sending OPA Request: %s", payload)
    # Reuse the already-serialized payload instead of serializing the
    # dict a second time (the original called json.dumps twice).
    rsp = requests.post(OPA_AUTH_URL, data=payload)
    rsp_dict = rsp.json()
    logger.debug("OPA Response: %s", rsp.content)
    return "result" in rsp_dict and rsp_dict["result"] is True
import json
import hashlib
import hmac
import requests
from pyinfraboxutils import get_env, get_logger
from pyinfraboxutils.ibbottle import InfraBoxPostgresPlugin
from pyinfraboxutils.db import connect_db
from bottle import post, run, request, response, install, get

logger = get_logger("gitlab")

def remove_ref(ref):
    # Drop the first two path components of a git ref, e.g.
    # 'refs/heads/master' -> 'master'.
    return "/".join(ref.split("/")[2:])

def res(status, message):
    # Set the bottle HTTP response status and return a JSON body.
    response.status = status
    return {"message": message}

def get_commits(url, token):
    # NOTE(review): this builds a *set* containing one concatenated string,
    # not a header mapping -- almost certainly a bug. It should presumably
    # be {"Private-Token": token}; verify against the GitLab API docs.
    headers = {"Private-Token:" + token}
    # TLS verification is disabled here (verify=False) -- deliberate?
    r = requests.get(url, headers=headers, verify=False)
    result = []
    result.extend(r.json())
from flask import g, jsonify import eventlet from eventlet import wsgi eventlet.monkey_patch() from pyinfraboxutils import get_env, get_logger from pyinfraboxutils.ibflask import app logger = get_logger('state') app.config['OPA_ENABLED'] = False @app.route('/') def ping(): return jsonify({}) @app.route('/<cluster_name>') def s(cluster_name): status = g.db.execute_one_dict(""" SELECT active, enabled FROM cluster WHERE name = %s """, [cluster_name]) if not status: return jsonify({}), 404 if not status['active'] or not status['enabled']: return jsonify(status), 503
import json from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from eventlet.hubs import trampoline from pyinfraboxutils.db import connect_db from pyinfraboxutils import dbpool from pyinfraboxutils import get_logger logger = get_logger('console_listener') def __handle_event(event, socketio, client_manager): job_id = event['job_id'] console_id = event['id'] if not client_manager.has_clients(job_id): return logger.info('start console %s', console_id) conn = dbpool.get() try: r = conn.execute_one( ''' SELECT output FROM console WHERE id = %s ''', [console_id]) logger.info('retrived console %s', console_id) if not r: return
def __init__(self, conn, args):
    """Store the DB connection, CLI args, worker namespace and logger.

    :param conn: open database connection
    :param args: parsed command line arguments
    """
    self.conn = conn
    self.args = args
    # Kubernetes namespace the worker resources live in (from environment)
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("scheduler")
import time import os import requests from prometheus_client import start_http_server, Gauge from pyinfraboxutils import get_env, get_logger, print_stackdriver from pyinfraboxutils.leader import elect_leader from pyinfraboxutils.db import connect_db logger = get_logger("stats") # pylint: disable=no-value-for-parameter, no-member JOBS_RUNNING = Gauge('infrabox_jobs_running', 'Jobs with state running') JOBS_SCHEDULED = Gauge('infrabox_jobs_scheduled', 'Jobs with state scheduled') JOBS_QUEUED = Gauge('infrabox_jobs_queued', 'Jobs with state queued') JOBS_FINISHED = Gauge('infrabox_jobs_finished', 'Jobs with state finished') JOBS_KILLED = Gauge('infrabox_jobs_killed', 'Jobs with state killed') JOBS_ERROR = Gauge('infrabox_jobs_error', 'Jobs with state error') JOBS_FAILURE = Gauge('infrabox_jobs_failure', 'Jobs with state failure') JOBS_SKIPPED = Gauge('infrabox_jobs_skipped', 'Jobs with state skipped') def get_jobs(conn): cursor = conn.cursor() cursor.execute('SELECT count(*), state FROM job GROUP BY state') result = cursor.fetchall() cursor.close() JOBS_RUNNING.set(0) JOBS_SCHEDULED.set(0) JOBS_QUEUED.set(0) JOBS_FINISHED.set(0)
import eventlet from eventlet import wsgi eventlet.monkey_patch() from flask import jsonify from pyinfraboxutils import get_env, get_logger from pyinfraboxutils.ibflask import app from pyinfraboxutils.db import DB, connect_db from pyinfraboxutils.ibopa import opa_start_push_loop import psycopg2 import psycopg2.extensions logger = get_logger('docker-registry-auth') app.config['OPA_ENABLED'] = True @app.route('/ping') def ping(): return jsonify({'status': 200}) @app.route('/v2/') # prevent 301 redirects @app.route('/v2') def v2(): # Authorization in src/openpolicyagent/policies/docker-registry-auth.rego return jsonify({'status': 200})
import json import hashlib import hmac import requests from pyinfraboxutils import get_env, get_logger from pyinfraboxutils.ibbottle import InfraBoxPostgresPlugin from pyinfraboxutils.db import connect_db from bottle import post, run, request, response, install, get logger = get_logger("github") def res(status, message): response.status = status return {"message": message} def remove_ref(ref): return "/".join(ref.split("/")[2:]) def get_next_page(r): link = r.headers.get('Link', None) if not link: return None n1 = link.find('rel=\"next\"') if not n1: return None
import json import select import psycopg2 import paramiko from pyinfraboxutils import get_logger, get_env, print_stackdriver from pyinfraboxutils.db import connect_db from pyinfraboxutils.leader import elect_leader, is_leader logger = get_logger("gerrit") def main(): get_env('INFRABOX_SERVICE') get_env('INFRABOX_VERSION') get_env('INFRABOX_DATABASE_DB') get_env('INFRABOX_DATABASE_USER') get_env('INFRABOX_DATABASE_PASSWORD') get_env('INFRABOX_DATABASE_HOST') get_env('INFRABOX_DATABASE_PORT') get_env('INFRABOX_GERRIT_PORT') get_env('INFRABOX_GERRIT_HOSTNAME') get_env('INFRABOX_GERRIT_USERNAME') get_env('INFRABOX_GERRIT_KEY_FILENAME') get_env('INFRABOX_ROOT_URL') conn = connect_db() conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) logger.info("Connected to database")
import os import requests from flask import g, abort, request from flask_restplus import Resource, fields from pyinfraboxutils import get_logger from pyinfrabox.utils import validate_uuid4 from pyinfraboxutils.ibrestplus import api from pyinfraboxutils.ibflask import auth_required, OK from api.namespaces import project as ns logger = get_logger('project') project_model = api.model( 'Project', { 'id': fields.String(required=True), 'name': fields.String(required=True), 'type': fields.String(required=True), 'public': fields.String(required=True) }) add_project_schema = { 'type': "object", 'properties': { 'name': { 'type': "string", 'pattern': r"^[0-9a-zA-Z_\-/]+$", "minLength": 3 },
from datetime import datetime from minio import Minio import jwt from flask import Flask, jsonify, request, send_file from pyinfrabox.badge import validate_badge from pyinfrabox.markup import validate_markup from pyinfrabox.testresult import validate_result from pyinfrabox import ValidationError from pyinfraboxutils import get_logger, get_env from pyinfraboxutils.db import connect_db logger = get_logger('job-api') app = Flask(__name__) def execute_many(stmt, args): c = conn.cursor() c.execute(stmt, args) r = c.fetchall() c.close() return r def execute_one(stmt, args): r = execute_many(stmt, args)
import json from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from eventlet.hubs import trampoline from pyinfraboxutils import get_logger from pyinfraboxutils.db import connect_db from pyinfraboxutils import dbpool logger = get_logger('job_listener') def __handle_event(event, socketio): job_id = event['job_id'] db = dbpool.get() try: job = db.execute_one_dict( ''' SELECT id, state, to_char(start_date, 'YYYY-MM-DD HH24:MI:SS') start_date, type, dockerfile, to_char(end_date, 'YYYY-MM-DD HH24:MI:SS') end_date, name, cpu, memory, dependencies, to_char(created_at, 'YYYY-MM-DD HH24:MI:SS') created_at, message, project_id, build_id FROM job WHERE id = %s ''', [job_id]) if not job: return
import socketio from flask import jsonify from pyinfraboxutils import get_env, print_stackdriver, get_logger from pyinfraboxutils.ibrestplus import app, api from pyinfraboxutils.ibflask import get_token, is_collaborator from pyinfraboxutils import dbpool # TODO: do it the same way in api import dashboard_api.handlers import dashboard_api.listeners.job import dashboard_api.listeners.console logger = get_logger('dashboard-api') # TODO: Move to common class ClientManager(socketio.base_manager.BaseManager): def __init__(self): super(ClientManager, self).__init__() self.__rooms = {} def enter_room(self, sid, namespace, room): super(ClientManager, self).enter_room(sid, namespace, room) logger.debug('%s joined room %s', sid, room) if room not in self.__rooms: self.__rooms[room] = 0
#pylint: disable=wrong-import-position import subprocess import os import traceback from gevent.wsgi import WSGIServer from flask import Flask, request from flask_restplus import Api, Resource, fields from pyinfraboxutils import print_stackdriver, get_logger app = Flask(__name__) api = Api(app) logger = get_logger('api') ns = api.namespace('/', description='Clone repo') @ns.route('/ping') class Ping(Resource): def get(self): return {'status': 200} clone_model = api.model( 'Clone', { 'commit': fields.String(required=True, description='Commit'), 'clone_url': fields.String(required=True, description='Clone URL'), 'branch': fields.String(required=False, description='Branch'), 'ref': fields.String(required=False, description='Ref'),
from urlparse import urlparse from flask import g, request, abort, redirect, make_response from flask_restx import Resource from onelogin.saml2.auth import OneLogin_Saml2_Auth from onelogin.saml2.utils import OneLogin_Saml2_Utils from pyinfraboxutils import get_logger, get_root_url, get_env from pyinfraboxutils.ibrestplus import api from pyinfraboxutils.token import encode_user_token logger = get_logger("saml") get_env("INFRABOX_ACCOUNT_SAML_SETTINGS_PATH") get_env("INFRABOX_ACCOUNT_SAML_EMAIL_FORMAT") get_env("INFRABOX_ACCOUNT_SAML_NAME_FORMAT") get_env("INFRABOX_ACCOUNT_SAML_USERNAME_FORMAT") def init_saml_auth(): parsed_url = urlparse(request.url) request_data = { "https": "on" if request.scheme == "https" else "off", "http_host": request.host, "server_port": parsed_url.port, "script_name": request.path, "get_data": request.args.copy(), "post_data": request.form.copy(), "query_string": request.query_string
def __init__(self, args, resource):
    """Controller setup: remember args/resource and create a logger.

    :param args: parsed command line arguments
    :param resource: descriptor of the resource this controller manages
        (opaque here -- stored for later use)
    """
    self.args = args
    # Kubernetes namespace the worker resources live in (from environment)
    self.namespace = get_env("INFRABOX_GENERAL_WORKER_NAMESPACE")
    self.logger = get_logger("controller")
    self.resource = resource
from flask_restplus import Resource, fields import ldap import bcrypt from pyinfraboxutils import get_logger from pyinfraboxutils.ibflask import OK from pyinfraboxutils.token import encode_user_token from pyinfraboxutils.ibrestplus import api, response_model login_model = api.model('Login', { 'email': fields.String(required=True), 'password': fields.String(required=True), }) logger = get_logger('ldap') def authenticate(email, password): ldap_server = os.environ['INFRABOX_ACCOUNT_LDAP_URL'] ldap_user = os.environ['INFRABOX_ACCOUNT_LDAP_DN'] ldap_password = os.environ['INFRABOX_ACCOUNT_LDAP_PASSWORD'] ldap_base_dn = os.environ['INFRABOX_ACCOUNT_LDAP_BASE'] search_filter = "(mail=%s)" % str(email) user_dn = None connect = ldap.initialize(ldap_server) try: connect.bind_s(ldap_user, ldap_password) result = connect.search_s(ldap_base_dn, ldap.SCOPE_SUBTREE, search_filter, attrlist=['dn'])
#pylint: disable=superfluous-parens import logging import time import os import sys import subprocess import shutil import psycopg2 import psycopg2.extensions from pyinfraboxutils import get_logger from pyinfraboxutils.db import connect_db from pyinfraboxutils.token import encode_job_token logger = get_logger('scheduler') def execute(command): logger.info(command) process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) # Poll process for new output until finished while True: line = process.stdout.readline() if not line: break
import bcrypt from pyinfraboxutils import get_logger from pyinfraboxutils.ibflask import OK from pyinfraboxutils.ibrestplus import api from pyinfraboxutils.token import encode_user_token from dashboard_api.namespaces import account as ns login_model = api.model( 'Login', { 'email': fields.String(required=True), 'password': fields.String(required=True), }) logger = get_logger('login') @ns.route('/login') class Login(Resource): @api.expect(login_model) def post(self): b = request.get_json() email = b['email'] password = b['password'] user = g.db.execute_one_dict( ''' SELECT id, password FROM "user"