Example #1
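A Flask handler that stores a job's output.tar.gz archive and then forwards it to every other active cluster where a queued child job depends on this job.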
    def post(self):
        job_id = g.token['job']['id']

        key = "%s.tar.gz" % job_id
        key = key.replace('/', '_')

        stream = request.files['output.tar.gz'].stream

        # determine all queued children of this job's build
        jobs = g.db.execute_many_dict(
            '''
            SELECT cluster_name, dependencies
            FROM job
            WHERE build_id = (SELECT build_id FROM job WHERE id = %s)
            AND state = 'queued'
        ''', [job_id])

        clusters = set()

        for j in jobs:
            dependencies = j.get('dependencies', None)

            if not dependencies:
                continue

            for dep in dependencies:
                if dep['job-id'] != job_id:
                    continue

                clusters.add(j['cluster_name'])

        clusters = g.db.execute_many_dict(
            '''
            SELECT root_url
            FROM cluster
            WHERE active = true
            AND name = ANY (%s)
            AND name != %s
        ''', [list(clusters), os.environ['INFRABOX_CLUSTER_NAME']])

        g.release_db()

        storage.upload_output(stream, key)

        for c in clusters:
            stream.seek(0)
            url = '%s/api/job/output' % c['root_url']
            files = {'output.tar.gz': stream}
            token = encode_job_token(job_id)
            headers = {'Authorization': 'bearer ' + token}
            r = requests.post(url, files=files, headers=headers, timeout=120)

            if r.status_code != 200:
                abort(500, "Failed to upload data")

        return jsonify({})
Example #2
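Runs a job locally via docker run for the docker-compose setup, passing the job token and the resource limits to the container as INFRABOX_* environment variables.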
    def kube_job(self, job_id, _build_id, cpu, memory, _job_type):
        # repo_dir is also hard-coded in docker-compose.yml for job-git
        repo_dir = '/tmp/infrabox-compose/repo'
        clear_dir(repo_dir)

        token = encode_job_token(job_id)

        prefix = os.environ.get('INFRABOX_DOCKER_COMPOSE_PROJECT_PREFIX',
                                'compose')

        cmd = [
            'docker', 'run', '--rm', '-e',
            "INFRABOX_JOB_ID=%s" % job_id, '-e',
            "INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES=true", '-e',
            "INFRABOX_JOB_API_URL=http://nginx-ingress/api/job", '-e',
            "INFRABOX_JOB_GIT_URL=http://job-git:8080", '-e',
            "INFRABOX_SERVICE=job", '-e', "INFRABOX_VERSION=latest", '-e',
            "INFRABOX_DOCKER_REGISTRY_URL=localhost:8090", '-e',
            "INFRABOX_LOCAL_CACHE_ENABLED=false", '-e',
            "INFRABOX_JOB_MAX_OUTPUT_SIZE=%s" %
            os.environ['INFRABOX_JOB_MAX_OUTPUT_SIZE'], '-e',
            "INFRABOX_ROOT_URL=http://localhost", '-e',
            "INFRABOX_JOB_MOUNT_DOCKER_SOCKET=false", '-e',
            "INFRABOX_JOB_TOKEN=%s" % token, '-e',
            "INFRABOX_JOB_DAEMON_JSON=%s" % self.daemon_json, '-e',
            "INFRABOX_JOB_REPO_MOUNT_PATH=%s" % repo_dir, '-e',
            "INFRABOX_JOB_RESOURCES_LIMITS_MEMORY=%s" % memory, '-e',
            "INFRABOX_JOB_RESOURCES_LIMITS_CPU=%s" % cpu, '--privileged',
            '--network=%s_infrabox' % prefix, '-v',
            '/var/run/docker.sock:/var/run/docker.sock', '-v',
            '/tmp/infrabox-compose/repo:/tmp/infrabox-compose/repo',
            '--name=ib-job-%s' % job_id,
            '--link=%s_nginx-ingress_1:nginx-ingress' % prefix,
            os.environ['INFRABOX_DOCKER_REGISTRY'] +
            '/job:%s' % os.environ['INFRABOX_JOB_VERSION']
        ]

        execute(cmd)
Example #3
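Submits an IBPipelineInvocation custom resource to the Kubernetes API server, injecting the job token into the step environment and annotating any attached services with the job id, job token, and root URL.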
    def kube_job(self, job_id, cpu, mem, services=None):
        h = {'Authorization': 'Bearer %s' % self.args.token}

        job_token = encode_job_token(job_id).decode()

        env = [{
            'name': 'INFRABOX_JOB_ID',
            'value': job_id
        }, {
            'name': 'INFRABOX_JOB_TOKEN',
            'value': job_token
        }, {
            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_MEMORY',
            'value': str(mem)
        }, {
            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_CPU',
            'value': str(cpu)
        }]

        root_url = os.environ['INFRABOX_ROOT_URL']

        if services:
            for s in services:
                if 'annotations' not in s['metadata']:
                    s['metadata']['annotations'] = {}

                s['metadata']['annotations']['infrabox.net/job-id'] = job_id
                s['metadata']['annotations'][
                    'infrabox.net/job-token'] = job_token
                s['metadata']['annotations'][
                    'infrabox.net/root-url'] = root_url

        job = {
            'apiVersion': 'core.infrabox.net/v1alpha1',
            'kind': 'IBPipelineInvocation',
            'metadata': {
                'name': job_id
            },
            'spec': {
                'pipelineName': 'infrabox-default-pipeline',
                'services': services,
                'steps': {
                    'run': {
                        'resources': {
                            'limits': {
                                'memory': '%sMi' % mem,
                                'cpu': cpu
                            }
                        },
                        'env': env,
                    }
                }
            }
        }

        r = requests.post(
            self.args.api_server +
            '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibpipelineinvocations'
            % self.namespace,
            headers=h,
            json=job,
            timeout=10)

        if r.status_code != 201:
            self.logger.warning(r.text)
            return False

        return True
Example #4
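Like Example #3, but first looks up the project's git credentials: the private key of a GitHub repository from the database, or the Gerrit SSH key from disk, and passes them to the job as additional environment variables.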
    def kube_job(self, job_id, cpu, mem, services=None):
        h = {'Authorization': 'Bearer %s' % self.args.token}

        job_token = encode_job_token(job_id).decode()

        env = [{
            'name': 'INFRABOX_JOB_ID',
            'value': job_id
        }, {
            'name': 'INFRABOX_JOB_TOKEN',
            'value': job_token
        }, {
            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_MEMORY',
            'value': str(mem)
        }, {
            'name': 'INFRABOX_JOB_RESOURCES_LIMITS_CPU',
            'value': str(cpu)
        }]

        # Get ssh key for private repos
        cursor = self.conn.cursor()
        cursor.execute(
            '''
            SELECT p.type, p.id
            FROM project p
            JOIN job j
            ON j.project_id = p.id
            WHERE j.id = %s
        ''', [job_id])
        result = cursor.fetchone()
        cursor.close()

        project_type = result[0]
        project_id = result[1]

        private_key = None
        if project_type == 'github':
            cursor = self.conn.cursor()
            cursor.execute(
                '''
                SELECT r.private_key
                FROM repository r
                WHERE r.project_id = %s
            ''', [project_id])
            result = cursor.fetchone()
            cursor.close()
            private_key = result[0]

            env += [{
                'name': 'INFRABOX_GIT_PORT',
                'value': '443'
            }, {
                'name': 'INFRABOX_GIT_HOSTNAME',
                'value': 'github.com'
            }, {
                'name': 'INFRABOX_GIT_PRIVATE_KEY',
                'value': private_key
            }]
        elif project_type == 'gerrit':
            with open('/tmp/gerrit/id_rsa') as key:
                env += [{
                    'name': 'INFRABOX_GIT_PORT',
                    'value': os.environ['INFRABOX_GERRIT_PORT']
                }, {
                    'name': 'INFRABOX_GIT_HOSTNAME',
                    'value': os.environ['INFRABOX_GERRIT_HOSTNAME']
                }, {
                    'name': 'INFRABOX_GIT_PRIVATE_KEY',
                    'value': key.read()
                }]

        root_url = os.environ['INFRABOX_ROOT_URL']

        if services:
            for s in services:
                if 'annotations' not in s['metadata']:
                    s['metadata']['annotations'] = {}

                s['metadata']['annotations']['infrabox.net/job-id'] = job_id
                s['metadata']['annotations'][
                    'infrabox.net/job-token'] = job_token
                s['metadata']['annotations'][
                    'infrabox.net/root-url'] = root_url

        job = {
            'apiVersion': 'core.infrabox.net/v1alpha1',
            'kind': 'IBPipelineInvocation',
            'metadata': {
                'name': job_id
            },
            'spec': {
                'pipelineName': 'infrabox-default-pipeline',
                'services': services,
                'steps': {
                    'run': {
                        'resources': {
                            'limits': {
                                'memory': '%sMi' % mem,
                                'cpu': cpu
                            }
                        },
                        'env': env,
                    }
                }
            }
        }

        r = requests.post(
            self.args.api_server +
            '/apis/core.infrabox.net/v1alpha1/namespaces/%s/ibpipelineinvocations'
            % self.namespace,
            headers=h,
            json=job,
            timeout=10)

        if r.status_code != 201:
            self.logger.warning(r.text)
            return False

        return True
Example #5
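A small helper that builds an Authorization header from a freshly encoded job API token.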
def get_job_authorization(job_id):  # pragma: no cover
    job_api_token = encode_job_token(job_id)
    h = {'Authorization': 'token %s' % job_api_token}
    return h
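
For illustration, a hedged usage sketch of this helper; the endpoint, filename, and IDs below are hypothetical, not taken from the examples on this page:

    import requests

    job_id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'         # hypothetical job id
    parent_job_id = '11111111-2222-3333-4444-555555555555'  # hypothetical parent job
    # Hypothetical cluster URL; in the handlers on this page the real root_url
    # comes from the cluster table.
    url = 'https://infrabox.example.com/api/job/output/%s?filename=result.json' % parent_job_id
    r = requests.get(url, headers=get_job_authorization(job_id), timeout=120)
    r.raise_for_status()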
Example #6
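A generalization of Example #1 that forwards every uploaded file; note that the handler returns from inside the file loop, so only the first uploaded file is actually processed.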
    def post(self):
        job_id = g.token['job']['id']

        for f, _ in request.files.items():
            key = "%s/%s" % (job_id, f)

            stream = request.files[f].stream

            # determine all queued children of this job's build
            jobs = g.db.execute_many_dict('''
                SELECT cluster_name, dependencies
                FROM job
                WHERE build_id = (SELECT build_id FROM job WHERE id = %s)
                AND state = 'queued'
            ''', [job_id])

            current_cluster = g.db.execute_one_dict('''
                SELECT cluster_name
                FROM job
                WHERE id = %s
            ''', [job_id])['cluster_name']

            clusters = set()

            for j in jobs:
                dependencies = j.get('dependencies', None)

                if not dependencies:
                    continue

                for dep in dependencies:
                    if dep['job-id'] != job_id:
                        continue

                    clusters.add(j['cluster_name'])

            clusters = g.db.execute_many_dict('''
                SELECT root_url
                FROM cluster
                WHERE active = true
                AND enabled = true
                AND name = ANY (%s)
                AND name != %s
                AND name != %s
            ''', [list(clusters), os.environ['INFRABOX_CLUSTER_NAME'], current_cluster])

            g.release_db()

            storage.upload_output(stream, key)

            for c in clusters:
                stream.seek(0)
                url = '%s/api/job/output' % c['root_url']
                files = {f: stream}
                token = encode_job_token(job_id)
                headers = {'Authorization': 'bearer ' + token}
                r = requests.post(url, files=files, headers=headers, timeout=120, verify=False)

                if r.status_code != 200:
                    app.logger.error(r.text)
                    abort(500, "Failed to upload data")

            return jsonify({})
Example #7
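Builds and submits a Kubernetes batch/v1 Job with a privileged run-job container and a git-clone sidecar, mounting the host docker socket, the local cache, and the Gerrit SSH secret as needed.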
    def kube_job(self, job_id, build_id, cpu, mem, job_type, additional_env=None):
        h = {'Authorization': 'Bearer %s' % self.args.token}
        volumes = [{
            "name": "data-dir",
            "emptyDir": {}
        }, {
            "name": "repo",
            "emptyDir": {}
        }]

        volume_mounts = [{
            "mountPath": "/data",
            "name": "data-dir"
        }, {
            "mountPath": "/repo",
            "name": "repo"
        }]

        env = [{
            "name": "INFRABOX_JOB_ID",
            "value": job_id
        }, {
            "name": "INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES",
            "value": os.environ['INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES']
        }, {
            "name": "INFRABOX_JOB_API_URL",
            "value": os.environ['INFRABOX_JOB_API_URL']
        }, {
            "name": "INFRABOX_JOB_GIT_URL",
            "value": "http://localhost:8080"
        }, {
            "name": "INFRABOX_SERVICE",
            "value": "job"
        }, {
            "name": "INFRABOX_VERSION",
            "value": self.args.tag
        }, {
            "name": "INFRABOX_DOCKER_REGISTRY_URL",
            "value": os.environ['INFRABOX_DOCKER_REGISTRY_URL']
        }, {
            "name": "INFRABOX_LOCAL_CACHE_ENABLED",
            "value": os.environ['INFRABOX_LOCAL_CACHE_ENABLED']
        }, {
            "name": "INFRABOX_JOB_MAX_OUTPUT_SIZE",
            "value": os.environ['INFRABOX_JOB_MAX_OUTPUT_SIZE']
        }, {
            "name": "INFRABOX_JOB_MOUNT_DOCKER_SOCKET",
            "value": os.environ['INFRABOX_JOB_MOUNT_DOCKER_SOCKET']
        }, {
            "name": "INFRABOX_JOB_DAEMON_JSON",
            "value": self.daemon_json
        }, {
            "name": "INFRABOX_DASHBOARD_URL",
            "value": os.environ['INFRABOX_DASHBOARD_URL']
        }, {
            "name": "INFRABOX_JOB_TOKEN",
            "value": encode_job_token(job_id).decode()
        }]

        if additional_env:
            env += additional_env

        if use_host_docker_daemon():
            volumes.append({
                "name": "docker-socket",
                "hostPath": {
                    "path": "/var/run/docker.sock",
                    "type": "Socket"
                }
            })

            volume_mounts.append({
                "mountPath": "/var/run/docker.sock",
                "name": "docker-socket"
            })

        if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
            volumes.append({
                "name": "local-cache",
                "hostPath": {
                    "path": os.environ['INFRABOX_LOCAL_CACHE_HOST_PATH']
                }
            })

            volume_mounts.append({
                "mountPath": "/local-cache",
                "name": "local-cache"
            })

        clone_volume_mounts = [{
            "mountPath": "/repo",
            "name": "repo"
        }]

        clone_env = [{
            "name": "INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES",
            "value": os.environ['INFRABOX_GENERAL_DONT_CHECK_CERTIFICATES']
        }]

        if gerrit_enabled():
            gerrit_env = ({
                "name": "INFRABOX_GERRIT_HOSTNAME",
                "value": os.environ['INFRABOX_GERRIT_HOSTNAME']
            }, {
                "name": "INFRABOX_GERRIT_USERNAME",
                "value": os.environ['INFRABOX_GERRIT_USERNAME']
            }, {
                "name": "INFRABOX_GERRIT_PORT",
                "value": os.environ['INFRABOX_GERRIT_PORT']
            })

            env.extend(gerrit_env)
            clone_env.extend(gerrit_env)

            clone_volume_mounts.append({
                "name": "gerrit-ssh",
                "mountPath": "/tmp/gerrit/"
            })

            volumes.append({
                "name": "gerrit-ssh",
                "secret": {
                    "secretName": "infrabox-gerrit-ssh"
                }
            })

        run_job = {
            "kind": "Job",
            "apiVersion": "batch/v1",
            "metadata": {
                "name": job_id,
                "labels": {
                    "infrabox-job-type": "run-job",
                    "infrabox-job-id": job_id,
                    "infrabox-build-id": build_id
                }
            },
            "spec": {
                "template": {
                    "spec": {
                        "imagePullSecrets": [{"name": "infrabox-docker-secret"}],
                        "imagePullPolicy": "Always",
                        "automountServiceAccountToken": False,
                        "containers": [{
                            "name": "run-job",
                            "image": self.args.docker_registry + "/job:%s" % self.args.tag,
                            "command": ["/usr/local/bin/entrypoint.sh", "--type", job_type],

                            "securityContext": {
                                "privileged": True
                            },
                            "env": env,
                            "resources": {
                                "requests": {
                                    "cpu": cpu,
                                    "memory": "%sMi" % mem
                                },
                                "limits": {
                                    "cpu": cpu,
                                    "memory": "%sMi" % mem
                                }
                            },
                            "volumeMounts": volume_mounts
                        }, {
                            "name": "git-clone",
                            "image": self.args.docker_registry + "/job-git:%s" % self.args.tag,
                            "env": clone_env,
                            "volumeMounts": clone_volume_mounts
                        }],
                        "restartPolicy": "OnFailure",
                        "volumes": volumes
                    }
                }
            }
        }

        r = requests.post(self.args.api_server + '/apis/batch/v1/namespaces/%s/jobs' % self.namespace,
                          headers=h, json=run_job, timeout=10)

        if r.status_code != 201:
            self.logger.info('API Server response')
            self.logger.info(r.text)
            return False

        return True
Example #8
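Serves the output file of a parent job: it verifies that the requesting job actually depends on parent_job_id, tries local storage first, and otherwise fetches the file from the cluster that ran the parent job.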
    def get(self, parent_job_id):
        job_id = g.token['job']['id']

        if not validate_uuid(parent_job_id):
            abort(400, "Invalid uuid")

        filename = request.args.get('filename', None)

        if not filename:
            abort(400, "Invalid filename")

        dependencies = g.db.execute_one(
            '''
            SELECT dependencies
            FROM job
            WHERE id = %s
        ''', [job_id])[0]

        is_valid_dependency = False
        for dep in dependencies:
            if dep['job-id'] == parent_job_id:
                is_valid_dependency = True
                break

        if not is_valid_dependency:
            abort(404, "Job not found")

        key = "%s/%s" % (parent_job_id, filename)

        f = storage.download_output(key)

        if f:
            g.release_db()
            return send_file(f)

        c = g.db.execute_one_dict(
            '''
            SELECT *
            FROM cluster
            WHERE name = (
                SELECT cluster_name
                FROM job
                WHERE id = %s)
            ''', [parent_job_id])
        g.release_db()

        if c['name'] == os.environ['INFRABOX_CLUSTER_NAME']:
            abort(404)

        token = encode_job_token(job_id)
        headers = {'Authorization': 'token ' + token}
        url = '%s/api/job/output/%s?filename=%s' % (c['root_url'],
                                                    parent_job_id, filename)

        try:
            r = requests.get(url, headers=headers, timeout=120, verify=False)
            if r.status_code != 200:
                f = None
            else:
                f = BytesIO(r.content)
                f.seek(0)
        except Exception:
            f = None

        if not f:
            abort(404)

        return send_file(f, attachment_filename=filename)
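
All of the examples on this page rely on an encode_job_token helper that is not shown. A minimal sketch using PyJWT, assuming an HS256 shared secret (an assumption; the real implementation may sign with an RSA key instead):

    import jwt

    JOB_TOKEN_SECRET = 'change-me'  # assumption: a shared secret from configuration

    def encode_job_token(job_id):
        # Payload shape matches the g.token['job']['id'] lookups in the handlers above.
        payload = {'job': {'id': job_id}}
        # PyJWT < 2.0 returns bytes (hence the .decode() calls in Examples #3, #4 and #7);
        # PyJWT >= 2.0 returns str.
        return jwt.encode(payload, JOB_TOKEN_SECRET, algorithm='HS256')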