Example #1
    def run_docker_container(self, image_name):
        c = self.console
        collector = StatsCollector()

        container_name = self.job['id']
        cmd = ['docker', 'run', '--name', container_name]

        # Memory limit
        memory_limit = os.environ['INFRABOX_JOB_RESOURCES_LIMITS_MEMORY']
        cmd += ['-m', '%sm' % memory_limit]

        # repo mount
        cmd += ['-v', '%s:/infrabox' % self.mount_data_dir]

        # Mount context
        cmd += ['-v', '%s:/infrabox/context' % self._get_build_context_current_job()]

        # Add local cache
        if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
            cmd += ['-v', "/local-cache:/infrabox/local-cache"]

        # add env vars
        for name, value in self.environment.items():
            cmd += ['-e', '%s=%s' % (name, value)]

        # Add resource env vars and mount the Kubernetes service account (if configured)
        if not os.path.exists('/tmp/serviceaccount'):
            os.makedirs('/tmp/serviceaccount')
        if os.environ.get('INFRABOX_RESOURCES_KUBERNETES_CA_CRT', None):
            with open('/tmp/serviceaccount/ca.crt', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_CA_CRT']))

            with open('/tmp/serviceaccount/token', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_TOKEN']))

            with open('/tmp/serviceaccount/namespace', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_NAMESPACE']))

            cmd += ['-v', '/tmp/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount']
            cmd += ['-e', 'INFRABOX_RESOURCES_KUBERNETES_MASTER_URL=%s' %
                    os.environ['INFRABOX_RESOURCES_KUBERNETES_MASTER_URL']]

        # add services
        if os.path.exists('/var/run/infrabox.net/services'):
            cmd += ['-v', '/var/run/infrabox.net/services:/var/run/infrabox.net/services']

        cmd += ['--tmpfs', '/infrabox/tmpfs']

        # Privileged
        security_context = self.job['definition'].get('security_context', {})
        privileged = security_context.get('privileged', False)
        if privileged:
            cmd += ['--privileged']
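            # Presumably for Docker-in-Docker workloads: back the job's /var/lib/docker with host storage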
            cmd += ['-v', '/data/inner/docker:/var/lib/docker']

        cmd += [image_name]

        if self.job['definition'].get('command', None):
            cmd += self.job['definition']['command']

        try:
            c.header("Run container", show=True)
            c.execute(cmd, show=True, show_cmd=False)

            if (self.job['definition'].get('cache', {}).get('image', False) or
                    self.job['definition'].get('deployments', None)):
                c.execute(("docker", "commit", container_name, image_name))
        except Exception as e:
            try:
                # Find out if container was killed due to oom
                out = subprocess.check_output(['docker', 'inspect', container_name,
                                               '-f', '{{.State.OOMKilled}}']).strip()
            except Exception as ex:
                logger.exception(ex)
                raise Failure("Could not get OOMKilled state of container")

            if out == 'true':
                raise Failure('Container was killed, because it ran out of memory')

            try:
                exit_code = subprocess.check_output(['docker', 'inspect', container_name,
                                                     '-f', '{{.State.ExitCode}}']).strip()
            except Exception as ex:
                logger.exception(ex)
                raise Failure("Could not get exit code of container")

            c.header("Finalize", show=True)
            logger.exception(e)
            raise Failure("Container run exited with error (exit code=%s)" % exit_code)

        finally:
            try:
                collector.stop()
                self.post_stats(collector.get_result())
                c.execute(("docker", "rm", container_name))
            except Exception:
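                # Cleanup is best-effort; ignore failures here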
                pass
Example #2
    def run_job_docker_compose(self, c):
        c.header("Build containers", show=True)
        f = self.job['dockerfile']

        compose_file = os.path.normpath(os.path.join(self.job['definition']['infrabox_context'], f))
        compose_file_new = compose_file + ".infrabox.json"

        # rewrite compose file
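        # Inject InfraBox volume mounts, image caching and build args into every service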
        compose_file_content = create_from(compose_file)
        for service in compose_file_content['services']:
            service_cache_dir = os.path.join(self.infrabox_cache_dir, service)

            if not os.path.exists(service_cache_dir):
                makedirs(service_cache_dir)

            service_output_dir = os.path.join(self.infrabox_output_dir, service)
            makedirs(service_output_dir)

            service_testresult_dir = os.path.join(self.infrabox_testresult_dir, service)
            makedirs(service_testresult_dir)

            service_coverage_dir = os.path.join(self.infrabox_coverage_dir, service)
            makedirs(service_coverage_dir)

            service_markdown_dir = os.path.join(self.infrabox_markdown_dir, service)
            makedirs(service_markdown_dir)

            service_markup_dir = os.path.join(self.infrabox_markup_dir, service)
            makedirs(service_markup_dir)

            service_badge_dir = os.path.join(self.infrabox_badge_dir, service)
            makedirs(service_badge_dir)

            service_archive_dir = os.path.join(self.infrabox_archive_dir, service)
            makedirs(service_archive_dir)

            service_volumes = [
                "%s:/infrabox/cache" % service_cache_dir,
                "%s:/infrabox/inputs" % self.infrabox_inputs_dir,
                "%s:/infrabox/output" % service_output_dir,
                "%s:/infrabox/upload/testresult" % service_testresult_dir,
                "%s:/infrabox/upload/markdown" % service_markdown_dir,
                "%s:/infrabox/upload/markup" % service_markup_dir,
                "%s:/infrabox/upload/badge" % service_badge_dir,
                "%s:/infrabox/upload/coverage" % service_coverage_dir,
                "%s:/infrabox/upload/archive" % service_archive_dir,
            ]

            for v in compose_file_content['services'][service].get('volumes', []):
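                # Map user-defined volumes that reference /infrabox/context onto the host repo mount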
                v = v.replace('/infrabox/context', self.mount_repo_dir)
                service_volumes.append(v)

            # Mount /infrabox/context to the build context of the service if build.context
            # is set in the compose file for the service
            service_build = compose_file_content['services'][service].get('build', None)
            if service_build:
                service_build_context = service_build.get('context', None)
                if service_build_context:
                    build_context = os.path.join(os.path.dirname(compose_file), service_build_context)
                    service_volumes += ['%s:/infrabox/context' % build_context]
                else:
                    service_volumes += ['%s:/infrabox/context' % self.mount_repo_dir]
            else:
                service_volumes += ['%s:/infrabox/context' % self.mount_repo_dir]

            if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
                service_volumes.append("/local-cache:/infrabox/local-cache")

            compose_file_content['services'][service]['volumes'] = service_volumes

            image_name = get_registry_name() + '/' \
                         + self.project['id'] + '/' \
                         + self.job['name'] + '/' \
                         + service

            image_name_latest = image_name + ':latest'
            build = compose_file_content['services'][service].get('build', None)

            if build:
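                # Reuse the cached :latest image for layer caching and pass the build number as a build arg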
                compose_file_content['services'][service]['image'] = image_name_latest
                build['cache_from'] = [image_name_latest]
                self.get_cached_image(image_name_latest)

                if not build.get('args', None):
                    build['args'] = []

                build['args'] += ['INFRABOX_BUILD_NUMBER=%s' % self.build['build_number']]

        with open(compose_file_new, "w+") as out:
            json.dump(compose_file_content, out)

        collector = StatsCollector()

        try:
            try:
                c.execute(['docker-compose', '-f', compose_file_new, 'rm'],
                          env=self.environment)
            except Exception as e:
                logger.exception(e)

            self.environment['PATH'] = os.environ['PATH']
            c.execute(['docker-compose', '-f', compose_file_new, 'build'],
                      show=True, env=self.environment)
            c.header("Run docker-compose", show=True)

            cwd = self._get_build_context_current_job()

            c.execute(['docker-compose', '-f', compose_file_new, 'up',
                       '--abort-on-container-exit'], env=self.environment, show=True, cwd=cwd)
            c.execute(['docker-compose', '-f', compose_file_new, 'ps'], env=self.environment, cwd=cwd, show=True)
            c.execute(['get_compose_exit_code.sh', compose_file_new], env=self.environment, cwd=cwd, show=True)
        except Exception as e:
            logger.exception(e)
            raise Failure("Failed to build and run container")
        finally:
            c.header("Finalize", show=True)

            try:
                collector.stop()
                self.post_stats(collector.get_result())
                c.execute(['docker-compose', '-f', compose_file_new, 'rm'],
                          env=self.environment)
            except Exception as e:
                logger.exception(e)

        for service in compose_file_content['services']:
            image_name = get_registry_name() + '/' \
                         + self.project['id'] + '/' \
                         + self.job['name'] + '/' \
                         + service

            image_name_latest = image_name + ':latest'

            build = compose_file_content['services'][service].get('build', None)
            if build:
                compose_file_content['services'][service]['image'] = service

            self.cache_docker_image(image_name_latest, image_name_latest)

        return True
Example #3
    def run_docker_compose(self, c):
        c.header("Build containers", show=True)
        f = self.job['dockerfile']

        if self.job.get('base_path', None):
            f = os.path.join(self.job['base_path'], f)

        compose_file = os.path.join('/repo', f)
        compose_file_new = compose_file + ".infrabox"

        # rewrite compose file
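        # Inject the InfraBox volume mounts into every service before running docker-compose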
        compose_file_content = create_from(compose_file)
        for service in compose_file_content['services']:
            service_cache_dir = os.path.join(self.infrabox_cache_dir, service)

            if not os.path.exists(service_cache_dir):
                makedirs(service_cache_dir)

            service_output_dir = os.path.join(self.infrabox_output_dir, service)
            makedirs(service_output_dir)

            service_testresult_dir = os.path.join(self.infrabox_testresult_dir, service)
            makedirs(service_testresult_dir)

            service_markdown_dir = os.path.join(self.infrabox_markdown_dir, service)
            makedirs(service_markdown_dir)

            service_markup_dir = os.path.join(self.infrabox_markup_dir, service)
            makedirs(service_markup_dir)

            service_badge_dir = os.path.join(self.infrabox_badge_dir, service)
            makedirs(service_badge_dir)

            service_volumes = [
                "/repo:/infrabox/context",
                "%s:/infrabox/cache" % service_cache_dir,
                "%s:/infrabox/inputs" % self.infrabox_inputs_dir,
                "%s:/infrabox/output" % service_output_dir,
                "%s:/infrabox/upload/testresult" % service_testresult_dir,
                "%s:/infrabox/upload/markdown" % service_markdown_dir,
                "%s:/infrabox/upload/markup" % service_markup_dir,
                "%s:/infrabox/upload/badge" % service_badge_dir,
            ]

            if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
                service_volumes.append("/local-cache:/infrabox/local-cache")

            compose_file_content['services'][service]['volumes'] = service_volumes

        with open(compose_file_new, "w+") as out:
            yaml.dump(compose_file_content, out, default_flow_style=False)

        collector = StatsCollector()

        try:
            self.environment['PATH'] = os.environ['PATH']
            c.execute(['docker-compose', '-f', compose_file_new, 'build'],
                      show=True, env=self.environment)
            c.header("Run docker-compose", show=True)

            cwd = self.job.get('base_path', None)
            if cwd:
                cwd = os.path.join('/repo', cwd)

            c.execute(['docker-compose', '-f', compose_file_new, 'up',
                       '--abort-on-container-exit'], env=self.environment, show=True, cwd=cwd)
            c.execute(['docker-compose', '-f', compose_file_new, 'ps'], env=self.environment, cwd=cwd)
            c.execute(['get_compose_exit_code.sh', compose_file_new], env=self.environment, cwd=cwd)
        except Exception:
            raise Failure("Failed to build and run container")
        finally:
            collector.stop()
            self.post_stats(collector.get_result())

        return True
Example #4
    def run_docker_container(self, image_name):
        if self.job['build_only']:
            return

        c = self.console
        collector = StatsCollector()

        container_name = self.job['id']
        cmd = ['docker', 'run', '-t', '--name', container_name,
               '-v', self.data_dir + ':/infrabox']

        # Mount context
        cmd += ['-v', '/repo:/infrabox/context']

        # Mount docker socket
        if os.environ['INFRABOX_JOB_MOUNT_DOCKER_SOCKET'] == 'true':
            cmd += ['-v', '/var/run/docker.sock:/var/run/docker.sock']

        # Add local cache
        if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
            cmd += ['-v', "/local-cache:/infrabox/local-cache"]

        # add env vars
        for name, value in self.environment.items():
            cmd += ['-e', '%s=%s' % (name, value)]

        # Add resource env vars and mount the Kubernetes service account (if configured)
        if not os.path.exists('/tmp/serviceaccount'):
            os.makedirs('/tmp/serviceaccount')
        if os.environ.get('INFRABOX_RESOURCES_KUBERNETES_CA_CRT', None):
            with open('/tmp/serviceaccount/ca.crt', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_CA_CRT']))

            with open('/tmp/serviceaccount/token', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_TOKEN']))

            with open('/tmp/serviceaccount/namespace', 'w') as o:
                o.write(base64.b64decode(os.environ['INFRABOX_RESOURCES_KUBERNETES_NAMESPACE']))

            cmd += ['-v', '/tmp/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount']
            cmd += ['-e', 'INFRABOX_RESOURCES_KUBERNETES_MASTER_URL=%s' %
                    os.environ['INFRABOX_RESOURCES_KUBERNETES_MASTER_URL']]

        # Add capabilities
        security_context = self.job.get('security_context', {})

        if security_context:
            capabilities = security_context.get('capabilities', {})
            add_capabilities = capabilities.get('add', [])
            if add_capabilities:
                cmd += ['--cap-add=%s' % ','.join(add_capabilities)]

        cmd += [image_name]

        try:
            c.header("Run container", show=True)
            c.execute(cmd, show=True)
            c.execute(("docker", "commit", container_name, image_name))
        except Exception:
            try:
                c.execute(("docker", "commit", container_name, image_name))
                self.push_container(image_name)
            except Exception:
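                # Best effort only; the run failure is reported below regardless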
                pass

            raise Failure("Container run exited with error")
        finally:
            collector.stop()
            self.post_stats(collector.get_result())
Example #5
    def run_docker_container(self, image_name):
        if self.job['build_only']:
            return

        c = self.console
        collector = StatsCollector()

        container_name = self.job['id']
        cmd = ['docker', 'run', '--name', container_name]

        # Memory limit
        memory_limit = os.environ['INFRABOX_JOB_RESOURCES_LIMITS_MEMORY']
        cmd += ['-m', '%sm' % memory_limit]

        # repo mount
        cmd += ['-v', '%s:/infrabox' % self.mount_data_dir]

        # Mount context
        cmd += [
            '-v',
            '%s:/infrabox/context' % self._get_build_context_current_job()
        ]

        # Mount docker socket
        if os.environ['INFRABOX_JOB_MOUNT_DOCKER_SOCKET'] == 'true':
            cmd += ['-v', '/var/run/docker.sock:/var/run/docker.sock']

        # Add local cache
        if os.environ['INFRABOX_LOCAL_CACHE_ENABLED'] == 'true':
            cmd += ['-v', "/local-cache:/infrabox/local-cache"]

        # add env vars
        for name, value in self.environment.items():
            cmd += ['-e', '%s=%s' % (name, value)]

        # Add resource env vars and mount the Kubernetes service account (if configured)
        if not os.path.exists('/tmp/serviceaccount'):
            os.makedirs('/tmp/serviceaccount')
        if os.environ.get('INFRABOX_RESOURCES_KUBERNETES_CA_CRT', None):
            with open('/tmp/serviceaccount/ca.crt', 'w') as o:
                o.write(
                    base64.b64decode(
                        os.environ['INFRABOX_RESOURCES_KUBERNETES_CA_CRT']))

            with open('/tmp/serviceaccount/token', 'w') as o:
                o.write(
                    base64.b64decode(
                        os.environ['INFRABOX_RESOURCES_KUBERNETES_TOKEN']))

            with open('/tmp/serviceaccount/namespace', 'w') as o:
                o.write(
                    base64.b64decode(
                        os.environ['INFRABOX_RESOURCES_KUBERNETES_NAMESPACE']))

            cmd += [
                '-v',
                '/tmp/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount'
            ]
            cmd += [
                '-e',
                'INFRABOX_RESOURCES_KUBERNETES_MASTER_URL=%s' %
                os.environ['INFRABOX_RESOURCES_KUBERNETES_MASTER_URL']
            ]

        # Add capabilities
        security_context = self.job.get('security_context', {})

        if security_context:
            capabilities = security_context.get('capabilities', {})
            add_capabilities = capabilities.get('add', [])
            if add_capabilities:
                cmd += ['--cap-add=%s' % ','.join(add_capabilities)]

        cmd += [image_name]

        try:
            c.header("Run container", show=True)
            c.execute(cmd, show=True)
            c.execute(("docker", "commit", container_name, image_name))
        except Exception as e:
            try:
                # Find out if container was killed due to oom
                out = subprocess.check_output([
                    'docker', 'inspect', container_name, '-f',
                    '{{.State.OOMKilled}}'
                ]).strip()
            except Exception as ex:
                logger.exception(ex)
                raise Failure("Could not get OOMKilled state of container")

            if out == 'true':
                raise Failure(
                    'Container was killed, because it ran out of memory')

            try:
                exit_code = subprocess.check_output([
                    'docker', 'inspect', container_name, '-f',
                    '{{.State.ExitCode}}'
                ]).strip()
            except Exception as ex:
                logger.exception(ex)
                raise Failure("Could not get exit code of container")

            try:
                c.execute(("docker", "commit", container_name, image_name))
                c.header("Finalize", show=True)
                self.push_container(image_name)
            except Exception as ex:
                logger.exception(ex)
                raise Failure("Could not commit and push container")

            logger.exception(e)
            raise Failure("Container run exited with error (exit code=%s)" %
                          exit_code)

        finally:
            try:
                collector.stop()
                self.post_stats(collector.get_result())
                c.execute(("docker", "rm", container_name))
            except Exception:
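                # Ignore cleanup failures; the job outcome has already been decided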
                pass