Example #1
    def test_mesos_executor_run_task(self):
        os.environ['MESOS_SLAVE_PID'] = str(self.slave.pid)

        launch_task_event, launch_task_call, launch_task_side_effect = self.mock_method()
        executor = mock.create_autospec(Executor, spec_set=True)
        executor.launchTask = mock.Mock(side_effect=launch_task_side_effect)

        driver = PesosExecutorDriver(executor, context=self.context)
        assert driver.start() == mesos_pb2.DRIVER_RUNNING

        # wait until registered
        driver.executor_process.connected.wait(timeout=MAX_TIMEOUT)
        assert driver.executor_process.connected.is_set()

        # now launch task
        task_info = mesos_pb2.TaskInfo(
            name='task',
            task_id=mesos_pb2.TaskID(value='task-id'),
            slave_id=self.slave.slave_id,
            executor=self.executor_info,
        )
        self.slave.send_run_task(
            driver.executor_process.pid,
            mesos_pb2.FrameworkID(value=self.FRAMEWORK_ID),
            task_info,
        )

        launch_task_event.wait(timeout=MAX_TIMEOUT)
        assert launch_task_event.is_set()
        assert launch_task_call.mock_calls == [mock.call(driver, task_info)]
        assert driver.executor_process.tasks == {
            task_info.task_id.value: task_info
        }
Example #2
    def _build_task(self, offer, executor_id, task_id, args):
        """
        Create a TaskInfo object for an offer, executor_id and task_id.
        """

        # Create the initial TaskInfo object
        task = mesos_pb2.TaskInfo()
        task.name = "Test Framework Task"
        task.task_id.value = "%d:%d" % (executor_id, task_id)
        task.slave_id.value = offer.slave_id.value

        # Configure the executor
        task.executor.executor_id.value = str(executor_id)
        task.executor.framework_id.value = offer.framework_id.value

        uri = task.executor.command.uris.add()
        uri.value = args.executor_uri

        task.executor.command.value = "./%s/bin/executor" % os.path.basename(
            uri.value).split(".")[0]

        # Add the task resource
        cpus = task.resources.add()
        cpus.name = "cpus"
        cpus.type = mesos_pb2.Value.SCALAR
        cpus.scalar.value = self.TASK_CPU

        mem = task.resources.add()
        mem.name = "mem"
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = self.TASK_MEM

        return task
Example #3
    def _prepare_task(self, driver, dockerfile, build_task, offer, cpu, mem,
                      role):

        # Define the mesos task
        task = mesos_pb2.TaskInfo()
        task.name = "%s/%s" % (":".join([
            build_task.image.registry.hostname,
            str(build_task.image.registry.port)
        ]), build_task.image.repository)
        task.task_id.value = build_task.task_id
        task.slave_id.value = offer.slave_id.value

        # Create the executor
        args = []
        if self.verbose:
            args.append("--verbose")

        task.executor.executor_id.value = build_task.task_id
        task.executor.command.value = "${MESOS_SANDBOX:-${MESOS_DIRECTORY}}/%s/bin/portainer %s build-executor" % (
            os.path.basename(
                self.executor_uri).rstrip(".tar.gz"), " ".join(args))

        if self.container_image:
            task.executor.container.type = mesos_pb2.ContainerInfo.DOCKER
            task.executor.container.docker.image = self.container_image
            task.executor.container.docker.privileged = True

        task.executor.name = "build"
        task.executor.source = "build %s" % (task.name)

        # Configure the mesos executor with the portainer executor uri
        portainer_executor = task.executor.command.uris.add()
        portainer_executor.value = self.executor_uri

        if build_task.context:
            # Add the docker context
            uri = task.executor.command.uris.add()
            uri.value = build_task.context_url
            uri.extract = False

        task.data = build_task.SerializeToString()
        task.executor.data = task.data

        # Build up the resources we require
        cpu_resource = task.resources.add()
        cpu_resource.name = "cpus"
        cpu_resource.type = mesos_pb2.Value.SCALAR
        cpu_resource.role = role
        cpu_resource.scalar.value = cpu

        mem_resource = task.resources.add()
        mem_resource.name = "mem"
        mem_resource.type = mesos_pb2.Value.SCALAR
        mem_resource.role = role
        mem_resource.scalar.value = mem

        self.task_history[build_task.task_id] = (dockerfile, build_task)

        return task
Example #4
    def _pending_to_scheduled(self, schedule_queue):
        scheduled_tasks = []
        for offer, task in schedule_queue:
            task_info = mesos_pb2.TaskInfo()
            task.write(task_info)

            # populate task_id, slave_id
            task_info.task_id.value = '%s-%s' % (task.name, uuid.uuid4())
            task_info.slave_id.MergeFrom(offer.slave_id)

            if task_info.HasField('executor'):
                task_info.executor.executor_id.value = '%s-%s' % (task.name,
                                                                  uuid.uuid4())

            scheduled_tasks.append(task_info)
        return scheduled_tasks
Example #5
File: hello.py  Project: keis/hello-mesos
def new_task(offer):
    task = mesos_pb2.TaskInfo()
    id = uuid.uuid4()
    task.task_id.value = str(id)
    task.slave_id.value = offer.slave_id.value
    task.name = "task {}".format(str(id))

    cpus = task.resources.add()
    cpus.name = "cpus"
    cpus.type = mesos_pb2.Value.SCALAR
    cpus.scalar.value = 1

    mem = task.resources.add()
    mem.name = "mem"
    mem.type = mesos_pb2.Value.SCALAR
    mem.scalar.value = 1

    return task
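
A TaskInfo built by a helper like new_task() above is normally handed back to Mesos from a scheduler's resourceOffers callback. The sketch below shows that hand-off, assuming the new_task() helper from this example and the mesos.interface bindings; the HelloScheduler class name and the echo command are illustrative placeholders rather than part of the original example, and the exact launchTasks signature varies slightly between Mesos releases.

from mesos.interface import Scheduler


class HelloScheduler(Scheduler):
    def resourceOffers(self, driver, offers):
        # Minimal sketch: accept each offer with a single task built by new_task().
        for offer in offers:
            task = new_task(offer)                     # TaskInfo from the helper above
            task.command.value = "echo hello mesos"    # placeholder shell command
            driver.launchTasks(offer.id, [task])       # hand the task back to Mesos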
Example #6
    def _prepare_task(self, driver, path, dockerfile, tags, offer, cpu, mem):
        """Prepare a given dockerfile build task atop the given mesos offer."""

        # Generate a task ID
        task_id = str(uuid.uuid1())
        logger.info("Preparing task %s to build %s", task_id, path)

        # Define the build that's required
        build_task = portainer_pb2.BuildTask()
        build_task.stream = self.stream

        # Create a custom docker context if there are local sources
        staging_context_path = None
        if dockerfile.has_local_sources:
            working_dir = os.path.abspath(os.path.dirname(path))

            # Generate the dockerfile build context
            _, context_path = tempfile.mkstemp()
            context = open(context_path, "w+b")

            logger.debug("Writing context tar to %s", context_path)
            context_size = self._make_build_context(context, working_dir,
                                                    dockerfile)

            # Put together the staging directory
            staging_dir = os.path.join("staging", task_id)
            context_filename = "docker_context.tar.gz"

            staging_context_path = os.path.join(staging_dir, context_filename)

            # Create the directory
            logger.debug("Task staging directory %s", staging_dir)
            self.filesystem.makedir(staging_dir, recursive=True)

            # Upload the build context (+ fancy progress bar)
            logger.info("Uploading context (%d bytes)", context_size)
            pbar = progressbar.ProgressBar(maxval=context_size, term_width=100)

            # Define a custom error handler for the async upload
            def handle_exception(e):
                logger.error("Caught exception uploading the context")
                raise e

            event = self.filesystem.setcontents_async(
                path=staging_context_path,
                data=context,
                progress_callback=pbar.update,
                finished_callback=pbar.finish,
                error_callback=handle_exception)

            # Hold up, let's wait until the upload finishes
            event.wait()

            # Close and clear up the tmp context
            logger.debug("Cleaning up local context %s", context_path)
            context.close()
            os.unlink(context_path)

            build_task.context = context_filename
        else:
            build_task.dockerfile = dockerfile.build()

        if self.docker_host:
            build_task.docker_host = self.docker_host

        # Pull out the repository from the dockerfile
        try:
            build_task.image.repository = dockerfile.get(
                "REPOSITORY", [self.repository]).next()[0]
        except (StopIteration, IndexError):
            raise ValueError("No REPOSITORY given for %s", path)

        # Pull out the registry from the dockerfile
        try:
            registry = self.push_registry.split(":")
            build_task.image.registry.hostname = registry[0]
            if len(registry) > 1:
                build_task.image.registry.port = int(registry[1])
        except ValueError:
            raise ValueError("Failed to parse REGISTRY in %s", path)

        # Add any tags
        build_task.image.tag.extend(tags)

        # Define the mesos task
        task = mesos_pb2.TaskInfo()
        task.name = "%s/%s" % (":".join(registry), build_task.image.repository)
        task.task_id.value = task_id
        task.slave_id.value = offer.slave_id.value

        # Create the executor
        args = []
        if self.verbose:
            args.append("--verbose")

        task.executor.executor_id.value = task_id
        task.executor.command.value = "./%s/bin/portainer %s build-executor" % (
            os.path.basename(
                self.executor_uri).rstrip(".tar.gz"), " ".join(args))

        # TODO(tarnfeld): Make this configurable
        # TODO(tarnfeld): Support the mesos 0.20.0 docker protobuf
        task.executor.command.container.image = "docker://jpetazzo/dind"

        # We have to mount the /var/lib/docker VOLUME inside of the sandbox
        task.executor.command.container.options.extend(["--privileged"])
        task.executor.command.container.options.extend(
            ["-v", "$MESOS_DIRECTORY/docker:/var/lib/docker"])

        task.executor.name = "build"
        task.executor.source = "portainer"

        # Configure the mesos executor with the portainer executor uri
        portainer_executor = task.executor.command.uris.add()
        portainer_executor.value = self.executor_uri

        if staging_context_path:
            # Add the docker context
            uri = task.executor.command.uris.add()
            uri.value = os.path.join(self.staging_uri, staging_context_path)
            uri.extract = False

        task.data = build_task.SerializeToString()
        task.executor.data = task.data

        # Build up the resources
        cpu_resource = task.resources.add()
        cpu_resource.name = "cpus"
        cpu_resource.type = mesos_pb2.Value.SCALAR
        cpu_resource.scalar.value = cpu

        mem_resource = task.resources.add()
        mem_resource.name = "mem"
        mem_resource.type = mesos_pb2.Value.SCALAR
        mem_resource.scalar.value = mem

        self.task_ids[task_id] = build_task

        logger.info("Prepared task %s to build %s", task_id, path)
        logger.debug("%s", build_task)

        return task
Example #7
    def _prepare_task(self, driver, task_id, path, dockerfile, tags, offer,
                      cpu, mem):
        """Prepare a given dockerfile build task atop the given mesos offer."""

        logger.info("Preparing task %s to build %s", task_id, path)

        # Define the build that's required
        build_task = portainer_pb2.BuildTask()
        build_task.stream = self.stream

        # Create a custom docker context if there are local sources
        staging_context_path = None
        if dockerfile.has_local_sources:
            working_dir = os.path.abspath(os.path.dirname(path))

            # Generate the dockerfile build context
            _, context_path = tempfile.mkstemp()
            context = open(context_path, "w+b")

            logger.debug("Writing context tar to %s", context_path)
            context_size = self._make_build_context(context, working_dir,
                                                    dockerfile)

            # Put together the staging directory
            staging_dir = os.path.join("staging", task_id)
            context_filename = "docker_context.tar.gz"

            staging_context_path = os.path.join(staging_dir, context_filename)

            # Create the directory
            logger.debug("Task staging directory %s", staging_dir)
            self.filesystem.makedir(staging_dir, recursive=True)

            # Upload the build context (+ fancy progress bar)
            logger.info("Uploading context (%d bytes)", context_size)
            pbar = progressbar.ProgressBar(maxval=context_size, term_width=100)

            # Define a custom error handler for the async upload
            caught_exception = threading.Event()

            def handle_exception(e):
                (_, _, tb) = sys.exc_info()
                logger.error("Caught exception uploading the context: %s" %
                             e.message)
                logger.error(traceback.format_exc(tb))
                caught_exception.set()

            event = self.filesystem.setcontents_async(
                path=staging_context_path,
                data=context,
                progress_callback=pbar.update,
                finished_callback=pbar.finish,
                error_callback=handle_exception)

            # Hold up, let's wait until the upload finishes
            event.wait()

            # Close and clear up the tmp context
            logger.debug("Cleaning up local context %s", context_path)
            context.close()
            os.unlink(context_path)

            # Check to see if we caught any exceptions while uploading the context
            if caught_exception.is_set():
                raise TaskContextException(
                    "Exception raised while uploading context")

            build_task.context = context_filename
        else:
            build_task.dockerfile = dockerfile.build()

        # Configure properties on the docker daemon
        if self.docker_host:
            build_task.daemon.docker_host = self.docker_host
        if self.insecure_registries:
            for registry in [self.pull_registry, self.push_registry]:
                if registry:
                    build_task.daemon.insecure_registries.append(registry)

        # Pull out the repository from the dockerfile
        try:
            build_task.image.repository = dockerfile.get(
                "REPOSITORY", [self.repository]).next()[0]
        except (StopIteration, IndexError):
            raise ValueError("No REPOSITORY given for %s", path)

        # Pull out the registry from the dockerfile
        try:
            registry = self.push_registry.split(":")
            build_task.image.registry.hostname = registry[0]
            if len(registry) > 1:
                build_task.image.registry.port = int(registry[1])
        except ValueError:
            raise ValueError("Failed to parse REGISTRY in %s", path)

        # Add any tags
        build_task.image.tag.extend(tags)

        # Define the mesos task
        task = mesos_pb2.TaskInfo()
        task.name = "%s/%s" % (":".join(registry), build_task.image.repository)
        task.task_id.value = task_id
        task.slave_id.value = offer.slave_id.value

        # Create the executor
        args = []
        if self.verbose:
            args.append("--verbose")

        task.executor.executor_id.value = task_id
        task.executor.command.value = "${MESOS_SANDBOX:-${MESOS_DIRECTORY}}/%s/bin/portainer %s build-executor" % (
            os.path.basename(
                self.executor_uri).rstrip(".tar.gz"), " ".join(args))

        if self.container_image:
            task.executor.container.type = mesos_pb2.ContainerInfo.DOCKER
            task.executor.container.docker.image = self.container_image
            task.executor.container.docker.privileged = True

        task.executor.name = "build"
        task.executor.source = "build %s" % (task.name)

        # Configure the mesos executor with the portainer executor uri
        portainer_executor = task.executor.command.uris.add()
        portainer_executor.value = self.executor_uri

        if staging_context_path:
            # Add the docker context
            uri = task.executor.command.uris.add()
            uri.value = os.path.join(self.staging_uri, staging_context_path)
            uri.extract = False

        task.data = build_task.SerializeToString()
        task.executor.data = task.data

        # Build up the resources we require
        cpu_resource = task.resources.add()
        cpu_resource.name = "cpus"
        cpu_resource.type = mesos_pb2.Value.SCALAR
        cpu_resource.scalar.value = cpu

        mem_resource = task.resources.add()
        mem_resource.name = "mem"
        mem_resource.type = mesos_pb2.Value.SCALAR
        mem_resource.scalar.value = mem

        self.task_ids[task_id] = build_task

        logger.info("Prepared task %s to build %s", task_id, path)
        logger.debug("%s", build_task)

        return task