Example #1
 def __init__(self, cache):
     super(DiskVolume, self).__init__()
     self._cache = cache
     self._path = config.get(NAME, "path")
     raise_error_if(not self._path, "volume path not configured")
     fs.makedirs(self._path)
     self._upload = config.getboolean(NAME, "upload", True)
     self._download = config.getboolean(NAME, "download", True)
Example #2
 def diff(self):
     diff = self.diff_unchecked()
     dlim = config.getsize("git", "maxdiffsize", "1M")
     raise_error_if(
         len(diff) > dlim,
         "git patch for '{}' exceeds configured size limit of {} bytes - actual size {}"
         .format(self.path, dlim, len(diff)))
     return diff
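
The limit above is read with config.getsize (see Example #14), so it accepts values such as "1M" or "16 M". A minimal sketch of raising it at runtime, assuming the jolt config module is imported as config; "scm" is an illustrative object exposing the diff() method shown here:

# Hedged sketch: "git"/"maxdiffsize" are the section and key used by diff() above.
config.set("git", "maxdiffsize", "16M")
patch = scm.diff()  # now tolerates patches up to 16 MiB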
Example #3
 def __init__(self, cache):
     super(FtpStorage, self).__init__()
     self._cache = cache
     self._uri = config.get(NAME, "host")
     raise_error_if(not self._uri, "ftp URI not configured")
     self._path = config.get(NAME, "path", "")
     self._upload = config.getboolean(NAME, "upload", True)
     self._download = config.getboolean(NAME, "download", True)
     self._tls = config.getboolean(NAME, "tls", False)
     self._disabled = False
Example #4
File: http.py Project: srand/jolt
 def __init__(self, cache):
     super(Http, self).__init__()
     self._cache = cache
     self._uri = config.get(NAME, "uri")
     raise_error_if(not self._uri, "HTTP URI not configured")
     if self._uri[-1] != "/":
         self._uri += "/"
     self._upload = config.getboolean(NAME, "upload", True)
     self._download = config.getboolean(NAME, "download", True)
     self._disabled = False
Example #5
def load_or_set(file_or_str):
    if fs.path.exists(file_or_str):
        _config.add_file("cli", file_or_str)
        _config.load()
    else:
        key_value = file_or_str.split("=", 1)
        raise_error_if(len(key_value) <= 1, "syntax error in configuration: '{}'".format(file_or_str))
        section_key = key_value[0].split(".", 1)
        raise_error_if(len(section_key) <= 1, "syntax error in configuration: '{}'".format(file_or_str))
        _config.set(section_key[0], section_key[1], key_value[1], alias="cli")
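
A short usage sketch of the two accepted argument forms; the file name and key/value pairs are illustrative:

load_or_set("myproject.conf")    # existing file: added as the "cli" config source and loaded
load_or_set("jolt.colors=true")  # "section.key=value": stored via _config.set(..., alias="cli")
load_or_set("colors=true")       # missing "section." prefix: raises "syntax error in configuration"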
Example #6
File: cli.py Project: srand/jolt
def _list(ctx, task=None, all=False, reverse=None):
    """
    List all tasks, or dependencies of a task.

    By default, when no TASK is specified, all known task names
    are listed in alphabetical order.

    When a TASK is specified, only direct dependencies of that task
    are listed. Use -a to also list its indirect dependencies.

    Multiple TASK names are allowed.
    """

    raise_error_if(not task and reverse, "TASK required with --reverse")

    registry = TaskRegistry.get()

    if not task:
        classes = registry.get_task_classes()
        for task in sorted(classes, key=lambda x: x.name):
            if task.name:
                print(task.name)
        return

    task = [utils.stable_task_name(t) for t in task]
    reverse = [utils.stable_task_name(t) for t in utils.as_list(reverse or [])]

    try:
        dag = graph.GraphBuilder(registry,
                                 ctx.obj["manifest"]).build(task,
                                                            influence=False)
    except JoltError as e:
        raise e
    except Exception:
        raise_error(
            "an exception occurred during task dependency evaluation, see log for details"
        )

    task = reverse or task
    nodes = dag.select(lambda graph, node: node.short_qualified_name in task or
                       node.qualified_name in task)
    nodes = list(nodes)
    iterator = dag.predecessors if reverse else dag.successors

    tasklist = set()
    while nodes:
        node = nodes.pop()
        for task in iterator(node):
            if all and task.short_qualified_name not in tasklist:
                new_node = dag.get_task(task.qualified_name)
                nodes.append(new_node)
            tasklist.add(task.short_qualified_name)

    for task in sorted(list(tasklist)):
        print(task)
Example #7
 def __init__(self):
     self._server = config.get("email", "server")
     self._to = config.get("email", "to")
     self._from = config.get("email", "from", "jolt@localhost")
     self._subject = config.get("email", "subject", "Jolt Build Report")
     self._stylesheet = config.get(
         "email", "stylesheet",
         fs.path.join(fs.path.dirname(__file__), "email.xslt"))
     self._artifact = config.get("email", "artifact")
     self._failure = config.getboolean("email", "on_failure", True)
     self._success = config.getboolean("email", "on_success", True)
     raise_error_if(not self._server, "email.server not configured")
     raise_error_if(not self._server, "email.to not configured")
Example #8
def new_git(url, path, relpath, refspecs=None):
    refspecs = utils.as_list(refspecs or [])
    try:
        git = _gits[path]
        raise_error_if(git.url != url,
                       "multiple git repositories required at {}", relpath)
        raise_error_if(
            git.refspecs != refspecs,
            "conflicting refspecs detected for git repository at  {}", relpath)
        return git
    except KeyError:
        git = _gits[path] = GitRepository(url, path, relpath, refspecs)
        return git
Example #9
 def topological_nodes(self):
     with self._mutex:
         G = self.clone()
         S = G.roots
         L = []
         while S:
             for n in S:
                 L.append(n)
                 G.remove_node(n)
             S = G.roots
         if len(G.nodes) > 0:
             log.debug("[GRAPH] Graph has cycles between these nodes:")
             for node in G.nodes:
                 log.debug("[GRAPH]   " + node.short_qualified_name)
             raise_error_if(len(G.nodes) > 0, "graph has cycles")
         return L
Example #10
 def clone(self):
     log.info("Cloning into {0}", self.path)
     if fs.path.exists(self.path):
         with self.tools.cwd(self.path):
             self.tools.run(
                 "git init && git remote add origin {} && git fetch",
                 self.url,
                 output_on_error=True)
     else:
         self.tools.run("git clone {0} {1}",
                        self.url,
                        self.path,
                        output_on_error=True)
     raise_error_if(not fs.path.exists(self._git_folder()),
                    "git: failed to clone repository '{0}'", self.relpath)
     self._init_repo()
Example #11
 def __init__(self,
              plugin="telemetry",
              uri=None,
              local=True,
              network=True,
              queued=True,
              started=True,
              failed=True,
              finished=True):
     self._uri = uri or config.get(plugin, "uri", uri)
     self._network = config.getboolean(plugin, "network", network)
     self._local = config.getboolean(plugin, "local", local)
     self._queued = config.getboolean(plugin, "queued", queued)
     self._started = config.getboolean(plugin, "started", started)
     self._failed = config.getboolean(plugin, "failed", failed)
     self._finished = config.getboolean(plugin, "finished", finished)
     raise_error_if(not self._uri, "telemetry.uri not configured")
Example #12
def _list(ctx, task=None, reverse=None):
    """
    List all tasks, or dependencies of a task.

    """

    raise_error_if(not task and reverse, "TASK required with --reverse")

    registry = TaskRegistry.get()

    if not task:
        classes = registry.get_task_classes()
        classes += registry.get_test_classes()
        for task in sorted(classes, key=lambda x: x.name):
            if task.name:
                print(task.name)
        return

    task = [utils.stable_task_name(t) for t in task]
    reverse = [utils.stable_task_name(t) for t in utils.as_list(reverse or [])]

    try:
        dag = graph.GraphBuilder(registry,
                                 ctx.obj["manifest"]).build(task,
                                                            influence=False)
    except JoltError as e:
        raise e
    except Exception:
        raise_error(
            "an exception occurred during task dependency evaluation, see log for details"
        )

    task = reverse or task
    nodes = dag.select(lambda graph, node: node.short_qualified_name in task or
                       node.qualified_name in task)

    tasklist = set()
    iterator = dag.predecessors if reverse else dag.successors

    for node in nodes:
        for task in iterator(node):
            tasklist.add(task.short_qualified_name)

    for task in sorted(list(tasklist)):
        print(task)
Example #13
    def _get_auth(self):
        service = config.get(NAME, "keyring.service")
        if not service:
            return None, None

        username = config.get(NAME, "keyring.username")
        if not username:
            username = input(NAME + " username: "******"no username configured for " + NAME)
            config.set(NAME, "keyring.username", username)
            config.save()

        password = config.get(
            NAME, "keyring.password") or keyring.get_password(NAME, username)
        if not password:
            password = getpass.getpass(NAME + " password: ")
            raise_error_if(not password, "no password in keyring for " + NAME)
            keyring.set_password(service, username, password)
        return username, password
Example #14
def getsize(section, key, default=None, alias=None):
    units = {"B": 1, "K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}
    value = get(section, key, default=None, alias=alias)
    if value is None:
        if type(default) == int:
            return default
        else:
            value = str(default)
    value = value.strip()
    value = value.split()
    if len(value) == 1 and value[0][-1] in units:
        size, unit = value[0][:-1], value[0][-1]
    else:
        raise_error_if(
            len(value) != 2,
            "config: size '{2}' invalid for '{0}.{1}', expected '<size> <unit>'",
            section, key, value)
        size, unit = value[0], value[1]
    raise_error_if(
        unit not in units,
        "config: unit invalid for '{0}.{1}', expected [B,K,M,G,T]", section, key)
    return int(size) * units[unit]
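
A few illustrative calls showing how the parser behaves; the configuration contents are assumptions:

# Assuming the loaded configuration contains "[git] maxdiffsize = 2 M":
getsize("git", "maxdiffsize", "1M")   # -> 2097152 (2 * 1024**2)
# With the key unset, a string default is parsed the same way:
getsize("git", "maxdiffsize", "1M")   # -> 1048576
# With the key unset and an integer default, the integer is returned as-is:
getsize("git", "maxdiffsize", 4096)   # -> 4096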
Example #15
 def get_parameters(self, task):
     registry = TaskRegistry()
     registry.add_task_class(Jolt)
     acache = ArtifactCache.get()
     env = JoltEnvironment(cache=acache)
     gb = GraphBuilder(registry, JoltManifest())
     dag = gb.build(["jolt"])
     task = dag.select(lambda graph, task: True)
     assert len(task) == 1, "too many selfdeploy tasks found"
     task = task[0]
     if not acache.is_available_remotely(task):
         factory = LocalExecutorFactory()
         executor = LocalExecutor(factory, task, force_upload=True)
         executor.run(env)
     jolt_url = acache.location(task)
     raise_error_if(not jolt_url, "failed to deploy jolt to a remote cache")
     return {
         "jolt_url": jolt_url,
         "jolt_identity": task.identity[:8],
         "jolt_requires": config.get("selfdeploy", "requires", "")
     }
Example #16
def symlink(src, dest, *args, **kwargs):
    if os.name == "nt":
        # Try to use junctions first.
        try:
            import _winapi
            _winapi.CreateJunction(src, dest)
            return
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            # OK, probably linking a file and not a directory;
            # try a regular symlink instead.
            try:
                os.symlink(src, dest, *args, **kwargs)
            except OSError as e:
                raise_error_if(
                    "symbolic link privilege not held" in str(e),
                    "Permission denied while attempting to create a symlink: {}\n"
                    "Please ensure the 'Create symbolic links' right is granted "
                    "to your user in the 'Local Security Policy'.", dest)
                raise e
    else:
        os.symlink(src, dest, *args, **kwargs)
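
Usage is identical on all platforms; a hedged example with hypothetical paths:

# On Windows a directory link is created as an NTFS junction when possible,
# falling back to a regular symlink (which may require the 'Create symbolic
# links' privilege); on other systems this is plain os.symlink.
symlink("/cache/artifacts/abc123", "/workspace/build/artifact")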
Example #17
File: cli.py Project: srand/jolt
 def _print_key(section, opt):
     value = config.get(section, opt, alias=alias)
     raise_error_if(value is None, "no such key: {}".format(key))
     print("{} = {}".format(key, value))
Example #18
File: cli.py Project: srand/jolt
def _config(ctx, list, delete, global_, user, key, value):
    """
    Configure Jolt.

    You can query/set/replace/unset configuration keys with this command.
    Key strings are constructed from the configuration section and the
    option separated by a dot.

    There are three different configuration sources:

       - A global configuration file

       - A user configuration file

       - Temporary configuration passed on the command line.

    When reading, the values are read from all configuration sources.
    The options --global and --user can be used to tell the command to read
    from only one of the sources. If a configuration key is available from
    multiple sources, temporary CLI configuration has priority followed by
    the user configuration file and lastly the global configuration file.

    When writing, the new values are written to the user configuration by default.
    The options --global and --user can be used to tell the command to write
    to only one of the sources.

    When removing keys, the values are removed from all sources.
    The options --global and --user can be used to restrict removal to one of
    the sources.

    To assign a value to a key:

      $ jolt config jolt.default all   # Change name of the default task

    To list existing keys:

      $ jolt config -l                 # List all existing keys

      $ jolt config -l -g              # List keys in the global config file

      $ jolt config jolt.colors        # Display the value of a key.

    To delete an existing key:

      $ jolt config -d jolt.colors

    To pass temporary configuration:

      $ jolt -c jolt.colors=true config -l

    """

    if delete and not key:
        raise click.UsageError("--delete requires KEY")

    if not key and not list:
        print(ctx.get_help())
        sys.exit(1)

    if global_ and user:
        raise click.UsageError("--global and --user are mutually exclusive")

    alias = None

    if global_:
        alias = "global"
    if user:
        alias = "user"

    def _print_key(section, opt):
        value = config.get(section, opt, alias=alias)
        raise_error_if(value is None, "no such key: {}".format(key))
        print("{} = {}".format(key, value))

    def _print_section(section):
        print("{}".format(section))

    if list:
        for section, option, value in config.items(alias):
            if option:
                print("{}.{} = {}".format(section, option, value))
            else:
                print(section)
    elif delete:
        raise_error_if(config.delete(key, alias) <= 0, "no such key: {}", key)
        config.save()
    elif key:
        section, opt = config.split(key)
        if value:
            raise_error_if(opt is None,
                           "invalid configuration key: {}".format(key))
            config.set(section, opt, value, alias)
            try:
                config.save()
            except Exception as e:
                log.exception()
                raise_error("failed to write configuration file: {}".format(e))
        else:
            if opt:
                _print_key(section, opt)
            else:
                _print_section(section)
Example #19
File: cli.py Project: srand/jolt
def build(ctx, task, network, keep_going, default, local, no_download,
          no_upload, download, upload, worker, force, salt, copy, debug,
          result, jobs):
    """
    Build task artifact.

    TASK is the name of the task to execute. It is optionally followed by a colon and
    parameter value assignments. Assignments are separated by commas. Example:

       taskname:param1=value1,param2=value2

    Default parameter values can be overridden for any task in the dependency tree
    with --default. DEFAULT is a qualified task name, just like TASK, but parameter
    assignments change default values.

    By default, a task is executed locally and the resulting artifact is stored
    in the local artifact cache. If an artifact is already available in the cache,
    no execution takes place. Artifacts are identified with a hash digest,
    constructed from hashing task attributes.

    When remote cache providers are configured, artifacts may be downloaded from and/or
    uploaded to the remote cache as execution progresses. Several options exist to control
    the behavior, such as --local which disables all remote caches.

    Distributed task execution is enabled by passing the --network option. Tasks are then
    distributed to and executed by a pool of workers, if one has been configured.

    Rebuilds can be forced with either --force or --salt. --force rebuilds the requested
    task, but not its dependencies. --salt affects the entire dependency tree. Both add
    an extra attribute to the task hash calculation in order to taint the identity and
    induce a cache miss. In both cases, existing intermediate files in build directories
    are removed before execution starts.

    """
    raise_error_if(network and local,
                   "The -n and -l flags are mutually exclusive")

    raise_error_if(network and debug,
                   "The -g and -n flags are mutually exclusive")

    raise_error_if(
        no_download and download,
        "The --download and --no-download flags are mutually exclusive")

    raise_error_if(
        no_upload and upload,
        "The --upload and --no-upload flags are mutually exclusive")

    duration = utils.duration()

    task = list(task)
    task = [utils.stable_task_name(t) for t in task]

    if network:
        _download = config.getboolean("network", "download", True)
        _upload = config.getboolean("network", "upload", True)
    else:
        _download = config.getboolean("jolt", "download", True)
        _upload = config.getboolean("jolt", "upload", True)

    if local:
        _download = False
        _upload = False
    else:
        if no_download:
            _download = False
        if no_upload:
            _upload = False
        if download:
            _download = True
        if upload:
            _upload = True

    options = JoltOptions(network=network,
                          local=local,
                          download=_download,
                          upload=_upload,
                          keep_going=keep_going,
                          default=default,
                          worker=worker,
                          debug=debug,
                          salt=salt,
                          jobs=jobs)

    acache = cache.ArtifactCache.get(options)

    executors = scheduler.ExecutorRegistry.get(options)
    if worker:
        log.set_worker()
        log.verbose("Local build as a worker")
        strategy = scheduler.WorkerStrategy(executors, acache)
    elif network:
        log.verbose("Distributed build as a user")
        strategy = scheduler.DistributedStrategy(executors, acache)
    else:
        log.verbose("Local build as a user")
        strategy = scheduler.LocalStrategy(executors, acache)

    hooks.TaskHookRegistry.get(options)
    registry = TaskRegistry.get(options)

    for params in default:
        registry.set_default_parameters(params)

    manifest = ctx.obj["manifest"]

    for mb in manifest.builds:
        for mt in mb.tasks:
            task.append(mt.name)
        for mt in mb.defaults:
            registry.set_default_parameters(mt.name)

    if force:
        for goal in task:
            registry.get_task(goal, manifest=manifest).taint = uuid.uuid4()

    gb = graph.GraphBuilder(registry, manifest, options, progress=True)
    dag = gb.build(task)

    gp = graph.GraphPruner(strategy)
    dag = gp.prune(dag)

    goal_tasks = dag.goals
    goal_task_duration = 0

    queue = scheduler.TaskQueue(strategy)

    try:
        if not dag.has_tasks():
            return

        progress = log.progress(
            "Progress",
            dag.number_of_tasks(filterfn=lambda t: not t.is_resource()),
            " tasks",
            estimates=False,
            debug=debug)

        with progress:
            while dag.has_tasks():
                # Find all tasks ready to be executed
                leafs = dag.select(lambda graph, task: task.is_ready())

                # Order the tasks by their weights to improve build times
                leafs.sort(key=lambda x: x.weight)

                while leafs:
                    task = leafs.pop()
                    queue.submit(acache, task)

                task, error = queue.wait()

                if not task:
                    dag.debug()
                    break
                elif task.is_goal() and task.duration_running:
                    goal_task_duration += task.duration_running.seconds

                if not task.is_resource():
                    progress.update(1)

                if not keep_going and error is not None:
                    queue.abort()
                    raise error

        if dag.failed:
            log.error("List of failed tasks")
            for failed in dag.failed:
                log.error("- {}", failed.log_name.strip("()"))
            raise_error("no more tasks could be executed")

        for goal in goal_tasks:
            if acache.is_available_locally(goal):
                with acache.get_artifact(goal) as artifact:
                    log.info("Location: {0}", artifact.path)
                    if copy:
                        artifact.copy("*",
                                      utils.as_dirpath(
                                          fs.path.join(
                                              workdir,
                                              click.format_filename(copy))),
                                      symlinks=True)
    except KeyboardInterrupt:
        print()
        log.warning("Interrupted by user")
        try:
            queue.abort()
            sys.exit(1)
        except KeyboardInterrupt:
            print()
            log.warning("Interrupted again, exiting")
            _exit(1)
    finally:
        log.info("Total execution time: {0} {1}", str(duration),
                 str(queue.duration_acc) if network else '')
        if result:
            with report.update() as manifest:
                manifest.duration = str(goal_task_duration)
                manifest.write(result)
Example #20
File: cli.py Project: srand/jolt
def cli(ctx, verbose, extra_verbose, config_file, debugger, profile, force,
        salt, debug, network, local, keep_going, jobs):
    """
    A task execution tool.

    When invoked without any commands and arguments, Jolt by default tries
    to execute and build the artifact of a task called `default`. To build
    artifacts of other tasks use the build subcommand.

    The Jolt command line interface is hierarchical. One set of options
    can be passed to the top-level command and a different set of options
    to the subcommands, simultaneously. For example, verbose output is
    a top-level option while forced rebuild is a build command option.
    They may be combined like this:

      $ jolt --verbose build --force taskname

    Most build command options are available also at the top-level when
    build is invoked implicitly for the default task.

    """

    global debug_enabled
    debug_enabled = debugger

    log.verbose("Jolt command: {}",
                " ".join([fs.path.basename(sys.argv[0])] + sys.argv[1:]))
    log.verbose("Jolt host: {}", environ.get("HOSTNAME", "localhost"))
    log.verbose("Jolt install path: {}", fs.path.dirname(__file__))

    if ctx.invoked_subcommand in ["config"]:
        # Don't attempt to load any task recipes as they might require
        # plugins that are not yet configured.
        return

    if ctx.invoked_subcommand is None:
        build = ctx.command.get_command(ctx, "build")

    manifest = JoltManifest()
    utils.call_and_catch(manifest.parse)
    manifest.process_import()
    ctx.obj["manifest"] = manifest

    if manifest.version:
        from jolt.version_utils import requirement, version
        req = requirement(manifest.version)
        ver = version(__version__)
        raise_error_if(not req.satisfied(ver),
                       "this project requires Jolt version {} (running {})",
                       req, __version__)

    loader = JoltLoader.get()
    tasks = loader.load()
    for cls in tasks:
        TaskRegistry.get().add_task_class(cls)

    if ctx.invoked_subcommand in ["build", "clean"] and loader.joltdir:
        ctx.obj["workspace_lock"] = utils.LockFile(
            fs.path.join(loader.joltdir, "build"), log.info,
            "Workspace is locked by another process, please wait...")
        atexit.register(ctx.obj["workspace_lock"].close)

    # If no command is given, we default to building the default task.
    # If the default task doesn't exist, help is printed inside build().
    if ctx.invoked_subcommand is None:
        task = config.get("jolt", "default", "default")
        taskname, _ = utils.parse_task_name(task)
        if TaskRegistry.get().get_task_class(taskname) is not None:
            ctx.invoke(build,
                       task=[task],
                       force=force,
                       salt=salt,
                       debug=debug,
                       network=network,
                       local=local,
                       keep_going=keep_going,
                       jobs=jobs)
        else:
            print(cli.get_help(ctx))
            sys.exit(1)
Example #21
 def __init__(self):
     self._path = config.get("symlinks", "path", "artifacts")
     raise_error_if(not self._path, "symlinks.path not configured")
Example #22
 def __init__(self, uri=None):
     uri = config.get("dashboard", "uri", "http://dashboard")
     error.raise_error_if(not uri, "dashboard.uri not configured")
     super().__init__(plugin="dashboard", uri=uri + "/api/v1/tasks", local=False)
Example #23
 def __init__(self):
     self._uri = config.get("logstash", "http.uri")
     self._failed_enabled = config.getboolean("logstash", "failed", False)
     self._finished_enabled = config.getboolean("logstash", "finished",
                                                False)
     raise_error_if(not self._uri, "logstash.http.uri not configured")
Example #24
def register(factory):
    raise_error_if(not issubclass(factory, LoaderFactory),
                   "{} is not a LoaderFactory", factory.__name__)
    _loaders.append(factory)
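
A hedged sketch of how a plugin might use this hook; ExampleLoaderFactory and its create() method are illustrative names, not part of jolt:

class ExampleLoaderFactory(LoaderFactory):
    def create(self):
        ...  # return a loader instance; the real interface is defined by jolt's loader module

register(ExampleLoaderFactory)   # accepted: subclass of LoaderFactory
register(dict)                   # rejected: "dict is not a LoaderFactory"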
Example #25
    def load(self):
        raise_error_if(self.source is not None, "recipe already loaded: {}",
                       self.path)

        with open(self.path) as f:
            self.source = f.read()
Example #26
    def save(self):
        raise_error_if(self.source is None, "recipe source unknown: {}",
                       self.path)

        with open(self.path, "w") as f:
            f.write(self.source)
Example #27
def register(cls):
    raise_error_if(not issubclass(cls, YamlTaskBuilder),
                   "{} is not a YamlTaskBuilder", cls.__name__)
    _builders[cls.name] = cls()