def __init__(self, cache):
    """Initialize disk-volume artifact storage from the plugin config.

    Reads the mandatory volume path, creates the directory tree if it
    does not exist, and records the upload/download policy flags.
    """
    super(DiskVolume, self).__init__()
    self._cache = cache
    # The volume path is mandatory; refuse to start without it.
    self._path = config.get(NAME, "path")
    raise_error_if(not self._path, "volume path not configured")
    fs.makedirs(self._path)
    # Transfers are enabled by default unless configured otherwise.
    self._upload = config.getboolean(NAME, "upload", True)
    self._download = config.getboolean(NAME, "download", True)
def __init__(self, cache):
    """Initialize FTP-backed artifact storage from the plugin config.

    Requires a host URI; path, transfer policy and TLS usage are
    optional with sensible defaults.
    """
    super(FtpStorage, self).__init__()
    self._cache = cache
    # Host is mandatory; everything else has a default.
    self._uri = config.get(NAME, "host")
    raise_error_if(not self._uri, "ftp URI not configured")
    # Remote directory for artifacts; empty string means server default.
    self._path = config.get(NAME, "path", "")
    self._upload, self._download = (
        config.getboolean(NAME, "upload", True),
        config.getboolean(NAME, "download", True),
    )
    self._tls = config.getboolean(NAME, "tls", False)
    # Set to True at runtime if the server becomes unreachable.
    self._disabled = False
def __init__(self, cache):
    """Initialize HTTP-backed artifact storage from the plugin config.

    The configured URI is normalized to always end with a slash so
    that artifact names can be appended directly.
    """
    super(Http, self).__init__()
    self._cache = cache
    self._uri = config.get(NAME, "uri")
    raise_error_if(not self._uri, "HTTP URI not configured")
    # Normalize so later path joins can simply concatenate.
    if not self._uri.endswith("/"):
        self._uri += "/"
    self._upload = config.getboolean(NAME, "upload", True)
    self._download = config.getboolean(NAME, "download", True)
    # Set to True at runtime if the server becomes unreachable.
    self._disabled = False
def __init__(self):
    """Initialize the e-mail report plugin from the [email] config section.

    Mandatory settings: ``email.server`` and ``email.to``. Optional
    settings fall back to reasonable defaults (sender, subject, XSLT
    stylesheet, and whether to mail on failure/success).

    Raises (via raise_error_if) when a mandatory setting is missing.
    """
    self._server = config.get("email", "server")
    self._to = config.get("email", "to")
    self._from = config.get("email", "from", "jolt@localhost")
    self._subject = config.get("email", "subject", "Jolt Build Report")
    # Default stylesheet ships next to this module.
    self._stylesheet = config.get(
        "email", "stylesheet",
        fs.path.join(fs.path.dirname(__file__), "email.xslt"))
    self._artifact = config.get("email", "artifact")
    self._failure = config.getboolean("email", "on_failure", True)
    self._success = config.getboolean("email", "on_success", True)
    raise_error_if(not self._server, "email.server not configured")
    # BUG FIX: this check previously re-tested self._server, so a
    # missing 'to' address was silently accepted despite the message.
    raise_error_if(not self._to, "email.to not configured")
def task_post_ninja_file(self, task, deps, tools):
    """Post-ninja hook: persist the object-cache manifests.

    Does nothing when the ninja-cache plugin is disabled via config.
    """
    if config.getboolean("ninja-cache", "disable", False):
        return
    objcache = ninjacli.Cache(
        tools.builddir("ninja", task.task.incremental))
    objcache.load_manifests(
        tools.getenv("JOLT_CACHEDIR"),
        tools.getenv("JOLT_CANONTASK"))
    objcache.save()
def task_pre_ninja_file(self, task, deps, tools):
    """Pre-ninja hook: install compiler wrappers for object caching.

    Only applies to non-shared CXXLibrary tasks; all other task types
    are left untouched. The wrappers route compiler invocations
    through ninjacli.py, configured via environment variables.
    """
    if not isinstance(task.task, ninja.CXXLibrary) or task.task.shared:
        return
    # Both C and C++ compilations go through the same wrapper script.
    cli = fs.path.join(fs.path.dirname(__file__), "ninjacli.py")
    wrapper = "{} {} -- ".format(sys.executable, cli)
    tools.setenv("CCWRAP", wrapper)
    tools.setenv("CXXWRAP", wrapper)
    tools.setenv("JOLT_CACHEDIR", cache.ArtifactCache.get().root)
    tools.setenv("JOLT_CANONTASK", utils.canonical(task.task.name))
    disabled = config.getboolean("ninja-cache", "disable", False)
    tools.setenv("NINJACACHE_DISABLE", "1" if disabled else "0")
    # NOTE(review): passes an int where the other calls pass str —
    # confirm tools.setenv coerces values to strings.
    tools.setenv("NINJACACHE_MAXARTIFACTS",
                 config.getint("ninja-cache", "maxartifacts", 0))
    if log.is_verbose():
        tools.setenv("NINJACACHE_VERBOSE", "1")
def __init__(self, plugin="telemetry", uri=None, local=True, network=True,
             queued=True, started=True, failed=True, finished=True):
    """Initialize telemetry event posting.

    Settings are read from the config section named by ``plugin``;
    each keyword argument provides the default used when the setting
    is absent. An explicit ``uri`` argument takes precedence over the
    configured one.

    Raises (via raise_error_if) when no URI is available.
    """
    self._uri = uri or config.get(plugin, "uri", uri)
    self._network = config.getboolean(plugin, "network", network)
    self._local = config.getboolean(plugin, "local", local)
    self._queued = config.getboolean(plugin, "queued", queued)
    self._started = config.getboolean(plugin, "started", started)
    self._failed = config.getboolean(plugin, "failed", failed)
    self._finished = config.getboolean(plugin, "finished", finished)
    # BUG FIX: the message previously hardcoded "telemetry.uri", which
    # is misleading when a different plugin section name is in use.
    raise_error_if(not self._uri, "{}.uri not configured".format(plugin))
def build(ctx, task, network, keep_going, default, local, no_download, no_upload, download, upload, worker, force, salt, copy, debug, result, jobs):
    """ Build task artifact.

    TASK is the name of the task to execute. It is optionally followed by a colon and
    parameter value assignments. Assignments are separated by commas. Example:

       taskname:param1=value1,param2=value2

    Default parameter values can be overridden for any task in the dependency tree
    with --default. DEFAULT is a qualified task name, just like TASK, but parameter
    assignments change default values.

    By default, a task is executed locally and the resulting artifact is stored
    in the local artifact cache. If an artifact is already available in the cache,
    no execution takes place. Artifacts are identified with a hash digest,
    constructed from hashing task attributes.

    When remote cache providers are configured, artifacts may be downloaded from
    and/or uploaded to the remote cache as execution progresses. Several options
    exist to control the behavior, such as --local which disables all remote
    caches.

    Distributed task execution is enabled by passing the --network option. Tasks
    are then distributed to and executed by a pool of workers, if one has been
    configured.

    Rebuilds can be forced with either --force or --salt. --force rebuilds the
    requested task, but not its dependencies. --salt affects the entire dependency
    tree. Both add an extra attribute to the task hash calculation in order to
    taint the identity and induce a cache miss. In both cases, existing
    intermediate files in build directories are removed before execution starts.
    """
    # Reject mutually-exclusive flag combinations up front.
    raise_error_if(network and local, "The -n and -l flags are mutually exclusive")
    raise_error_if(network and debug, "The -g and -n flags are mutually exclusive")
    raise_error_if(
        no_download and download,
        "The --download and --no-download flags are mutually exclusive")
    raise_error_if(
        no_upload and upload,
        "The --upload and --no-upload flags are mutually exclusive")
    duration = utils.duration()
    # Normalize the requested task names to their stable canonical form.
    task = list(task)
    task = [utils.stable_task_name(t) for t in task]
    # Base transfer policy comes from different config sections depending
    # on whether this is a networked build; explicit flags then override.
    if network:
        _download = config.getboolean("network", "download", True)
        _upload = config.getboolean("network", "upload", True)
    else:
        _download = config.getboolean("jolt", "download", True)
        _upload = config.getboolean("jolt", "upload", True)
    if local:
        # --local disables all remote cache traffic outright.
        _download = False
        _upload = False
    else:
        if no_download:
            _download = False
        if no_upload:
            _upload = False
        if download:
            _download = True
        if upload:
            _upload = True
    options = JoltOptions(network=network,
                          local=local,
                          download=_download,
                          upload=_upload,
                          keep_going=keep_going,
                          default=default,
                          worker=worker,
                          debug=debug,
                          salt=salt,
                          jobs=jobs)
    acache = cache.ArtifactCache.get(options)
    executors = scheduler.ExecutorRegistry.get(options)
    # Pick the execution strategy: worker mode, distributed, or plain local.
    if worker:
        log.set_worker()
        log.verbose("Local build as a worker")
        strategy = scheduler.WorkerStrategy(executors, acache)
    elif network:
        log.verbose("Distributed build as a user")
        strategy = scheduler.DistributedStrategy(executors, acache)
    else:
        log.verbose("Local build as a user")
        strategy = scheduler.LocalStrategy(executors, acache)
    hooks.TaskHookRegistry.get(options)
    registry = TaskRegistry.get(options)
    for params in default:
        registry.set_default_parameters(params)
    # Merge in tasks and defaults contributed by the manifest.
    manifest = ctx.obj["manifest"]
    for mb in manifest.builds:
        for mt in mb.tasks:
            task.append(mt.name)
        for mt in mb.defaults:
            registry.set_default_parameters(mt.name)
    if force:
        # Taint each goal's identity with a random UUID to force a cache miss.
        for goal in task:
            registry.get_task(goal, manifest=manifest).taint = uuid.uuid4()
    gb = graph.GraphBuilder(registry, manifest, options, progress=True)
    dag = gb.build(task)
    gp = graph.GraphPruner(strategy)
    dag = gp.prune(dag)
    goal_tasks = dag.goals
    goal_task_duration = 0
    queue = scheduler.TaskQueue(strategy)
    try:
        if not dag.has_tasks():
            return
        # Resources are excluded from the progress total.
        progress = log.progress(
            "Progress",
            dag.number_of_tasks(filterfn=lambda t: not t.is_resource()),
            " tasks",
            estimates=False,
            debug=debug)
        with progress:
            while dag.has_tasks():
                # Find all tasks ready to be executed
                leafs = dag.select(lambda graph, task: task.is_ready())
                # Order the tasks by their weights to improve build times
                leafs.sort(key=lambda x: x.weight)
                while leafs:
                    task = leafs.pop()
                    queue.submit(acache, task)
                # Block until any submitted task completes (or fails).
                task, error = queue.wait()
                if not task:
                    dag.debug()
                    break
                elif task.is_goal() and task.duration_running:
                    goal_task_duration += task.duration_running.seconds
                if not task.is_resource():
                    progress.update(1)
                if not keep_going and error is not None:
                    queue.abort()
                    raise error
        if dag.failed:
            log.error("List of failed tasks")
            for failed in dag.failed:
                log.error("- {}", failed.log_name.strip("()"))
            raise_error("no more tasks could be executed")
        # Report where the goal artifacts ended up; optionally copy them out.
        for goal in goal_tasks:
            if acache.is_available_locally(goal):
                with acache.get_artifact(goal) as artifact:
                    log.info("Location: {0}", artifact.path)
                    if copy:
                        # NOTE(review): 'workdir' is not defined in this
                        # function — presumably a module-level variable
                        # holding the invocation directory; confirm.
                        artifact.copy(
                            "*",
                            utils.as_dirpath(
                                fs.path.join(
                                    workdir,
                                    click.format_filename(copy))),
                            symlinks=True)
    except KeyboardInterrupt:
        print()
        log.warning("Interrupted by user")
        try:
            queue.abort()
            sys.exit(1)
        except KeyboardInterrupt:
            # Second Ctrl-C: give up on graceful abort and exit hard.
            print()
            log.warning("Interrupted again, exiting")
            _exit(1)
    finally:
        log.info("Total execution time: {0} {1}",
                 str(duration),
                 str(queue.duration_acc) if network else '')
        if result:
            # Write the build report manifest when --result was given.
            with report.update() as manifest:
                manifest.duration = str(goal_task_duration)
                manifest.write(result)
def __init__(self):
    """Initialize the logstash plugin; logstash.http.uri is mandatory.

    Posting of failed/finished task events is opt-in per event type.
    """
    self._uri = config.get("logstash", "http.uri")
    for attr, key in (("_failed_enabled", "failed"),
                      ("_finished_enabled", "finished")):
        setattr(self, attr, config.getboolean("logstash", key, False))
    raise_error_if(not self._uri, "logstash.http.uri not configured")