def copy_closures(self, configs_path, include, exclude, max_concurrent_copy):
    """Copy the closure of each machine configuration to the corresponding machine.

    configs_path -- directory containing one symlink per machine name,
        each pointing at that machine's built system toplevel
    include, exclude -- machine-name filters forwarded to should_do()
    max_concurrent_copy -- upper bound on simultaneous closure copies

    Raises an Exception if a machine's closure symlink is missing from
    configs_path (i.e. build_configs() did not produce it).
    """
    def worker(m):
        if not should_do(m, include, exclude): return
        m.logger.log("copying closure...")
        # Resolve the per-machine symlink to the actual store path.
        # (was: configs_path + "/" + m.name — use os.path.join instead
        # of manual string concatenation)
        m.new_toplevel = os.path.realpath(os.path.join(configs_path, m.name))
        if not os.path.exists(m.new_toplevel):
            raise Exception("can't find closure of machine ‘{0}’".format(m.name))
        m.copy_closure_to(m.new_toplevel)

    nixops.parallel.run_tasks(
        nr_workers=max_concurrent_copy,
        tasks=self.active.itervalues(), worker_fun=worker)

    self.logger.log(ansi_success(
        "{0}> closures copied successfully".format(self.name),
        outfile=self.logger._log_file))
def _deploy(self, dry_run=False, build_only=False, create_only=False, copy_only=False,
            evaluate_only=False, include=[], exclude=[], check=False, kill_obsolete=False,
            allow_reboot=False, allow_recreate=False, force_reboot=False,
            max_concurrent_copy=5, sync=True, always_activate=False, repair=False,
            dry_activate=False):
    """Perform the deployment defined by the deployment specification.

    The phases run in order, each gated by an "*_only" flag that stops
    the deployment early:
      evaluate -> create resources -> build configs -> copy closures
      -> activate configs -> post-activation cleanup

    include, exclude -- machine/resource name filters passed to should_do()
    check -- verify machine state against reality while creating/activating
    kill_obsolete -- destroy resources no longer in the specification
    max_concurrent_copy -- parallelism limit for closure copying
    dry_activate -- show what activation would do, without doing it
    """

    self.evaluate_active(include, exclude, kill_obsolete)

    if evaluate_only: return

    # Assign each resource an index if it doesn't have one.
    for r in self.active_resources.itervalues():
        if r.index is None:  # was "== None"; identity test is the Python idiom
            r.index = self._get_free_resource_index()
            # FIXME: Logger should be able to do coloring without the need
            # for an index maybe?
            r.logger.register_index(r.index)

    self.logger.update_log_prefixes()

    # Start or update the active resources. Non-machine resources
    # are created first, because machines may depend on them
    # (e.g. EC2 machines depend on EC2 key pairs or EBS volumes).
    # FIXME: would be nice to have a more fine-grained topological
    # sort.
    if not dry_run and not build_only:

        for r in self.active_resources.itervalues():
            defn = self.definitions[r.name]
            if r.get_type() != defn.get_type():
                raise Exception("the type of resource ‘{0}’ changed from ‘{1}’ to ‘{2}’, which is currently unsupported"
                                .format(r.name, r.get_type(), defn.get_type()))
            # Per-resource synchronization: dependents wait on this event.
            r._created_event = threading.Event()
            r._errored = False

        def worker(r):
            try:
                if not should_do(r, include, exclude): return

                # Sleep until all dependencies of this resource have
                # been created.
                deps = r.create_after(self.active_resources.itervalues(), self.definitions[r.name])
                for dep in deps:
                    dep._created_event.wait()
                    # !!! Should we print a message here?
                    if dep._errored:
                        r._errored = True
                        return

                # Now create the resource itself.
                if not r.creation_time:
                    r.creation_time = int(time.time())
                r.create(self.definitions[r.name], check=check,
                         allow_reboot=allow_reboot, allow_recreate=allow_recreate)

                if is_machine(r):
                    # The first time the machine is created,
                    # record the state version. We get it from
                    # /etc/os-release, rather than from the
                    # configuration's state.systemVersion
                    # attribute, because the machine may have been
                    # booted from an older NixOS image.
                    if not r.state_version:
                        os_release = r.run_command("cat /etc/os-release", capture_stdout=True)
                        # Raw string (was a plain string with "\."): avoids
                        # a fragile/deprecated escape sequence in the regex.
                        match = re.search(r'VERSION_ID="([0-9]+\.[0-9]+).*"', os_release)
                        if match:
                            r.state_version = match.group(1)
                            r.log("setting state version to {0}".format(r.state_version))
                        else:
                            r.warn("cannot determine NixOS version")

                    r.wait_for_ssh(check=check)
                    r.generate_vpn_key(check=check)

            except:
                # Bare except is deliberate: any failure (including
                # KeyboardInterrupt) must flag dependents via _errored.
                r._errored = True
                raise
            finally:
                # Always unblock dependents, even on failure.
                r._created_event.set()

        nixops.parallel.run_tasks(nr_workers=-1,
                                  tasks=self.active_resources.itervalues(),
                                  worker_fun=worker)

    if create_only: return

    # Build the machine configurations.
    if dry_run:
        self.build_configs(dry_run=dry_run, repair=repair, include=include, exclude=exclude)
        return

    # Record configs_path in the state so that the ‘info’ command
    # can show whether machines have an outdated configuration.
    self.configs_path = self.build_configs(repair=repair, include=include, exclude=exclude)

    if build_only: return

    # Copy the closures of the machine configurations to the
    # target machines.
    self.copy_closures(self.configs_path, include=include, exclude=exclude,
                       max_concurrent_copy=max_concurrent_copy)

    if copy_only: return

    # Activate the configurations.
    self.activate_configs(self.configs_path, include=include, exclude=exclude,
                          allow_reboot=allow_reboot, force_reboot=force_reboot,
                          check=check, sync=sync, always_activate=always_activate,
                          dry_activate=dry_activate)

    if dry_activate: return

    # Trigger cleanup of resources, e.g. disks that need to be detached etc. Needs to be
    # done after activation to make sure they are not in use anymore.
    def cleanup_worker(r):
        if not should_do(r, include, exclude): return

        # Run the resource's post-activation hook.
        r.after_activation(self.definitions[r.name])

    nixops.parallel.run_tasks(nr_workers=-1,
                              tasks=self.active_resources.itervalues(),
                              worker_fun=cleanup_worker)

    self.logger.log(ansi_success("{0}> deployment finished successfully".format(self.name),
                                 outfile=self.logger._log_file))
def _deploy(self, dry_run=False, build_only=False, create_only=False, copy_only=False,
            include=[], exclude=[], check=False, kill_obsolete=False,
            allow_reboot=False, allow_recreate=False, force_reboot=False,
            max_concurrent_copy=5, sync=True, always_activate=False, repair=False):
    """Perform the deployment defined by the deployment specification.

    Phases run in order, each gated by a "*_only" flag that stops the
    deployment early:
      create resources -> build configs -> copy closures
      -> activate configs -> post-activation cleanup

    include, exclude -- name filters passed to should_do()
    kill_obsolete -- destroy resources no longer in the specification
    max_concurrent_copy -- parallelism limit for closure copying

    NOTE(review): this appears to be an older duplicate of the _deploy
    variant defined earlier in this file (it lacks the evaluate_only and
    dry_activate parameters) — confirm which definition is intended to win.
    """
    self.evaluate_active(include, exclude, kill_obsolete)

    # Assign each resource an index if it doesn't have one.
    for r in self.active_resources.itervalues():
        if r.index == None:
            r.index = self._get_free_resource_index()
            # FIXME: Logger should be able to do coloring without the need
            # for an index maybe?
            r.logger.register_index(r.index)

    self.logger.update_log_prefixes()

    # Start or update the active resources.  Non-machine resources
    # are created first, because machines may depend on them
    # (e.g. EC2 machines depend on EC2 key pairs or EBS volumes).
    # FIXME: would be nice to have a more fine-grained topological
    # sort.
    if not dry_run and not build_only:

        for r in self.active_resources.itervalues():
            defn = self.definitions[r.name]
            if r.get_type() != defn.get_type():
                raise Exception("the type of resource ‘{0}’ changed from ‘{1}’ to ‘{2}’, which is currently unsupported"
                                .format(r.name, r.get_type(), defn.get_type()))
            # Per-resource synchronization: dependents wait on this event.
            r._created_event = threading.Event()
            r._errored = False

        def worker(r):
            try:
                if not should_do(r, include, exclude): return

                # Sleep until all dependencies of this resource have
                # been created.
                deps = r.create_after(self.active_resources.itervalues(), self.definitions[r.name])
                for dep in deps:
                    dep._created_event.wait()
                    # !!! Should we print a message here?
                    if dep._errored:
                        r._errored = True
                        return

                # Now create the resource itself.
                if not r.creation_time:
                    r.creation_time = int(time.time())
                r.create(self.definitions[r.name], check=check,
                         allow_reboot=allow_reboot, allow_recreate=allow_recreate)

                if is_machine(r):
                    # The first time the machine is created,
                    # record the state version. We get it from
                    # /etc/os-release, rather than from the
                    # configuration's state.systemVersion
                    # attribute, because the machine may have been
                    # booted from an older NixOS image.
                    if not r.state_version:
                        os_release = r.run_command("cat /etc/os-release", capture_stdout=True)
                        match = re.search('VERSION_ID="([0-9]+\.[0-9]+)\..*"', os_release)
                        if match:
                            r.state_version = match.group(1)

                    r.wait_for_ssh(check=check)
                    r.generate_vpn_key(check=check)

            except:
                # Bare except is deliberate: any failure must flag
                # dependents via _errored before re-raising.
                r._errored = True
                raise
            finally:
                # Always unblock dependents, even on failure.
                r._created_event.set()

        nixops.parallel.run_tasks(nr_workers=-1,
                                  tasks=self.active_resources.itervalues(),
                                  worker_fun=worker)

    if create_only: return

    # Build the machine configurations.
    if dry_run:
        self.build_configs(dry_run=True, repair=repair, include=include, exclude=exclude)
        return

    # Record configs_path in the state so that the ‘info’ command
    # can show whether machines have an outdated configuration.
    self.configs_path = self.build_configs(repair=repair, include=include, exclude=exclude)

    if build_only: return

    # Copy the closures of the machine configurations to the
    # target machines.
    self.copy_closures(self.configs_path, include=include, exclude=exclude,
                       max_concurrent_copy=max_concurrent_copy)

    if copy_only: return

    # Activate the configurations.
    self.activate_configs(self.configs_path, include=include, exclude=exclude,
                          allow_reboot=allow_reboot, force_reboot=force_reboot,
                          check=check, sync=sync, always_activate=always_activate)

    # Trigger cleanup of resources, e.g. disks that need to be detached etc. Needs to be
    # done after activation to make sure they are not in use anymore.
    def cleanup_worker(r):
        if not should_do(r, include, exclude): return

        # Run the resource's post-activation hook.
        r.after_activation(self.definitions[r.name])

    nixops.parallel.run_tasks(nr_workers=-1,
                              tasks=self.active_resources.itervalues(),
                              worker_fun=cleanup_worker)

    self.logger.log(ansi_success("{0}> deployment finished successfully".format(self.name),
                                 outfile=self.logger._log_file))
def success(self, msg):
    """Log *msg* formatted as a success message on the main log file."""
    log_file = self.main_logger._log_file
    self.log(ansi_success(msg, outfile=log_file))