def restore(self):
    domain = helpers.get_astute_dict()["DNS_DOMAIN"]
    dirname = "/var/log/remote/"
    pairs = []
    with fuel_client.set_auth_context(self.context):
        for node in objects.Node.get_all():
            fqdn = node.data["meta"]["system"]["fqdn"]
            # log creation not required for nodes in bootstrap
            if fqdn.startswith('bootstrap'):
                continue
            pairs.append((fqdn, node.data["ip"]))
    subprocess.call(["systemctl", "stop", "rsyslog"])
    try:
        for fqdn, ip_addr in pairs:
            if not fqdn.endswith(domain):
                continue
            ip_addr_path = os.path.join(dirname, ip_addr)
            fqdn_path = os.path.join(dirname, fqdn)
            if os.path.islink(ip_addr_path):
                continue
            if os.path.isdir(ip_addr_path):
                os.rename(ip_addr_path, fqdn_path)
            else:
                os.mkdir(fqdn_path)
            os.symlink(fqdn, ip_addr_path)
    finally:
        subprocess.call(["systemctl", "start", "rsyslog"])

def prepare():
    if not os.path.isdir(magic_consts.FUEL_CACHE):
        os.makedirs(magic_consts.FUEL_CACHE)
    subprocess.call(["yum", "-y", "install"] + magic_consts.PACKAGES)
    subprocess.call(["pip", "install", "wheel"])
    # From patch_all_containers
    apply_patches()

def move_nodes(env, nodes):
    env_id = env.data['id']
    for node in nodes:
        node_id = node.data['id']
        subprocess.call(
            ["fuel2", "env", "move", "node", str(node_id), str(env_id)])
    wait_for_nodes(nodes, "discover")

def restore(self):
    def get_release_key(release):
        return (release['version'], release['name'])

    with open(magic_consts.OPENSTACK_FIXTURES) as f:
        fixtures = yaml.load(f)
    loaded_existing_releases = self.__get_request("/api/v1/releases/")
    existing_releases = set(map(get_release_key, loaded_existing_releases))
    releases = self.extend_fixtures(fixtures)
    for release in releases:
        key = get_release_key(release)
        if key in existing_releases:
            LOG.debug("Skipping upload of the already existing release: "
                      "%s - %s", release['name'], release['version'])
            continue
        self.__post_request("/api/v1/releases/", release)
    subprocess.call(
        [
            "fuel", "release", "--sync-deployment-tasks",
            "--dir", "/etc/puppet/",
        ],
        env=self.context.get_credentials_env())

def update_cpio(img_path, dir_path=None):
    tmp_dir = tempfile.mkdtemp(dir=dir_path)
    try:
        with subprocess.popen(["gunzip", "-c", img_path],
                              stdout=subprocess.PIPE) as proc:
            subprocess.call(["cpio", "-id"], stdin=proc.stdout, cwd=tmp_dir)
        yield tmp_dir
        tmp_dir_len = len(tmp_dir)
        with tempfile.NamedTemporaryFile(dir=dir_path) as new_img:
            with subprocess.popen(["cpio", "--format", "newc", "-o"],
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  cwd=tmp_dir) as cpio:
                with subprocess.popen(["gzip", "-c"],
                                      stdin=cpio.stdout,
                                      stdout=new_img,
                                      cwd=tmp_dir):
                    for path, dirs, files in os.walk(tmp_dir):
                        for name in itertools.chain(dirs, files):
                            p_name = os.path.join(
                                path, name)[tmp_dir_len + 1:]
                            cpio.stdin.write("{0}\n".format(p_name))
                    cpio.stdin.close()
            shutil.move(new_img.name, img_path)
            new_img.delete = False
    finally:
        shutil.rmtree(tmp_dir)

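# The update_cpio() function above yields the unpacked image directory, so
# it is evidently meant to be used as a context manager (presumably
# decorated with contextlib.contextmanager in its module). A hypothetical
# usage sketch, reusing the initramfs path and the patch_fuel_agent()
# helper that appear in the patch_initramfs() snippets further below:
with update_cpio("/var/www/nailgun/bootstrap/initramfs.img") as tmp_dir:
    patch_fuel_agent(tmp_dir)  # any in-place edit of the unpacked tree
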
def systems_edit_profile(profile_name, new_profile_name):
    out = subprocess.call_output(
        ["cobbler", "system", "find", "--profile", profile_name])
    system_names = out.strip().split()
    for system_name in system_names:
        subprocess.call(["cobbler", "system", "edit",
                         "--name", system_name,
                         "--profile", new_profile_name])

def restore(self):
    # NOTE(akscram): Ubuntu systems created in the 7.0 release use the
    #                'bootstrap' profile that was removed in 9.0.
    with cobbler.rename_bootstrap_profile_for_systems():
        super(CobblerArchivator, self).restore()
    subprocess.call(["systemctl", "stop", "cobblerd"])
    puppet.apply_task("cobbler")

def patch_img():
    root_img = os.path.join(magic_consts.ACTIVE_IMG_PATH, "root.squashfs")
    active_metadata_path = os.path.join(
        magic_consts.ACTIVE_IMG_PATH, "metadata.yaml")
    patch_file = os.path.join(magic_consts.CWD, "patches/fuel_agent/patch")
    path_archname_pairs = [(os.path.join(magic_consts.ACTIVE_IMG_PATH, p), p)
                           for p in ["vmlinuz", "initrd.img"]]
    with temp_util.temp_dir() as temp_dir:
        patched_img = os.path.join(temp_dir, "root.squashfs")
        patched_metadata_path = os.path.join(temp_dir, "metadata.yaml")
        _patch_squashfs(root_img, patched_img, patch_file)
        _mk_metadata(active_metadata_path, patched_metadata_path, patched_img)
        path_archname_pairs.append((patched_img, "root.squashfs"))
        path_archname_pairs.append((patched_metadata_path, "metadata.yaml"))
        with tempfile.NamedTemporaryFile() as archive_file:
            with tarfile.open(name=archive_file.name, mode="w:gz") as archive:
                for path, archname in path_archname_pairs:
                    archive.add(path, archname)
            LOG.info("Import image using fuel-bootstrap")
            subprocess.call(["fuel-bootstrap", "import", archive_file.name])
            LOG.info("Activate image using `fuel-bootstrap activate`")

def apply_all_tasks():
    try:
        subprocess.call([magic_consts.PUPPET_APPLY_TASKS_SCRIPT])
    except subprocess.CalledProcessError as exc:
        LOG.error("Cannot apply Puppet state on host: %s", exc)
        raise

def prepare():
    if not os.path.isdir(magic_consts.FUEL_CACHE):
        os.makedirs(magic_consts.FUEL_CACHE)
    subprocess.call(["yum", "-y", "install"] + magic_consts.PACKAGES)
    # From patch_all_containers
    apply_patches()
    docker.run_in_container("nailgun", ["pkill", "-f", "wsgi"])
    patch_initramfs()

def move_nodes(env, nodes):
    env_id = env.data['id']
    for node in nodes:
        node_id = node.data['id']
        subprocess.call(
            ["fuel2", "env", "move", "node", str(node_id), str(env_id)])
    LOG.info("Nodes provision started. Please wait...")
    wait_for_nodes(nodes, "provisioned")

def install_octane_nailgun():
    octane_nailgun = os.path.join(magic_consts.CWD, '..', 'octane_nailgun')
    subprocess.call(["python", "setup.py", "bdist_wheel"], cwd=octane_nailgun)
    wheel = glob.glob(os.path.join(octane_nailgun, 'dist', '*.whl'))[0]
    subprocess.call(["dockerctl", "copy", wheel, "nailgun:/root/"])
    docker.run_in_container("nailgun", ["pip", "install", "-U",
                                        "/root/" + os.path.basename(wheel)])
    docker.run_in_container("nailgun", ["pkill", "-f", "wsgi"])

def _patch_squashfs(root_img, patched_img, *patches):
    with temp_util.temp_dir() as patch_dir:
        LOG.info("unsquash root image to temporary directory")
        subprocess.call(["unsquashfs", "-f", "-d", patch_dir, root_img])
        LOG.info("apply patch to root image")
        patch.patch_apply(patch_dir, patches)
        LOG.info("create new root.squashfs image")
        subprocess.call(["mksquashfs", patch_dir, patched_img])

def restore(self):
    dump = self.archive.extractfile(self.filename)
    subprocess.call(["systemctl", "stop"] + self.services)
    subprocess.call(["sudo", "-u", "postgres", "dropdb", "--if-exists",
                     self.db])
    with subprocess.popen(["sudo", "-u", "postgres", "psql"],
                          stdin=subprocess.PIPE) as process:
        shutil.copyfileobj(dump, process.stdin)
    puppet.apply_task(self.db)

def restore(self):
    super(SshArchivator, self).restore()
    subprocess.call(
        ["fuel-bootstrap", "build", "--activate"],
        env=self.context.get_credentials_env(),
        stderr_log_level=logging.INFO,
    )
    # Remove old images because they were created with the old SSH key pair
    fuel_bootstrap.delete_not_active_images()

def patch_puppet(revert=False):
    direction = "-R" if revert else "-N"
    puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
    for d in os.listdir(puppet_patch_dir):
        d = os.path.join(puppet_patch_dir, d)
        if not os.path.isdir(d):
            continue
        with open(os.path.join(d, "patch")) as patch:
            subprocess.call(["patch", direction, "-p3"], stdin=patch,
                            cwd=magic_consts.PUPPET_DIR)

def apply_host():
    cmd = ['puppet', 'apply', '-d', '-v']
    path = os.path.join(magic_consts.PUPPET_DIR, 'nailgun', 'examples',
                        'host-only.pp')
    cmd.append(path)
    try:
        subprocess.call(cmd)
    except subprocess.CalledProcessError as exc:
        LOG.error("Cannot apply Puppet state on host: %s", exc.message)
        raise

def stop_container(container):
    _container_action(container, "stop")
    container_id = subprocess.call_output([
        'docker', 'ps', '--filter', 'name={0}'.format(container),
        '--format', '{{.ID}}',
    ]).strip()
    if container_id:
        subprocess.call(["docker", "stop", container_id])

def restore(self):
    super(NailgunPluginsArchivator, self).restore()
    if os.path.exists(self.path):
        subprocess.call([
            "fuel", "plugins", "--sync",
            "--user", self.context.user,
            "--password", self.context.password,
        ])

def patch_initramfs():
    backup = magic_consts.BOOTSTRAP_INITRAMFS + '.bkup'
    chroot = tempfile.mkdtemp()
    try:
        os.rename(magic_consts.BOOTSTRAP_INITRAMFS, backup)
        subprocess.call("gunzip -c {0} | cpio -id".format(backup),
                        shell=True, cwd=chroot)
        patch_fuel_agent(chroot)
        with open(magic_consts.BOOTSTRAP_INITRAMFS, "wb") as f:
            subprocess.call("find | grep -v '^\.$' | cpio --format newc -o"
                            " | gzip -c",
                            shell=True, stdout=f, cwd=chroot)
    finally:
        shutil.rmtree(chroot)

def patch_initramfs():
    bootstrap = '/var/www/nailgun/bootstrap'
    initramfs = os.path.join(bootstrap, 'initramfs.img')
    backup = initramfs + '.bkup'
    chroot = os.path.join(bootstrap, 'initramfs')
    os.rename(initramfs, backup)
    os.makedirs(chroot)
    subprocess.call("gunzip -c {0} | cpio -id".format(backup),
                    shell=True, cwd=chroot)
    patch_fuel_agent(chroot)
    with open(initramfs, "wb") as f:
        subprocess.call("find | grep -v '^\.$' | cpio --format newc -o"
                        " | gzip -c",
                        shell=True, stdout=f, cwd=chroot)

def patch_apply(cwd, patches, revert=False):
    for path in patches:
        with open(path, 'rb') as patch:
            # Try to revert the patch first so that applying it below is
            # idempotent; a failed revert is ignored unless an explicit
            # revert was requested.
            try:
                subprocess.call(["patch", "-R", "-p1"], stdin=patch, cwd=cwd)
            except subprocess.CalledProcessError:
                if not revert:
                    pass
                else:
                    raise
            if not revert:
                patch.seek(0)
                subprocess.call(["patch", "-N", "-p1"], stdin=patch, cwd=cwd)

def restore(self):
    dump = self.archive.extractfile(self.filename)
    subprocess.call([
        "systemctl", "stop", "docker-{0}.service".format(self.db)
    ])
    docker.stop_container(self.db)
    docker.run_in_container(
        "postgres",
        ["sudo", "-u", "postgres", "dropdb", "--if-exists", self.db],
    )
    with docker.in_container("postgres",
                             ["sudo", "-u", "postgres", "psql"],
                             stdin=subprocess.PIPE) as process:
        process.stdin.write(dump.read())
    subprocess.call([
        "systemctl", "start", "docker-{0}.service".format(self.db)
    ])
    docker.start_container(self.db)

def post_restore_action(self, context):
    data, _ = docker.run_in_container(
        "nailgun",
        ["cat", "/usr/share/fuel-openstack-metadata/openstack.yaml"],
        stdout=subprocess.PIPE)
    fixtures = yaml.load(data)
    base_release_fields = fixtures[0]['fields']
    for fixture in fixtures[1:]:
        release = helpers.merge_dicts(
            base_release_fields, fixture['fields'])
        self.__post_data_to_nailgun(
            "/api/v1/releases/", release, context.password)
    subprocess.call([
        "fuel", "release", "--sync-deployment-tasks",
        "--dir", "/etc/puppet/",
    ])

def patch_puppet(revert=False):
    puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
    for d in os.listdir(puppet_patch_dir):
        d = os.path.join(puppet_patch_dir, d)
        if not os.path.isdir(d):
            continue
        with open(os.path.join(d, "patch")) as patch:
            try:
                subprocess.call(["patch", "-R", "-p3"], stdin=patch,
                                cwd=magic_consts.PUPPET_DIR)
            except subprocess.CalledProcessError:
                if not revert:
                    pass
                else:
                    raise
            if not revert:
                patch.seek(0)
                subprocess.call(["patch", "-N", "-p3"], stdin=patch,
                                cwd=magic_consts.PUPPET_DIR)

def get_docker_container_names(**filtering):
    cmd = ["docker", "ps", '--all']
    for key, value in filtering.iteritems():
        cmd.append("--filter")
        cmd.append("{0}={1}".format(key, value))
    # 'use_without' is a function attribute initialized elsewhere in the
    # module; it is set once `docker ps --format` turns out to be
    # unsupported, so later calls parse the plain table output instead.
    if not get_docker_container_names.use_without:
        try:
            stdout, _ = subprocess.call(cmd + ['--format="{{.Names}}"'],
                                        stdout=subprocess.PIPE)
        except subprocess.CalledProcessError:
            get_docker_container_names.use_without = True
        else:
            full_names = stdout.strip().split()
    if get_docker_container_names.use_without:
        stdout, _ = subprocess.call(cmd, stdout=subprocess.PIPE)
        lines = stdout.strip().split("\n")
        name_idx = lines[0].index("NAMES")
        full_names = [l[name_idx:].split(' ', 1)[0] for l in lines[1:]]
    return [n.rsplit("-", 1)[-1] for n in full_names]

def apply_task(task):
    filename = '{0}.pp'.format(task)
    path = os.path.join(magic_consts.PUPPET_TASKS_DIR, filename)
    cmd = ['puppet', 'apply', '-d', '-v', "--color", "false",
           '--detailed-exitcodes', path]
    try:
        subprocess.call(cmd)
    except subprocess.CalledProcessError as exc:
        # NOTE(akscram): Detailed exit codes of puppet apply:
        # 0: The run succeeded with no changes or failures; the system
        #    was already in the desired state.
        # 1: The run failed, or wasn't attempted due to another run
        #    already in progress.
        # 2: The run succeeded, and some resources were changed.
        # 4: The run succeeded, and some resources failed.
        # 6: The run succeeded, and included both changes and failures.
        if exc.returncode != 2:
            LOG.error("Cannot apply the Puppet task: %s, %s",
                      task, exc.message)
            raise

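# apply_task() above (like several other snippets here) catches
# subprocess.CalledProcessError raised by subprocess.call, which the
# standard library's subprocess.call never raises, so these snippets
# apparently rely on a project-local subprocess wrapper. A minimal sketch
# of such a wrapper, assuming it only needs to raise on a nonzero exit
# status and return the captured output as a (stdout, stderr) pair (the
# real helper presumably also handles logging options such as the
# stderr_log_level argument seen above):
import subprocess as std_subprocess


def call(cmd, **kwargs):
    # Run the command, wait for it, and raise CalledProcessError on a
    # nonzero exit code so callers can inspect exc.returncode (for example
    # the detailed exit codes of puppet apply).
    proc = std_subprocess.Popen(cmd, **kwargs)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise std_subprocess.CalledProcessError(proc.returncode, cmd)
    return stdout, stderr
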
def _post_restore_action(self):
    data, _ = docker.run_in_container(
        "nailgun",
        ["cat", magic_consts.OPENSTACK_FIXTURES],
        stdout=subprocess.PIPE)
    fixtures = yaml.load(data)
    base_release_fields = fixtures[0]['fields']
    for fixture in fixtures[1:]:
        release = helpers.merge_dicts(
            base_release_fields, fixture['fields'])
        self.__post_data_to_nailgun(
            "/api/v1/releases/", release,
            self.context.user, self.context.password)
    subprocess.call(
        [
            "fuel", "release", "--sync-deployment-tasks",
            "--dir", "/etc/puppet/",
        ],
        env=self.context.get_credentials_env())
    values = []
    for line in self._run_sql_in_container(
            "select id, generated from attributes;"):
        c_id, c_data = line.split("|", 1)
        data = json.loads(c_data)
        data["deployed_before"] = {"value": True}
        values.append("({0}, '{1}')".format(c_id, json.dumps(data)))
    if values:
        self._run_sql_in_container(
            'update attributes as a set generated = b.generated '
            'from (values {0}) as b(id, generated) '
            'where a.id = b.id;'.format(','.join(values))
        )
    self._create_links_on_remote_logs()

def destroyed_container(container):
    # Generator meant to be used as a context manager: the container stays
    # destroyed while the with-block runs, then is started and checked again.
    name = get_docker_container_name(container)
    subprocess.call(["dockerctl", "destroy", name])
    try:
        yield
    finally:
        subprocess.call(["dockerctl", "start", container])
        subprocess.call(["dockerctl", "check", container])

def get_docker_container_names(**filtering):
    cmd = [
        "docker", "ps", '--all', '--format="{{.Names}}"',
    ]
    for key, value in filtering.iteritems():
        cmd.append("--filter")
        cmd.append("{0}={1}".format(key, value))
    stdout, _ = subprocess.call(cmd, stdout=subprocess.PIPE)
    full_names = stdout.strip().split()
    return [n.rsplit("-", 1)[-1] for n in full_names]

def clone_env(env_id, release):
    LOG.info("Cloning env %s for release %s",
             env_id, release.data['name'])
    res, _ = subprocess.call(
        ["fuel2", "env", "clone", "-f", "json",
         str(env_id), uuid.uuid4().hex, str(release.data['id'])],
        stdout=subprocess.PIPE,
    )
    # The JSON output is a list of {"Field": ..., "Value": ...} rows; the
    # for/else raises if no 'id' row is present.
    for kv in json.loads(res):
        if kv['Field'] == 'id':
            seed_id = kv['Value']
            break
    else:
        raise Exception("Couldn't find new environment ID in fuel CLI output:"
                        "\n%s" % res)
    return seed_id