def backup(self):
    """Archive the local mirror directories referenced by DB attributes.

    Runs ``self.sql`` against ``self.db`` via psql inside the "postgres"
    container, parses each returned row as JSON, and for every URI value
    served from the admin network address adds the first path component
    (under ``self.path``) to ``self.archive`` — each directory only once.
    """
    admin_ip = helpers.get_astute_dict()["ADMIN_NETWORK"]["ipaddress"]
    psql_cmd = [
        "sudo", "-u", "postgres", "psql", self.db, "--tuples-only",
        "-c", self.sql,
    ]
    output, _ = docker.run_in_container("postgres", psql_cmd,
                                        stdout=subprocess.PIPE)
    output = output.strip()
    if not output:
        # Nothing matched the query; nothing to back up.
        return
    seen_dirs = set()
    for row in output.split("\n"):
        attributes = json.loads(row)
        for value in self._get_values_list(attributes):
            uri = value['uri']
            if admin_ip not in uri:
                continue
            url_path = urlparse.urlsplit(uri).path
            dir_name = url_path.lstrip("/").split('/', 1)[0]
            if dir_name in seen_dirs:
                continue
            seen_dirs.add(dir_name)
            self.archive.add(os.path.join(self.path, dir_name),
                             os.path.join(self.name, dir_name))
def restore(self):
    """Rebuild per-FQDN remote log directories with per-IP symlinks.

    Collects (fqdn, ip) pairs for all known nodes, then — with rsyslog
    stopped so it does not write into paths being moved — renames each
    existing per-IP log directory to its FQDN name and leaves a relative
    symlink at the IP path pointing to the FQDN directory.
    """
    domain = helpers.get_astute_dict()["DNS_DOMAIN"]
    log_root = "/var/log/remote/"
    node_addrs = []
    with fuel_client.set_auth_context(self.context):
        for node in objects.Node.get_all():
            fqdn = node.data["meta"]["system"]["fqdn"]
            # log creation not required for nodes in bootstrap
            if fqdn.startswith('bootstrap'):
                continue
            node_addrs.append((fqdn, node.data["ip"]))
    subprocess.call(["systemctl", "stop", "rsyslog"])
    try:
        for fqdn, ip_addr in node_addrs:
            if not fqdn.endswith(domain):
                continue
            ip_path = os.path.join(log_root, ip_addr)
            fqdn_path = os.path.join(log_root, fqdn)
            if os.path.islink(ip_path):
                # Already converted on a previous run.
                continue
            if os.path.isdir(ip_path):
                os.rename(ip_path, fqdn_path)
            else:
                os.mkdir(fqdn_path)
            # Relative link: <log_root>/<ip> -> <fqdn>
            os.symlink(fqdn, ip_path)
    finally:
        # Restart rsyslog even if the relinking above failed.
        subprocess.call(["systemctl", "start", "rsyslog"])
def _get_mirrors(self):
    """Return the unique first-level mirror directory names.

    Scans every attribute set's URI values and, for URIs hosted on the
    admin network address, extracts the first component of the URL path.
    """
    admin_ip = helpers.get_astute_dict()["ADMIN_NETWORK"]["ipaddress"]
    mirrors = set()
    for attributes in self._get_attributes():
        for value in self._get_values_list(attributes):
            uri = value['uri']
            if admin_ip not in uri:
                continue
            url_path = urlparse.urlsplit(uri).path
            mirrors.add(url_path.lstrip("/").split('/', 1)[0])
    return list(mirrors)
def restore(self):
    """Run host puppet with the context password written into astute.yaml.

    Backs up /etc/fuel/astute.yaml to a temp file, rewrites it with the
    FUEL_ACCESS password from the restore context, applies the host
    puppet manifests, and always moves the original file back.
    """
    astute_path = "/etc/fuel/astute.yaml"
    _, saved_copy = tempfile.mkstemp(dir="/etc/fuel",
                                     prefix=".astute.yaml.octane")
    shutil.copy(astute_path, saved_copy)
    try:
        astute = helpers.get_astute_dict()
        astute["FUEL_ACCESS"]["password"] = self.context.password
        with open(astute_path, "w") as stream:
            yaml.safe_dump(astute, stream, default_flow_style=False)
        puppet.apply_host()
    finally:
        # Restore the pristine config whether or not puppet succeeded.
        shutil.move(saved_copy, astute_path)
def preupgrade_compute(release_id, node_ids):
    """Upgrade OpenStack packages on compute nodes ahead of env upgrade.

    :param release_id: target release to take repos/packages from
    :param node_ids: ids of the compute nodes to upgrade
    """
    release = objects.Release(release_id)
    nodes = [objects.node.Node(node_id) for node_id in node_ids]
    check_sanity(nodes, release)
    master_ip = helpers.get_astute_dict()["ADMIN_NETWORK"]['ipaddress']
    new_repos = get_repos(release, master_ip)
    package_names = get_package_list(release)
    for compute in nodes:
        # Point the node at the new repos, quiesce services, then upgrade.
        change_repositories(compute, new_repos)
        stop_compute_services(compute)
        apt.upgrade_packages(compute, package_names)
def restore(self):
    """Apply host puppet against astute.yaml carrying the restore password.

    The real /etc/fuel/astute.yaml is preserved in a hidden temp file and
    unconditionally moved back afterwards, so the password change is only
    visible for the duration of the puppet run.
    """
    config = "/etc/fuel/astute.yaml"
    _, backup_name = tempfile.mkstemp(
        dir="/etc/fuel", prefix=".astute.yaml.octane")
    shutil.copy(config, backup_name)
    try:
        settings = helpers.get_astute_dict()
        settings["FUEL_ACCESS"]["password"] = self.context.password
        with open(config, "w") as out:
            yaml.safe_dump(settings, out, default_flow_style=False)
        puppet.apply_host()
    finally:
        shutil.move(backup_name, config)
def preupgrade_compute_with_graph(release_id, node_ids):
    """Upgrade compute nodes via the 'preupgrade-compute' deployment graph.

    :param release_id: target release to take repos/packages from
    :param node_ids: ids of the compute nodes to upgrade (all expected to
        belong to the same environment; the first node defines it)
    """
    nodes = [objects.node.Node(nid) for nid in node_ids]
    release = objects.Release(release_id)
    check_sanity(nodes, release)
    master_ip = helpers.get_astute_dict()["ADMIN_NETWORK"]['ipaddress']
    new_repos = get_repos(release, master_ip)
    package_names = get_package_list(release)
    env_id = nodes[0].env.id
    env = objects.environment.Environment(env_id)
    # Publish the new repositories and the package list through cluster
    # attributes so the deployment graph tasks can consume them.
    add_upgrade_attrs_to_settings(env, new_repos, package_names)
    deploy.upload_graph(env_id, "orig")
    deploy.execute_graph_and_wait('preupgrade-compute', env_id, node_ids)
def _create_links_on_remote_logs(self):
    """Rename docker remote-log dirs to FQDN names with per-IP symlinks.

    With rsyslog stopped inside its container (so log files are not being
    appended mid-rename), each node's per-IP directory is renamed to its
    FQDN and a relative symlink is left at the IP path.
    """
    domain = helpers.get_astute_dict()["DNS_DOMAIN"]
    log_root = "/var/log/docker-logs/remote/"
    with fuel_client.set_auth_context(self.context):
        pairs = []
        for n in node.Node.get_all():
            pairs.append((n.data["meta"]["system"]["fqdn"], n.data["ip"]))
    docker.run_in_container("rsyslog", ["service", "rsyslog", "stop"])
    try:
        for fqdn, ip_addr in pairs:
            if not fqdn.endswith(domain):
                continue
            ip_path = os.path.join(log_root, ip_addr)
            fqdn_path = os.path.join(log_root, fqdn)
            if os.path.islink(ip_path):
                # Already converted on a previous run.
                continue
            if os.path.isdir(ip_path):
                os.rename(ip_path, fqdn_path)
            else:
                os.mkdir(fqdn_path)
            # Relative link: <log_root>/<ip> -> <fqdn>
            os.symlink(fqdn, ip_path)
    finally:
        # Bring rsyslog back even if the relinking above failed.
        docker.run_in_container("rsyslog", ["service", "rsyslog", "start"])
def backup(self):
    """Back up mirror directories whose URIs live on the admin address.

    The psql query runs inside the "postgres" container; each non-empty
    output row is a JSON document whose URI values are inspected. The
    first path segment of every matching URI is archived once.
    """
    ipaddr = helpers.get_astute_dict()["ADMIN_NETWORK"]["ipaddress"]
    stdout, _ = docker.run_in_container(
        "postgres",
        ["sudo", "-u", "postgres", "psql", self.db,
         "--tuples-only", "-c", self.sql],
        stdout=subprocess.PIPE)
    stdout = stdout.strip()
    if not stdout:
        return
    archived = set()
    for row in stdout.split("\n"):
        for value in self._get_values_list(json.loads(row)):
            if ipaddr not in value['uri']:
                continue
            segment = urlparse.urlsplit(value['uri']).path
            dir_name = segment.lstrip("/").split('/', 1)[0]
            if dir_name not in archived:
                archived.add(dir_name)
                self.archive.add(os.path.join(self.path, dir_name),
                                 os.path.join(self.name, dir_name))
def test_get_astute_dict(mocker):
    """get_astute_dict must load and return /etc/fuel/astute.yaml."""
    load_yaml_mock = mocker.patch("octane.util.helpers.load_yaml")
    result = helpers.get_astute_dict()
    load_yaml_mock.assert_called_once_with("/etc/fuel/astute.yaml")
    assert result is load_yaml_mock.return_value
def get_default_profile():
    """Return the bootstrap profile from astute.yaml.

    Falls back to "ubuntu_bootstrap" when the key is absent.
    """
    return helpers.get_astute_dict().get(
        "bootstrap_profile", "ubuntu_bootstrap")