def sudo_write_file(filename, contents, mode=0o644):
    """Write (or overwrite) file as root.  USE WITH EXTREME CARE.

    Performs an atomic update using non-interactive `sudo`.  This will
    fail if it needs to prompt for a password.

    When running in a snap this function calls `atomic_write` directly;
    in the development environment the helper script is run without
    `sudo`.

    :param filename: Path of the file to write.
    :param contents: New file contents.
    :type contents: `bytes`.
    :param mode: Numeric permission mode for the written file.
    :raises TypeError: if `contents` is not bytes.
    :raises ExternalProcessError: if the helper script exits non-zero.
    """
    # Imported here to avoid a circular import.
    from provisioningserver.config import is_dev_environment

    if not isinstance(contents, bytes):
        raise TypeError("Content must be bytes, got: %r" % (contents,))
    if snap.running_in_snap():
        atomic_write(contents, filename, mode=mode)
    else:
        maas_write_file = get_library_script_path("maas-write-file")
        command = _with_dev_python(maas_write_file, filename, "%.4o" % mode)
        if not is_dev_environment():
            command = sudo(command)
        # Capture stderr so a failure can be reported in the exception.
        # Previously stderr was not piped, so `communicate` returned None
        # for it and ExternalProcessError carried no output.
        proc = Popen(command, stdin=PIPE, stderr=PIPE)
        _, stderr = proc.communicate(contents)
        if proc.returncode != 0:
            raise ExternalProcessError(proc.returncode, command, stderr)
def _applyConfiguration(self, configuration):
    """Configure the proxy server.

    :param configuration: The configuration object obtained from
        `_getConfiguration`.
    :return: A Deferred when the proxy is (re)configured, or the result
        of `ensureService` when the proxy must stay off.
    """
    service = service_monitor.getServiceByName(self.service_name)
    # The rack only manages the proxy itself when it is not also a region.
    if configuration.is_rack and not configuration.is_region:
        if not configuration.enabled:
            # Proxy should be off and remain off.
            service.off("not enabled")
            return service_monitor.ensureService(self.service_name)

        # Proxy enabled; configure it.  The (blocking) configuration work
        # runs in a thread so the reactor is not stalled.
        d = deferToThread(self._configure, configuration)
        # Ensure that the service is on.
        service.on()
        if snap.running_in_snap():
            # XXX: andreserl 2016-05-09 bug=1687620. When running in a
            # snap, supervisord tracks services. It does not support
            # reloading. Instead, we need to restart the service.
            d.addCallback(
                callOut, service_monitor.restartService, self.service_name
            )
        else:
            d.addCallback(
                callOut, service_monitor.reloadService, self.service_name
            )
        return d
    else:
        # Proxy is managed by the region.
        service.any("managed by the region")
def get_dns_default_controls():
    """Include the default RNDC controls (default RNDC key on port 953)?"""
    if running_in_snap():
        # A confined snap has no access to /etc/bind, which the default
        # controls implicitly require, so leave them out.
        return False
    return os.getenv("MAAS_DNS_DEFAULT_CONTROLS", "1") == "1"
def get_config_v4(
    template_name: str,
    global_dhcp_snippets: Sequence[dict],
    failover_peers: Sequence[dict],
    shared_networks: Sequence[dict],
    hosts: Sequence[dict],
    omapi_key: str,
) -> str:
    """Return a DHCP config file based on the supplied parameters.

    :param template_name: Template file name: `dhcpd.conf.template` for
        the IPv4 template.
    :param global_dhcp_snippets: User-provided global config snippets.
    :param failover_peers: Failover peer definitions for the template.
    :param shared_networks: Shared-network/subnet definitions.  NOTE:
        mutated in place — `next_server`, `bootloader` and the NTP
        address lists are added to each subnet below.
    :param hosts: Static host reservations.
    :param omapi_key: Key for OMAPI access to the DHCP server.
    :return: A full configuration, as a string.
    :raises DHCPConfigError: if the template fails to render.
    """
    template = load_template("dhcp", template_name)
    dhcp_socket = get_maas_data_path("dhcpd.sock")

    # Helper functions to stuff into the template namespace.
    helpers = {
        "oneline": normalise_whitespace,
        "commalist": normalise_any_iterable_to_comma_list,
        "quoted_commalist": normalise_any_iterable_to_quoted_comma_list,
        "running_in_snap": snap.running_in_snap(),
    }

    for shared_network in shared_networks:
        interface = shared_network.get("interface", None)
        for subnet in shared_network["subnets"]:
            rack_ip = get_rack_ip_for_subnet(
                4, subnet["subnet_cidr"], interface
            )
            # Only point next-server at this rack when it actually has an
            # address on the subnet.
            if rack_ip is not None:
                subnet["next_server"] = rack_ip
                subnet["bootloader"] = compose_conditional_bootloader(
                    False, rack_ip, subnet.get("disabled_boot_architectures")
                )
            ntp_servers = subnet["ntp_servers"]  # Is a list.
            # Split NTP servers by address family for the template.
            ntp_servers_ipv4, ntp_servers_ipv6 = _get_addresses(*ntp_servers)
            subnet["ntp_servers_ipv4"] = ", ".join(ntp_servers_ipv4)
            subnet["ntp_servers_ipv6"] = ", ".join(ntp_servers_ipv6)

    try:
        return template.substitute(
            global_dhcp_snippets=global_dhcp_snippets,
            hosts=hosts,
            failover_peers=failover_peers,
            shared_networks=shared_networks,
            omapi_key=omapi_key,
            dhcp_helper=(get_path("/usr/sbin/maas-dhcp-helper")),
            dhcp_socket=dhcp_socket,
            **helpers,
        )
    except (KeyError, NameError) as error:
        raise DHCPConfigError(
            "Failed to render DHCP configuration."
        ) from error
def create_gnupg_home(tftp_root=None):
    """Upgrade hook: create maas user's GNUPG home directory."""
    gpghome = get_maas_user_gpghome()
    if os.path.isdir(gpghome):
        # Nothing to do; the directory already exists.
        return
    makedirs(gpghome)
    # Make the maas user the owner of its GPG home.  Do this only when
    # running as root outside a snap; otherwise it would probably fail,
    # and we want a development instance to start without triggering it.
    if os.geteuid() == 0 and not snap.running_in_snap():
        check_call(["chown", "maas:maas", gpghome])
def get_ip_addr():
    """Returns this system's local IP address information as a dictionary.

    :raises:ExternalProcessError: if IP address information could not
        be gathered.
    """
    resources_bin = get_resources_bin_path()
    # Outside the snap the resources binary needs root to gather data.
    if running_in_snap():
        command = [resources_bin]
    else:
        command = ["sudo", resources_bin]
    output = call_and_check(command)
    interfaces = parse_lxd_networks(json.loads(output)["networks"])
    _update_interface_type(interfaces)
    _annotate_with_proc_net_bonding_original_macs(interfaces)
    return interfaces
def _applyConfiguration(self, configuration):
    """Configure the HTTP server.

    :param configuration: The configuration object obtained from
        `_getConfiguration`.
    """
    d = deferToThread(self._configure, configuration.upstream_http)
    # XXX: blake_r 2018-06-12 bug=1687620. When running in a snap,
    # supervisord tracks services. It does not support reloading.
    # Instead, we need to restart the service.
    if snap.running_in_snap():
        apply_service_change = service_monitor.restartService
    else:
        apply_service_change = service_monitor.reloadService
    d.addCallback(lambda _: apply_service_change("http"))
    return d
def get_maas_cert_tuple():
    """Return a 2-tuple with certificate and private key paths.

    The format is the same used by python-requests."""
    if running_in_snap():
        cert_dir = SnapPaths.from_environ().common / "certificates"
        private_key = cert_dir / "maas.key"
        certificate = cert_dir / "maas.crt"
    else:
        private_key = Path(
            get_tentative_data_path("/etc/maas/certificates/maas.key")
        )
        certificate = Path(
            get_tentative_data_path("/etc/maas/certificates/maas.crt")
        )
    # Both files must exist for the pair to be usable.
    if certificate.exists() and private_key.exists():
        return str(certificate), str(private_key)
    return None
def write_config(
    allowed_cidrs,
    peer_proxies=None,
    prefer_v4_proxy=False,
    maas_proxy_port=8000,
):
    """Write the proxy configuration.

    :param allowed_cidrs: CIDRs that are allowed to use the proxy.
    :param peer_proxies: Optional list of upstream peer proxy URLs.
    :param prefer_v4_proxy: Prefer IPv4 DNS resolution over IPv6.
    :param maas_proxy_port: TCP port the proxy listens on.
    :raises ProxyConfigFail: if the template references an undefined name.
    """
    if peer_proxies is None:
        peer_proxies = []

    snap_paths = snap.SnapPaths.from_environ()
    context = {
        "modified": str(datetime.date.today()),
        "fqdn": socket.getfqdn(),
        "cidrs": allowed_cidrs,
        "running_in_snap": snap.running_in_snap(),
        "snap_path": snap_paths.snap,
        "snap_data_path": snap_paths.data,
        "snap_common_path": snap_paths.common,
        "dns_v4_first": prefer_v4_proxy,
        "maas_proxy_port": maas_proxy_port,
        # Peer proxies are passed to the template as host/port pairs.
        "peers": [
            {
                "address": urlparse(peer).hostname,
                "port": urlparse(peer).port,
            }
            for peer in peer_proxies
        ],
    }

    template_path = locate_template("proxy", MAAS_PROXY_CONF_TEMPLATE)
    template = tempita.Template.from_filename(template_path, encoding="UTF-8")
    try:
        content = template.substitute(context)
    except NameError as error:
        # Chain the original error for easier debugging, matching the
        # DHCP config writers' error handling.
        raise ProxyConfigFail(*error.args) from error
    # Squid prefers ascii.
    content = content.encode("ascii")
    target_path = get_proxy_config_path()
    atomic_write(content, target_path, overwrite=True, mode=0o644)
def get_maas_common_command(): """Return path to the maas-rack command. In production mode this will just return 'maas-rack', but in development mode it will return the path for the current development environment. """ # Avoid circular imports. from provisioningserver.config import is_dev_environment if is_dev_environment(): from maastesting import dev_root return os.path.join(dev_root, "bin/maas-common") elif snap.running_in_snap(): # there's no maas-common in the snap as maas-rack is always present return os.path.join( snap.SnapPaths.from_environ().snap, "bin/maas-rack" ) else: return get_path("usr/lib/maas/maas-common")
def sudo_delete_file(filename):
    """Delete file as root.  USE WITH EXTREME CARE.

    Performs an atomic delete using non-interactive `sudo`.  This will
    fail if it needs to prompt for a password.

    When running in a snap this function calls `atomic_delete` directly.

    :param filename: Path of the file to delete.
    :raises ExternalProcessError: if the helper script exits non-zero.
    """
    # Imported here to avoid a circular import.
    from provisioningserver.config import is_dev_environment

    if snap.running_in_snap():
        atomic_delete(filename)
    else:
        maas_delete_file = get_library_script_path("maas-delete-file")
        command = _with_dev_python(maas_delete_file, filename)
        if not is_dev_environment():
            command = sudo(command)
        # Capture stderr so a failure can be reported in the exception.
        # Previously neither stream was piped, so `communicate` returned
        # (None, None) and ExternalProcessError carried no output.
        proc = Popen(command, stderr=PIPE)
        _, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProcessError(proc.returncode, command, stderr)
def get_config_v6(
    template_name: str,
    global_dhcp_snippets: Sequence[dict],
    failover_peers: Sequence[dict],
    shared_networks: Sequence[dict],
    hosts: Sequence[dict],
    omapi_key: str,
) -> str:
    """Return a DHCP config file based on the supplied parameters.

    :param template_name: Template file name: `dhcpd6.conf.template` for
        the IPv6 template.
    :return: A full configuration, as a string.
    :raises DHCPConfigError: if the template fails to render.
    """
    template = load_template("dhcp", template_name)
    shared_networks = _process_network_parameters_v6(
        failover_peers, shared_networks
    )
    try:
        return template.substitute(
            global_dhcp_snippets=global_dhcp_snippets,
            hosts=hosts,
            failover_peers=failover_peers,
            shared_networks=shared_networks,
            omapi_key=omapi_key,
            # Helper functions/values exposed to the template namespace.
            oneline=normalise_whitespace,
            commalist=normalise_any_iterable_to_comma_list,
            quoted_commalist=normalise_any_iterable_to_quoted_comma_list,
            running_in_snap=snap.running_in_snap(),
        )
    except (KeyError, NameError) as error:
        raise DHCPConfigError(
            "Failed to render DHCP configuration."
        ) from error
def _performServiceAction(self, service, action):
    """Start or stop the service.

    Runs the action under a per-service lock so only one action at a
    time is applied to any given service.

    :param service: The service object to act upon.
    :param action: Action name, e.g. "start" or "stop".
    :raises ServiceActionError: if the underlying command exits non-zero.
    """
    lock = self._getServiceLock(service.name)
    if snap.running_in_snap():
        # Inside the snap, services are managed by supervisord and go by
        # their snap-specific names.
        exec_action = self._execSupervisorServiceAction
        service_name = service.snap_service_name
    else:
        exec_action = self._execSystemDServiceAction
        service_name = service.service_name
    # A service may define per-action extra options via an attribute
    # named "<action>_extra_opts".
    extra_opts = getattr(service, "%s_extra_opts" % action, None)
    exit_code, output, error = yield lock.run(
        exec_action, service_name, action, extra_opts=extra_opts
    )
    if exit_code != 0:
        error_msg = "Service '%s' failed to %s: %s" % (
            service.name,
            action,
            error,
        )
        maaslog.error(error_msg)
        raise ServiceActionError(error_msg)
def write_config(write_local, forwarders=None, port=None):
    """Write the syslog configuration.

    :param write_local: Whether rsyslog should also log to local files.
    :param forwarders: Optional list of forwarder dicts (each carrying a
        "name" key) to relay syslog messages to.
    :param port: Port to listen on; defaults to 5247.
    :raises SyslogConfigFail: if the template references an undefined name.
    """
    context = {
        "user": "******",
        "group": "maas",
        "drop_priv": True,
        "work_dir": get_syslog_workdir_path(),
        "log_dir": get_syslog_log_path(),
        "write_local": write_local,
        "port": port if port else 5247,
        "forwarders": (
            sorted(forwarders, key=itemgetter("name"))
            if forwarders is not None
            else []
        ),
    }

    # Running inside the snap rsyslog is root.
    if snap.running_in_snap():
        context["user"] = "******"
        context["group"] = "root"
        context["drop_priv"] = False

    template_path = locate_template("syslog", MAAS_SYSLOG_CONF_TEMPLATE)
    template = tempita.Template.from_filename(template_path, encoding="UTF-8")
    try:
        content = template.substitute(context)
    except NameError as error:
        # Chain the original error for easier debugging, matching the
        # DHCP config writers' error handling.
        raise SyslogConfigFail(*error.args) from error
    # rsyslog prefers ascii.  (The previous comment said "Squid" — a
    # copy-paste leftover from the proxy config writer.)
    content = content.encode("ascii")
    target_path = get_syslog_config_path()
    atomic_write(content, target_path, overwrite=True, mode=0o644)
def proxy_update_config(reload_proxy=True):
    """Regenerate the proxy configuration file.

    :param reload_proxy: When True, also reload (or, inside a snap,
        restart) the proxy service after writing the configuration.
    :return: A Deferred firing once the config is written (and the
        service reloaded), or an already-fired Deferred when the proxy
        is disabled.
    """

    @transactional
    def _write_config():
        # Only subnets that explicitly allow proxying are included.
        allowed_subnets = Subnet.objects.filter(allow_proxy=True)
        cidrs = [subnet.cidr for subnet in allowed_subnets]
        config = Config.objects.get_configs(
            [
                "http_proxy",
                "maas_proxy_port",
                "use_peer_proxy",
                "prefer_v4_proxy",
                "enable_http_proxy",
            ]
        )
        kwargs = {
            "prefer_v4_proxy": config["prefer_v4_proxy"],
            "maas_proxy_port": config["maas_proxy_port"],
        }
        # Use the configured upstream proxy as a peer only when proxying
        # is enabled and peer-proxy mode is requested.
        if (
            config["enable_http_proxy"]
            and config["http_proxy"]
            and config["use_peer_proxy"]
        ):
            kwargs["peer_proxies"] = [config["http_proxy"]]
        write_config(cidrs, **kwargs)

    if is_proxy_enabled():
        d = deferToDatabase(_write_config)
        if reload_proxy:
            # XXX: andreserl 2016-05-09 bug=1687620. When running in a snap,
            # supervisord tracks services. It does not support reloading.
            # Instead, we need to restart the service.
            if snap.running_in_snap():
                d.addCallback(
                    lambda _: service_monitor.restartService(
                        "proxy", if_on=True
                    )
                )
            else:
                d.addCallback(
                    lambda _: service_monitor.reloadService(
                        "proxy", if_on=True
                    )
                )
        return d
    else:
        return succeed(None)
def get_running_version() -> MAASVersion:
    """Return the version for the running MAAS."""
    git_rev = None
    revno = 0

    # Prefer the package manager's idea of the installed version.
    if snap.running_in_snap():
        version_str = snap.get_snap_version().version
    else:
        deb_versions = deb.get_deb_versions_info()
        version_str = deb_versions.current.version if deb_versions else ""

    if not version_str:
        # Not installed via snap or deb: fall back to the Python package
        # metadata, plus git details from the source tree.
        version_str = _get_version_from_python_package()
        git_rev = _get_maas_repo_hash()
        revno = _get_maas_repo_commit_count()

    maas_version = MAASVersion.from_string(version_str)
    # Fill in git details only where the parsed version lacks them.
    if git_rev and not maas_version.git_rev:
        maas_version = dataclasses.replace(maas_version, git_rev=git_rev)
    if revno and not maas_version.revno:
        maas_version = dataclasses.replace(maas_version, revno=revno)
    return maas_version
def test_in_snap(self):
    # A SNAP environment variable is how snap confinement is detected.
    snap_name = factory.make_name()
    self.patch(os, "environ", {"SNAP": snap_name})
    self.assertTrue(running_in_snap())
def test_not_in_snap(self):
    # Without SNAP in the environment we are not running in a snap.
    self.patch(os, "environ", {})
    self.assertFalse(running_in_snap())
def sudo(command_args):
    """Wrap the command arguments in a sudo command, if not in debug mode."""
    if not snap.running_in_snap():
        # -n: non-interactive; fail rather than prompt for a password.
        return ["sudo", "-n", *command_args]
    return command_args
def _loadServiceState(self, service):
    """Return service status."""
    # Supervisord manages services inside the snap; systemd otherwise.
    loader = (
        self._loadSupervisorServiceState
        if snap.running_in_snap()
        else self._loadSystemDServiceState
    )
    return loader(service)
def runscripts(scripts, url, creds, tmpdir, post_process_hook=None):
    """Run each commissioning script, signalling progress to the region.

    :param scripts: Mapping of script name -> script metadata.
    :param url: Metadata service URL to signal status to.
    :param creds: Credentials for the metadata service.
    :param tmpdir: Working directory; per-script output is written under
        its "out" subdirectory.
    :param post_process_hook: Optional callable invoked with
        (script_name, combined_path, stdout_path, stderr_path) after a
        script finishes successfully.
    :return: List of names of scripts that failed or timed out.
    """
    in_snap = running_in_snap()

    total_scripts = len(scripts)
    current_script = 1
    failed_scripts = []
    out_dir = os.path.join(tmpdir, "out")
    os.makedirs(out_dir)
    for script_name in sorted(scripts.keys()):
        signal_wrapper(
            url,
            creds,
            "WORKING",
            "Starting %s [%d/%d]"
            % (script_name, current_script, total_scripts),
        )
        if script_name == LXD_OUTPUT_NAME:
            # Execute the LXD binary directly as we are already on the
            # rack controller and don't need to download it.
            script_path = get_resources_bin_path()
        else:
            script_path = os.path.join(os.path.dirname(__file__), script_name)
        # Per-script output file locations.
        combined_path = os.path.join(out_dir, script_name)
        stdout_name = "%s.out" % script_name
        stdout_path = os.path.join(out_dir, stdout_name)
        stderr_name = "%s.err" % script_name
        stderr_path = os.path.join(out_dir, stderr_name)
        result_name = "%s.yaml" % script_name
        result_path = os.path.join(out_dir, result_name)
        # Tell the script where to write its output via the environment.
        env = copy.deepcopy(os.environ)
        env["OUTPUT_COMBINED_PATH"] = combined_path
        env["OUTPUT_STDOUT_PATH"] = stdout_path
        env["OUTPUT_STDERR_PATH"] = stderr_path
        env["RESULT_PATH"] = result_path
        timeout = 60
        # Outside the snap the script needs root; -E preserves the
        # OUTPUT_*/RESULT_PATH environment variables across sudo.
        command = [script_path] if in_snap else ["sudo", "-E", script_path]
        try:
            proc = Popen(
                command, stdin=DEVNULL, stdout=PIPE, stderr=PIPE, env=env
            )
            capture_script_output(
                proc, combined_path, stdout_path, stderr_path, timeout
            )
        except OSError as e:
            # Launch failure: report the errno as the exit status where
            # it is meaningful.
            if isinstance(e.errno, int) and e.errno != 0:
                exit_status = e.errno
            else:
                # 2 is the return code bash gives when it can't execute.
                exit_status = 2
            result = str(e).encode()
            if result == b"":
                result = b"Unable to execute script"
            files = {script_name: result, stderr_name: result}
            signal_wrapper(
                url,
                creds,
                "WORKING",
                files=files,
                exit_status=exit_status,
                error="Failed to execute %s [%d/%d]: %d"
                % (script_name, current_script, total_scripts, exit_status),
            )
            failed_scripts.append(script_name)
        except TimeoutExpired:
            # NOTE(review): these open() calls leak file handles; `with`
            # blocks would be safer.
            files = {
                script_name: open(combined_path, "rb").read(),
                stdout_name: open(stdout_path, "rb").read(),
                stderr_name: open(stderr_path, "rb").read(),
            }
            signal_wrapper(
                url,
                creds,
                "TIMEDOUT",
                files=files,
                error="Timeout(%s) expired on %s [%d/%d]"
                % (str(timeout), script_name, current_script, total_scripts),
            )
            failed_scripts.append(script_name)
        else:
            # Script ran to completion (possibly with non-zero exit).
            if post_process_hook is not None:
                post_process_hook(
                    script_name, combined_path, stdout_path, stderr_path
                )
            # NOTE(review): same open() handle leak as above.
            files = {
                script_name: open(combined_path, "rb").read(),
                stdout_name: open(stdout_path, "rb").read(),
                stderr_name: open(stderr_path, "rb").read(),
            }
            if os.path.exists(result_path):
                files[result_name] = open(result_path, "rb").read()
            signal_wrapper(
                url,
                creds,
                "WORKING",
                files=files,
                exit_status=proc.returncode,
                error="Finished %s [%d/%d]: %d"
                % (
                    script_name,
                    current_script,
                    total_scripts,
                    proc.returncode,
                ),
            )
            if proc.returncode != 0:
                failed_scripts.append(script_name)
        current_script += 1
    return failed_scripts