def _create_random_extension_events_dir_with_events(no_of_extensions, events_path, no_of_chars=10):
    """Create event directories for randomly named extensions and seed them.

    Copies the test event file(s) found at *events_path* (a file, or a
    directory whose entries are all used) into a fresh events directory for
    each of *no_of_extensions* randomly named extensions.

    Returns a dict mapping extension name -> count of good events parsed.
    """
    # A directory means "use every file inside it"; otherwise use the single file.
    if os.path.isdir(events_path):
        test_events_paths = glob.glob(os.path.join(events_path, "*"))
    else:
        test_events_paths = [events_path]

    extension_names = {}
    for _ in range(no_of_extensions):
        random_suffix = ''.join(random.sample(string.ascii_letters, no_of_chars))
        ext_name = "Microsoft.OSTCExtensions.{0}".format(random_suffix)

        good_event_count = 0
        for events_file in test_events_paths:
            if not (os.path.exists(events_file) and os.path.isfile(events_file)):
                continue

            good_event_count += TestExtensionTelemetryHandler._parse_file_and_count_good_events(events_file)
            events_dir = os.path.join(conf.get_ext_log_dir(), ext_name, EVENTS_DIRECTORY)
            fileutil.mkdir(events_dir)
            shutil.copy(events_file, events_dir)

        extension_names[ext_name] = good_event_count
    return extension_names
def mount_cgroups(self):
    """Best-effort mount of the cgroup root and controller hierarchies."""
    try:
        root = _cgroup_path()
        if not os.path.exists(root):
            # First run: create the root and mount a tmpfs over it.
            fileutil.mkdir(root)
            self.mount(device='cgroup_root',
                       mount_point=root,
                       option="-t tmpfs",
                       chk_err=False)
        elif not os.path.isdir(root):
            logger.error("Could not mount cgroups: ordinary file at {0}".format(root))
            return

        # Mount each controller hierarchy under its own directory.
        for hierarchy in ('cpu,cpuacct', 'memory'):
            hierarchy_path = _cgroup_path(hierarchy)
            if not os.path.exists(hierarchy_path):
                fileutil.mkdir(hierarchy_path)
                self.mount(device=hierarchy,
                           mount_point=hierarchy_path,
                           option="-t cgroup -o {0}".format(hierarchy),
                           chk_err=False)

        # Expose 'cpu' and 'cpuacct' as symlinks to the combined hierarchy.
        for alias in ('cpu', 'cpuacct'):
            alias_path = _cgroup_path(alias)
            if not os.path.exists(alias_path):
                os.symlink(_cgroup_path('cpu,cpuacct'), alias_path)
    except Exception as exception:
        logger.error("Could not mount cgroups: {0}", ustr(exception))
def test_collect_and_send_with_send_event_generating_exception(self, mock_lib_dir, *args):
    mock_lib_dir.return_value = self.lib_dir
    fileutil.mkdir(self.event_dir)

    with _create_monitor_handler(enabled_operations=["collect_and_send_events"]) as monitor_handler:
        # Create events of 2 KB, 4 KB and 8 KB (2**power KB each).
        for power in (1, 2, 3):
            self._create_extension_event(2 ** power * 1024)

        # This test validates that if we hit an issue while sending an event, we never send it again.
        with patch("azurelinuxagent.common.logger.warn") as mock_warn:
            with patch("azurelinuxagent.common.protocol.wire.WireClient.send_event") as patch_send_event:
                patch_send_event.side_effect = Exception()
                monitor_handler.run_and_wait()

                self.assertEqual(1, mock_warn.call_count)
                self.assertEqual(0, len(os.listdir(self.event_dir)))
def test_collect_and_send_with_http_post_returning_503(self, mock_lib_dir, *_):
    mock_lib_dir.return_value = self.lib_dir
    fileutil.mkdir(self.event_dir)

    with _create_monitor_handler(enabled_operations=["collect_and_send_events"]) as monitor_handler:
        def http_post_handler(url, _, **__):
            # Answer telemetry requests with a 503; let everything else through.
            if self.is_telemetry_request(url):
                return MockHttpResponse(restutil.httpclient.SERVICE_UNAVAILABLE)
            return None

        protocol = monitor_handler.get_mock_wire_protocol()
        protocol.set_http_handlers(http_post_handler=http_post_handler)

        # Create events of 2 KB, 4 KB and 8 KB (2**power KB each).
        for power in (1, 2, 3):
            self._create_extension_event(2 ** power * 1024)

        with patch("azurelinuxagent.common.logger.warn") as mock_warn:
            monitor_handler.run_and_wait()

            self.assertEqual(1, mock_warn.call_count)
            message = "[ProtocolError] [Wireserver Exception] [ProtocolError] [Wireserver Failed] URI http://{0}/machine?comp=telemetrydata [HTTP Failed] Status Code 503".format(protocol.get_endpoint())
            self.assertIn(message, mock_warn.call_args[0][0])
            # Events must be dropped even when the send fails.
            self.assertEqual(0, len(os.listdir(self.event_dir)))
def save_event(self, data):
    """Persist a telemetry event to the events directory, pruning old files."""
    if self.event_dir is None:
        logger.warn("Cannot save event -- Event reporter is not initialized.")
        return

    fileutil.mkdir(self.event_dir, mode=0o700)

    # Keep at most the 999 newest event files; names are timestamps, so a
    # lexical sort puts the oldest first.
    existing_events = os.listdir(self.event_dir)
    if len(existing_events) >= 1000:
        existing_events.sort()
        stale_events = existing_events[:-999]
        logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir))
        try:
            for stale in stale_events:
                os.remove(os.path.join(self.event_dir, stale))
        except IOError as error:
            raise EventError(error)

    # Write a .tmp file first, then rename, so readers never see a partial file.
    filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000)))
    try:
        with open(filename + ".tmp", 'wb+') as hfile:
            hfile.write(data.encode("utf-8"))
        os.rename(filename + ".tmp", filename + ".tld")
    except IOError as error:
        raise EventError("Failed to write events to file:{0}".format(error))
def test_collect_and_send_with_call_wireserver_returns_http_error(self, mock_lib_dir, *args):
    mock_lib_dir.return_value = self.lib_dir
    fileutil.mkdir(self.event_dir)
    add_event(name="MonitorTests", op=WALAEventOperation.HeartBeat, is_success=True, message="Test heartbeat")

    with _create_monitor_handler(enabled_operations=["collect_and_send_events"]) as monitor_handler:
        def http_post_handler(url, _, **__):
            # Fail only the telemetry requests.
            if self.is_telemetry_request(url):
                return HttpError("A test exception")
            return None

        monitor_handler.get_mock_wire_protocol().set_http_handlers(http_post_handler=http_post_handler)

        with patch("azurelinuxagent.common.logger.warn") as mock_warn:
            monitor_handler.run_and_wait()

            self.assertEqual(1, mock_warn.call_count)
            # The event must be dropped after the failed send.
            self.assertEqual(0, len(os.listdir(self.event_dir)))
def save_event(self, data):
    """Write a telemetry event to the events folder.

    Prunes the oldest event files when the folder is over quota, and writes
    the new event via a tmp file + rename.  Raises EventError on any
    filesystem failure.
    """
    if self.event_dir is None:
        logger.warn("Cannot save event -- Event reporter is not initialized.")
        return

    try:
        fileutil.mkdir(self.event_dir, mode=0o700)
    except (IOError, OSError) as error:
        raise EventError("Failed to create events folder {0}. Error: {1}".format(self.event_dir, ustr(error)))

    try:
        existing_events = os.listdir(self.event_dir)
        if len(existing_events) >= MAX_NUMBER_OF_EVENTS:
            logger.periodic_warn(logger.EVERY_MINUTE,
                                 "[PERIODIC] Too many files under: {0}, current count: {1}, "
                                 "removing oldest event files".format(self.event_dir, len(existing_events)))
            # File names are microsecond timestamps, so lexical order == age order.
            existing_events.sort()
            for stale_event in existing_events[:-999]:
                os.remove(os.path.join(self.event_dir, stale_event))
    except (IOError, OSError) as error:
        raise EventError("Failed to remove old events from events folder {0}. Error: {1}".format(self.event_dir, ustr(error)))

    filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000)))
    try:
        with open(filename + ".tmp", 'wb+') as hfile:
            hfile.write(data.encode("utf-8"))
        os.rename(filename + ".tmp", filename + AGENT_EVENT_FILE_EXTENSION)
    except (IOError, OSError) as error:
        raise EventError("Failed to write events to file: {0}".format(error))
def mount_resource_disk(self, mount_point, fs):
    """Partition (if needed), format and mount the Azure resource disk.

    Args:
        mount_point: directory where the disk is mounted (created if missing).
        fs: file system type to format with (e.g. "ext4").

    Returns:
        The mount point on success (the existing one if already mounted).

    Raises:
        ResourceDiskError: when the disk cannot be detected, partitioned,
            formatted or mounted.
    """
    device = self.osutil.device_for_ide_port(1)
    if device is None:
        raise ResourceDiskError("unable to detect disk topology")

    device = "/dev/" + device
    mountlist = shellutil.run_get_output("mount")[1]
    existing = self.osutil.get_mount_point(mountlist, device)
    if existing:
        logger.info("Resource disk {0}1 is already mounted", device)
        return existing

    fileutil.mkdir(mount_point, mode=0o755)

    logger.info("Detect GPT...")
    partition = device + "1"
    ret = shellutil.run_get_output("parted {0} print".format(device))
    if ret[0]:
        raise ResourceDiskError("({0}) {1}".format(device, ret[1]))

    if "gpt" in ret[1]:
        logger.info("GPT detected")
        logger.info("Get GPT partitions")
        # Fix: raw string for the regex -- "\s" in a plain string is an
        # invalid escape sequence (DeprecationWarning on Python 3.6+).
        parts = [x for x in ret[1].split("\n") if re.match(r"^\s*[0-9]+", x)]
        logger.info("Found more than {0} GPT partitions.", len(parts))
        if len(parts) > 1:
            # Collapse multiple partitions into a single one spanning the disk.
            logger.info("Remove old GPT partitions")
            for i in range(1, len(parts) + 1):
                logger.info("Remove partition: {0}", i)
                shellutil.run("parted {0} rm {1}".format(device, i))

            logger.info("Create a new GPT partition using entire disk space")
            shellutil.run("parted {0} mkpart primary 0% 100%".format(device))

            logger.info("Format partition: {0} with fstype {1}", partition, fs)
            shellutil.run("mkfs." + fs + " " + partition + " -F")
    else:
        logger.info("GPT not detected")
        logger.info("Check fstype")
        ret = shellutil.run_get_output("sfdisk -q -c {0} 1".format(device))
        if ret[1].rstrip() == "7" and fs != "ntfs":
            # Partition type 7 == NTFS; re-type to Linux (83) and reformat.
            logger.info("The partition is formatted with ntfs")
            logger.info("Format partition: {0} with fstype {1}", partition, fs)
            shellutil.run("sfdisk -c {0} 1 83".format(device))
            shellutil.run("mkfs." + fs + " " + partition + " -F")

    logger.info("Mount resource disk")
    ret = shellutil.run("mount {0} {1}".format(partition, mount_point), chk_err=False)
    if ret:
        # First mount failed; reformat once and retry before giving up.
        logger.warn("Failed to mount resource disk. Retry mounting")
        shellutil.run("mkfs." + fs + " " + partition + " -F")
        ret = shellutil.run("mount {0} {1}".format(partition, mount_point))
        if ret:
            raise ResourceDiskError("({0}) {1}".format(partition, ret))

    logger.info("Resource disk ({0}) is mounted at {1} with fstype {2}",
                device, mount_point, fs)
    return mount_point
def _build_test_data(cls):
    """
    Build a dummy file structure which will be used as a foundation for the log collector tests
    """
    root = cls.root_collect_dir

    # Text log files: one small, one large.
    cls._create_file_of_specific_size(os.path.join(root, "waagent.log"), SMALL_FILE_SIZE)
    cls._create_file_of_specific_size(os.path.join(root, "waagent.log.1"), LARGE_FILE_SIZE)

    # Binary (gz) files: one small, one large.
    cls._create_file_of_specific_size(os.path.join(root, "waagent.log.2.gz"), SMALL_FILE_SIZE, binary=True)
    cls._create_file_of_specific_size(os.path.join(root, "waagent.log.3.gz"), LARGE_FILE_SIZE, binary=True)

    # Lower-priority files, one of them inside a subdirectory.
    mkdir(os.path.join(root, "another_dir"))
    cls._create_file_of_specific_size(os.path.join(root, "less_important_file"), SMALL_FILE_SIZE)
    cls._create_file_of_specific_size(os.path.join(root, "another_dir", "least_important_file"), SMALL_FILE_SIZE)
def __create_unit_file(path, contents):
    """Write a unit file at *path*, creating its parent directory if needed."""
    parent_dir = os.path.dirname(path)
    if not os.path.exists(parent_dir):
        fileutil.mkdir(parent_dir, mode=0o755)
    # Remember whether the file existed so the log says Created vs Updated.
    already_there = os.path.exists(path)
    fileutil.write_file(path, contents)
    _log_cgroup_info("{0} {1}", "Updated" if already_there else "Created", path)
def deploy_ssh_pubkey(self, username, pubkey):
    """
    Deploy authorized_key
    """
    path, thumbprint, value = pubkey
    if path is None:
        raise OSUtilError("Public key path is None")

    crypt_util = CryptUtil(conf.get_openssl_cmd())

    path = self._norm_path(path)
    fileutil.mkdir(os.path.dirname(path), mode=0o700, owner=username)

    if value is not None:
        # The key value was supplied directly; it must be in OpenSSH format.
        if not value.startswith("ssh-"):
            raise OSUtilError("Bad public key: {0}".format(value))
        fileutil.write_file(path, value)
    elif thumbprint is not None:
        # Derive the public key from the certificate matching the thumbprint.
        lib_dir = conf.get_lib_dir()
        crt_path = os.path.join(lib_dir, thumbprint + '.crt')
        if not os.path.isfile(crt_path):
            raise OSUtilError("Can't find {0}.crt".format(thumbprint))
        pub_path = os.path.join(lib_dir, thumbprint + '.pub')
        fileutil.write_file(pub_path, crypt_util.get_pubkey_from_crt(crt_path))
        self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
        self.openssl_to_openssh(pub_path, path)
        fileutil.chmod(pub_path, 0o600)
    else:
        raise OSUtilError("SSH public key Fingerprint and Value are None")

    self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
    fileutil.chowner(path, username)
    fileutil.chmod(path, 0o644)
def save_event(self, data):
    """Persist a telemetry event file, pruning the oldest files when over quota.

    Raises:
        EventError: if old events cannot be removed or the new event
            cannot be written.
    """
    if self.event_dir is None:
        logger.warn("Cannot save event -- Event reporter is not initialized.")
        return
    fileutil.mkdir(self.event_dir, mode=0o700)

    existing_events = os.listdir(self.event_dir)
    if len(existing_events) >= 1000:
        # Keep only the 999 newest files; names are timestamps, so a
        # lexical sort orders them oldest-first.
        existing_events.sort()
        oldest_files = existing_events[:-999]
        logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir))
        try:
            for f in oldest_files:
                os.remove(os.path.join(self.event_dir, f))
        except IOError as e:
            raise EventError(e)

    # Write via a .tmp file, then rename, so readers never see a partial file.
    filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000)))
    try:
        with open(filename + ".tmp", 'wb+') as hfile:
            hfile.write(data.encode("utf-8"))
        os.rename(filename + ".tmp", filename + ".tld")
    except IOError as e:
        # Fix: format the message before raising. The previous code passed
        # the format string and the exception as two separate arguments to
        # EventError, leaving the "{0}" placeholder unfilled -- the sibling
        # save_event implementation formats the message first.
        raise EventError("Failed to write events to file:{0}".format(e))
def mount_cgroups(self):
    """Mount the tmpfs cgroup root plus the cpu/cpuacct and memory hierarchies."""
    try:
        if not os.path.exists(_cgroup_path()):
            fileutil.mkdir(_cgroup_path())
            self.mount(device='cgroup_root',
                       mount_point=_cgroup_path(),
                       option="-t tmpfs",
                       chk_err=False)
        elif not os.path.isdir(_cgroup_path()):
            logger.error("Could not mount cgroups: ordinary file at {0}".format(_cgroup_path()))
            return

        # Mount each tracked controller hierarchy under its own directory.
        for controller in ['cpu,cpuacct', 'memory']:
            controller_path = _cgroup_path(controller)
            if not os.path.exists(controller_path):
                fileutil.mkdir(controller_path)
                self.mount(device=controller,
                           mount_point=controller_path,
                           option="-t cgroup -o {0}".format(controller),
                           chk_err=False)

        # 'cpu' and 'cpuacct' are symlink aliases of the combined hierarchy.
        for controller in ['cpu', 'cpuacct']:
            controller_path = _cgroup_path(controller)
            if not os.path.exists(controller_path):
                os.symlink(_cgroup_path('cpu,cpuacct'), controller_path)
    except Exception as exception:
        logger.error("Could not mount cgroups: {0}", ustr(exception))
def deploy_ssh_pubkey(self, username, pubkey):
    """
    Deploy authorized_key
    """
    key_path, thumbprint, key_value = pubkey
    if key_path is None:
        raise OSUtilError("Public key path is None")

    cryptutil = CryptUtil(conf.get_openssl_cmd())

    key_path = self._norm_path(key_path)
    key_dir = os.path.dirname(key_path)
    fileutil.mkdir(key_dir, mode=0o700, owner=username)

    if key_value is not None:
        # Direct key value: must be in OpenSSH "ssh-..." format.
        if not key_value.startswith("ssh-"):
            raise OSUtilError("Bad public key: {0}".format(key_value))
        fileutil.write_file(key_path, key_value)
    elif thumbprint is not None:
        # No value: extract the public key from the matching certificate.
        lib_dir = conf.get_lib_dir()
        crt_path = os.path.join(lib_dir, thumbprint + '.crt')
        if not os.path.isfile(crt_path):
            raise OSUtilError("Can't find {0}.crt".format(thumbprint))
        pub_path = os.path.join(lib_dir, thumbprint + '.pub')
        pub = cryptutil.get_pubkey_from_crt(crt_path)
        fileutil.write_file(pub_path, pub)
        self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
        self.openssl_to_openssh(pub_path, key_path)
        fileutil.chmod(pub_path, 0o600)
    else:
        raise OSUtilError("SSH public key Fingerprint and Value are None")

    self.set_selinux_context(key_path, 'unconfined_u:object_r:ssh_home_t:s0')
    fileutil.chowner(key_path, username)
    fileutil.chmod(key_path, 0o644)
def _write_file(self, filename, contents=None):
    """Create *filename* under the test tmp dir; contents default to the name.

    Returns the absolute path of the created file.
    """
    full_name = os.path.join(self.tmp_dir, filename)
    fileutil.mkdir(os.path.dirname(full_name))

    data = filename if contents is None else contents
    with open(full_name, 'w') as file_handler:
        file_handler.write(data)
    return full_name
def __init__(self, lib_dir):
    """Ensure the archive directory exists under *lib_dir* (best effort)."""
    self._source = os.path.join(lib_dir, ARCHIVE_DIRECTORY_NAME)
    if os.path.isdir(self._source):
        return
    try:
        fileutil.mkdir(self._source, mode=0o700)
    except IOError as io_error:
        # Creation failure is logged but not fatal.
        logger.error("{0} : {1}", self._source, io_error.strerror)
def download(self):
    """Download, unpack and initialize the extension package."""
    self.logger.verbose("Download extension package")
    self.set_operation(WALAEventOperation.Download)
    if self.pkg is None:
        raise ExtensionError("No package uri found")

    # Try each uri in turn until one download succeeds.
    package = None
    for uri in self.pkg.uris:
        try:
            package = self.protocol.download_ext_handler_pkg(uri.uri)
            if package is not None:
                break
        except Exception as e:
            logger.warn("Error while downloading extension: {0}", e)

    if package is None:
        raise ExtensionError("Failed to download extension")

    self.logger.verbose("Unpack extension package")
    pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip")
    try:
        fileutil.write_file(pkg_file, bytearray(package), asbin=True)
        zipfile.ZipFile(pkg_file).extractall(self.get_base_dir())
    except IOError as e:
        raise ExtensionError(u"Failed to write and unzip plugin", e)

    # Make every extracted file executable by its owner.
    for extracted in fileutil.get_all_files(self.get_base_dir()):
        fileutil.chmod(extracted, os.stat(extracted).st_mode | stat.S_IXUSR)

    self.report_event(message="Download succeeded")

    self.logger.info("Initialize extension directory")
    # Save HandlerManifest.json
    man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json')
    if man_file is None:
        raise ExtensionError("HandlerManifest.json not found")
    try:
        fileutil.write_file(self.get_manifest_file(),
                            fileutil.read_file(man_file, remove_bom=True))
    except IOError as e:
        raise ExtensionError(u"Failed to save HandlerManifest.json", e)

    # Create status and config dirs
    try:
        fileutil.mkdir(self.get_status_dir(), mode=0o700)
        fileutil.mkdir(self.get_conf_dir(), mode=0o700)
    except IOError as e:
        raise ExtensionError(u"Failed to create status or config dir", e)

    # Save HandlerEnvironment.json
    self.create_handler_env()
def download(self):
    """Fetch the extension package, unpack it, and set up its directories."""
    self.logger.info("Download extension package")
    self.set_operation(WALAEventOperation.Download)
    if self.pkg is None:
        raise ExtensionError("No package uri found")

    # Stop at the first uri that yields a package.
    package = None
    for uri in self.pkg.uris:
        try:
            package = self.protocol.download_ext_handler_pkg(uri.uri)
            if package is not None:
                break
        except Exception as error:
            logger.warn("Error while downloading extension: {0}", error)

    if package is None:
        raise ExtensionError("Failed to download extension")

    self.logger.info("Unpack extension package")
    pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip")
    try:
        fileutil.write_file(pkg_file, bytearray(package), asbin=True)
        zipfile.ZipFile(pkg_file).extractall(self.get_base_dir())
    except IOError as error:
        raise ExtensionError(u"Failed to write and unzip plugin", error)

    # Add user execute permission to all files under the base dir
    for handler_file in fileutil.get_all_files(self.get_base_dir()):
        fileutil.chmod(handler_file, os.stat(handler_file).st_mode | stat.S_IXUSR)

    self.report_event(message="Download succeeded")

    self.logger.info("Initialize extension directory")
    # Save HandlerManifest.json
    man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json')
    if man_file is None:
        raise ExtensionError("HandlerManifest.json not found")
    try:
        man = fileutil.read_file(man_file, remove_bom=True)
        fileutil.write_file(self.get_manifest_file(), man)
    except IOError as error:
        raise ExtensionError(u"Failed to save HandlerManifest.json", error)

    # Create status and config dirs
    try:
        fileutil.mkdir(self.get_status_dir(), mode=0o700)
        fileutil.mkdir(self.get_conf_dir(), mode=0o700)
    except IOError as error:
        raise ExtensionError(u"Failed to create status or config dir", error)

    # Save HandlerEnvironment.json
    self.create_handler_env()
def _mkdir(self, name):
    """Create the history directory for *name*; return True on success."""
    history_path = self.history_dir(name)
    try:
        fileutil.mkdir(history_path, mode=0o700)
        return True
    except IOError as io_error:
        logger.error("{0} : {1}".format(history_path, io_error.strerror))
        return False
def _mkdir(self, timestamp):
    """Create the history directory for *timestamp*; True if it was created."""
    target = self.history_dir(timestamp)
    try:
        fileutil.mkdir(target, mode=0o700)
    except IOError as err:
        logger.error("{0} : {1}".format(target, err.strerror))
        return False
    return True
def setUpClass(cls):
    """Create the temp directory tree used by the log collector tests."""
    AgentTestCase.setUpClass()
    # Fix: in a classmethod 'cls' is already the class, so 'cls.__class__'
    # is its metaclass and the old prefix was the metaclass name
    # (e.g. "type_") instead of this test class's name.
    prefix = "{0}_".format(cls.__name__)
    cls.tmp_dir = tempfile.mkdtemp(prefix=prefix)
    cls.root_collect_dir = os.path.join(cls.tmp_dir, "files_to_collect")
    mkdir(cls.root_collect_dir)
    cls._mock_constants()
def __init__(self, lib_dir):
    """Create the archive directory under *lib_dir* if it is missing."""
    self._source = os.path.join(lib_dir, ARCHIVE_DIRECTORY_NAME)
    if not os.path.isdir(self._source):
        try:
            fileutil.mkdir(self._source, mode=0o700)
        except IOError as io_error:
            # EEXIST means a concurrent creation won the race -- ignore it;
            # anything else is logged (but not fatal).
            if io_error.errno != errno.EEXIST:
                logger.error("{0} : {1}", self._source, io_error.strerror)
def _mkdir(self, timestamp):
    """Best-effort creation of the history directory for *timestamp*."""
    history_path = self.history_dir(timestamp)
    try:
        fileutil.mkdir(history_path, mode=0o700)
        return True
    except IOError as io_err:
        logger.error("{0} : {1}".format(history_path, io_err.strerror))
        return False
def __init__(self, lib_dir):
    """Ensure the archive directory exists beneath *lib_dir*."""
    self._source = lib_dir
    archive_path = os.path.join(self._source, _ARCHIVE_DIRECTORY_NAME)
    if os.path.exists(archive_path):
        return
    try:
        fileutil.mkdir(archive_path)
    except OSError as os_error:
        # EEXIST means a concurrent creation won the race; anything else is logged.
        if os_error.errno != errno.EEXIST:
            logger.error("{0} : {1}", self._source, os_error.strerror)
def __init__(self, lib_dir):
    """Create the archive directory under *lib_dir* when it is absent."""
    self._source = lib_dir
    archive_dir = os.path.join(self._source, ARCHIVE_DIRECTORY_NAME)
    if not os.path.exists(archive_dir):
        try:
            fileutil.mkdir(archive_dir)
        except OSError as err:
            # Tolerate a concurrent mkdir; log any other failure.
            if err.errno != errno.EEXIST:
                logger.error("{0} : {1}", self._source, err.strerror)
def setUp(self):
    """Prepare a fresh lib dir, events dir and log file for each test."""
    AgentTestCase.setUp(self)

    temp_lib = tempfile.mkdtemp()
    self.lib_dir = temp_lib
    self.event_dir = os.path.join(temp_lib, EVENTS_DIRECTORY)
    fileutil.mkdir(self.event_dir)

    _, log_path = tempfile.mkstemp(prefix="logfile-")
    self.log_file = log_path
    logger.reset_periodic()
def set_handler_state(self, handler_state):
    """Persist *handler_state* to the HandlerState file (errors are logged)."""
    state_dir = self.get_conf_dir()
    try:
        if not os.path.exists(state_dir):
            fileutil.mkdir(state_dir, mode=0o700)
        fileutil.write_file(os.path.join(state_dir, "HandlerState"), handler_state)
    except IOError as io_error:
        self.logger.error("Failed to set state: {0}", io_error)
def set_handler_state(self, handler_state):
    """Write *handler_state* to the HandlerState file, cleaning up on failure."""
    state_dir = self.get_conf_dir()
    state_file = os.path.join(state_dir, "HandlerState")
    try:
        if not os.path.exists(state_dir):
            fileutil.mkdir(state_dir, mode=0o700)
        fileutil.write_file(state_file, handler_state)
    except IOError as io_error:
        # Remove any partially written state file before logging the error.
        fileutil.clean_ioerror(io_error, paths=[state_file])
        self.logger.error("Failed to set state: {0}", io_error)
def test_del_lib_dir_files(self, distro_name, distro_version, distro_full_name, mock_conf):
    """Deprovision should schedule lib-dir and extension files for removal."""
    dirs = [
        'WALinuxAgent-2.2.26/config',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/status'
    ]
    files = [
        'HostingEnvironmentConfig.xml',
        'Incarnation',
        'Protocol',
        'SharedConfig.xml',
        'WireServerEndpoint',
        'Extensions.1.xml',
        'ExtensionsConfig.1.xml',
        'GoalState.1.xml',
        'Extensions.2.xml',
        'ExtensionsConfig.2.xml',
        'GoalState.2.xml',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq',
        'WALinuxAgent-2.2.26/config/0.settings'
    ]

    # Lay out the fake lib dir inside a temp directory.
    tmp = tempfile.mkdtemp()
    mock_conf.return_value = tmp
    for dir_name in dirs:
        fileutil.mkdir(os.path.join(tmp, dir_name))
    for file_name in files:
        fileutil.write_file(os.path.join(tmp, file_name), "Value")

    deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name)
    warnings = []
    actions = []
    deprovision_handler.del_lib_dir_files(warnings, actions)
    deprovision_handler.del_ext_handler_files(warnings, actions)

    self.assertTrue(len(warnings) == 0)
    self.assertTrue(len(actions) == 2)
    # Both scheduled actions must delete files via fileutil.rm_files.
    self.assertEqual(fileutil.rm_files, actions[0].func)
    self.assertEqual(fileutil.rm_files, actions[1].func)
    self.assertEqual(11, len(actions[0].args))
    self.assertEqual(3, len(actions[1].args))
    for f in actions[0].args:
        self.assertTrue(os.path.basename(f) in files)
    for f in actions[1].args:
        self.assertTrue(f[len(tmp) + 1:] in files)
def test_del_lib_dir_files(self, distro_name, distro_version, distro_full_name, mock_conf):
    """Verify the deprovision handler queues the expected file deletions."""
    dirs = [
        'WALinuxAgent-2.2.26/config',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/status'
    ]
    files = [
        'HostingEnvironmentConfig.xml',
        'Incarnation',
        'Protocol',
        'SharedConfig.xml',
        'WireServerEndpoint',
        'Extensions.1.xml',
        'ExtensionsConfig.1.xml',
        'GoalState.1.xml',
        'Extensions.2.xml',
        'ExtensionsConfig.2.xml',
        'GoalState.2.xml',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/42.settings',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerStatus',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/config/HandlerState',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/status/12.notstatus',
        'Microsoft.Azure.Extensions.CustomScript-2.0.6/mrseq',
        'WALinuxAgent-2.2.26/config/0.settings'
    ]

    # Build the fake lib-dir layout and point conf at it.
    lib_root = tempfile.mkdtemp()
    mock_conf.return_value = lib_root
    for sub_dir in dirs:
        fileutil.mkdir(os.path.join(lib_root, sub_dir))
    for rel_path in files:
        fileutil.write_file(os.path.join(lib_root, rel_path), "Value")

    deprovision_handler = get_deprovision_handler(distro_name, distro_version, distro_full_name)
    warnings, actions = [], []
    deprovision_handler.del_lib_dir_files(warnings, actions)
    deprovision_handler.del_ext_handler_files(warnings, actions)

    self.assertTrue(len(warnings) == 0)
    self.assertTrue(len(actions) == 2)
    # Each queued action removes files through fileutil.rm_files.
    self.assertEqual(fileutil.rm_files, actions[0].func)
    self.assertEqual(fileutil.rm_files, actions[1].func)
    self.assertEqual(11, len(actions[0].args))
    self.assertEqual(3, len(actions[1].args))
    for f in actions[0].args:
        self.assertTrue(os.path.basename(f) in files)
    for f in actions[1].args:
        self.assertTrue(f[len(lib_root) + 1:] in files)
def download(self):
    """Download, unpack and initialize the extension package."""
    self.logger.info("Download extension package")
    self.set_operation(WALAEventOperation.Download)
    if self.pkg is None:
        raise ExtensionError("No package uri found")

    # Attempt every uri; the last successful download wins.
    package = None
    for uri in self.pkg.uris:
        try:
            package = self.protocol.download_ext_handler_pkg(uri.uri)
        except ProtocolError as e:
            logger.warn("Failed download extension: {0}", e)

    if package is None:
        raise ExtensionError("Failed to download extension")

    self.logger.info("Unpack extension package")
    pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip")
    try:
        fileutil.write_file(pkg_file, bytearray(package), asbin=True)
        zipfile.ZipFile(pkg_file).extractall(self.get_base_dir())
    except IOError as e:
        raise ExtensionError(u"Failed to write and unzip plugin", e)

    # Make the extracted files executable by their owner.
    chmod = "find {0} -type f | xargs chmod u+x".format(self.get_base_dir())
    shellutil.run(chmod)

    self.report_event(message="Download succeeded")

    self.logger.info("Initialize extension directory")
    # Save HandlerManifest.json
    man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json')
    if man_file is None:
        raise ExtensionError("HandlerManifest.json not found")
    try:
        fileutil.write_file(self.get_manifest_file(),
                            fileutil.read_file(man_file, remove_bom=True))
    except IOError as e:
        raise ExtensionError(u"Failed to save HandlerManifest.json", e)

    # Create status and config dirs
    try:
        fileutil.mkdir(self.get_status_dir(), mode=0o700)
        fileutil.mkdir(self.get_conf_dir(), mode=0o700)
    except IOError as e:
        raise ExtensionError(u"Failed to create status or config dir", e)

    # Save HandlerEnvironment.json
    self.create_handler_env()
def download(self):
    """Fetch, unpack and initialize the extension handler package."""
    self.logger.info("Download extension package")
    self.set_operation(WALAEventOperation.Download)
    if self.pkg is None:
        raise ExtensionError("No package uri found")

    # Every uri is tried; each success overwrites the previous result.
    package = None
    for uri in self.pkg.uris:
        try:
            package = self.protocol.download_ext_handler_pkg(uri.uri)
        except ProtocolError as proto_error:
            logger.warn("Failed download extension: {0}", proto_error)

    if package is None:
        raise ExtensionError("Failed to download extension")

    self.logger.info("Unpack extension package")
    pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip")
    try:
        fileutil.write_file(pkg_file, bytearray(package), asbin=True)
        zipfile.ZipFile(pkg_file).extractall(self.get_base_dir())
    except IOError as io_error:
        raise ExtensionError(u"Failed to write and unzip plugin", io_error)

    # Grant owner-execute on every unpacked file.
    chmod = "find {0} -type f | xargs chmod u+x".format(self.get_base_dir())
    shellutil.run(chmod)

    self.report_event(message="Download succeeded")

    self.logger.info("Initialize extension directory")
    # Save HandlerManifest.json
    man_file = fileutil.search_file(self.get_base_dir(), 'HandlerManifest.json')
    if man_file is None:
        raise ExtensionError("HandlerManifest.json not found")
    try:
        man = fileutil.read_file(man_file, remove_bom=True)
        fileutil.write_file(self.get_manifest_file(), man)
    except IOError as io_error:
        raise ExtensionError(u"Failed to save HandlerManifest.json", io_error)

    # Create status and config dirs
    try:
        status_dir = self.get_status_dir()
        fileutil.mkdir(status_dir, mode=0o700)
        conf_dir = self.get_conf_dir()
        fileutil.mkdir(conf_dir, mode=0o700)
    except IOError as io_error:
        raise ExtensionError(u"Failed to create status or config dir", io_error)

    # Save HandlerEnvironment.json
    self.create_handler_env()
def mount_cgroups():
    """Mount the cgroup file system root and the controllers used by the agent.

    Tolerates individual controller mount failures unless every controller
    fails; OSError (e.g. read-only file system) is logged as a warning, any
    other failure as an error, and both are re-raised to the caller.
    """
    def cgroup_path(tail=""):
        # Path under the cgroup FS root; rstrip keeps the root path itself clean.
        return os.path.join(CGROUPS_FILE_SYSTEM_ROOT, tail).rstrip(os.path.sep)

    try:
        osutil = get_osutil()
        path = cgroup_path()
        if not os.path.exists(path):
            # First run: create the root and mount a tmpfs over it.
            fileutil.mkdir(path)
            osutil.mount(device='cgroup_root',
                         mount_point=path,
                         option="-t tmpfs",
                         chk_err=False)
        elif not os.path.isdir(cgroup_path()):
            logger.error("Could not mount cgroups: ordinary file at {0}", path)
            return

        controllers_to_mount = ['cpu,cpuacct', 'memory']
        errors = 0
        cpu_mounted = False
        for controller in controllers_to_mount:
            try:
                target_path = cgroup_path(controller)
                if not os.path.exists(target_path):
                    fileutil.mkdir(target_path)
                    osutil.mount(device=controller,
                                 mount_point=target_path,
                                 option="-t cgroup -o {0}".format(controller),
                                 chk_err=False)
                if controller == 'cpu,cpuacct':
                    cpu_mounted = True
            except Exception as exception:
                errors += 1
                # Give up only when no controller could be mounted at all.
                if errors == len(controllers_to_mount):
                    raise
                logger.warn("Could not mount cgroup controller {0}: {1}", controller, ustr(exception))

        # 'cpu' and 'cpuacct' become symlink aliases of the combined hierarchy.
        if cpu_mounted:
            for controller in ['cpu', 'cpuacct']:
                target_path = cgroup_path(controller)
                if not os.path.exists(target_path):
                    os.symlink(cgroup_path('cpu,cpuacct'), target_path)

    except OSError as oe:  # pylint: disable=C0103
        # log a warning for read-only file systems
        logger.warn("Could not mount cgroups: {0}", ustr(oe))
        raise
    except Exception as e:  # pylint: disable=C0103
        logger.error("Could not mount cgroups: {0}", ustr(e))
        raise
def __init__(self, verbose, conf_file_path=None):
    """
    Initialize agent running environment.

    Sets up (in order): stdout logging, configuration, file logging,
    optional console/telemetry log appenders, the extension log directory,
    and the event reporter.  The ordering matters: configuration must be
    loaded before any conf.get_* call below.
    """
    self.conf_file_path = conf_file_path
    self.osutil = get_osutil()

    # Init stdout log
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.STDOUT, level)

    # Init config -- explicit path wins over the OS default.
    conf_file_path = self.conf_file_path \
        if self.conf_file_path is not None \
        else self.osutil.get_agent_conf_file_path()
    conf.load_conf_from_file(conf_file_path)

    # Init log -- verbosity may also be forced on by configuration.
    verbose = verbose or conf.get_logs_verbose()
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.FILE, level,
                               path=conf.get_agent_log_file())

    # echo the log to /dev/console if the machine will be provisioned
    if conf.get_logs_console() and not ProvisionHandler.is_provisioned():
        self.__add_console_appender(level)

    # Route WARNING-and-above log records into telemetry events.
    if event.send_logs_to_telemetry():
        logger.add_logger_appender(logger.AppenderType.TELEMETRY,
                                   logger.LogLevel.WARNING,
                                   path=event.add_log_event)

    # Create the extension log directory; failure is logged but not fatal.
    ext_log_dir = conf.get_ext_log_dir()
    try:
        if os.path.isfile(ext_log_dir):
            raise Exception("{0} is a file".format(ext_log_dir))
        if not os.path.isdir(ext_log_dir):
            fileutil.mkdir(ext_log_dir, mode=0o755, owner="root")
    except Exception as e:
        logger.error("Exception occurred while creating extension "
                     "log directory {0}: {1}".format(ext_log_dir, e))

    # Init event reporter
    # Note that the reporter is not fully initialized here yet. Some telemetry fields are filled with data
    # originating from the goal state or IMDS, which requires a WireProtocol instance. Once a protocol
    # has been established, those fields must be explicitly initialized using
    # initialize_event_logger_vminfo_common_parameters(). Any events created before that initialization
    # will contain dummy values on those fields.
    event.init_event_status(conf.get_lib_dir())
    event_dir = os.path.join(conf.get_lib_dir(), event.EVENTS_DIRECTORY)
    event.init_event_logger(event_dir)
    event.enable_unhandled_err_dump("WALA")
def set_handler_state(self, handler_state):
    """Record *handler_state* in the handler's state file; errors are logged."""
    state_dir = self.get_handler_state_dir()
    if not os.path.exists(state_dir):
        try:
            fileutil.mkdir(state_dir, 0o700)
        except IOError as mkdir_error:
            self.logger.error("Failed to create state dir: {0}", mkdir_error)

    try:
        fileutil.write_file(os.path.join(state_dir, "state"), handler_state)
    except IOError as write_error:
        self.logger.error("Failed to set state: {0}", write_error)
def daemon(self, child_args=None):
    """Run the daemon main loop: provision the VM once, then keep the latest
    agent running, restarting it as needed.

    child_args: extra arguments forwarded to the agent process on each start.
    """
    logger.info("Run daemon")

    self.protocol_util = get_protocol_util()
    self.scvmm_handler = get_scvmm_handler()
    self.resourcedisk_handler = get_resourcedisk_handler()
    self.rdma_handler = get_rdma_handler()
    self.provision_handler = get_provision_handler()
    self.update_handler = get_update_handler()

    # Create lib dir
    if not os.path.isdir(conf.get_lib_dir()):
        fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
    os.chdir(conf.get_lib_dir())

    if conf.get_detect_scvmm_env():
        self.scvmm_handler.run()

    if conf.get_resourcedisk_format():
        self.resourcedisk_handler.run()

    # Always redetermine the protocol start (e.g., wireserver vs.
    # on-premise) since a VHD can move between environments
    self.protocol_util.clear_protocol()

    self.provision_handler.run()

    # Enable RDMA, continue in errors
    if conf.enable_rdma():
        self.rdma_handler.install_driver()

        logger.info("RDMA capabilities are enabled in configuration")
        try:
            # Ensure the most recent SharedConfig is available
            # - Changes to RDMA state may not increment the goal state
            #   incarnation number. A forced update ensures the most
            #   current values.
            protocol = self.protocol_util.get_protocol()
            client = protocol.client
            if client is None or type(client) is not WireClient:
                raise Exception("Attempt to setup RDMA without Wireserver")
            client.update_goal_state(forced=True)

            setup_rdma_device()
        except Exception as e:
            logger.error("Error setting up rdma device: %s" % e)
    else:
        logger.info("RDMA capabilities are not enabled, skipping")

    # Keep re-launching the most recent agent version until stopped.
    while self.running:
        self.update_handler.run_latest(child_args=child_args)
def daemon(self, child_args=None):
    """
    Daemon main loop: run one-time setup/provisioning handlers, then keep
    the latest agent running until self.running is cleared.

    :param child_args: extra command-line args forwarded to the child agent
    """
    logger.info("Run daemon")

    self.protocol_util = get_protocol_util()
    self.scvmm_handler = get_scvmm_handler()
    self.resourcedisk_handler = get_resourcedisk_handler()
    self.rdma_handler = get_rdma_handler()
    self.provision_handler = get_provision_handler()
    self.update_handler = get_update_handler()

    # Create lib dir
    if not os.path.isdir(conf.get_lib_dir()):
        fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
    os.chdir(conf.get_lib_dir())

    if conf.get_detect_scvmm_env():
        self.scvmm_handler.run()

    if conf.get_resourcedisk_format():
        self.resourcedisk_handler.run()

    # Always redetermine the protocol start (e.g., wireserver vs.
    # on-premise) since a VHD can move between environments
    self.protocol_util.clear_protocol()

    self.provision_handler.run()

    # Enable RDMA, continue in errors
    if conf.enable_rdma():
        self.rdma_handler.install_driver()

        logger.info("RDMA capabilities are enabled in configuration")
        try:
            # Ensure the most recent SharedConfig is available
            # - Changes to RDMA state may not increment the goal state
            #   incarnation number. A forced update ensures the most
            #   current values.
            protocol = self.protocol_util.get_protocol()
            client = protocol.client
            if client is None or type(client) is not WireClient:
                raise Exception("Attempt to setup RDMA without Wireserver")
            client.update_goal_state(forced=True)

            setup_rdma_device()
        except Exception as e:
            logger.error("Error setting up rdma device: %s" % e)
    else:
        logger.info("RDMA capabilities are not enabled, skipping")

    while self.running:
        self.update_handler.run_latest(child_args=child_args)
def __init__(self, verbose, conf_file_path=None):
    """
    Initialize agent running environment.

    :param verbose: enable VERBOSE-level logging when truthy
    :param conf_file_path: optional explicit path to the agent conf file;
        falls back to the osutil default when None
    """
    self.conf_file_path = conf_file_path
    self.osutil = get_osutil()
    #Init stdout log
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.STDOUT, level)

    #Init config
    conf_file_path = self.conf_file_path \
        if self.conf_file_path is not None \
        else self.osutil.get_agent_conf_file_path()
    conf.load_conf_from_file(conf_file_path)

    #Init log
    # configuration may force verbose logging even when the flag was off
    verbose = verbose or conf.get_logs_verbose()
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.FILE, level,
                               path="/var/log/waagent.log")
    if conf.get_logs_console():
        logger.add_logger_appender(logger.AppenderType.CONSOLE, level,
                                   path="/dev/console")
    # See issue #1035
    # logger.add_logger_appender(logger.AppenderType.TELEMETRY,
    #                            logger.LogLevel.WARNING,
    #                            path=event.add_log_event)

    ext_log_dir = conf.get_ext_log_dir()
    try:
        if os.path.isfile(ext_log_dir):
            raise Exception("{0} is a file".format(ext_log_dir))
        if not os.path.isdir(ext_log_dir):
            fileutil.mkdir(ext_log_dir, mode=0o755, owner="root")
    except Exception as e:
        # Non-fatal: log-dir creation failure is reported and init continues.
        logger.error("Exception occurred while creating extension "
                     "log directory {0}: {1}".format(ext_log_dir, e))

    #Init event reporter
    event.init_event_status(conf.get_lib_dir())
    event_dir = os.path.join(conf.get_lib_dir(), "events")
    event.init_event_logger(event_dir)
    event.enable_unhandled_err_dump("WALA")
def __init__(self, ext_handler, protocol):
    """Per-instance runtime state for one extension handler."""
    self.ext_handler = ext_handler
    self.protocol = protocol
    self.operation = None
    self.pkg = None
    self.pkg_file = None
    self.is_upgrade = False

    self.set_logger()

    # Best-effort: a failure to create the log dir is reported, not raised.
    try:
        fileutil.mkdir(self.get_log_dir(), mode=0o755)
    except IOError as error:
        self.logger.error(u"Failed to create extension log dir: {0}", error)

    command_log = os.path.join(self.get_log_dir(), "CommandExecution.log")
    self.logger.add_appender(logger.AppenderType.FILE,
                             logger.LogLevel.INFO,
                             command_log)
def __init__(self, verbose, conf_file_path=None):
    """
    Initialize agent running environment.

    :param verbose: enable VERBOSE-level logging when truthy
    :param conf_file_path: optional explicit path to the agent conf file;
        falls back to the osutil default when None
    """
    self.conf_file_path = conf_file_path
    self.osutil = get_osutil()
    #Init stdout log
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.STDOUT, level)

    #Init config
    conf_file_path = self.conf_file_path \
        if self.conf_file_path is not None \
        else self.osutil.get_agent_conf_file_path()
    conf.load_conf_from_file(conf_file_path)

    #Init log
    # configuration may force verbose logging even when the flag was off
    verbose = verbose or conf.get_logs_verbose()
    level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO
    logger.add_logger_appender(logger.AppenderType.FILE, level,
                               path="/var/log/waagent.log")
    if conf.get_logs_console():
        logger.add_logger_appender(logger.AppenderType.CONSOLE, level,
                                   path="/dev/console")
    # See issue #1035
    # logger.add_logger_appender(logger.AppenderType.TELEMETRY,
    #                            logger.LogLevel.WARNING,
    #                            path=event.add_log_event)

    ext_log_dir = conf.get_ext_log_dir()
    try:
        if os.path.isfile(ext_log_dir):
            raise Exception("{0} is a file".format(ext_log_dir))
        if not os.path.isdir(ext_log_dir):
            fileutil.mkdir(ext_log_dir, mode=0o755, owner="root")
    except Exception as e:
        # Non-fatal: log-dir creation failure is reported and init continues.
        logger.error(
            "Exception occurred while creating extension "
            "log directory {0}: {1}".format(ext_log_dir, e))

    #Init event reporter
    event.init_event_status(conf.get_lib_dir())
    event_dir = os.path.join(conf.get_lib_dir(), "events")
    event.init_event_logger(event_dir)
    event.enable_unhandled_err_dump("WALA")
def __init__(self, ext_handler, protocol):
    """Per-instance runtime state for one extension handler."""
    self.ext_handler = ext_handler
    self.protocol = protocol
    self.operation = None
    self.pkg = None
    self.is_upgrade = False

    # Tag every log line from this instance with the handler's full name.
    log_prefix = "[{0}]".format(self.get_full_name())
    self.logger = logger.Logger(logger.DEFAULT_LOGGER, log_prefix)

    # Best-effort: a failure to create the log dir is reported, not raised.
    try:
        fileutil.mkdir(self.get_log_dir(), mode=0o744)
    except IOError as error:
        self.logger.error(u"Failed to create extension log dir: {0}", error)

    command_log = os.path.join(self.get_log_dir(), "CommandExecution.log")
    self.logger.add_appender(logger.AppenderType.FILE,
                             logger.LogLevel.INFO,
                             command_log)
def __init__(self, ext_handler, protocol):
    """Per-instance runtime state for one extension handler."""
    self.ext_handler = ext_handler
    self.protocol = protocol
    self.operation = None
    self.pkg = None

    # Tag every log line from this instance with the handler's full name.
    log_prefix = "[{0}]".format(self.get_full_name())
    self.logger = logger.Logger(logger.DEFAULT_LOGGER, log_prefix)

    # Best-effort: a failure to create the log dir is reported, not raised.
    try:
        fileutil.mkdir(self.get_log_dir(), mode=0o744)
    except IOError as error:
        self.logger.error(u"Failed to create extension log dir: {0}", error)

    command_log = os.path.join(self.get_log_dir(), "CommandExecution.log")
    self.logger.add_appender(logger.AppenderType.FILE,
                             logger.LogLevel.INFO,
                             command_log)
def __init__(self, ext_handler, protocol):
    """Per-instance runtime state for one extension handler."""
    self.ext_handler = ext_handler
    self.protocol = protocol
    self.operation = None
    self.pkg = None
    self.pkg_file = None
    self.is_upgrade = False
    self.logger = None

    self.set_logger()

    # Best-effort: a failure to create the log dir is reported, not raised.
    try:
        fileutil.mkdir(self.get_log_dir(), mode=0o755)
    except IOError as error:
        self.logger.error(u"Failed to create extension log dir: {0}", error)

    command_log = os.path.join(self.get_log_dir(), "CommandExecution.log")
    self.logger.add_appender(logger.AppenderType.FILE,
                             logger.LogLevel.INFO,
                             command_log)
def daemon(self):
    """
    Daemon entry point: run one-time setup/provisioning handlers, then
    loop running the latest agent until self.running is cleared.
    """
    logger.info("Run daemon")

    self.protocol_util = get_protocol_util()
    self.scvmm_handler = get_scvmm_handler()
    self.resourcedisk_handler = get_resourcedisk_handler()
    self.rdma_handler = get_rdma_handler()
    self.provision_handler = get_provision_handler()
    self.update_handler = get_update_handler()

    # Create lib dir
    if not os.path.isdir(conf.get_lib_dir()):
        fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
    os.chdir(conf.get_lib_dir())

    if conf.get_detect_scvmm_env():
        self.scvmm_handler.run()

    if conf.get_resourcedisk_format():
        self.resourcedisk_handler.run()

    # Always redetermine the protocol start (e.g., wireserver vs.
    # on-premise) since a VHD can move between environments
    self.protocol_util.clear_protocol()

    self.provision_handler.run()

    # Enable RDMA, continue in errors
    if conf.enable_rdma():
        self.rdma_handler.install_driver()
        logger.info("RDMA capabilities are enabled in configuration")
        try:
            setup_rdma_device()
        except Exception as e:
            logger.error("Error setting up rdma device: %s" % e)
    else:
        logger.info("RDMA capabilities are not enabled, skipping")

    while self.running:
        self.update_handler.run_latest()
def daemon(self):
    """
    Daemon entry point: run one-time setup/provisioning handlers, then
    loop running the latest agent until self.running is cleared.
    """
    logger.info("Run daemon")

    self.protocol_util = get_protocol_util()
    self.scvmm_handler = get_scvmm_handler()
    self.resourcedisk_handler = get_resourcedisk_handler()
    self.rdma_handler = get_rdma_handler()
    self.provision_handler = get_provision_handler()
    self.update_handler = get_update_handler()

    # Create lib dir
    if not os.path.isdir(conf.get_lib_dir()):
        fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
    os.chdir(conf.get_lib_dir())

    if conf.get_detect_scvmm_env():
        self.scvmm_handler.run()

    if conf.get_resourcedisk_format():
        self.resourcedisk_handler.run()

    # Always redetermine the protocol start (e.g., wireserver vs.
    # on-premise) since a VHD can move between environments
    self.protocol_util.clear_protocol()

    self.provision_handler.run()

    # Enable RDMA, continue in errors
    if conf.enable_rdma():
        self.rdma_handler.install_driver()
        logger.info("RDMA capabilities are enabled in configuration")
        try:
            setup_rdma_device()
        except Exception as e:
            logger.error("Error setting up rdma device: %s" % e)
    else:
        logger.info("RDMA capabilities are not enabled, skipping")

    while self.running:
        self.update_handler.run_latest()
def deploy_ssh_keypair(self, username, keypair):
    """
    Deploy id_rsa and id_rsa.pub

    :param username: owner of the deployed key files
    :param keypair: tuple (path, thumbprint); path is the target private-key
        location, thumbprint names the <thumbprint>.prv file in lib_dir
    :raises OSUtilError: when the <thumbprint>.prv file is missing
    """
    path, thumbprint = keypair
    path = self._norm_path(path)
    dir_path = os.path.dirname(path)
    fileutil.mkdir(dir_path, mode=0o700, owner=username)
    lib_dir = conf.get_lib_dir()
    prv_path = os.path.join(lib_dir, thumbprint + '.prv')
    if not os.path.isfile(prv_path):
        raise OSUtilError("Can't find {0}.prv".format(thumbprint))
    shutil.copyfile(prv_path, path)
    pub_path = path + '.pub'
    crytputil = CryptUtil(conf.get_openssl_cmd())
    pub = crytputil.get_pubkey_from_prv(prv_path)
    fileutil.write_file(pub_path, pub)
    self.set_selinux_context(pub_path,
                             'unconfined_u:object_r:ssh_home_t:s0')
    self.set_selinux_context(path,
                             'unconfined_u:object_r:ssh_home_t:s0')
    # Fix: the original swapped these modes, leaving the PRIVATE key
    # world-readable (0o644) and the public key 0o600. The private key
    # must be 0o600; the public key may be 0o644.
    os.chmod(path, 0o600)
    os.chmod(pub_path, 0o644)
def deploy_ssh_keypair(self, username, keypair):
    """
    Deploy id_rsa and id_rsa.pub

    :param username: owner of the deployed key files
    :param keypair: tuple (path, thumbprint); path is the target private-key
        location, thumbprint names the <thumbprint>.prv file in lib_dir
    :raises OSUtilError: when the <thumbprint>.prv file is missing
    """
    path, thumbprint = keypair
    path = self._norm_path(path)
    dir_path = os.path.dirname(path)
    fileutil.mkdir(dir_path, mode=0o700, owner=username)
    lib_dir = conf.get_lib_dir()
    prv_path = os.path.join(lib_dir, thumbprint + '.prv')
    if not os.path.isfile(prv_path):
        raise OSUtilError("Can't find {0}.prv".format(thumbprint))
    shutil.copyfile(prv_path, path)
    pub_path = path + '.pub'
    crytputil = CryptUtil(conf.get_openssl_cmd())
    pub = crytputil.get_pubkey_from_prv(prv_path)
    fileutil.write_file(pub_path, pub)
    self.set_selinux_context(pub_path,
                             'unconfined_u:object_r:ssh_home_t:s0')
    self.set_selinux_context(path,
                             'unconfined_u:object_r:ssh_home_t:s0')
    # Fix: the original swapped these modes, leaving the PRIVATE key
    # world-readable (0o644) and the public key 0o600. The private key
    # must be 0o600; the public key may be 0o644.
    os.chmod(path, 0o600)
    os.chmod(pub_path, 0o644)
def set_handler_status(self, status="NotReady", message="", code=0):
    """Serialize this handler's status and write it to the 'status' file."""
    state_dir = self.get_handler_state_dir()
    if not os.path.exists(state_dir):
        # Best-effort: the write below will surface any remaining failure.
        try:
            fileutil.mkdir(state_dir, 0o700)
        except IOError as mkdir_error:
            self.logger.error("Failed to create state dir: {0}", mkdir_error)

    handler_status = ExtHandlerStatus()
    handler_status.name = self.ext_handler.name
    handler_status.version = self.ext_handler.properties.version
    handler_status.message = message
    handler_status.code = code
    handler_status.status = status

    status_file = os.path.join(state_dir, "status")
    try:
        serialized = json.dumps(get_properties(handler_status))
        fileutil.write_file(status_file, serialized)
    except (IOError, ValueError, ProtocolError) as save_error:
        self.logger.error("Failed to save handler status: {0}", save_error)
def test_command_extension_log_truncates_correctly(self, mock_log_dir):
    """Creating an ExtHandlerInstance with a small execution_log_max_size
    must truncate an oversized CommandExecution.log, keeping the newest
    line only."""
    log_dir_path = os.path.join(self.tmp_dir, "log_directory")
    mock_log_dir.return_value = log_dir_path

    ext_handler_props = ExtHandlerProperties()
    ext_handler_props.version = "1.2.3"
    ext_handler = ExtHandler(name='foo')
    ext_handler.properties = ext_handler_props

    first_line = "This is the first line!"
    second_line = "This is the second line."
    old_logfile_contents = "{first_line}\n{second_line}\n".format(first_line=first_line, second_line=second_line)

    log_file_path = os.path.join(log_dir_path, "foo", "CommandExecution.log")

    fileutil.mkdir(os.path.join(log_dir_path, "foo"), mode=0o755)
    with open(log_file_path, "a") as log_file:
        log_file.write(old_logfile_contents)

    # NOTE: len(first_line) + len(second_line)//2 (no outer parentheses)
    # gives 23 + 12 = 35 -- large enough to retain the 25-char second line
    # but smaller than both lines together (49 chars), so only the newest
    # line survives truncation.
    _ = ExtHandlerInstance(ext_handler=ext_handler, protocol=None,
                           execution_log_max_size=(len(first_line)+len(second_line)//2))

    with open(log_file_path) as truncated_log_file:
        self.assertEqual(truncated_log_file.read(),
                         "{second_line}\n".format(second_line=second_line))
def mount_resource_disk(self, mount_point):
    """
    Partition (if needed), format (if needed), and mount the Azure resource
    disk at mount_point. Returns the mount point in use.

    :param mount_point: directory where the resource disk should be mounted
    :raises ResourceDiskError: on detection, partitioning, or mount failure
    """
    device = self.osutil.device_for_ide_port(1)
    if device is None:
        raise ResourceDiskError("unable to detect disk topology")

    device = "/dev/{0}".format(device)
    partition = device + "1"
    mount_list = shellutil.run_get_output("mount")[1]
    existing = self.osutil.get_mount_point(mount_list, device)

    if existing:
        logger.info("Resource disk [{0}] is already mounted [{1}]",
                    partition,
                    existing)
        return existing

    try:
        fileutil.mkdir(mount_point, mode=0o755)
    except OSError as ose:
        msg = "Failed to create mount point " \
              "directory [{0}]: {1}".format(mount_point, ose)
        logger.error(msg)
        raise ResourceDiskError(msg=msg, inner=ose)

    logger.info("Examining partition table")
    ret = shellutil.run_get_output("parted {0} print".format(device))
    if ret[0]:
        raise ResourceDiskError("Could not determine partition info for "
                                "{0}: {1}".format(device, ret[1]))

    # xfs uses a lowercase force flag; ext* use -F
    force_option = 'F'
    if self.fs == 'xfs':
        force_option = 'f'
    mkfs_string = "mkfs.{0} -{2} {1}".format(self.fs, partition, force_option)

    if "gpt" in ret[1]:
        logger.info("GPT detected, finding partitions")
        parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)]
        logger.info("Found {0} GPT partition(s).", len(parts))
        if len(parts) > 1:
            # Multiple partitions: rebuild a single primary partition
            logger.info("Removing old GPT partitions")
            for i in range(1, len(parts) + 1):
                logger.info("Remove partition {0}", i)
                shellutil.run("parted {0} rm {1}".format(device, i))

            logger.info("Creating new GPT partition")
            shellutil.run("parted {0} mkpart primary 0% 100%".format(device))

            logger.info("Format partition [{0}]", mkfs_string)
            shellutil.run(mkfs_string)
    else:
        logger.info("GPT not detected, determining filesystem")
        ret = self.change_partition_type(suppress_message=True,
                                         option_str="{0} 1 -n".format(device))
        ptype = ret[1].strip()
        if ptype == "7" and self.fs != "ntfs":
            # partition type 7 == NTFS; convert to Linux (83) and reformat
            logger.info("The partition is formatted with ntfs, updating "
                        "partition type to 83")
            self.change_partition_type(suppress_message=False,
                                       option_str="{0} 1 83".format(device))
            self.reread_partition_table(device)
            logger.info("Format partition [{0}]", mkfs_string)
            shellutil.run(mkfs_string)
        else:
            logger.info("The partition type is {0}", ptype)

    mount_options = conf.get_resourcedisk_mountoptions()
    mount_string = self.get_mount_string(mount_options,
                                         partition,
                                         mount_point)
    # Wait for the kernel to expose the partition device node
    attempts = 5
    while not os.path.exists(partition) and attempts > 0:
        logger.info("Waiting for partition [{0}], {1} attempts remaining",
                    partition,
                    attempts)
        sleep(5)
        attempts -= 1

    if not os.path.exists(partition):
        raise ResourceDiskError("Partition was not created [{0}]".format(partition))

    logger.info("Mount resource disk [{0}]", mount_string)
    ret, output = shellutil.run_get_output(mount_string, chk_err=False)
    # if the exit code is 32, then the resource disk can be already mounted
    if ret == 32 and output.find("is already mounted") != -1:
        logger.warn("Could not mount resource disk: {0}", output)
    elif ret != 0:
        # Some kernels seem to issue an async partition re-read after a
        # 'parted' command invocation. This causes mount to fail if the
        # partition re-read is not complete by the time mount is
        # attempted. Seen in CentOS 7.2. Force a sequential re-read of
        # the partition and try mounting.
        logger.warn("Failed to mount resource disk. "
                    "Retry mounting after re-reading partition info.")

        self.reread_partition_table(device)

        ret, output = shellutil.run_get_output(mount_string)
        if ret:
            logger.warn("Failed to mount resource disk. "
                        "Attempting to format and retry mount. [{0}]",
                        output)

            shellutil.run(mkfs_string)
            ret, output = shellutil.run_get_output(mount_string)
            if ret:
                raise ResourceDiskError("Could not mount {0} "
                                        "after syncing partition table: "
                                        "[{1}] {2}".format(partition,
                                                           ret,
                                                           output))

    logger.info("Resource disk {0} is mounted at {1} with {2}",
                device,
                mount_point,
                self.fs)
    return mount_point
def mount_resource_disk(self, mount_point):
    """
    Detect, (re)format if needed, and mount the resource disk on FreeBSD.
    Only 'ufs' is supported. Returns the mount point in use.

    :param mount_point: directory where the resource disk should be mounted
    :raises ResourceDiskError: on detection, format, or mount failure
    """
    fs = self.fs
    if fs != 'ufs':
        raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs))

    # 1. Detect device
    err, output = shellutil.run_get_output('gpart list')
    if err:
        raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output))
    disks = self.parse_gpart_list(output)

    device = self.osutil.device_for_ide_port(1)
    if device is None or not device in disks:
        # fallback logic to find device
        err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0')
        if err:
            # try again on "3:1:0"
            err, output = shellutil.run_get_output('camcontrol periphlist 3:1:0')
            if err:
                raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output))

        # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n'
        for line in output.split('\n'):
            index = line.find(':')
            if index > 0:
                geom_name = line[:index]
                if geom_name in disks:
                    device = geom_name
                    break

    if not device:
        raise ResourceDiskError("Unable to detect resource disk device.")
    logger.info('Resource disk device {0} found.', device)

    # 2. Detect partition
    partition_table_type = disks[device]

    if partition_table_type == 'MBR':
        provider_name = device + 's1'
    elif partition_table_type == 'GPT':
        provider_name = device + 'p2'
    else:
        raise ResourceDiskError("Unsupported partition table type:{0}".format(output))

    err, output = shellutil.run_get_output('gpart show -p {0}'.format(device))
    if err or output.find(provider_name) == -1:
        raise ResourceDiskError("Resource disk partition not found.")

    partition = '/dev/' + provider_name
    logger.info('Resource disk partition {0} found.', partition)

    # 3. Mount partition
    mount_list = shellutil.run_get_output("mount")[1]
    existing = self.osutil.get_mount_point(mount_list, partition)

    if existing:
        logger.info("Resource disk {0} is already mounted", partition)
        return existing

    fileutil.mkdir(mount_point, mode=0o755)
    mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point)
    err = shellutil.run(mount_cmd, chk_err=False)
    if err:
        # First mount failed: assume unformatted, create the filesystem
        # and retry once.
        logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition))
        err, output = shellutil.run_get_output('newfs -U {0}'.format(partition))
        if err:
            raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}"
                                    .format(partition, output))
        err, output = shellutil.run_get_output(mount_cmd, chk_err=False)
        if err:
            raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output))

    logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}",
                partition, mount_point, fs)
    return mount_point
def mount_resource_disk(self, mount_point):
    """
    Detect, (re)label/format if needed, and mount the resource disk on
    OpenBSD. Only 'ffs' is supported. Returns the mount point in use.

    :param mount_point: directory where the resource disk should be mounted
    :raises ResourceDiskError: on detection, format, or mount failure
    """
    fs = self.fs
    if fs != 'ffs':
        raise ResourceDiskError("Unsupported filesystem type: {0}, only "
                                "ufs/ffs is supported.".format(fs))

    # 1. Get device
    device = self.osutil.device_for_ide_port(1)

    if not device:
        raise ResourceDiskError("Unable to detect resource disk device.")
    logger.info('Resource disk device {0} found.', device)

    # 2. Get partition
    partition = "/dev/{0}a".format(device)

    # 3. Mount partition
    mount_list = shellutil.run_get_output("mount")[1]
    existing = self.osutil.get_mount_point(mount_list, partition)

    if existing:
        logger.info("Resource disk {0} is already mounted", partition)
        return existing

    fileutil.mkdir(mount_point, mode=0o755)

    mount_cmd = 'mount -t {0} {1} {2}'.format(self.fs, partition, mount_point)
    err = shellutil.run(mount_cmd, chk_err=False)
    if err:
        # First mount failed: relabel/format the disk, then retry once.
        logger.info('Creating {0} filesystem on {1}'.format(fs, device))

        fdisk_cmd = "/sbin/fdisk -yi {0}".format(device)
        err, output = shellutil.run_get_output(fdisk_cmd, chk_err=False)
        if err:
            raise ResourceDiskError("Failed to create new MBR on {0}, "
                                    "error: {1}".format(device, output))

        size_mb = conf.get_resourcedisk_swap_size_mb()
        if size_mb:
            # swap size is capped at 512 GiB (in MB)
            if size_mb > 512 * 1024:
                size_mb = 512 * 1024
            disklabel_cmd = ("echo -e '{0} 1G-* 50%\nswap 1-{1}M 50%' "
                             "| disklabel -w -A -T /dev/stdin "
                             "{2}").format(mount_point, size_mb, device)
            ret, output = shellutil.run_get_output(
                disklabel_cmd, chk_err=False)
            if ret:
                raise ResourceDiskError("Failed to create new disklabel "
                                        "on {0}, error "
                                        "{1}".format(device, output))

        err, output = shellutil.run_get_output("newfs -O2 {0}a"
                                               "".format(device))
        if err:
            raise ResourceDiskError("Failed to create new filesystem on "
                                    "partition {0}, error "
                                    "{1}".format(partition, output))

        err, output = shellutil.run_get_output(mount_cmd, chk_err=False)
        if err:
            raise ResourceDiskError("Failed to mount partition {0}, "
                                    "error {1}".format(partition, output))

    logger.info("Resource disk partition {0} is mounted at {1} with fstype "
                "{2}", partition, mount_point, fs)
    return mount_point
def download(self):
    """
    Download, unzip, and initialize the extension package.

    Tries each package URI in random order until one downloads, unzips it
    into the handler base dir, marks files user-executable, persists
    HandlerManifest.json, creates the status/config dirs (writing an
    initial 'transitioning' status), and writes HandlerEnvironment.json.

    :raises ExtensionError: when no URI is available, every download fails,
        the zip cannot be extracted, or manifest/state files cannot be
        written.
    """
    begin_utc = datetime.datetime.utcnow()
    self.logger.verbose("Download extension package")
    self.set_operation(WALAEventOperation.Download)

    if self.pkg is None:
        raise ExtensionError("No package uri found")

    # Fix: shuffle a copy. The original shuffled self.pkg.uris itself
    # (random.shuffle mutates in place), silently reordering the shared
    # goal-state package object.
    uris_shuffled = list(self.pkg.uris)
    random.shuffle(uris_shuffled)
    file_downloaded = False

    for uri in uris_shuffled:
        try:
            destination = os.path.join(conf.get_lib_dir(),
                                       os.path.basename(uri.uri) + ".zip")
            file_downloaded = self.protocol.download_ext_handler_pkg(uri.uri,
                                                                     destination)

            if file_downloaded and os.path.exists(destination):
                self.pkg_file = destination
                break

        except Exception as e:
            logger.warn("Error while downloading extension: {0}", ustr(e))

    if not file_downloaded:
        raise ExtensionError("Failed to download extension", code=1001)

    self.logger.verbose("Unzip extension package")
    try:
        zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir())
        os.remove(self.pkg_file)
    except IOError as e:
        fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file])
        raise ExtensionError(u"Failed to unzip extension package", e, code=1001)

    # Add user execute permission to all files under the base dir
    # ('extracted_file' avoids shadowing the 'file' builtin)
    for extracted_file in fileutil.get_all_files(self.get_base_dir()):
        fileutil.chmod(extracted_file,
                       os.stat(extracted_file).st_mode | stat.S_IXUSR)

    duration = elapsed_milliseconds(begin_utc)
    self.report_event(message="Download succeeded", duration=duration)

    self.logger.info("Initialize extension directory")
    # Save HandlerManifest.json
    man_file = fileutil.search_file(self.get_base_dir(),
                                    'HandlerManifest.json')

    if man_file is None:
        raise ExtensionError("HandlerManifest.json not found")

    try:
        man = fileutil.read_file(man_file, remove_bom=True)
        fileutil.write_file(self.get_manifest_file(), man)
    except IOError as e:
        fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file])
        raise ExtensionError(u"Failed to save HandlerManifest.json", e)

    # Create status and config dir
    try:
        status_dir = self.get_status_dir()
        fileutil.mkdir(status_dir, mode=0o700)

        seq_no, status_path = self.get_status_file_path()
        if status_path is not None:
            # Seed an initial 'transitioning' status so the handler is
            # reported while it is being enabled.
            now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
            status = {
                "version": 1.0,
                "timestampUTC": now,
                "status": {
                    "name": self.ext_handler.name,
                    "operation": "Enabling Handler",
                    "status": "transitioning",
                    "code": 0
                }
            }
            fileutil.write_file(status_path, json.dumps(status))

        conf_dir = self.get_conf_dir()
        fileutil.mkdir(conf_dir, mode=0o700)
    except IOError as e:
        fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file])
        raise ExtensionError(u"Failed to create status or config dir", e)

    # Save HandlerEnvironment.json
    self.create_handler_env()
def initialize_environment(self):
    """Ensure the agent lib dir exists, then make it the working directory."""
    lib_dir = conf.get_lib_dir()
    # Create lib dir
    if not os.path.isdir(lib_dir):
        fileutil.mkdir(lib_dir, mode=0o700)
    os.chdir(lib_dir)
def mount_resource_disk(self, mount_point, fs):
    """
    Partition (if needed), format (if needed), and mount the Azure resource
    disk at mount_point. Returns the mount point in use.

    :param mount_point: directory where the resource disk should be mounted
    :param fs: filesystem type to create/mount (e.g. ext4, xfs, ntfs)
    :raises ResourceDiskError: on detection, partitioning, or mount failure
    """
    device = self.osutil.device_for_ide_port(1)
    if device is None:
        raise ResourceDiskError("unable to detect disk topology")

    device = "/dev/{0}".format(device)
    partition = device + "1"
    mount_list = shellutil.run_get_output("mount")[1]
    existing = self.osutil.get_mount_point(mount_list, device)

    if existing:
        logger.info("Resource disk [{0}] is already mounted [{1}]",
                    partition,
                    existing)
        return existing

    fileutil.mkdir(mount_point, mode=0o755)
    logger.info("Examining partition table")
    ret = shellutil.run_get_output("parted {0} print".format(device))
    if ret[0]:
        raise ResourceDiskError("Could not determine partition info for "
                                "{0}: {1}".format(device, ret[1]))

    # xfs uses a lowercase force flag; ext* use -F
    force_option = 'F'
    if fs == 'xfs':
        force_option = 'f'
    mkfs_string = "mkfs.{0} {1} -{2}".format(fs, partition, force_option)

    if "gpt" in ret[1]:
        logger.info("GPT detected, finding partitions")
        parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)]
        logger.info("Found {0} GPT partition(s).", len(parts))
        if len(parts) > 1:
            # Multiple partitions: rebuild a single primary partition
            logger.info("Removing old GPT partitions")
            for i in range(1, len(parts) + 1):
                logger.info("Remove partition {0}", i)
                shellutil.run("parted {0} rm {1}".format(device, i))

            logger.info("Creating new GPT partition")
            shellutil.run("parted {0} mkpart primary 0% 100%".format(device))

            logger.info("Format partition [{0}]", mkfs_string)
            shellutil.run(mkfs_string)
    else:
        logger.info("GPT not detected, determining filesystem")
        ret = shellutil.run_get_output("sfdisk -q -c {0} 1".format(device))
        ptype = ret[1].rstrip()
        if ptype == "7" and fs != "ntfs":
            # partition type 7 == NTFS; convert to Linux (83) and reformat
            logger.info("The partition is formatted with ntfs, updating "
                        "partition type to 83")
            shellutil.run("sfdisk -c {0} 1 83".format(device))
            logger.info("Format partition [{0}]", mkfs_string)
            shellutil.run(mkfs_string)
        else:
            logger.info("The partition type is {0}", ptype)

    mount_options = conf.get_resourcedisk_mountoptions()
    mount_string = self.get_mount_string(mount_options,
                                         partition,
                                         mount_point)
    logger.info("Mount resource disk [{0}]", mount_string)
    ret = shellutil.run(mount_string, chk_err=False)
    if ret:
        # Some kernels seem to issue an async partition re-read after a
        # 'parted' command invocation. This causes mount to fail if the
        # partition re-read is not complete by the time mount is
        # attempted. Seen in CentOS 7.2. Force a sequential re-read of
        # the partition and try mounting.
        logger.warn("Failed to mount resource disk. "
                    "Retry mounting after re-reading partition info.")
        if shellutil.run("sfdisk -R {0}".format(device), chk_err=False):
            shellutil.run("blockdev --rereadpt {0}".format(device),
                          chk_err=False)
        ret = shellutil.run(mount_string, chk_err=False)
        if ret:
            logger.warn("Failed to mount resource disk. "
                        "Attempting to format and retry mount.")
            shellutil.run(mkfs_string)
            ret = shellutil.run(mount_string)
            if ret:
                raise ResourceDiskError("Could not mount {0} "
                                        "after syncing partition table: "
                                        "{1}".format(partition, ret))

    logger.info("Resource disk {0} is mounted at {1} with {2}",
                device,
                mount_point,
                fs)
    return mount_point