def write_block(self, block):
    """Write one block of FTP data to the currently open output file.

    This is invoked as an FTP download callback, so it must never raise:
    if no file is open it logs the anomaly instead of throwing.
    """
    # idiom fix: compare against None with "is not", not "!="
    if self._file is not None:
        self._file.write(block)
    else:
        log_without_throw("? FtpBinaryDataWriter has no file?\n", self.log, WriteToStdout=True)
def mount_network_fileshare(mount_point, mountable, log=None):
    """Mount the CIFS share *mountable* at local directory *mount_point*.

    Returns *mount_point* on success, or None when either argument is None
    (silently absorbed) or when the directory already exists (assumed to be
    an existing mount; skipped).
    """
    if mount_point is None or mountable is None:
        # calling code wishes to do this and have it absorbed so that calling
        # code can iterate through objects, silently doing nothing for
        # nonexistent ones
        return None
    elif not os.path.isdir(mount_point):
        r, out = call_subprocess("sudo mkdir {0}".format(mount_point), log)
        ErrorInterpreter(r, "mkdir", log, out).check()
        Creds = get_credentials_for_server(mountable)
        LocalCreds = get_credentials_for_local()
        BaseCmdLine = "sudo mount.cifs {0} {1}".format(mountable, mount_point)
        if Creds.Domain is None:
            UNamePasswd = "-o user={0},pass={1}".format(Creds.Username, Creds.Password)
        else:
            # BUG FIX: the option was misspelled "doman=", so mount.cifs never
            # received the domain for domain-credentialed servers
            UNamePasswd = "-o user={0},domain={1},pass={2}".format(Creds.Username, Creds.Domain, Creds.Password)
        if LocalCreds.uid is None:
            CIFSMountCmdline = "{0} {1}".format(BaseCmdLine, UNamePasswd)
        else:
            # map the mounted files to the local user so they are accessible
            CIFSMountCmdline = "{0} {1},uid={2},gid={3}".format(BaseCmdLine, UNamePasswd, LocalCreds.uid, LocalCreds.gid)
        r, out = call_subprocess(CIFSMountCmdline, log)
        ErrorInterpreter(r, "mount.cifs", log, out).check()
        return mount_point
    else:
        log_without_throw("skipping mounting of duplicate directory {0}\n".format(mountable), log, WriteToStdout=False)
        return None
def dir_chmod(dest_dir, log=None):
    """Recursively make everything under *dest_dir* world-writable.

    Returns the (return-code, output) pair from the chmod subprocess.
    """
    cmdline = "chmod a+w -R {}/*".format(dest_dir)
    log_without_throw("calling {}".format(cmdline), log)
    result, output = call_subprocess(cmdline, log, _timeout=300)
    log_without_throw("got back r: {}, out {}".format(result, output), log)
    ErrorInterpreter(result, "chmod", log, output).check()
    return result, output
def call_subprocess(cmd, log=None, _timeout=None, capture_output=True):
    """Run *cmd* through mchammer.ex with a timeout (default 300 seconds).

    A nonzero return code is logged but not raised; callers are expected to
    feed the (return-code, output) pair to ErrorInterpreter themselves.
    """
    timeout = 300 if _timeout is None else _timeout
    log_without_throw("mchammer.ex: timeout {0}, cmd {1}\n".format(timeout, cmd), log)
    r, out = mchammer.ex.ex(timeout, cmd, buffer_output_in_memory=capture_output)
    if r != 0:
        log_without_throw("Error Detected {0}\n".format(r), log)
    return r, out
def source_can_be_handled(self):
    """Return True when this ISO's server is an RMS-type server we can fetch from."""
    if not server_is_rms_type(self.get_server_name()):
        # unknown server type: report it loudly but don't raise
        log_without_throw(
            "dawn_treader is unaware of the process used to handle fetching {}\n"
            .format(self.iso_path), self.log, WriteToStdout=True)
        return False
    return True
def rms_iso_is_cached(storage, remote_path, name, Log=None):
    """Return True when ISO *name* already exists under local *storage*.

    remote path isn't currently used but may be necessary later if anyone
    wants to deconstruct whether this is actually RMS or not.
    """
    # BUG FIX: the entry trace previously dropped the caller's Log handle,
    # so this line never reached the run log
    log_without_throw("in rms_iso_is_cached", Log)
    if os.path.exists(os.path.join(storage, name)):
        log_without_throw("{} appears to be cached in {}, using that".format(
            name, storage), Log, WriteToStdout=True)
        return True
    return False
def umount_failure(self):
    """Log a human-readable explanation of a umount failure (never raises)."""
    # BUG FIX: the EPERM branch was a separate "if", so its message was
    # unconditionally overwritten by the trailing "else" of the second chain;
    # one if/elif chain makes each errno produce its own message.
    if self.errno == errno.EPERM:
        err = "You aren't allowed to run umount\n"
    elif self.errno == errno.EINVAL:
        err = "You have asked umount to unmount something that isn't a mount point.\n"
    elif self.errno == errno.ENOENT:
        err = "You have asked umount to unmount something that it can't find.\n"
    else:
        err = "Umount isn't happy, and it says its reason is {0}\n".format(
            self.errno)
    # failure to mount is recoverable, please don't throw
    log_without_throw(self.append_output(err), self.log)
def unmount_network_fileshare(mount_point, log=None):
    """Best-effort unmount and cleanup of *mount_point*.

    The local ISO storage directory is never unmounted. On a successful
    umount the directory is removed; otherwise it is renamed so later runs
    aren't confused by lingering data. All failures are logged, not raised.
    """
    forbidden = get_local_iso_storage()
    if mount_point == forbidden:
        # BUG FIX: this trace previously forgot to pass the caller's log
        log_without_throw("skipping unmounting of forbidden directory {}".format(forbidden), log)
    elif os.path.isdir(mount_point):
        try:
            r, out = call_subprocess("sudo umount {0}".format(mount_point), log)
            ErrorInterpreter(r, "umount", log, out).check()
            # only try to rm if you successfully unmounted, to avoid accidentally doing very bad things
            # however, rename it, to prevent future runs from being confused by lingering data
            if r == 0:
                r, out = call_subprocess("sudo rm -rf {0}".format(mount_point), log)
                ErrorInterpreter(r, "rm", log, out).check()
            else:
                log_without_throw("declining to delete {0} because it wasn't successfully unmounted\n".format(mount_point), log)
                unmountable_rename = "{0}__unmountable".format(mount_point)
                r, out = call_subprocess("sudo mv {0} {1}".format(mount_point, unmountable_rename), log)
                log_without_throw("moving {0} to {1} because it's unmountable for some reason".format(mount_point, unmountable_rename), log, WriteToStdout=True)
                ErrorInterpreter(r, "mv", log, out).check()
        except Exception as e:
            # BUG FIX: was a bare "except: pass", which also swallowed
            # KeyboardInterrupt/SystemExit and hid all diagnostics.
            # Keep the deliberate best-effort behavior but record the error.
            log_without_throw("ignoring error while unmounting {0}: {1}\n".format(mount_point, e), log)
    else:
        log_without_throw("skipping unmounting of already unmounted directory {0}\n".format(mount_point), log, WriteToStdout=False)
def rsync_file_with_retries(src_file, dest_dir, override_perms, log=None):
    """rsync one file into *dest_dir*, retrying on rsync's hangup error (20).

    After the copy, permissions are forced to 666 when *override_perms* is
    True or the file is on the permission-override list. Returns the last
    (return-code, output) pair.
    """
    RSYNC_TIMEOUT = 240
    HANGUP_ERROR = 20
    MAX_RETRIES = 10
    rsync_cmdline = "rsync -av -W \"{0}\" \"{1}\"".format(src_file, dest_dir)
    chperm_cmdline = "chmod 666 \"{}\"".format(os.path.join(dest_dir, strip_filename(src_file)))
    r, out = call_subprocess(rsync_cmdline, log, capture_output=False, _timeout=RSYNC_TIMEOUT)
    attempt = 0
    while r == HANGUP_ERROR and attempt < MAX_RETRIES:
        log_without_throw("rsync hangup error detected on {0}, retrying\n".format(src_file), log, WriteToStdout=True)
        # retries capture output so the hangup cause can be inspected
        r, out = call_subprocess(rsync_cmdline, log, capture_output=True, _timeout=RSYNC_TIMEOUT)
        log_without_throw("output: {0}\n".format(out), log)
        attempt += 1
    if r == HANGUP_ERROR:
        log_without_throw("BE WORRIED: rsync error {0} encountered after {1} retries\n".format(HANGUP_ERROR, MAX_RETRIES), log, WriteToStdout=True)
    ErrorInterpreter(r, "rsync", log, out).check()
    if override_perms is True or file_requires_permission_override(strip_filename(src_file)):
        log_without_throw("overriding permissions on {}\n".format(src_file), log)
        r, out = call_subprocess(chperm_cmdline, log, capture_output=True)
        ErrorInterpreter(r, "chmod", log, out).check()
    return r, out
def retrieve_source(self):
    """Download this ISO from its RMS FTP server into local storage.

    Returns the local destination path on success, or None when the server
    rejects the transfer (ftplib.error_perm); the partial file is removed
    in that case. Other FTP errors propagate to the caller.
    """
    server_name, server_subpath = self.deconstruct_server_name()
    src_file = os.path.join(server_subpath, self.iso_name)
    dst_file = os.path.join(self.storage, self.iso_name)
    log_without_throw("retrieve_source: {} -> {}".format(
        src_file, dst_file), self.log, WriteToStdout=True)
    rms_ftp_credential_file = get_rms_credential_file()
    rms_ftp_creds = dawn_treader_credentials.get_credentials_from_file(
        rms_ftp_credential_file)
    # fall back to built-in defaults when the credential file yields nothing
    if rms_ftp_creds.Username is None:
        rms_ftp_creds = dawn_treader_credentials.DefaultCredentials
    # _datawriter appears to be a module-level FtpBinaryDataWriter that
    # handle_ftp_download feeds as retrbinary blocks arrive — TODO confirm
    _datawriter.open_file(dst_file)
    _datawriter.add_log(self.log)
    ftp = ftplib.FTP("{}".format(server_name))
    try:
        ftp.login("{}".format(rms_ftp_creds.Username),
                  "{}".format(rms_ftp_creds.Password))
        log_without_throw("  logged in: {}".format(server_name), self.log, WriteToStdout=True)
        ftp.cwd(RMS_PATH)
        log_without_throw("  retrieving", self.log, WriteToStdout=True)
        ftp.retrbinary("RETR {}".format(src_file), handle_ftp_download)
        # NOTE(review): ".format(dst_file)" on a string with no placeholder
        # is a no-op — presumably meant to include the destination path
        log_without_throw("  done".format(dst_file), self.log, WriteToStdout=True)
        _datawriter.close_file()
        return dst_file
    except ftplib.error_perm as e:
        log_without_throw("FTP error: {}".format(e.args), self.log, WriteToStdout=True)
        _datawriter.close_file()
        # don't leave a truncated ISO behind to be mistaken for a cached copy
        os.remove(dst_file)
        return None
    finally:
        # BOO! BOO! BOO!
        #
        # a well behaved client calles ftp.quit(). but it turns out
        # fcmrms03 hangs if you do that.
        #
        # this is one of those cases where we can't play nice if they can't play nice.
        ftp.close()
def modify_and_copy_subdir_xml(self, destdir, subdir):
    """Rewrite the subdir's XML descriptor and copy it up into *destdir*.

    Chooses the flame9 rewrite when an Installer ISO is present. Any
    failure is logged and absorbed, never raised.
    """
    # BUG FIX: xml_name must exist before the try, otherwise an exception
    # raised by get_subdir_xml made the except handler itself NameError
    xml_name = None
    try:
        full_path_to_subdir = os.path.join(destdir, subdir)
        xml_name = self.get_subdir_xml(full_path_to_subdir)
        xml = os.path.join(full_path_to_subdir, xml_name)
        # idiom fix: "is None" instead of "== None"
        if self.Installer.iso_name is None:
            retval = replace_information_in_xml_file(subdir, xml)
        else:
            retval = replace_information_in_xml_file_flame9(subdir, xml)
        log_without_throw(retval, self.Log)
        new_xml = os.path.join(destdir, xml_name)
        self.simple_file_copy(xml, new_xml)
    except Exception as e:
        log_without_throw("EEEK! Unable to replace information in xml_file {} because {}".format(xml_name, e), self.Log)
def obtain_local_copy(self):
    """Fetch a local copy of the ISO when its source is remote and supported.

    Returns the local path from retrieve_source, or None (after logging)
    when retrieval fails. Throws via log_and_throw when the source cannot
    be handled at all.
    """
    # guard clause replaces the nested if/else; "is None" replaces "== None"
    if not (self.validate_storage_exists() and self.source_is_remote()
            and self.source_can_be_handled()):
        log_and_throw("Unable to obtain local copy of ISO: {}".format(
            os.path.join(self.iso_path, self.iso_name)), self.log,
            WriteToStdout=True)
        return None
    r = self.retrieve_source()
    if r is None:
        log_without_throw(
            "unable to obtain local copy of ISO: {}".format(
                os.path.join(self.iso_path, self.iso_name)),
            self.log, WriteToStdout=True)
    return r
def unmount_isos(self):
    """Tear everything down: DVD images first, then each ISO's source mount."""
    # first pass: unmount the loopback-mounted DVD images (best effort)
    for img in self.isos():
        if img.dvd_mount_pt is not None and img.iso_name is not None:
            try:
                unmount_dvd_image(img.dvd_mount_pt, self.Log)
                img.dvd_mount_pt = None
            except:
                log_without_throw("failed to unmount {}, ignoring".format(img.dvd_mount_pt), self.Log)
    # second pass: undo whatever made each ISO locally visible
    for loc in self.isos():
        if loc.mounting_type == _FTP_MOUNT:
            remove_local_copy_of_iso(loc.iso_mount_pt, loc.iso_name)
            loc.iso_mount_pt = None
        elif loc.mounting_type == _NETAPP_MOUNT:
            # it's an error to remove the local copy of the ISO, and the
            # "mount_point" in this sense is a local dir rather than a mount
            # so set it to nil :)
            loc.iso_mount_pt = None
        # anything still holding a mount point was CIFS-mounted
        if loc.iso_mount_pt is not None:
            unmount_network_fileshare(loc.iso_mount_pt, self.Log)
            loc.iso_mount_pt = None
def check(self):
    """Dispatch to the failure handler matching self.cmd when errno is nonzero."""
    if self.errno == 0:
        return
    log_without_throw(
        "dawn_treader_error_interpreter checking cmd {0}, errno {1}\n".
        format(self.cmd, self.errno), self.log)
    # table dispatch replaces the original if/elif chain
    handlers = {
        "rsync": self.rsync_failure,
        "mkdir": self.mkdir_failure,
        "mount.cifs": self.mount_cifs_failure,
        "umount": self.umount_failure,
        "rm": self.rm_failure,
        "mount": self.mount_failure,
        "mv": self.mv_failure,
    }
    handlers.get(self.cmd, self.unknown_failure)()
def build(self):
    """Assemble the Windows USB installer from the mounted product ISO set.

    Copies each ISO's contents into the USB destination; on a
    USBInstallBuilderError the message is written to the builder log and
    the process exits with that value.
    """
    try:
        log_without_throw("building windows image", self.builder_log)
        self.mount_isos()
        log_without_throw("trying to copy_fiery_iso", self.builder_log)
        # the installer ISO is optional; everything below it is mandatory
        if self.product_isos.Installer.iso_name is not None:
            self.product_isos.copy_installer_iso(
                self.destination.USBDestination)
        self.product_isos.copy_fiery_iso(self.destination.USBDestination)
        self.product_isos.copy_windows_isos(
            self.destination.USBDestination)
        self.product_isos.copy_and_modify_usersw_iso(
            self.destination.USBDestination)
        self.product_isos.copy_and_modify_adobe_iso(
            self.destination.USBDestination)
        self.product_isos.copy_and_modify_fcps_iso(
            self.destination.USBDestination)
        self.product_isos.copy_windows_permission_override_behavior(
            self.destination.USBDestination)
        msg = "Success!\nlog and manifest created at {}\nUSB Installer created at {}".format(
            self.destination.FullDestination, self.destination.USBDestination)
        log_without_throw(msg, self.builder_log, WriteToStdout=True)
    # PORTABILITY FIX: "except X, e" is Python-2-only syntax; "as e" is
    # equivalent and valid on Python 2.6+ and Python 3
    except USBInstallBuilderError as e:
        self.builder_log.write(e.value)
        exit(e.value)
def mount_dvd_image(mount_point, dvd_image, log=None, mount_udf=False):
    """Loopback-mount *dvd_image* at *mount_point*.

    Returns None silently when either argument is None, so callers can
    iterate through partially-populated objects.

    The initial implementation just did a loopback mount without specifying
    a type, but unless you explicitly suppress the rock ridge extensions,
    mount handles the Joliet filenames incorrectly — so the non-UDF path
    mounts as iso9660 with -o norock (workaround on advice from trux).
    """
    log_without_throw("mount_dvd_image: mount_point {} dvd_image {}\n".format(mount_point, dvd_image), log)
    if mount_point is None or dvd_image is None:
        return None
    r, out = call_subprocess("sudo mkdir {0}".format(mount_point), log)
    ErrorInterpreter(r, "mkdir", log, out).check()
    if mount_udf:
        mount_cmd = "sudo mount -o loop {0} {1}".format(dvd_image, mount_point)
    else:
        mount_cmd = "sudo mount -t iso9660 -o loop -o norock {0} {1}".format(dvd_image, mount_point)
    r, out = call_subprocess(mount_cmd, log)
    ErrorInterpreter(r, "mount", log, out).check()
def iterative_rsync(src_dir, dest_dir, override_perms, log=None):
    """Recursively mirror *src_dir* into *dest_dir*, one rsync call per file.

    Directories are created locally (existing ones tolerated) and recursed
    into; any other error is logged and re-raised via log_and_throw.
    """
    try:
        entries = os.listdir(src_dir)
        log_without_throw("--iterative_rsync: ({0}) : {1}---\n".format(src_dir, entries), log)
        for entry in entries:
            src_path = os.path.join(src_dir, entry)
            if not os.path.isdir(src_path):
                # plain file: copy it with the retry wrapper
                r, out = rsync_file_with_retries(os.path.join(src_dir, entry), dest_dir, override_perms, log)
                continue
            dst_path = os.path.join(dest_dir, entry)
            try:
                log_without_throw(" making directory {0}\n".format(dst_path), log)
                os.makedirs(dst_path)
            except OSError as e:
                # the directory probably already exists; note it and move on
                log_without_throw("suppressing OS Error {0} from os.makedirs\n".format(e.errno), log)
            iterative_rsync(src_path, dst_path, override_perms, log)
    except Exception as e:
        log_and_throw("iterative rsync encountered {}".format(e))
def mount_isos(self):
    """Mount the product ISO set, tracing before and after in the builder log."""
    log_without_throw("mounting", self.builder_log)
    self.product_isos.mount_isos()
    log_without_throw("mounted", self.builder_log)
def rm_failure(self):
    """Record an rm failure without raising; failure to rm is recoverable."""
    message = "ERROR: Unspecified error {0} using --rm--\n".format(self.errno)
    log_without_throw(self.append_output(message), self.log)
def close_file(self):
    """Close the current output file, if any, and clear the writer's state."""
    # idiom fix: "is not None" instead of "!= None"
    if self._file is not None:
        self._file.close()
        log_without_throw("closing {}".format(self.filename), self.log)
        self._file = None
        self.filename = None
def copy_tree(src_dir, dest_dir, override_perms, log=None):
    """Copy the tree at *src_dir* into *dest_dir* via per-file rsync calls.

    DANGEROUS BUG - in *theory*, iterative_rsync with override_perms set to
    TRUE ought to do what we want, but in practice, when copying an ISO on
    ubuntu 14.04, the process runs out of open file handles.
    So ... don't use this that way.
    """
    log_without_throw("copy_tree {0} {1}\n".format(src_dir, dest_dir), log)
    iterative_rsync(src_dir, dest_dir, override_perms, log)
def mv_failure(self):
    """Record an mv (rename) failure without raising; the rename is skipped."""
    message = "ERROR: unexpected problem renaming directory with mv. Skipping rename.\n"
    log_without_throw(self.append_output(message), self.log)
def unknown_failure(self):
    """Last-resort handler for commands check() doesn't recognize: log and raise."""
    log_without_throw("huh? {}".format(self.output), self.log)
    # BUG FIX: traceback.print_stack() writes to stderr and returns None, so
    # the old code logged the literal string "None"; format_stack() returns
    # the stack as a list of strings we can actually log
    log_without_throw("{}".format("".join(traceback.format_stack())), self.log)
    log_and_throw("WTFLOLBBQ", self.log)
def set_log(self, Log):
    """Attach *Log* to this ProductISOSet and trace every ISO's loc/name pair."""
    self.Log = Log
    # BUG FIX: the format indices were wrong ("OS2 = {3}/{4}" and
    # "usersw = {4}/{5}" reused earlier arguments while {6} and {7} went
    # unused), so OS2 and usersw were reported with the wrong values
    log_without_throw(
        "Log attaching to ProductISOSet. Fiery = {0}/{1}, OS1 = {2}/{3}, OS2 = {4}/{5}, usersw = {6}/{7}".format(
            self.Fiery.iso_loc, self.Fiery.iso_name,
            self.OS1.iso_loc, self.OS1.iso_name,
            self.OS2.iso_loc, self.OS2.iso_name,
            self.UserSoftware.iso_loc, self.UserSoftware.iso_name),
        self.Log, WriteToStdout=False)
def mount_isos(self):
    """Make every ISO in the set locally visible, then loopback-mount each image.

    For each non-local ISO, three source strategies are tried in order:
    a netapp-style local directory, the RMS FTP subsystem (cached copy or
    fresh download), and finally a CIFS network mount. A second pass then
    mounts each DVD image from wherever the first pass put it.

    NOTE(review): the original was collapsed onto one line, so the exact
    nesting below is a best-effort reconstruction — confirm against history.
    """
    for mounting_loc in self.isos():
        log_without_throw("mounting {}".format(mounting_loc.iso_name), self.Log)
        # mounted stays None until some strategy succeeds
        mounted = None
        if mounting_loc.iso_loc != _LOCAL:
            log_without_throw("iso loc is not local", self.Log)
            if self.should_try_netapp_mount(mounted, mounting_loc.iso_loc) is True:
                # ok, in theory this should just set the mounting_loc to the correct value for the netapp mount
                log_without_throw("trying to use local_rms_netapp_mount capacity to load {}".format(mounting_loc.iso_loc), self.Log)
                # NOTE(review): "mounted = None" here looks like a placeholder
                # for a netapp mount that was never implemented — confirm
                mounted = None
                if mounted == None:
                    mounting_loc.mounting_type = _NETAPP_MOUNT
            if self.using_rms_ftp_subsystem(mounted, mounting_loc.iso_loc) is True:
                log_without_throw("using the rms ftp subsystem", self.Log)
                local_storage = "/tmp"
                try:
                    local_storage = get_local_iso_storage()
                except Exception as e:
                    log_and_throw("exception {}".format(e), self.Log)
                if self.rms_iso_is_cached(local_storage, mounting_loc.iso_loc, mounting_loc.iso_name) is True:
                    # a previous run already downloaded this ISO; reuse it
                    log_without_throw("rms iso seems to be cached", self.Log)
                    mounting_loc.iso_mount_pt = local_storage
                    mounted = True
                    mounting_loc.mounting_type = _FTP_MOUNT
                else:
                    log_without_throw("trying to use rms ftp capacity to load {}".format(mounting_loc.iso_loc), self.Log)
                    mounting_loc.iso_mount_pt = local_storage
                    mounted = ftp_local_copy_of_iso(mounting_loc.iso_mount_pt, mounting_loc.iso_loc, mounting_loc.iso_name, self.Log)
                    if mounted is not None:
                        mounting_loc.mounting_type = _FTP_MOUNT
            if mounted == None:
                # last resort: CIFS-mount the remote share that holds the ISO
                log_without_throw("trying to use cifs mount capacity to load {}".format(mounting_loc.iso_loc), self.Log, WriteToStdout=True)
                mounting_loc.fixup_name_for_cifs()
                mounting_loc.iso_mount_pt = generate_iso_source_location_mount_point_name(mounting_loc.iso_loc)
                mounted = mount_network_fileshare(mounting_loc.iso_mount_pt, mounting_loc.iso_loc, self.Log)
                if mounted != None:
                    mounting_loc.mounting_type = _CIFS_MOUNT
    # FIXME: it's really not clear that this works at all if the mounting_loc's iso_loc is _LOCAL.
    # unfortunately, I also can't imagine what the workflow was where that was desired.
    for mounting_img in self.isos():
        if mounting_img.iso_name != None:
            log_without_throw("generating dvd image mount point name", self.Log)
            mounting_img.dvd_mount_pt = generate_dvd_image_mount_point_name(mounting_img.iso_name)
            log_without_throw("trying to mount {} image {}".format(mounting_img.iso_mount_pt, mounting_img.iso_name), self.Log)
            log_without_throw("dvd_mount_pt = {}".format(mounting_img.dvd_mount_pt), self.Log)
            mount_dvd_image(mounting_img.dvd_mount_pt, os.path.join(mounting_img.iso_mount_pt, mounting_img.iso_name), self.Log, mounting_img.use_udf)