def test_zero_superblock(self):

    log.info("Test execution of zero_superblock with a MdAdm object ...")

    from pb_blockdev.md.admin import MdAdm

    try:
        mdadm = MdAdm(
            appname=self.appname,
            verbose=self.verbose,
        )
    except CommandNotFoundError as e:
        log.info(str(e))
        return
    if self.verbose > 3:
        log.debug("MdAdm object:\n%s", pp(mdadm.as_dict(True)))

    self._create_new_loop()
    loop = self.loop_devs[0]

    # Dumping the superblock needs root privileges.
    no_dump = True
    if os.geteuid() == 0:
        no_dump = False

    mdadm.zero_superblock(loop, no_dump=no_dump)

def lun_blockdevicename(self, bus_id, target_id, lun_id):
    """
    Returns the name of the appropriate blockdevice, if one exists,
    otherwise None is returned.
    """

    bdir = self.lun_block_dir(bus_id, target_id, lun_id)
    if not os.path.exists(bdir):
        return None

    pattern = os.path.join(bdir, '*')
    files = glob.glob(pattern)
    if self.verbose > 3:
        LOG.debug(_("Found blockdevice dirs: %s"), pp(files))
    if not files:
        return None

    bdevdir = files[0]
    bdevname = os.path.basename(bdevdir)
    if self.verbose > 3:
        msg = _(
            "Found blockdevice %(bd)r for '%(h)d:%(b)d:%(t)d:%(l)d'.") % {
            'bd': bdevname, 'h': self.host_id, 'b': bus_id,
            't': target_id, 'l': lun_id}
        LOG.debug(msg)

    return bdevname

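# For illustration only: a minimal, self-contained sketch of the same
# sysfs lookup. It assumes the conventional Linux path
# /sys/bus/scsi/devices/H:B:T:L/block/* -- the real lun_block_dir()
# above may resolve the directory differently.

def lun_blockdevicename_sketch(host_id, bus_id, target_id, lun_id):
    """Return the blockdevice name for a given H:B:T:L, or None."""
    bdir = '/sys/bus/scsi/devices/%d:%d:%d:%d/block' % (
        host_id, bus_id, target_id, lun_id)
    if not os.path.isdir(bdir):
        return None
    entries = glob.glob(os.path.join(bdir, '*'))
    if not entries:
        return None
    # The directory name below 'block' is the kernel device name.
    return os.path.basename(entries[0])

# Hypothetical usage -- the name of the device behind LUN 0:0:0:0,
# e.g. 'sda', or None on a machine without that LUN:
# print(lun_blockdevicename_sketch(0, 0, 0, 0))
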
def test_mdadm_lock(self):

    log.info("Test global lock of a GenericMdHandler object ...")

    from pb_blockdev.md import GenericMdHandler

    try:
        hdlr = GenericMdHandler(
            appname=self.appname,
            verbose=self.verbose,
        )
    except CommandNotFoundError as e:
        log.info(str(e))
        return

    hdlr.lock_global()
    msg = "Object hdlr.global_lock is not an instance of PbLock."
    self.assertIsInstance(hdlr.global_lock, PbLock, msg)
    lockfile = hdlr.global_lock.lockfile
    log.debug("Lockfile: %r.", lockfile)
    if not os.path.exists(lockfile):
        msg = "Lockfile %r does not exist."
        self.fail(msg % (lockfile))
    if self.verbose > 2:
        log.debug(
            "Global lock object:\n%s", pp(hdlr.global_lock.as_dict(True)))

    hdlr.global_lock = None
    if os.path.exists(lockfile):
        msg = "Lockfile %r still exists."
        self.fail(msg % (lockfile))

def test_search_blockdevices(self):

    log.info("Test searching for target blockdevices ...")

    from pb_blockdev.scsi_host import get_scsi_hosts
    from pb_blockdev.base import BlockDevice

    scsi_hosts = get_scsi_hosts(appname=self.appname, verbose=self.verbose)
    if not scsi_hosts:
        log.debug("No SCSI hosts found.")

    first = True
    for scsi_host in scsi_hosts:
        if not scsi_host.luns:
            continue
        for hbtl in scsi_host.luns:
            self.assertEqual(hbtl.host, scsi_host.host_id)
            blockdev = scsi_host.lun_blockdevice(
                hbtl.bus, hbtl.target, hbtl.lun)
            if not blockdev:
                continue
            log.debug("Found blockdevice %r for '%s'.", blockdev, hbtl)
            self.assertIsInstance(
                blockdev, BlockDevice,
                ("Object %r should be a BlockDevice." % (blockdev)))
            if self.verbose > 2:
                if self.verbose > 3 or first:
                    log.debug(
                        "Blockdevice:\n%s", pp(blockdev.as_dict(True)))
            first = False

def _get_new_backup_dir(self, cur_backup_dirs=None):

    # Retrieving new backup directory
    if cur_backup_dirs is None:
        cur_backup_dirs = []
        dlist = self.dir_list()
        for entry in dlist:
            entry_stat = dlist[entry]
            if self.verbose > 2:
                LOG.debug("Checking entry %r ...", pp(entry))
            if not stat.S_ISDIR(entry_stat.st_mode):
                if self.verbose > 2:
                    LOG.debug("%r is not a directory.", entry)
                continue
            cur_backup_dirs.append(entry)

    cur_date = datetime.utcnow()
    backup_dir_tpl = cur_date.strftime('%Y-%m-%d_%%02d')
    LOG.debug("Backup directory template: %r", backup_dir_tpl)

    new_backup_dir = None
    i = 0
    found = False
    while not found:
        new_backup_dir = backup_dir_tpl % (i)
        if new_backup_dir not in cur_backup_dirs:
            found = True
        i += 1

    self.new_backup_dir = new_backup_dir
    LOG.info("New backup directory: %r", str(self.new_backup_dir))

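# A quick illustration of the directory-name template used above:
# strftime() turns the escaped '%%02d' into a literal '%02d', which is
# then filled with a counter until an unused name is found.
#
#     from datetime import datetime
#
#     tpl = datetime(2024, 9, 1).strftime('%Y-%m-%d_%%02d')
#     # tpl == '2024-09-01_%02d'
#     # tpl % 0 == '2024-09-01_00'
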
def connect(self):
    """
    Establish a connection with the PostgreSQL database.
    """

    if self.connection and not self.connection.closed:
        msg = _(
            "Trying to establish a connection while an existing "
            "database connection is still open.")
        raise BaseDbError(msg)

    self.check_password()

    c_params = {}
    c_params['host'] = self._db_host
    if self._db_port and self._db_port != 5432:
        c_params['port'] = self._db_port
    c_params['database'] = self._db_schema
    c_params['user'] = self._db_user
    c_params['password'] = self._db_passwd
    for key in self._connect_params:
        c_params[key] = self._connect_params[key]

    log.debug(
        _("Used parameters to connect to database:") + "\n%s",
        pp(c_params))

    connection = psycopg2.connect(**c_params)
    self.connection = connection

    if self.simulate:
        log.debug(_("Setting DB connection to readonly."))
        self.connection.set_session(readonly=True)

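# For illustration, a self-contained sketch of how the keyword
# arguments for psycopg2.connect() are assembled above: the default
# port 5432 is omitted, and any extra connect parameters are merged in
# last. Names and defaults here are assumptions for the example only.

def build_connect_params(host, user, password, database,
                         port=5432, extra_params=None):
    """Build the kwargs dict to be passed to psycopg2.connect()."""
    c_params = {
        'host': host,
        'database': database,
        'user': user,
        'password': password,
    }
    # psycopg2 uses port 5432 by default, so only pass a non-default one.
    if port and port != 5432:
        c_params['port'] = port
    c_params.update(extra_params or {})
    return c_params

# Hypothetical usage:
# psycopg2.connect(**build_connect_params(
#     'db.example.com', 'backup', 'secret', 'backups', port=5433))
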
def test_examine(self):

    from pb_blockdev.base import BlockDevice
    from pb_blockdev.md.admin import MdAdm

    try:
        mdadm = MdAdm(
            appname=self.appname,
            verbose=self.verbose,
        )
    except CommandNotFoundError as e:
        log.info(str(e))
        return

    self._create_new_loop()
    loop = self.loop_devs[0]

    log.info("Test examining MD superblock of an empty device.")
    sb = mdadm.examine(loop)
    self.assertIsNone(sb, "There may be no superblock on an empty device.")

    bd_dir = os.sep + os.path.join('sys', 'block')
    if not os.path.isdir(bd_dir):
        self.skipTest("Directory %r not found." % (bd_dir))

    md_dev_dirs = glob.glob(os.path.join(bd_dir, 'md*'))
    if not md_dev_dirs:
        self.skipTest("No MD raids with components found.")

    components = []
    for md_dev_dir in md_dev_dirs:
        cdirs = glob.glob(os.path.join(md_dev_dir, 'md', 'dev-*'))
        for cdir in cdirs:
            block_link = os.path.join(cdir, 'block')
            devname = os.path.basename(os.readlink(block_link))
            components.append(devname)
    if not components:
        self.skipTest("No MD component devices found.")
    if self.verbose > 2:
        log.debug("Found MD component devices: %s", pp(components))

    index = random.randint(0, len(components) - 1)
    devname = components[index]
    blockdev = BlockDevice(
        name=devname,
        appname=self.appname,
        verbose=self.verbose,
    )
    log.debug("Examining blockdevice %r ...", blockdev.device)
    if self.verbose > 2:
        log.debug("BlockDevice object to examine:\n%s", blockdev)

    sb = mdadm.examine(blockdev)
    if self.verbose > 2:
        log.debug(
            "Got MD superblock information of %r:\n%s",
            blockdev.device, sb)

    log.debug("Finished examining.")

def test_scsi_host_object(self):

    log.info("Test init of a ScsiHost object ...")

    from pb_blockdev.scsi_host import ScsiHost

    scsi_host = ScsiHost(0, appname=self.appname, verbose=self.verbose)
    if self.verbose > 1:
        log.debug("repr of ScsiHost object: %r", scsi_host)
    if self.verbose > 2:
        log.debug("ScsiHost object:\n%s", pp(scsi_host.as_dict(True)))

def test_get_all_scsi_hosts(self):

    log.info("Test getting of all ScsiHosts ...")

    from pb_blockdev.scsi_host import ScsiHost
    from pb_blockdev.scsi_host import get_scsi_hosts

    scsi_hosts = get_scsi_hosts(appname=self.appname, verbose=self.verbose)
    if self.verbose:
        # A list comprehension instead of map(), so the debug output
        # shows the host names under Python 3, too.
        hostnames = [x.hostname for x in scsi_hosts]
        log.debug("Got ScsiHost list:\n%s", pp(hostnames))

    for host in scsi_hosts:
        self.assertIsInstance(
            host, ScsiHost, ("Object %r should be a ScsiHost." % (host)))

def _init_megacli(self):
    """
    Searches in the path for the MegaCLI command and sets self.megacli
    to the found path.

    @raise CommandNotFoundError: if the MegaCLI could not be found
    """

    if self.verbose > 2:
        log.debug(_("Searching for the MegaCLI command ..."))

    paths = caller_search_path()
    add_paths = (
        os.sep + os.path.join('opt', 'MegaRAID', 'MegaCli'),
        os.sep + os.path.join('opt', 'lsi', 'megacli'),
        os.sep + os.path.join('opt', 'megacli'),
    )
    for d in add_paths:
        if os.path.isdir(d) and d not in paths:
            paths.append(d)

    if self.verbose > 3:
        log.debug(_("Searching command in paths:") + "\n" + pp(paths))

    commands = ('MegaCli64', 'MegaCli', 'megacli')
    for cmd in commands:
        for d in paths:
            if self.verbose > 3:
                log.debug(_(
                    "Searching command %(cmd)r in %(dir)r ...") % {
                    'cmd': cmd, 'dir': d})
            p = os.path.join(d, cmd)
            if os.path.exists(p):
                if self.verbose > 2:
                    log.debug(_("Found %r ..."), p)
                if os.access(p, os.X_OK):
                    self._megacli = p
                    return
                else:
                    log.debug(_("File %r is not executable."), p)

    raise CommandNotFoundError('MegaCli')

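# For illustration, a self-contained sketch of the search strategy
# above: try several candidate command names across a list of
# directories and return the first existing, executable path. The
# candidate names and directories in the usage line are examples, not
# the handler's actual defaults.

def find_command(candidates, search_path):
    """Return the first executable path for any candidate name."""
    for cmd in candidates:
        for directory in search_path:
            p = os.path.join(directory, cmd)
            if os.path.exists(p) and os.access(p, os.X_OK):
                return p
    return None

# Hypothetical usage:
# find_command(
#     ('MegaCli64', 'MegaCli'), os.environ['PATH'].split(os.pathsep))
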
def test_handler_object(self):

    log.info("Test init of a GenericLvmHandler object ...")

    from pb_blockdev.lvm import GenericLvmHandler

    try:
        hdlr = GenericLvmHandler(
            appname=self.appname,
            verbose=self.verbose,
        )
    except CommandNotFoundError as e:
        log.info(str(e))
        return

    if self.verbose > 1:
        log.debug("repr of GenericLvmHandler object: %r", hdlr)
    if self.verbose > 2:
        log.debug("GenericLvmHandler object:\n%s", pp(hdlr.as_dict(True)))

def test_exec_df_all(self):

    log.info("Testing execution of df on all filesystems.")

    from pb_base.handler.df import DfHandler

    df = DfHandler(
        appname=self.appname,
        verbose=self.verbose,
    )

    result = df(all_fs=True)
    if self.verbose > 2:
        res = []
        for r in result:
            res.append(r.as_dict())
        log.debug("Got a result for all 'df':\n%s", pp(res))

    out = self.format_df_results(result).strip()
    log.debug("DF of all filesystems:\n%s", out)

def test_exec_df_root(self):

    log.info("Testing execution of df on the root filesystem.")

    from pb_base.handler.df import DfHandler

    df = DfHandler(
        appname=self.appname,
        verbose=self.verbose,
    )

    result = df('/')
    if self.verbose > 2:
        res = []
        for r in result:
            res.append(r.as_dict())
        log.debug("Got a result from 'df /':\n%s", pp(res))

    out = self.format_df_results(result).strip()
    log.debug("DF of root filesystem:\n%s", out)

def test_mdadm_object(self):

    log.info("Test init of a MdAdm object ...")

    from pb_blockdev.md.admin import MdAdm

    try:
        mdadm = MdAdm(
            appname=self.appname,
            verbose=self.verbose,
        )
    except CommandNotFoundError as e:
        log.info(str(e))
        return

    if self.verbose > 1:
        log.debug("repr of MdAdm object: %r", mdadm)
    if self.verbose > 2:
        log.debug("MdAdm object:\n%s", pp(mdadm.as_dict(True)))

def disk_usage(self, item):
    """
    Performs a recursive determination of the disk usage of the
    given FTP directory item. This item must be located in the
    current remote directory.
    """

    if not self.logged_in:
        msg = "Could not detect disk usage of item %r, not logged in."
        raise FTPHandlerError(msg % (item,))

    total = 0
    if six.PY2:
        total = long(0)

    if not item:
        msg = "No item to detect disk usage given."
        raise FTPHandlerError(msg)

    if self.verbose > 3:
        LOG.debug("Analyzing item:\n%s", pp(item.as_dict(short=True)))

    if not item.is_dir():
        return item.size

    if self.verbose > 2:
        msg = "Trying to detect disk usage of remote directory %r ..."
        LOG.debug(msg, item.name)

    item_dir_list = self.dir_list(item.name)
    for list_item in item_dir_list:
        if list_item.name == "." or list_item.name == "..":
            continue
        list_item.name = item.name + "/" + list_item.name
        list_item_size = self.disk_usage(list_item)
        if self.verbose > 2:
            LOG.debug(
                "Got disk usage of %r: %d Bytes.",
                list_item.name, list_item_size)
        total += list_item_size

    return total

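# A hedged usage sketch for disk_usage(): sum the sizes of all entries
# in the current remote directory of an already connected and
# logged-in FTPHandler. dir_list() and disk_usage() are the methods
# shown in this module; the handler setup itself is omitted here.

def total_remote_usage(handler):
    """Return the summed disk usage of the current remote directory."""
    total = 0
    for entry in handler.dir_list():
        if entry.name in ('.', '..'):
            continue
        total += handler.disk_usage(entry)
    return total
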
def _map_dirs2types(self, type_mapping, backup_dirs):

    re_backup_date = re.compile(r'^\s*(\d+)[_\-](\d+)[_\-](\d+)')

    for backup_dir in backup_dirs:

        match = re_backup_date.search(backup_dir)
        if not match:
            if backup_dir not in type_mapping['other']:
                type_mapping['other'].append(backup_dir)
            continue

        year = int(match.group(1))
        month = int(match.group(2))
        day = int(match.group(3))
        dt = None
        try:
            dt = datetime(year, month, day)
        except ValueError as e:
            LOG.debug(
                "Invalid date in backup directory %r: %s",
                backup_dir, str(e))
            if backup_dir not in type_mapping['other']:
                type_mapping['other'].append(backup_dir)
            continue

        weekday = dt.timetuple().tm_wday
        if dt.month == 1 and dt.day == 1:
            if backup_dir not in type_mapping['yearly']:
                type_mapping['yearly'].append(backup_dir)
        if dt.day == 1:
            if backup_dir not in type_mapping['monthly']:
                type_mapping['monthly'].append(backup_dir)
        if weekday == 6:
            # Sunday
            if backup_dir not in type_mapping['weekly']:
                type_mapping['weekly'].append(backup_dir)
        if backup_dir not in type_mapping['daily']:
            type_mapping['daily'].append(backup_dir)

    if self.verbose > 3:
        LOG.debug(
            "Mapping of found directories to backup types:\n%s",
            pp(type_mapping))

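# For illustration, a self-contained sketch of the classification rule
# above: a backup made on January 1st counts as yearly, on the first
# of a month as monthly, on a Sunday as weekly, and always as daily.
# The example dates below are placeholders.

from datetime import date


def classify_backup(d):
    """Return the retention categories a backup date falls into."""
    categories = ['daily']
    if d.weekday() == 6:            # Sunday
        categories.append('weekly')
    if d.day == 1:
        categories.append('monthly')
    if d.month == 1 and d.day == 1:
        categories.append('yearly')
    return categories

# classify_backup(date(2024, 1, 1)) -> ['daily', 'monthly', 'yearly']
# classify_backup(date(2024, 9, 1)) -> ['daily', 'weekly', 'monthly']
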
def test_handler_object(self):

    LOG.info("Testing init of a FTP handler object ...")

    from ftp_backup.ftp_handler import FTPHandler
    from ftp_backup.ftp_handler import DEFAULT_FTP_HOST
    from ftp_backup.ftp_handler import DEFAULT_FTP_PORT
    from ftp_backup.ftp_handler import DEFAULT_FTP_USER
    from ftp_backup.ftp_handler import DEFAULT_FTP_PWD
    from ftp_backup.ftp_handler import DEFAULT_FTP_TZ
    from ftp_backup.ftp_handler import DEFAULT_FTP_TIMEOUT
    from ftp_backup.ftp_handler import DEFAULT_MAX_STOR_ATTEMPTS
    from ftp_backup.ftp_handler import MAX_FTP_TIMEOUT

    ftp = FTPHandler(
        appname=self.appname,
        verbose=self.verbose,
    )
    if self.verbose > 1:
        LOG.debug("repr of FTPHandler object: %r", ftp)
    if self.verbose > 2:
        LOG.debug("FTPHandler object:\n%s", pp(ftp.as_dict(True)))

    LOG.info("Checking FTP handler object for default values ...")
    self.assertIsInstance(ftp.ftp, FTP)
    self.assertEqual(ftp.connected, False)
    self.assertEqual(ftp.host, DEFAULT_FTP_HOST)
    self.assertEqual(ftp.logged_in, False)
    self.assertEqual(ftp.max_stor_attempts, DEFAULT_MAX_STOR_ATTEMPTS)
    self.assertEqual(ftp.passive, False)
    self.assertEqual(ftp.password, DEFAULT_FTP_PWD)
    self.assertEqual(ftp.port, DEFAULT_FTP_PORT)
    self.assertEqual(ftp.remote_dir, '/')
    self.assertEqual(ftp.timeout, DEFAULT_FTP_TIMEOUT)
    self.assertEqual(ftp.tls, False)
    self.assertEqual(ftp.tz, DEFAULT_FTP_TZ)
    self.assertEqual(ftp.user, DEFAULT_FTP_USER)

def get_random_dm_name(self):

    bd_dir = os.sep + os.path.join('sys', 'block')
    if not os.path.isdir(bd_dir):
        self.skipTest("Directory %r not found." % (bd_dir))

    dirs = glob.glob(os.path.join(bd_dir, 'dm-*'))
    if not dirs:
        self.skipTest("No devicemapper devices found.")

    devs = []
    for dev_dir in dirs:
        devs.append(os.path.basename(dev_dir))
    if self.verbose > 2:
        log.debug("Found DM devices: %s", pp(devs))

    index = random.randint(0, len(devs) - 1)
    devname = devs[index]
    if self.verbose > 1:
        log.debug("Got a random devicemapper name %r.", devname)

    return devname

def test_handler_object_tls(self):

    LOG.info("Testing init of a FTP handler object with TLS support ...")

    from ftp_backup.ftp_handler import FTPHandler

    ftp = FTPHandler(
        appname=self.appname,
        tls=True,
        verbose=self.verbose,
    )
    if self.verbose > 1:
        LOG.debug("repr of FTPHandler object: %r", ftp)
    if self.verbose > 2:
        LOG.debug("FTPHandler object:\n%s", pp(ftp.as_dict(True)))

    LOG.info("Checking FTP handler object for default values ...")
    self.assertIsInstance(ftp.ftp, FTP_TLS)
    self.assertEqual(ftp.tls, True)
    self.assertIsNone(ftp.tls_verify)

@classmethod
def from_dir_line(cls, line, appname=None, verbose=0):

    line = line.strip()
    match = cls.re_dir_line.search(line)
    if not match:
        LOG.warning("Invalid line in FTP dir output %r.", line)
        return None

    dir_entry = cls(appname=appname, verbose=verbose)
    dir_entry.perms = match.group(1)
    dir_entry.num_hardlinks = match.group(2)
    dir_entry.user = match.group(3)
    dir_entry.group = match.group(4)
    dir_entry.size = match.group(5)
    dir_entry.mtime = match.group(6)
    dir_entry.name = match.group(7)
    dir_entry.initialized = True

    if verbose > 3:
        LOG.debug(
            'Initialized FTP dir entry:\n%s',
            pp(dir_entry.as_dict(short=True)))

    return dir_entry

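# The class attribute re_dir_line is not shown in this excerpt. For
# illustration, a pattern like the following would match a typical
# Unix-style LIST line and yield the seven groups consumed above
# (perms, hardlinks, user, group, size, mtime, name). This is an
# assumed pattern, not the class's actual one.

RE_DIR_LINE_SKETCH = re.compile(
    r'^([\w-]{10})\s+(\d+)\s+(\S+)\s+(\S+)\s+(\d+)\s+'
    r'(\w{3}\s+\d+\s+[\d:]+)\s+(.*)$')

# RE_DIR_LINE_SKETCH.search(
#     'drwxr-xr-x   2 backup backup 4096 Jan 01 12:00 2024-01-01_00'
# ).groups()
# -> ('drwxr-xr-x', '2', 'backup', 'backup', '4096',
#     'Jan 01 12:00', '2024-01-01_00')
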
def test_mp_system_get_paths(self):

    log.info("Testing get_paths() by a MultipathSystem object.")

    from pb_blockdev.multipath import MultipathdNotRunningError
    from pb_blockdev.multipath.system import MultipathSystem

    try:
        system = MultipathSystem(
            appname=self.appname,
            verbose=self.verbose,
            sudo=self.do_sudo,
        )
        paths = system.get_paths()
        if self.verbose > 2:
            log.debug("Got paths from MultipathSystem:\n%s", pp(paths))
        del system
    except MultipathdNotRunningError as e:
        log.debug(str(e))
        return

def test_md_device(self):

    from pb_blockdev.md.device import MdDevice

    log.info("Test MD device object.")

    bd_dir = os.sep + os.path.join('sys', 'block')
    if not os.path.isdir(bd_dir):
        self.skipTest("Directory %r not found." % (bd_dir))

    md_dev_dirs = glob.glob(os.path.join(bd_dir, 'md*'))
    if md_dev_dirs:
        index = random.randint(0, len(md_dev_dirs) - 1)
        dev_dir = md_dev_dirs[index]
        md_name = os.path.basename(dev_dir)
    else:
        md_name = 'md0'

    md = MdDevice(
        name=md_name,
        appname=self.appname,
        verbose=self.verbose,
    )
    if md.exists:
        log.debug("Getting information about sub devices ...")
        md.retr_sub_devices()
    if self.verbose > 2:
        log.debug("Got a MD device:\n%s", pp(md.as_dict(True)))

    if md.exists:
        log.info("Test getting details from MD device %s.", md_name)
        details = md.get_details()
        if self.verbose > 2:
            log.debug("Details of %s:\n%s", md_name, details)

def _run(self):
    """The underlying starting point of the application."""

    count_adapters = self.handler.adapter_count()
    if not count_adapters:
        sys.stderr.write("No MegaRaid controllers found.\n\n")
        sys.exit(1)

    line_templ = "%2d %3d %4d %11d %10.f %-68s %s"
    inq_templ = "%-8s %-20s %s"
    if self.parsable:
        line_templ = "%d;%d;%d;%d;%1.f;%s;%s"
        inq_templ = "%s %s %s"
    else:
        print(
            "Adp. Enc. Slot Size MiB Size GiB Vendor "
            "Model "
            "Serial Firmware state")

    size_total = 0

    adapter_id = 0
    while adapter_id < count_adapters:

        enclosures = self.handler.get_enclosures(adapter_id)
        if self.verbose > 2:
            if enclosures:
                encs = []
                for enclosure in enclosures:
                    encs.append(enclosure.as_dict(True))
                log.debug("Got enclosures:\n%s", pp(encs))

        for enc in sorted(enclosures):
            if not enc.nr_pds:
                continue
            slot = 0
            first = True
            while slot < enc.nr_slots:
                pd = self.handler.get_pd(adapter_id, enc.id, slot)
                if not pd:
                    slot += 1
                    continue
                if self.verbose > 2 and first:
                    log.debug(
                        "Got physical device:\n%s", pp(pd.as_dict(True)))
                first = False
                inq_data = pd.inq_data
                if pd.vendor:
                    inq_data = inq_templ % (pd.vendor, pd.model, pd.serial)
                if pd.size:
                    size_total += pd.size
                print(line_templ % (
                    adapter_id, enc.id, slot, pd.size_mb, pd.size_gb,
                    inq_data, pd.firmware_state))
                slot += 1

        adapter_id += 1

    size_mb = int(size_total / 1024 / 1024)
    size_gb = float(size_total) / 1024.0 / 1024.0 / 1024.0
    if not self.parsable:
        print("\n%-13s %11d %10.f" % ('Total:', size_mb, size_gb))

def _read_config(self):
    """
    Read in configuration from all configuration files stored in
    self.cfg_files, in the order in which they are stored there.

    The found configuration is merged into self.cfg.

    NOTE: all generated keys and string values are decoded to unicode.
    """

    if self.verbose > 2:
        log.debug(
            _("Read cfg files with character set '%s' ..."),
            self.cfg_encoding)

    if self.verbose > 3:
        cfgspec = StringIO()
        self.cfg_spec.write(cfgspec)
        log.debug(
            (_("Used config specification:") + "\n%s"),
            cfgspec.getvalue())
        cfgspec.close()
        del cfgspec

    validator = Validator(pbvalidator_checks)

    cfgfiles_ok = True

    existing_cfg_files = [
        file for file in self.cfg_files if os.path.isfile(file)]
    if not existing_cfg_files and self.need_config_file:
        msg = "Could not find any configuration file at these locations:"
        for file in self.cfg_files:
            msg += '\n' + file
        self.exit(1, msg)

    for cfg_file in existing_cfg_files:

        if self.verbose > 1:
            log.debug(
                _("Reading in configuration file '%s' ..."), cfg_file)

        try:
            cfg = ConfigObj(
                cfg_file,
                encoding=self.cfg_encoding,
                stringify=True,
                configspec=self.cfg_spec,
            )
        except ConfigObjError as e:
            msg = _("Wrong configuration in %r found:") % (cfg_file)
            msg += ' ' + str(e)
            self.handle_error(msg, _("Configuration error"))
            continue

        if self.verbose > 2:
            log.debug((_("Found configuration:") + "\n%r"), pp(cfg))

        result = cfg.validate(validator, preserve_errors=True, copy=True)
        if self.verbose > 2:
            log.debug((_("Validation result:") + "\n%s"), pp(result))

        if result is not True:
            cfgfiles_ok = False
            msg = _("Wrong configuration in %r found:") % (cfg_file)
            msg += '\n' + self._transform_cfg_errors(result)
            self.handle_error(msg, _("Configuration error"))
            continue

        self.cfg.rec_update(cfg)

    if not cfgfiles_ok:
        self.exit(2)

    if self.verbose > 2:
        if len(existing_cfg_files) > 1:
            log.debug(
                (_("Using merged configuration:") + "\n%r"), pp(self.cfg))
        else:
            log.debug((_("Using configuration:") + "\n%r"), pp(self.cfg))

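# A minimal, self-contained sketch of the ConfigObj/Validator pattern
# used above, with an inline configspec instead of self.cfg_spec and
# the stock Validator instead of the pbvalidator checks. File name and
# keys are examples only; a missing file is simply treated as an empty
# configuration by ConfigObj.
#
#     from configobj import ConfigObj
#     from validate import Validator
#
#     cfg = ConfigObj('example.ini', configspec=[
#         '[general]',
#         'verbose = integer(default=0)',
#     ])
#     result = cfg.validate(Validator(), preserve_errors=True, copy=True)
#     if result is not True:
#         # result maps sections/keys to their validation errors.
#         print("Configuration errors: %r" % (result,))
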
def get_ld_info(self, adapter_id, ld_nr, target_id=None, ld=None, nolog=True):
    """
    Executes 'MegaCLI -LdInfo -LX' and returns the logical drive
    of the given MegaRaid adapter.

    @raise CommandNotFoundError: if the MegaCli command could not be found.
    @raise MegaraidHandlerError: if the output of 'MegaCLI -LdInfo'
                                 could not be analyzed.
    @raise ValueError: on wrong parameters

    @param adapter_id: the numeric ID of the MegaRaid adapter
    @type adapter_id: int
    @param ld_nr: the number of the logical drive on the MegaRaid adapter
    @type ld_nr: int
    @param target_id: the SCSI target Id of the Logical Drive
    @type target_id: int
    @param ld: a MegaraidLogicalDrive, which should be completed
    @type ld: MegaraidLogicalDrive or None
    @param nolog: executes MegaCli with the '-NoLog' option
    @type nolog: bool

    @return: the found logical drive, or None, if not found
    @rtype: MegaraidLogicalDrive or None
    """

    adapter_id = int(adapter_id)
    ld_nr = int(ld_nr)
    if ld is not None:
        if not isinstance(ld, MegaraidLogicalDrive):
            msg = _(
                "Parameter %(lbl)r %(val)r is not a %(cls)s object.") % {
                'lbl': 'ld', 'val': ld, 'cls': 'MegaraidLogicalDrive'}
            raise ValueError(msg)

    no_override = False
    if ld:
        no_override = True
    else:
        ld = MegaraidLogicalDrive(
            adapter=adapter_id,
            number=ld_nr,
            target_id=target_id,
            appname=self.appname,
            verbose=self.verbose,
            base_dir=self.base_dir,
            use_stderr=self.use_stderr,
        )

    log.debug(_(
        "Retrieving infos about logical drive %(ld)d of "
        "MegaRaid controller %(adp)d ...") % {
        'ld': ld_nr, 'adp': adapter_id})

    args = [
        '-LdInfo',
        ('-L%d' % (ld_nr)),
        ('-a%d' % (adapter_id)),
    ]
    result = self.exec_megacli(args, nolog)
    if self.verbose > 4:
        log.debug(_("Got:"))
        sys.stderr.write(result['out'])

    match = re_ld_not_exists.search(result['out'])
    if match:
        log.warning(match.group(1))
        return None

    lines = []
    for line in result['out'].splitlines():
        line = line.strip()
        if not line:
            continue
        lines.append(line)

    ld.init_from_lines(lines, no_override)
    if self.verbose > 3:
        log.debug(
            (_("%s object:") % ('MegaraidLogicalDrive')) + "\n" +
            pp(ld.as_dict(True)))

    return ld

def cleanup_old_backupdirs(self):

    LOG.info("Cleaning up old backup directories ...")

    re_backup_dirs = re.compile(
        r'^\s*\d{4}[-_]+\d\d[-_]+\d\d[-_]+\d+\s*$')

    cur_backup_dirs = []
    dlist = self.dir_list()
    for entry in dlist:
        entry_stat = dlist[entry]
        if self.verbose > 2:
            LOG.debug("Checking entry %r ...", pp(entry))
        if not stat.S_ISDIR(entry_stat.st_mode):
            if self.verbose > 2:
                LOG.debug("%r is not a directory.", entry)
            continue
        if re_backup_dirs.search(entry):
            cur_backup_dirs.append(entry)
    cur_backup_dirs.sort(key=str.lower)

    if self.verbose > 1:
        LOG.debug(
            "Found backup directories to check:\n%s", pp(cur_backup_dirs))

    cur_date = datetime.utcnow()
    cur_weekday = cur_date.timetuple().tm_wday

    # Retrieving new backup directory
    self._get_new_backup_dir(cur_backup_dirs)
    new_backup_dir = str(self.new_backup_dir)
    cur_backup_dirs.append(new_backup_dir)

    type_mapping = {
        'yearly': [],
        'monthly': [],
        'weekly': [],
        'daily': [],
        'other': [],
    }

    if cur_date.month == 1 and cur_date.day == 1:
        if new_backup_dir not in type_mapping['yearly']:
            type_mapping['yearly'].append(new_backup_dir)
    if cur_date.day == 1:
        if new_backup_dir not in type_mapping['monthly']:
            type_mapping['monthly'].append(new_backup_dir)
    if cur_weekday == 6:
        # Sunday
        if new_backup_dir not in type_mapping['weekly']:
            type_mapping['weekly'].append(new_backup_dir)
    if new_backup_dir not in type_mapping['daily']:
        type_mapping['daily'].append(new_backup_dir)

    self._map_dirs2types(type_mapping, cur_backup_dirs)

    for key in type_mapping:
        type_mapping[key].sort(key=str.lower)
    if self.verbose > 2:
        LOG.debug(
            "Mapping of found directories to backup types:\n%s",
            pp(type_mapping))

    for key in self.copies:
        max_copies = self.copies[key]
        cur_copies = len(type_mapping[key])
        while cur_copies > max_copies:
            type_mapping[key].pop(0)
            cur_copies = len(type_mapping[key])
    if self.verbose > 2:
        LOG.debug("Directories to keep:\n%s", pp(type_mapping))

    dirs_delete = []
    for backup_dir in cur_backup_dirs:
        keep = False
        for key in type_mapping:
            if backup_dir in type_mapping[key]:
                if self.verbose > 2:
                    LOG.debug("Directory %r has to be kept.", backup_dir)
                keep = True
                break
        if not keep:
            dirs_delete.append(backup_dir)
    LOG.debug("Directories to remove:\n%s", pp(dirs_delete))

    if dirs_delete:
        self.remove_recursive(*dirs_delete)

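# A self-contained sketch of the retention pruning above: for each
# category, keep only the newest N entries. The lists are sorted in
# ascending order, so the oldest entries are dropped from the front.
# The copy counts in the usage lines are example values, not the
# application's defaults.

def prune_retention(type_mapping, copies):
    """Trim each category list in place to its configured maximum."""
    for key, max_copies in copies.items():
        while len(type_mapping[key]) > max_copies:
            type_mapping[key].pop(0)

# mapping = {'daily': ['2024-08-30_00', '2024-08-31_00', '2024-09-01_00']}
# prune_retention(mapping, {'daily': 2})
# mapping -> {'daily': ['2024-08-31_00', '2024-09-01_00']}
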
def _run(self):
    """The underlying starting point of the application."""

    count_adapters = self.handler.adapter_count()
    if not count_adapters:
        sys.stderr.write(_("No MegaRaid controllers found.") + "\n\n")
        sys.exit(1)

    line_templ = (
        "%(adp)3s %(id)3s %(lvl)-15s %(size)10s %(cache)-15s %(pds)s")
    if self.parsable:
        line_templ = "%(adp)s;%(id)s;%(lvl)s;%(size)s;%(cache)s;%(pds)s"
    else:
        info = {}
        info['adp'] = 'Adp'
        info['id'] = 'Id'
        info['lvl'] = 'RAID level'
        info['size'] = 'Size'
        info['cache'] = "Cache"
        info['pds'] = 'PDs'
        print(line_templ % (info))

    size_total = 0
    all_lds = []

    adapter_id = 0
    while adapter_id < count_adapters:

        lds = self.handler.get_all_lds(adapter_id)
        if self.verbose > 3:
            if lds:
                ldlist = []
                for ld in lds:
                    ldlist.append(ld.as_dict(True))
                LOG.debug(_("Got logical drives:") + "\n%s", pp(ldlist))
        if lds:
            for ld in lds:
                all_lds.append(ld)

        adapter_id += 1

    for ld in sorted(all_lds):

        if ld.cached:
            # Passing the drive as the 'ld' keyword argument, so it is
            # completed instead of being misread as the target_id.
            ld = self.handler.get_ld_info(ld.adapter, ld.number, ld=ld)

        info = {}
        info['adp'] = ld.adapter
        info['id'] = ld.number
        info['lvl'] = ld.raid_level
        info['size'] = ld.size
        info['cache'] = "no cache"
        if ld.cached:
            if ld.cache_rw:
                info['cache'] = 'cache r/w'
            else:
                info['cache'] = 'cache ro'
        elif ld.is_cachecade_drive:
            info['cache'] = "CacheCade drive"

        pds = []
        info['pds'] = ''
        for pd in ld.pds:
            pds.append("%d:%d" % (pd.enclosure, pd.slot))
        if pds:
            if self.parsable:
                info['pds'] = ','.join(pds)
            else:
                info['pds'] = ', '.join(pds)

        print(line_templ % (info))

    size_mb = int(size_total / 1024 / 1024)
    size_gb = float(size_total) / 1024.0 / 1024.0 / 1024.0
    if not self.parsable and size_total:
        print("\n%-13s %11d %10.f" % (_('Total:'), size_mb, size_gb))

def init_from_lines(self, lines, no_override=False):
    """
    Init of all properties from the output lines of 'MegaCLI -LdPdInfo'
    or 'MegaCLI -LdInfo'.

    @param lines: the output lines from MegaCLI to inspect
    @type lines: str or list of str
    @param no_override: don't reset all properties before inspecting
    @type no_override: bool
    """

    # The output looks like:
    #
    # Name :
    # RAID Level : Primary-1, Secondary-0, RAID Level Qualifier-0
    # Size : 55.375 GB
    # Sector Size : 512
    # Is VD emulated : No
    # Mirror Data : 55.375 GB
    # State : Optimal
    # Strip Size : 256 KB
    # Number Of Drives : 2
    # Span Depth : 1
    # Default Cache Policy: WriteBack, ReadAdaptive, Direct,
    #     No Write Cache if Bad BBU
    # Current Cache Policy: WriteBack, ReadAdaptive, Direct,
    #     No Write Cache if Bad BBU
    # Default Access Policy: Read/Write
    # Current Access Policy: Read/Write
    # Disk Cache Policy : Enabled
    # Encryption Type : None
    # PI type: No PI
    #
    # Is VD Cached: No
    # Number of Spans: 1
    # Span: 0 - Number of PDs: 2
    #
    # PD: 0 Information
    # Enclosure Device ID: 9
    # Slot Number: 0
    # Drive's position: DiskGroup: 0, Span: 0, Arm: 0
    # Enclosure position: 1
    # Device Id: 32
    # WWN: 50015178f36b244d
    # Sequence Number: 2
    # Media Error Count: 0
    # Other Error Count: 0
    # Predictive Failure Count: 0
    # Last Predictive Failure Event Seq Number: 0
    # PD Type: SATA
    #
    # Raw Size: 55.899 GB [0x6fccf30 Sectors]
    # Non Coerced Size: 55.399 GB [0x6eccf30 Sectors]
    # Coerced Size: 55.375 GB [0x6ec0000 Sectors]
    # Sector Size: 512
    # Logical Sector Size: 512
    # Physical Sector Size: 512
    # Firmware state: Online, Spun Up
    # Commissioned Spare : No
    # Emergency Spare : No
    # Device Firmware Level: 400i
    # Shield Counter: 0
    # Successful diagnostics completion on : N/A
    # SAS Address(0): 0x500304800058338c
    # Connected Port Number: 0(path0)
    # Inquiry Data: CVCV3053035K060AGN INTEL SSDSC2CW060A3 400i
    # FDE Capable: Not Capable
    # FDE Enable: Disable
    # Secured: Unsecured
    # Locked: Unlocked
    # Needs EKM Attention: No
    # Foreign State: None
    # Device Speed: 6.0Gb/s
    # Link Speed: 6.0Gb/s
    # Media Type: Solid State Device
    # Drive: Not Certified
    # Drive Temperature : N/A
    # PI Eligibility: No
    # Drive is formatted for PI information: No
    # PI: No PI
    # Port-0 :
    # Port status: Active
    # Port's Linkspeed: 6.0Gb/s
    # Drive has flagged a S.M.A.R.T alert : No
    #
    #
    # PD: 1 Information
    # Enclosure Device ID: 9
    # Slot Number: 1
    # Drive's position: DiskGroup: 0, Span: 0, Arm: 1
    # Enclosure position: 1
    # Device Id: 33
    # WWN: 50015178f36b1102
    # Sequence Number: 2
    # Media Error Count: 0
    # Other Error Count: 0
    # Predictive Failure Count: 0
    # Last Predictive Failure Event Seq Number: 0
    # PD Type: SATA
    #
    # Raw Size: 55.899 GB [0x6fccf30 Sectors]
    # Non Coerced Size: 55.399 GB [0x6eccf30 Sectors]
    # Coerced Size: 55.375 GB [0x6ec0000 Sectors]
    # Sector Size: 512
    # Logical Sector Size: 512
    # Physical Sector Size: 512
    # Firmware state: Online, Spun Up
    # Commissioned Spare : No
    # Emergency Spare : No
    # Device Firmware Level: 400i
    # Shield Counter: 0
    # Successful diagnostics completion on : N/A
    # SAS Address(0): 0x500304800058338d
    # Connected Port Number: 0(path0)
    # Inquiry Data: CVCV30530085060AGN INTEL SSDSC2CW060A3 400i
    # FDE Capable: Not Capable
    # FDE Enable: Disable
    # Secured: Unsecured
    # Locked: Unlocked
    # Needs EKM Attention: No
    # Foreign State: None
    # Device Speed: 6.0Gb/s
    # Link Speed: 6.0Gb/s
    # Media Type: Solid State Device
    # Drive: Not Certified
    # Drive Temperature : N/A
    # PI Eligibility: No
    # Drive is formatted for PI information: No
    # PI: No PI
    # Port-0 :
    # Port status: Active
    # Port's Linkspeed: 6.0Gb/s
    # Drive has flagged a S.M.A.R.T alert : No

    self.initialized = False

    if self.verbose > 3:
        log.debug(_("Analyzing lines:") + "\n%s", pp(lines))

    if not no_override:
        self._name = None
        self._raid_level_primary = None
        self._raid_level_secondary = None
        self._raid_level_qualifier = None
        self._size = None
        self._state = None
        self._cached = False
        self._cache_rw = None

    pd_lines = []
    pd_nr = None
    pd_enc = None
    pd_slot = None
    pd = None

    for line in lines:

        # Checking for the drive type
        match = re_drive_type.search(line)
        if match:
            self._drive_type = match.group(1)
            continue

        # Checking for the RAID level
        match = re_raid_level.search(line)
        if match:
            self._raid_level_primary = int(match.group(1))
            self._raid_level_secondary = int(match.group(2))
            if match.group(3) is not None:
                self._raid_level_qualifier = int(match.group(3))
            if self.verbose > 2:
                log.debug(_("RAID level of LD No %(nr)d: %(l)s.") % {
                    'nr': self.number, 'l': self.raid_level})
            continue

        # Checking for the name of the LD
        match = re_name.search(line)
        if match:
            self._name = match.group(1)
            if self.verbose > 2:
                log.debug(_("Got %(n)r as name of LD %(nr)d.") % {
                    'n': self.name, 'nr': self.number})
            continue

        # Checking for the textual size
        match = re_size.search(line)
        if match:
            self._size = match.group(1)
            if self.verbose > 2:
                log.debug(_("Got %(s)r as size of LD %(n)d.") % {
                    's': self.size, 'n': self.number})
            continue

        # Checking for the state
        match = re_state.search(line)
        if match:
            self._state = match.group(1)
            if self.verbose > 3:
                log.debug(_("Got %(s)r as state of LD %(n)d.") % {
                    's': self.state, 'n': self.number})
            continue

        # Check for the cache state
        match = re_cached.search(line)
        if match:
            cached = match.group(1)
            if re_yes.search(cached):
                self._cached = True
            else:
                self._cached = False
            continue

        # Check, whether the cache is r/w
        match = re_cache_rw.search(line)
        if match:
            ctype = match.group(1)
            if re_rw.search(ctype):
                self._cache_rw = True
            else:
                self._cache_rw = False

        # Check for the start of a new PD definition
        match = re_start_pd.search(line)
        if match:
            pd_nr = int(match.group(1))
            if pd_lines and pd_enc is not None and pd_slot is not None:
                if pd:
                    self.pds.append(pd)
                if self.verbose > 2:
                    log.debug(_(
                        "Init of PD %(pd_nr)d [%(enc)d:%(slot)d] "
                        "of LD %(nr)d.") % {
                        'pd_nr': pd_nr, 'enc': pd_enc,
                        'slot': pd_slot, 'nr': self.number})
                pd = MegaraidPd(
                    adapter=self.adapter,
                    enclosure=pd_enc,
                    slot=pd_slot,
                    appname=self.appname,
                    verbose=self.verbose,
                    base_dir=self.base_dir,
                    use_stderr=self.use_stderr,
                )
                pd.init_from_lines(pd_lines)
                if self.verbose > 3:
                    log.debug(_("Got PD:") + "\n%s", pp(pd.as_dict(True)))
                pd_lines = []
                pd_nr = None
                pd_enc = None
                pd_slot = None
            continue

        # Check for the PD enclosure
        match = re_pd_enc.search(line)
        if match:
            pd_enc = int(match.group(1))
            continue

        # Check for the PD slot
        match = re_pd_slot.search(line)
        if match:
            pd_slot = int(match.group(1))
            continue

        pd_lines.append(line)

    if pd:
        self.pds.append(pd)
    if pd_lines and pd_enc is not None and pd_slot is not None:
        if self.verbose > 2:
            log.debug(_(
                "Init of PD [%(enc)d:%(slot)d] of LD %(nr)d.") % {
                'enc': pd_enc, 'slot': pd_slot, 'nr': self.number})
        pd = MegaraidPd(
            adapter=self.adapter,
            enclosure=pd_enc,
            slot=pd_slot,
            appname=self.appname,
            verbose=self.verbose,
            base_dir=self.base_dir,
            use_stderr=self.use_stderr,
        )
        pd.init_from_lines(pd_lines)
        if self.verbose > 3:
            log.debug(_("Got PD:") + "\n%s", pp(pd.as_dict(True)))
        self.pds.append(pd)

    self.initialized = True

def init_from_lines(self, lines):
    """
    Init of all properties from the output lines of 'MegaCLI -EncInfo'.
    """

    # The output looks like:
    #
    # Number of enclosures on adapter 0 -- 3
    #
    # Enclosure 0:
    # Device ID : 8
    # Number of Slots : 12
    # Number of Power Supplies : 0
    # Number of Fans : 3
    # Number of Temperature Sensors : 1
    # Number of Alarms : 0
    # Number of SIM Modules : 0
    # Number of Physical Drives : 12
    # Status : Normal
    # Position : 1
    # Connector Name : Port 4 - 7
    # Enclosure type : SES
    # FRU Part Number : N/A
    # Enclosure Serial Number : N/A
    # ESM Serial Number : N/A
    # Enclosure Zoning Mode : N/A
    # Partner Device Id : 65535
    #
    # Inquiry data :
    # Vendor Identification : LSI
    # Product Identification : SAS2X28
    # Product Revision Level : 0e0b
    # Vendor Specific : x36-55.14.11.0
    #
    # Number of Voltage Sensors :2
    #
    # Voltage Sensor :0
    # Voltage Sensor Status :OK
    # Voltage Value :5010 milli volts
    #
    # Voltage Sensor :1
    # Voltage Sensor Status :OK
    # Voltage Value :11810 milli volts
    #
    # Number of Power Supplies : 0
    #
    # Number of Fans : 3
    #
    # Fan : 0
    # Fan Status : OK
    #
    # Fan : 1
    # Fan Speed :Medium Speed
    # Fan Status : OK
    #
    # Fan : 2
    # Fan Status : OK
    #
    # Number of Temperature Sensors : 1
    #
    # Temp Sensor : 0
    # Temperature : 28
    # Temperature Sensor Status : OK
    #
    # Number of Chassis : 1
    #
    # Chassis : 0
    # Chassis Status : OK
    #
    # Enclosure 1:
    # Device ID : 9
    # Number of Slots : 24
    # Number of Power Supplies : 0
    # Number of Fans : 5
    # Number of Temperature Sensors : 1
    # Number of Alarms : 0
    # Number of SIM Modules : 0
    # Number of Physical Drives : 24
    # Status : Normal
    # Position : 1
    # Connector Name : Port 0 - 3
    # Enclosure type : SES
    # FRU Part Number : N/A
    # Enclosure Serial Number : N/A
    # ESM Serial Number : N/A
    # Enclosure Zoning Mode : N/A
    # Partner Device Id : 65535
    #
    # Inquiry data :
    # Vendor Identification : LSI
    # Product Identification : SAS2X36
    # Product Revision Level : 0e0b
    # Vendor Specific : x36-55.14.11.0
    #
    # Number of Voltage Sensors :2
    #
    # Voltage Sensor :0
    # Voltage Sensor Status :OK
    # Voltage Value :4980 milli volts
    #
    # Voltage Sensor :1
    # Voltage Sensor Status :OK
    # Voltage Value :11910 milli volts
    #
    # Number of Power Supplies : 0
    #
    # Number of Fans : 5
    #
    # Fan : 0
    # Fan Status : OK
    #
    # Fan : 1
    # Fan Status : OK
    #
    # Fan : 2
    # Fan Speed :Medium Speed
    # Fan Status : OK
    #
    # Fan : 3
    # Fan Status : Not Available
    #
    # Fan : 4
    # Fan Status : Not Available
    #
    # Number of Temperature Sensors : 1
    #
    # Temp Sensor : 0
    # Temperature : 32
    # Temperature Sensor Status : OK
    #
    # Number of Chassis : 1
    #
    # Chassis : 0
    # Chassis Status : OK
    #
    # Enclosure 2:
    # Device ID : 252
    # Number of Slots : 8
    # Number of Power Supplies : 0
    # Number of Fans : 0
    # Number of Temperature Sensors : 0
    # Number of Alarms : 0
    # Number of SIM Modules : 1
    # Number of Physical Drives : 0
    # Status : Normal
    # Position : 1
    # Connector Name : Unavailable
    # Enclosure type : SGPIO
    # FRU Part Number : N/A
    # Enclosure Serial Number : N/A
    # ESM Serial Number : N/A
    # Enclosure Zoning Mode : N/A
    # Partner Device Id : Unavailable
    #
    # Inquiry data :
    # Vendor Identification : LSI
    # Product Identification : SGPIO
    # Product Revision Level : N/A
    # Vendor Specific :
    #
    # Exit Code: 0x00

    self.initialized = False

    if self.verbose > 3:
        log.debug(_("Analyzing lines:") + "\n" + pp(lines))

    self._id = None
    self._nr_slots = None
    self._nr_power_supplies = None
    self._nr_fans = None
    self._nr_temp_sensors = None
    self._nr_alarms = None
    self._nr_pds = None
    self._nr_voltage_sensors = None
    self._status = None
    self._connector_name = None
    self._enc_type = None
    self._vendor = None
    self._product_name = None
    self._product_revision = None
    self._vendor_specific = None

    self.voltage_sensors = []
    cur_voltage_sensor = None
    self.fans = []
    cur_fan_status = None
    self.temperature_sensors = []
    cur_temperature_sensor = None

    re_enc_id = re.compile(r'^Device\s+ID\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_slots = re.compile(
        r'^Number\s+of\s+Slots\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_power_supplies = re.compile(
        r'^Number\s+of\s+Power\s+Supplies\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_fans = re.compile(
        r'^Number\s+of\s+Fans\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_temp_sensors = re.compile(
        r'^Number\s+of\s+Temperature\s+Sensors\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_alarms = re.compile(
        r'^Number\s+of\s+Alarms\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_pds = re.compile(
        r'^Number\s+of\s+Physical\s+Drives\s*:\s*(\d+)', re.IGNORECASE)
    re_nr_voltage_sensors = re.compile(
        r'^Number\s+of\s+Voltage\s+Sensors\s*:\s*(\d+)', re.IGNORECASE)
    re_status = re.compile(r'^Status\s*:\s*(\S+.*)', re.IGNORECASE)
    re_connector_name = re.compile(
        r'^Connector\s+Name\s*:\s*(\S+.*)', re.IGNORECASE)
    re_enc_type = re.compile(
        r'^Enclosure\s+type\s*:\s*(\S+.*)', re.IGNORECASE)
    re_vendor = re.compile(
        r'^Vendor\s+Identification\s*:\s*(\S+.*)', re.IGNORECASE)
    re_product_name = re.compile(
        r'^Product\s+Identification\s*:\s*(\S+.*)', re.IGNORECASE)
    re_product_revision = re.compile(
        r'^Product\s+Revision\s+Level\s*:\s*(\S+.*)', re.IGNORECASE)
    re_vendor_specific = re.compile(
        r'^Vendor\s+Specific\s*:\s*(\S+.*)', re.IGNORECASE)
    re_voltage_sensor = re.compile(
        r'^Voltage\s+Sensor\s*:\s*(\d+)', re.IGNORECASE)
    re_voltage_sensor_status = re.compile(
        r'^Voltage\s+Sensor\s+Status\s*:\s*(\S+.*)', re.IGNORECASE)
    re_voltage_value = re.compile(
        r'^Voltage\s+Value\s*:\s*(\d+(?:\.\d*)?)\s*(milli)?\s*volt',
        re.IGNORECASE)
    re_fan = re.compile(r'^Fan\s*:\s*(\d+)', re.IGNORECASE)
    re_fan_status = re.compile(
        r'^Fan\s+Status\s*:\s*(\S+.*)', re.IGNORECASE)
    re_fan_speed = re.compile(
        r'^Fan\s+Speed\s*:\s*(\S+.*)', re.IGNORECASE)
    re_temp_sensor = re.compile(
        r'^Temp\s+Sensor\s*:\s*(\d+)', re.IGNORECASE)
    re_temperature = re.compile(
        r'^Temperature\s*:\s*(\d+)', re.IGNORECASE)
    re_temp_status = re.compile(
        r'^Temperature\s+Sensor\s+Status\s*:\s*(\S+.*)', re.IGNORECASE)
    re_not_avail = re.compile(r'^Not\s+Available', re.IGNORECASE)

    for line in lines:

        # Checking for the device Id
        match = re_enc_id.search(line)
        if match:
            self.id = match.group(1)
            if self.verbose > 2:
                log.debug(_("Id of enclosure No %(enc)d: %(id)d.") % {
                    'enc': self.number, 'id': self.id})
            continue

        # Checking for the number of slots
        match = re_nr_slots.search(line)
        if match:
            self._nr_slots = int(match.group(1))
            continue

        # Checking for the number of power supplies
        match = re_nr_power_supplies.search(line)
        if match:
            self._nr_power_supplies = int(match.group(1))
            continue

        # Checking for the number of fans
        match = re_nr_fans.search(line)
        if match:
            self._nr_fans = int(match.group(1))
            continue

        # Checking for the number of temperature sensors
        match = re_nr_temp_sensors.search(line)
        if match:
            self._nr_temp_sensors = int(match.group(1))
            continue

        # Checking for the number of alarms
        match = re_nr_alarms.search(line)
        if match:
            self._nr_alarms = int(match.group(1))
            continue

        # Checking for the number of mounted physical drives
        match = re_nr_pds.search(line)
        if match:
            self._nr_pds = int(match.group(1))
            continue

        # Checking for the number of voltage sensors
        match = re_nr_voltage_sensors.search(line)
        if match:
            self._nr_voltage_sensors = int(match.group(1))
            continue

        # Checking for the state
        match = re_status.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._status = match.group(1)
            continue

        # Checking for the connector name
        match = re_connector_name.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._connector_name = match.group(1)
            continue

        # Checking for the enclosure type
        match = re_enc_type.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._enc_type = match.group(1)
            continue

        # Checking for the vendor name
        match = re_vendor.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._vendor = match.group(1)
            continue

        # Checking for the product name
        match = re_product_name.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._product_name = match.group(1)
            continue

        # Checking for the product revision
        match = re_product_revision.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._product_revision = match.group(1)
            continue

        # Checking for the vendor specific product name
        match = re_vendor_specific.search(line)
        if match:
            if match.group(1).lower() != 'n/a':
                self._vendor_specific = match.group(1)
            continue

        # Check for the start of a voltage sensor
        match = re_voltage_sensor.search(line)
        if match:
            if cur_voltage_sensor:
                self.voltage_sensors.append(cur_voltage_sensor)
            nr = int(match.group(1))
            if self.verbose > 3:
                log.debug(_("Found voltage sensor %d."), nr)
            cur_voltage_sensor = VoltageSensor(
                nr,
                appname=self.appname,
                verbose=self.verbose,
                base_dir=self.base_dir,
                use_stderr=self.use_stderr,
            )

        # Check for the voltage sensor status
        match = re_voltage_sensor_status.search(line)
        if match:
            st = match.group(1)
            if self.verbose > 3:
                log.debug(_("Found voltage sensor status %r."), st)
            if cur_voltage_sensor:
                cur_voltage_sensor.status = st
            continue

        # Check for the voltage sensor value
        match = re_voltage_value.search(line)
        if match:
            value = float(match.group(1))
            if match.group(2):
                value /= 1000.0
            if self.verbose > 3:
                log.debug(_("Found voltage sensor value %f."), value)
            if cur_voltage_sensor:
                cur_voltage_sensor.voltage = value
            continue

        # Check for the start of a fan status
        match = re_fan.search(line)
        if match:
            if cur_fan_status:
                self.fans.append(cur_fan_status)
            nr = int(match.group(1))
            if self.verbose > 3:
                log.debug(_("Found Fan %d."), nr)
            cur_fan_status = FanStatus(
                nr,
                appname=self.appname,
                verbose=self.verbose,
                base_dir=self.base_dir,
                use_stderr=self.use_stderr,
            )

        # Check for the fan status
        match = re_fan_status.search(line)
        if match:
            st = match.group(1)
            if re_not_avail.search(st):
                continue
            if self.verbose > 3:
                log.debug(_("Found fan status %r."), st)
            if cur_fan_status:
                cur_fan_status.status = st
            continue

        # Check for the fan speed
        match = re_fan_speed.search(line)
        if match:
            value = match.group(1)
            if self.verbose > 3:
                log.debug(_("Found fan speed %r."), value)
            if cur_fan_status:
                cur_fan_status.speed = value
            continue

        # Check for the start of a temperature sensor
        match = re_temp_sensor.search(line)
        if match:
            if cur_temperature_sensor:
                self.temperature_sensors.append(cur_temperature_sensor)
            nr = int(match.group(1))
            if self.verbose > 3:
                log.debug(_("Found temperature sensor %d."), nr)
            cur_temperature_sensor = TemperatureSensor(
                nr,
                appname=self.appname,
                verbose=self.verbose,
                base_dir=self.base_dir,
                use_stderr=self.use_stderr,
            )

        # Check for the temperature sensor status
        match = re_temp_status.search(line)
        if match:
            st = match.group(1)
            if self.verbose > 3:
                log.debug(_("Found temperature sensor status %r."), st)
            if cur_temperature_sensor:
                cur_temperature_sensor.status = st
            continue

        # Check for the temperature value
        match = re_temperature.search(line)
        if match:
            value = int(match.group(1))
            if self.verbose > 3:
                log.debug(_("Found temperature value %d."), value)
            if cur_temperature_sensor:
                cur_temperature_sensor.temperature = value
            continue

    if cur_voltage_sensor:
        self.voltage_sensors.append(cur_voltage_sensor)
    if cur_fan_status:
        self.fans.append(cur_fan_status)
    if cur_temperature_sensor:
        self.temperature_sensors.append(cur_temperature_sensor)

    self.initialized = True

def _run(self):
    """The underlying starting point of the application."""

    if not os.path.isdir(self.local_directory):
        LOG.error(
            "Local directory %r does not exist.", self.local_directory)
        sys.exit(5)

    re_backup_dirs = re.compile(
        r"^\s*\d{4}[-_]+\d\d[-_]+\d\d[-_]+\d+\s*$")
    re_whitespace = re.compile(r"\s+")

    self.login_ftp()
    self.ftp.cwd(self.ftp_remote_dir)

    cur_backup_dirs = []
    dlist = self.dir_list()
    for entry in dlist:
        if self.verbose > 3:
            LOG.debug(
                "Entry in FTP dir:\n%s", pp(entry.as_dict(short=True)))
        if re_backup_dirs.search(entry.name):
            cur_backup_dirs.append(entry.name)
        else:
            LOG.debug(
                "FTP entry %r is not a valid backup directory.",
                entry.name)
    cur_backup_dirs.sort(key=str.lower)
    if self.verbose > 1:
        LOG.debug("Found backup directories:\n%s", pp(cur_backup_dirs))

    cur_date = datetime.utcnow()
    backup_dir_tpl = cur_date.strftime("%Y-%m-%d_%%02d")
    LOG.debug("Backup directory template: %r", backup_dir_tpl)
    cur_weekday = cur_date.timetuple().tm_wday

    # Retrieving new backup directory
    new_backup_dir = None
    i = 0
    found = False
    while not found:
        new_backup_dir = backup_dir_tpl % (i)
        if new_backup_dir not in cur_backup_dirs:
            found = True
        i += 1
    LOG.info("New backup directory: %r", new_backup_dir)
    cur_backup_dirs.append(new_backup_dir)

    type_mapping = {
        "yearly": [],
        "monthly": [],
        "weekly": [],
        "daily": [],
        "other": [],
    }

    if cur_date.month == 1 and cur_date.day == 1:
        if new_backup_dir not in type_mapping["yearly"]:
            type_mapping["yearly"].append(new_backup_dir)
    if cur_date.day == 1:
        if new_backup_dir not in type_mapping["monthly"]:
            type_mapping["monthly"].append(new_backup_dir)
    if cur_weekday == 6:
        # Sunday
        if new_backup_dir not in type_mapping["weekly"]:
            type_mapping["weekly"].append(new_backup_dir)
    if new_backup_dir not in type_mapping["daily"]:
        type_mapping["daily"].append(new_backup_dir)

    self.map_dirs2types(type_mapping, cur_backup_dirs)
    for key in type_mapping:
        type_mapping[key].sort(key=str.lower)
    if self.verbose > 2:
        LOG.debug(
            "Mapping of found directories to backup types:\n%s",
            pp(type_mapping))

    for key in self.copies:
        max_copies = self.copies[key]
        cur_copies = len(type_mapping[key])
        while cur_copies > max_copies:
            type_mapping[key].pop(0)
            cur_copies = len(type_mapping[key])
    if self.verbose > 2:
        LOG.debug("Directories to keep:\n%s", pp(type_mapping))

    dirs_delete = []
    for backup_dir in cur_backup_dirs:
        keep = False
        for key in type_mapping:
            if backup_dir in type_mapping[key]:
                if self.verbose > 2:
                    LOG.debug("Directory %r has to be kept.", backup_dir)
                keep = True
                break
        if not keep:
            dirs_delete.append(backup_dir)
    LOG.debug("Directories to remove:\n%s", pp(dirs_delete))

    # Removing unnecessary stuff recursively
    for item in dirs_delete:
        self.remove_recursive(item)

    # Creating the date-formatted directory
    LOG.info("Creating directory %r ...", new_backup_dir)
    if not self.simulate:
        self.ftp.mkd(new_backup_dir)

    local_pattern = os.path.join(self.local_directory, "*")
    try:
        LOG.debug("Changing into %r ...", new_backup_dir)
        if not self.simulate:
            self.ftp.cwd(new_backup_dir)

        # Backing up stuff
        LOG.debug("Searching for stuff to backup in %r.", local_pattern)
        local_files = glob.glob(local_pattern)
        for local_file in sorted(local_files, key=str.lower):
            if not os.path.isfile(local_file):
                if self.verbose > 1:
                    LOG.debug(
                        "%r is not a file, not backing it up.",
                        local_file)
                continue
            statinfo = os.stat(local_file)
            size = statinfo.st_size
            s = ""
            if size != 1:
                s = "s"
            size_human = bytes2human(size, precision=1)
            remote_file = re_whitespace.sub(
                "_", os.path.basename(local_file))
            LOG.info(
                "Transferring file %r -> %r, size %d Byte%s (%s).",
                local_file, remote_file, size, s, size_human)
            if not self.simulate:
                cmd = "STOR %s" % (remote_file)
                with open(local_file, "rb") as f:
                    try_nr = 0
                    while try_nr < 10:
                        try_nr += 1
                        if try_nr > 2:
                            LOG.info(
                                "Try %d transferring file %r ...",
                                try_nr, local_file)
                        try:
                            self.ftp.storbinary(cmd, f)
                            break
                        except ftplib.error_temp as e:
                            if try_nr >= 10:
                                msg = (
                                    "Giving up trying to upload %r "
                                    "after %d tries: %s")
                                LOG.error(msg, local_file, try_nr, str(e))
                                raise
                            self.handle_error(
                                str(e), e.__class__.__name__, False)
                            # Rewind, so the retry uploads the whole
                            # file from the beginning again.
                            f.seek(0)
                            time.sleep(2)
    finally:
        LOG.debug("Changing cwd up.")
        if not self.simulate:
            self.ftp.cwd("..")

    # Detect and display the current disk usage
    total_bytes = 0
    if six.PY2:
        total_bytes = long(0)
    dlist = self.dir_list()

    total_s = "Total"
    max_len = len(total_s)
    for entry in dlist:
        if len(entry.name) > max_len:
            max_len = len(entry.name)
    max_len += 2

    LOG.info("Current disk usages:")
    for entry in dlist:
        if entry.name == "." or entry.name == "..":
            continue
        entry_size = self.disk_usage(entry)
        total_bytes += entry_size
        s = ""
        if entry_size != 1:
            s = "s"
        b_h = bytes2human(entry_size, precision=1)
        (val, unit) = b_h.split(None, 1)
        b_h_s = "%6s %s" % (val, unit)
        LOG.info(
            "%-*r %13d Byte%s (%s)",
            max_len, entry.name, entry_size, s, b_h_s)

    s = ""
    if total_bytes != 1:
        s = "s"
    b_h = bytes2human(total_bytes, precision=1)
    (val, unit) = b_h.split(None, 1)
    b_h_s = "%6s %s" % (val, unit)
    LOG.info(
        "%-*s %13d Byte%s (%s)",
        max_len, total_s + ":", total_bytes, s, b_h_s)
