def start_backup_job(self, context):
    """
    Build the list of files to back up.

    At this point, plugin options were passed and checked already. Reads
    the file named by the 'filename' option, applies the optional
    'allow'/'deny' regular expressions and fills self.files_to_backup,
    walking directories recursively.

    Returns a bRC_* code from the bRCs mapping.
    """
    bareosfd.DebugMessage(
        context,
        100,
        "Using %s to search for local files\n" % (self.options["filename"]),
    )
    if not os.path.exists(self.options["filename"]):
        bareosfd.DebugMessage(
            context, 100,
            "File %s does not exist\n" % (self.options["filename"]))
        return bRCs["bRC_Error"]
    try:
        config_file = open(self.options["filename"], "rb")
    except (IOError, OSError):
        # Narrowed from a bare except: only I/O errors are expected here.
        bareosfd.DebugMessage(
            context,
            100,
            "Could not open file %s\n" % (self.options["filename"]),
        )
        return bRCs["bRC_Error"]
    # Check, if we have allow or deny regular expressions defined
    if "allow" in self.options:
        self.allow = re.compile(self.options["allow"])
    if "deny" in self.options:
        self.deny = re.compile(self.options["deny"])
    with config_file:  # ensure the handle is closed after reading
        list_items = config_file.read().splitlines()
    for listItem in list_items:
        if os.path.isfile(listItem) and self.filename_is_allowed(
                context, listItem, self.allow, self.deny):
            self.files_to_backup.append(listItem)
        if os.path.isdir(listItem):
            for topdir, dirNames, fileNames in os.walk(listItem):
                for fileName in fileNames:
                    if self.filename_is_allowed(
                            context,
                            os.path.join(topdir, fileName),
                            self.allow,
                            self.deny,
                    ):
                        self.files_to_backup.append(
                            os.path.join(topdir, fileName))
    if not self.files_to_backup:
        bareosfd.JobMessage(
            context,
            bJobMessageType["M_ERROR"],
            "No (allowed) files to backup found\n",
        )
        return bRCs["bRC_Error"]
    else:
        # NOTE(review): returning bRC_Cancel on success looks like a
        # deliberate cancel-test behavior; kept unchanged -- confirm.
        return bRCs["bRC_Cancel"]
def create_file(self, restorepkt):
    """
    Creates the file to be restored and directory structure, if needed.
    Adapt this in your derived class, if you need modifications for
    virtual files or similar.

    Sets restorepkt.create_status according to the file type and returns
    bareosfd.bRC_OK (bareosfd.bRC_Error if no output name is set).
    """
    bareosfd.DebugMessage(
        100,
        "create_file() entry point in Python called with %s\n" % (restorepkt),
    )
    FNAME = restorepkt.ofname
    if not FNAME:
        return bareosfd.bRC_Error
    dirname = os.path.dirname(FNAME.rstrip("/"))
    if not os.path.exists(dirname):
        bareosfd.DebugMessage(
            200, "Directory %s does not exist, creating it now\n" % dirname)
        os.makedirs(dirname)
    # open creates the file, if not yet existing, we close it again right
    # away; it will be opened again in plugin_io.
    if restorepkt.type == bareosfd.FT_REG:
        open(FNAME, "wb").close()
        restorepkt.create_status = bareosfd.CF_EXTRACT
    elif restorepkt.type == bareosfd.FT_LNK:
        linkNameEnc = restorepkt.olname
        linkNameClear = linkNameEnc
        if not os.path.islink(FNAME.rstrip("/")):
            # if not os.path.exists(linkNameClear):
            os.symlink(linkNameClear, FNAME.rstrip("/"))
        restorepkt.create_status = bareosfd.CF_CREATED
    elif restorepkt.type == bareosfd.FT_LNKSAVED:
        linkNameEnc = restorepkt.olname
        linkNameClear = linkNameEnc
        if not os.path.exists(linkNameClear):
            os.link(linkNameClear, FNAME.rstrip("/"))
        restorepkt.create_status = bareosfd.CF_CREATED
    elif restorepkt.type == bareosfd.FT_DIREND:
        if not os.path.exists(FNAME):
            os.makedirs(FNAME)
        restorepkt.create_status = bareosfd.CF_CREATED
    elif restorepkt.type == bareosfd.FT_FIFO:
        if not os.path.exists(FNAME):
            try:
                os.mkfifo(FNAME, 0o600)
            except Exception as e:
                # Fixed message typo: "net" -> "not".
                bareosfd.JobMessage(
                    bareosfd.M_ERROR,
                    'Could not create fifo %s: "%s"' % (FNAME, e),
                )
        restorepkt.create_status = bareosfd.CF_CREATED
    else:
        bareosfd.JobMessage(
            bareosfd.M_ERROR,
            "Unknown type %s of file %s" % (restorepkt.type, FNAME),
        )
    return bareosfd.bRC_OK
def start_backup_job(self):
    """
    Build self.files_to_backup from the configured list file.

    At this point, plugin options were passed and checked already. Reads
    the file named by the 'filename' option, applies the optional
    'allow'/'deny' regular expressions, and adds files plus directories
    (recursively, with a trailing '/' on directory names as the FD
    requires) to the backup set.

    Returns bareosfd.bRC_OK on success, bareosfd.bRC_Error otherwise.
    """
    bareosfd.DebugMessage(
        100,
        "Using %s to search for local files\n" % self.options["filename"],
    )
    if not os.path.exists(self.options["filename"]):
        bareosfd.DebugMessage(
            100, "File %s does not exist\n" % (self.options["filename"])
        )
        return bareosfd.bRC_Error
    try:
        config_file = open(self.options["filename"], "r")
    except (IOError, OSError):
        # Narrowed from a bare except: only I/O errors are expected here.
        bareosfd.DebugMessage(
            100,
            "Could not open file %s\n" % (self.options["filename"]),
        )
        return bareosfd.bRC_Error
    # Check, if we have allow or deny regular expressions defined
    if "allow" in self.options:
        self.allow = re.compile(self.options["allow"])
    if "deny" in self.options:
        self.deny = re.compile(self.options["deny"])
    with config_file:  # make sure the handle is closed after reading
        list_items = config_file.read().splitlines()
    for listItem in list_items:
        if os.path.isfile(listItem) and self.filename_is_allowed(
            listItem, self.allow, self.deny
        ):
            self.append_file_to_backup(listItem)
        if os.path.isdir(listItem):
            fullDirName = listItem
            # FD requires / at the end of a directory name
            if not fullDirName.endswith("/"):
                fullDirName += "/"
            self.append_file_to_backup(fullDirName)
            for topdir, dirNames, fileNames in os.walk(listItem):
                for fileName in fileNames:
                    if self.filename_is_allowed(
                        os.path.join(topdir, fileName),
                        self.allow,
                        self.deny,
                    ):
                        self.append_file_to_backup(os.path.join(topdir, fileName))
                for dirName in dirNames:
                    fullDirName = os.path.join(topdir, dirName) + "/"
                    self.append_file_to_backup(fullDirName)
    bareosfd.DebugMessage(150, "Filelist: %s\n" % (self.files_to_backup))
    if not self.files_to_backup:
        bareosfd.JobMessage(
            bareosfd.M_ERROR,
            "No (allowed) files to backup found\n",
        )
        return bareosfd.bRC_Error
    else:
        return bareosfd.bRC_OK
def start_backup_file(self, savepkt):
    """
    Fill the savepkt for the next file to back up.

    Files with the special suffixes .sha256sum, .abspath and
    .longrestoreobject are stored as restore objects
    (FT_RESTORE_FIRST); everything else is a regular file. In this
    example only files (no directories) are allowed.
    """
    bareosfd.DebugMessage(100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(100, "No files to backup\n")
        return bRC_Skip

    current = self.files_to_backup.pop()
    savepkt.statp = bareosfd.StatPacket()

    stem = os.path.splitext(current)[0]
    restore_object = None
    if current.endswith(".sha256sum"):
        restore_object = bytearray(self.get_sha256sum(stem).encode("utf-8"))
    elif current.endswith(".abspath"):
        restore_object = bytearray(stem.encode("utf-8"))
    elif current.endswith(".longrestoreobject"):
        restore_object = bytearray(b"a" * int(stem))

    if restore_object is None:
        savepkt.fname = current
        savepkt.type = FT_REG
    else:
        savepkt.type = FT_RESTORE_FIRST
        savepkt.fname = current
        savepkt.object_name = current
        savepkt.object = restore_object
        savepkt.object_len = len(restore_object)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1

    bareosfd.JobMessage(
        M_INFO,
        "Starting backup of {}\n".format(current),
    )
    return bRC_OK
def start_backup_file(self, context, savepkt):
    """
    Defines the file to backup and creates the savepkt. In this example
    only files (no directories) are allowed.

    Files ending in .sha256sum, .abspath or .longrestoreobject are
    backed up as restore objects (FT_RESTORE_FIRST); all other files
    as regular files (FT_REG). Returns bRC_Skip when nothing is left,
    bRC_OK otherwise.
    """
    bareosfd.DebugMessage(context, 100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(context, 100, "No files to backup\n")
        return bRCs["bRC_Skip"]
    # Take the next file from the end of the pending list.
    file_to_backup = self.files_to_backup.pop()
    bareosfd.DebugMessage(context, 100, "file: " + file_to_backup + "\n")
    statp = bareosfd.StatPacket()
    savepkt.statp = statp
    if file_to_backup.endswith(".sha256sum"):
        # Restore object carrying the checksum of the base file name.
        checksum = self.get_sha256sum(context, os.path.splitext(file_to_backup)[0])
        savepkt.type = bFileType["FT_RESTORE_FIRST"]
        savepkt.fname = file_to_backup
        savepkt.object_name = file_to_backup
        # NOTE(review): bytearray(str) assumes Python 2 byte strings --
        # confirm before running under Python 3.
        savepkt.object = bytearray(checksum)
        savepkt.object_len = len(savepkt.object)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1
    elif file_to_backup.endswith(".abspath"):
        # Restore object carrying the file's own base path as payload.
        savepkt.type = bFileType["FT_RESTORE_FIRST"]
        savepkt.fname = file_to_backup
        savepkt.object_name = file_to_backup
        savepkt.object = bytearray(os.path.splitext(file_to_backup)[0])
        savepkt.object_len = len(savepkt.object)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1
    elif file_to_backup.endswith(".longrestoreobject"):
        # The base name encodes the desired restore-object length.
        teststring_length = int(os.path.splitext(file_to_backup)[0])
        savepkt.type = bFileType["FT_RESTORE_FIRST"]
        savepkt.fname = file_to_backup
        savepkt.object_name = file_to_backup
        savepkt.object = bytearray("a" * teststring_length)
        savepkt.object_len = len(savepkt.object)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1
    else:
        # Plain regular file.
        savepkt.fname = file_to_backup
        savepkt.type = bFileType["FT_REG"]
    bareosfd.JobMessage(
        context,
        bJobMessageType["M_INFO"],
        "Starting backup of %s\n" % (file_to_backup),
    )
    return bRCs["bRC_OK"]
def end_backup_file(self, context):
    """Return bRC_More while VADP disks or files remain, else bRC_OK."""
    bareosfd.DebugMessage(
        context, 100, "BareosFdPluginVMware:end_backup_file() called\n")
    pending = self.vadp.disk_devices or self.vadp.files_to_backup
    if pending:
        bareosfd.DebugMessage(context, 100, "end_backup_file(): returning bRC_More\n")
        return bRCs['bRC_More']
    bareosfd.DebugMessage(context, 100, "end_backup_file(): returning bRC_OK\n")
    return bRCs['bRC_OK']
def prepare_vm_restore(self, context):
    '''
    Prepare VM restore:
    - get vm details (by uuid if given, else by dc/folder/vmname)
    - ensure vm is powered off and has no snapshots
    - get disk devices and check they match the backed up disks

    Returns bRC_OK when the VM is ready for restore, bRC_Error otherwise.
    '''
    # Prefer lookup by UUID when the option is present.
    if 'uuid' in self.options:
        vmname = self.options['uuid']
        if not self.get_vm_details_by_uuid(context):
            bareosfd.DebugMessage(
                context, 100,
                "Error getting details for VM %s\n" % (vmname))
            return bRCs['bRC_Error']
    else:
        vmname = self.options['vmname']
        if not self.get_vm_details_dc_folder_vmname(context):
            bareosfd.DebugMessage(
                context, 100,
                "Error getting details for VM %s\n" % (vmname))
            return bRCs['bRC_Error']

    bareosfd.DebugMessage(
        context, 100,
        "Successfully got details for VM %s\n" % (vmname))

    # Restore must not run against a powered-on VM.
    vm_power_state = self.vm.summary.runtime.powerState
    if vm_power_state != 'poweredOff':
        bareosfd.JobMessage(
            context, bJobMessageType['M_FATAL'],
            "Error VM %s must be poweredOff for restore, but is %s\n" %
            (vmname, vm_power_state))
        return bRCs['bRC_Error']

    # Existing snapshots would conflict with the restored disk state.
    if self.vm.snapshot is not None:
        bareosfd.JobMessage(
            context, bJobMessageType['M_FATAL'],
            "Error VM %s must not have any snapshots before restore\n" %
            (vmname))
        return bRCs['bRC_Error']

    bareosfd.DebugMessage(context, 100,
                          "Getting Disk Devices on VM %s\n" % (vmname))
    self.get_vm_disk_devices(context)
    if not self.disk_devices:
        bareosfd.JobMessage(
            context, bJobMessageType['M_FATAL'],
            "Error getting Disk Devices on VM %s\n" % (vmname))
        return bRCs['bRC_Error']

    # make sure backed up disks match VM disks
    if not self.check_vm_disks_match(context):
        return bRCs['bRC_Error']

    return bRCs['bRC_OK']
def plugin_io_open(self, IOP):
    """
    Open IOP.fname for the I/O phase and remember it in self.FNAME.

    Directories, symlinks and FIFOs carry no data stream, so they are
    not opened at all (their type is recorded and bRC_OK returned).
    Regular files are opened for writing (creating missing parent
    directories) or reading depending on IOP.flags.

    Returns bRC_OK, or bRC_Error (with IOP.status = -1) on open failure.
    """
    self.FNAME = IOP.fname
    bareosfd.DebugMessage(
        250, "io_open: self.FNAME is set to %s\n" % (self.FNAME)
    )
    # Classify the path first; only regular files get an open().
    if os.path.isdir(self.FNAME):
        bareosfd.DebugMessage(100, "%s is a directory\n" % (self.FNAME))
        self.fileType = "FT_DIR"
    elif os.path.islink(self.FNAME):
        self.fileType = "FT_LNK"
    elif os.path.exists(self.FNAME) and stat.S_ISFIFO(os.stat(self.FNAME).st_mode):
        self.fileType = "FT_FIFO"
    else:
        self.fileType = "FT_REG"

    if self.fileType != "FT_REG":
        bareosfd.DebugMessage(
            100,
            "Did not open file %s of type %s\n" % (self.FNAME, self.fileType),
        )
        return bRC_OK

    bareosfd.DebugMessage(
        150,
        "file %s has type %s - trying to open it\n"
        % (self.FNAME, self.fileType),
    )
    try:
        if IOP.flags & (os.O_CREAT | os.O_WRONLY):
            bareosfd.DebugMessage(
                100,
                "Open file %s for writing with %s\n" % (self.FNAME, IOP),
            )
            dirname = os.path.dirname(self.FNAME)
            if not os.path.exists(dirname):
                bareosfd.DebugMessage(
                    100,
                    "Directory %s does not exist, creating it now\n" % (dirname),
                )
                os.makedirs(dirname)
            self.file = open(self.FNAME, "wb")
        else:
            bareosfd.DebugMessage(
                100,
                "Open file %s for reading with %s\n" % (self.FNAME, IOP),
            )
            self.file = open(self.FNAME, "rb")
    except (OSError, IOError):
        # Narrowed from a bare except: only filesystem errors are expected.
        IOP.status = -1
        return bRC_Error
    return bRC_OK
def parse_plugin_definition(self, context, plugindef):
    '''
    Parses the plugin arguments and reads the S3 configuration from the
    file given by argument 'config'.

    Requires the options 'config' and 'bucket'; 'prefix' and 'pattern'
    are optional. Initializes the S3 connection and, when a pattern is
    given, pre-computes the prefix list and file iterator.

    Returns bRC_OK on success, bRC_Error otherwise.
    '''
    super(BareosFdPluginS3, self).parse_plugin_definition(
        context, plugindef)
    if ('config' not in self.options):
        bareosfd.DebugMessage(context, 100, "Option \'config\' not defined.\n")
        return bRCs['bRC_Error']
    if ('bucket' not in self.options):
        bareosfd.DebugMessage(context, 100, "Option \'bucket\' not defined.\n")
        return bRCs['bRC_Error']
    bareosfd.DebugMessage(context, 100,
                          "Using %s to look up plugin config\n"
                          % (self.options['config']))
    if os.path.exists(self.options['config']):
        try:
            cfg = Config(self.options['config'])
        except Exception:
            # Narrowed from a bare except; config parsing errors are
            # reported and the job fails.
            bareosfd.DebugMessage(context, 100,
                                  "Could not open file %s\n"
                                  % (self.options['config']))
            return bRCs['bRC_Error']
    else:
        bareosfd.DebugMessage(context, 100,
                              "File %s does not exist\n"
                              % (self.options['config']))
        return bRCs['bRC_Error']
    # Optional options get explicit defaults.
    if ('prefix' not in self.options):
        self.options['prefix'] = None
    if ('pattern' in self.options):
        self.pattern = re.compile(self.options['pattern'])
    else:
        self.pattern = None
    self.files_to_backup = []
    self.s3 = S3(cfg)
    self.prefix_list = [None]
    self.file_iterator = {}
    self.file_iterator['uri_params'] = None
    # With a pattern we can already enumerate matching objects now.
    if self.pattern:
        self.make_prefix_list()
        self.iterate_files()
    return bRCs['bRC_OK']
def prepare_backup(self, context, options):
    """
    Prepare a LDAP backup.

    Connects/binds to the LDAP server, then starts an asynchronous
    subtree search below options['basedn'] whose message id is kept in
    self.msg_id for later result retrieval.

    Returns bRC_OK on success, otherwise the failing connect result or
    bRC_Error when the search could not be started.
    """
    connect_bRC = self.connect_and_bind(context, options, True)
    if connect_bRC != bRCs["bRC_OK"]:
        return connect_bRC

    bareosfd.DebugMessage(
        context,
        100,
        "Creating search filter and attribute filter to perform LDAP search\n",
    )

    # Get a subtree
    searchScope = ldap.SCOPE_SUBTREE

    # See if there is a specific search filter otherwise use the all object filter.
    if "search_filter" in options:
        searchFilter = options["search_filter"]
    else:
        searchFilter = r"(objectclass=*)"

    # Get all user attributes and createTimestamp + modifyTimestamp
    attributeFilter = ["*", "createTimestamp", "modifyTimestamp"]
    try:
        # Asynchronous search method
        self.msg_id = self.ld.search(options["basedn"], searchScope,
                                     searchFilter, attributeFilter)
    except ldap.LDAPError as e:
        # NOTE(review): e.message is Python-2 era exception handling;
        # the second branch covers exceptions without a dict message.
        if type(e.message) == dict and "desc" in e.message:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_FATAL"],
                "Failed to execute LDAP search on LDAP uri %s: %s\n"
                % (options["uri"], e.message["desc"]),
            )
        else:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_FATAL"],
                "Failed to execute LDAP search on LDAP uri %s: %s\n"
                % (options["uri"], e),
            )
        return bRCs["bRC_Error"]

    bareosfd.DebugMessage(context, 100, "Successfully performed LDAP search\n")
    return bRCs["bRC_OK"]
def end_restore_job(self, ):
    '''
    The cleanup after a restore job depends on which kind of restore it
    was. For full container restore we need to unmount the images.

    NOTE(review): if self.restore is neither 'file' nor 'ct' this method
    implicitly returns None instead of a bRC code -- confirm whether
    other restore modes can occur.
    '''
    bareosfd.DebugMessage(100, "ENTERING end_restore_job()\n")
    if self.restore == 'file':
        # File-level restore needs no unmounting.
        bareosfd.DebugMessage(
            100, "LEAVING end_restore_job because self.restore is 'file'\n")
        return bareosfd.bRC_OK
    elif self.restore == 'ct':
        # Full container restore: delegate cleanup (image unmount etc.).
        bareosfd.DebugMessage(100, "Calling: self.end_restore_ct_job()")
        return self.end_restore_ct_job()
def plugin_io(self, context, IOP):
    '''
    Called for io operations. We read from pipe into buffers or on
    restore create a file for each database and write into it.

    Dispatches on IOP.func (open/read/write/close/seek) and returns a
    bRC code; unsupported operations are logged and treated as OK.
    '''
    bareosfd.DebugMessage(context, 100,
                          "plugin_io called with " + str(IOP.func) + "\n")

    if IOP.func == bIOPS['IO_OPEN']:
        try:
            # Only restore needs a real file; backup reads from the pipe.
            if IOP.flags & (os.O_CREAT | os.O_WRONLY):
                self.file = open(IOP.fname, 'wb')
        except Exception as msg:
            IOP.status = -1
            bareosfd.DebugMessage(
                context, 100,
                "Error opening file: " + IOP.fname + "\n")
            return bRCs['bRC_Error']
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_READ']:
        IOP.buf = bytearray(IOP.count)
        IOP.status = self.stream.stdout.readinto(IOP.buf)
        IOP.io_errno = 0
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_WRITE']:
        try:
            self.file.write(IOP.buf)
            IOP.status = IOP.count
            IOP.io_errno = 0
        except IOError as msg:
            IOP.io_errno = -1
            # str(msg): concatenating the exception object itself raised a
            # TypeError and masked the real error message.
            bareosfd.DebugMessage(context, 100,
                                  "Error writing data: " + str(msg) + "\n")
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_CLOSE']:
        if self.file:
            self.file.close()
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_SEEK']:
        return bRCs['bRC_OK']

    else:
        bareosfd.DebugMessage(
            context, 100,
            "plugin_io called with unsupported IOP:" + str(IOP.func) + "\n")
        return bRCs['bRC_OK']
def plugin_io(self, context, IOP):
    """
    Handle file I/O requests for local files.

    Dispatches on IOP.func: IO_OPEN opens self.FNAME for reading or
    writing (creating missing directories on restore), IO_READ/IO_WRITE
    transfer data through IOP.buf, IO_CLOSE closes the handle.

    Returns a bRC code; NOTE(review): an unknown IOP.func falls through
    and returns None -- confirm the core never sends other values.
    """
    bareosfd.DebugMessage(
        context, 100,
        "plugin_io called with function %s\n" % (IOP.func))
    bareosfd.DebugMessage(
        context, 100,
        "FNAME is set to %s\n" % (self.FNAME))

    if IOP.func == bIOPS['IO_OPEN']:
        self.FNAME = IOP.fname
        try:
            if IOP.flags & (os.O_CREAT | os.O_WRONLY):
                bareosfd.DebugMessage(
                    context, 100,
                    "Open file %s for writing with %s\n" % (self.FNAME, IOP))
                dirname = os.path.dirname(self.FNAME)
                if not os.path.exists(dirname):
                    bareosfd.DebugMessage(
                        context, 100,
                        "Directory %s does not exist, creating it now\n"
                        % (dirname))
                    os.makedirs(dirname)
                self.file = open(self.FNAME, 'wb')
            else:
                bareosfd.DebugMessage(
                    context, 100,
                    "Open file %s for reading with %s\n" % (self.FNAME, IOP))
                self.file = open(self.FNAME, 'rb')
        except Exception:
            # Narrowed from a bare except; any open/mkdir failure fails
            # the operation without killing the plugin process.
            IOP.status = -1
            return bRCs['bRC_Error']
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_CLOSE']:
        bareosfd.DebugMessage(context, 100, "Closing file " + "\n")
        self.file.close()
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_SEEK']:
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_READ']:
        bareosfd.DebugMessage(
            context, 200,
            "Reading %d from file %s\n" % (IOP.count, self.FNAME))
        IOP.buf = bytearray(IOP.count)
        IOP.status = self.file.readinto(IOP.buf)
        IOP.io_errno = 0
        return bRCs['bRC_OK']

    elif IOP.func == bIOPS['IO_WRITE']:
        bareosfd.DebugMessage(
            context, 200,
            "Writing buffer to file %s\n" % (self.FNAME))
        self.file.write(IOP.buf)
        IOP.status = IOP.count
        IOP.io_errno = 0
        return bRCs['bRC_OK']
def end_backup_file(self, context):
    '''
    Check, if dump was successful.

    Waits for the dump subprocess to finish, reports a non-zero exit
    code as a job error, and returns bRC_More while databases remain to
    be dumped, bRC_OK/bRC_Error otherwise.
    '''
    # Usually the pgsqldump process should have terminated here, but on
    # some servers it has not always.
    self.stream.wait()
    returnCode = self.stream.poll()
    if returnCode is None:
        # Identity check is the idiomatic None test (was '== None').
        bareosfd.JobMessage(
            context, bJobMessageType['M_ERROR'],
            "Dump command not finished properly for unknown reason")
        returnCode = -99
    else:
        bareosfd.DebugMessage(
            context, 100,
            "end_backup_file() entry point in Python called. "
            "Returncode: %d\n" % self.stream.returncode)
        if returnCode != 0:
            (stdOut, stdError) = self.stream.communicate()
            if stdError is None:
                stdError = ''
            bareosfd.JobMessage(
                context, bJobMessageType['M_ERROR'],
                "Dump command returned non-zero value: %d, message: %s\n"
                % (returnCode, stdError))
    if self.databases:
        # More databases pending: tell the core to call us again.
        return bRCs['bRC_More']
    else:
        if returnCode == 0:
            return bRCs['bRC_OK']
        else:
            return bRCs['bRC_Error']
def plugin_io(self, context, IOP):
    """
    Serve I/O requests from the in-memory LDIF buffer.

    There is no real file handle: open/close/seek are no-ops, reads
    drain self.ldap.ldif, writes store the buffer for later processing.
    """
    bareosfd.DebugMessage(
        context,
        100,
        "BareosFdPluginLDAP:plugin_io() called with function %s\n" % (IOP.func),
    )
    func = IOP.func
    if func in (bIOPS["IO_OPEN"], bIOPS["IO_CLOSE"], bIOPS["IO_SEEK"]):
        return bRCs["bRC_OK"]
    if func == bIOPS["IO_READ"]:
        if self.ldap.ldif:
            # Hand the whole LDIF over in one read, then clear it.
            IOP.buf = bytearray(self.ldap.ldif)
            IOP.status = self.ldap.ldif_len
            self.ldap.ldif = None
        else:
            IOP.status = 0
        IOP.io_errno = 0
        return bRCs["bRC_OK"]
    if func == bIOPS["IO_WRITE"]:
        self.ldap.ldif = str(IOP.buf)
        self.ldap.ldif_len = IOP.count
        IOP.status = IOP.count
        IOP.io_errno = 0
        return bRCs["bRC_OK"]
def create_file(self, context, restorepkt):
    """
    Directories are placeholders only; we use the data.ldif files to
    get the actual DN of the LDAP record.

    Returns bRC_OK (with create_status set) or bRC_Error for illegal
    file types.
    """
    bareosfd.DebugMessage(
        context,
        100,
        # Fixed copy-paste: this is the LDAP plugin, not VMware.
        "BareosFdPluginLDAP:create_file() called with %s\n" % (restorepkt),
    )
    if restorepkt.type == bFileType["FT_DIREND"]:
        restorepkt.create_status = bCFs["CF_SKIP"]
    elif restorepkt.type == bFileType["FT_REG"]:
        self.ldap.set_new_dn(restorepkt.ofname)
        restorepkt.create_status = bCFs["CF_EXTRACT"]
    else:
        bareosfd.JobMessage(
            context,
            bJobMessageType["M_FATAL"],
            "Request to restore illegal filetype %s\n" % (restorepkt.type),
        )
        return bRCs["bRC_Error"]
    return bRCs["bRC_OK"]
def handle_backup_file(self, savepkt):
    """Log the savepkt and accept it unchanged."""
    message = "handle_backup_file() entry point in Python called with %s\n"
    bareosfd.DebugMessage(100, message % (savepkt))
    return bRC_OK
def end_backup_job(self, ):
    '''
    Finish the backup: Umount snapshot and merge it.

    Best-effort cleanup: unmount/merge failures are reported as job
    warnings only, and the lock (if held) is always released.
    '''
    bareosfd.DebugMessage(
        100, "end_backup_job() entry point in Python called\n")
    if self.mounted:
        try:
            subprocess.check_output([
                '/usr/sbin/ploop', 'umount', '-c', self.base_image['fname'],
                '-m', self.job_mount_point, self.disk_descriptor
            ], universal_newlines=True)
        except subprocess.CalledProcessError:
            bareosfd.JobMessage(
                bareosfd.M_WARNING,
                "Cannot unmount base image '{}'\n".format(
                    self.base_image['fname']))
    if self.snapshot_created:
        try:
            # we delete the base_snapshot which results in merging and
            # deleting the delta file
            # NOTE(review): 'ploop' is called without the absolute path
            # used above -- confirm this is intentional.
            subprocess.check_call([
                'ploop', 'snapshot-delete', '-u', self.snapshot_uuid,
                self.disk_descriptor
            ])
        except subprocess.CalledProcessError:
            bareosfd.JobMessage(
                bareosfd.M_WARNING,
                "Cannot merge snapshot for CT '{}'\n".format(
                    self.options['name']))
    if self.lock:
        self.lock.release()
    return bareosfd.bRC_OK
def __init__(self, plugindef):
    """
    Initialize the Vz7 container-filesystem plugin state and register
    for the end-of-restore-job event.
    """
    bareosfd.DebugMessage(
        100,
        "Constructor called in module %s with plugindef=%s\n"
        % (__name__, plugindef))
    events = []
    events.append(bareosfd.bEventEndRestoreJob)
    bareosfd.RegisterEvents(events)
    # Last argument of super constructor is a list of mandatory arguments.
    # Using mandatory options in the Python constructor is not working
    # because options are merged inside plugin_option_parser and not on
    # director side.
    super(BareosFdPluginVz7CtFs, self).__init__(plugindef)
    self.files = []  # Filled during start_backup_job
    self.cnf_default_excludes = []
    self.excluded_backup_paths = []
    self.prepared = False
    self.disk_descriptor = ""
    self.config_path = ""
    self.fs_path = ""
    self.mount_basedir = "/mnt/bareos"
    self.job_mount_point = ""
    # Dict placeholder: end_backup_job() reads self.base_image['fname'],
    # so an empty dict (not a list) is the consistent initial value.
    self.base_image = {}
    self.verbose = False
    self.snapshot_uuid = ""
    self.snapshot_created = False
    self.blocker = ""
    self.mounted = False
    # default is the restore of a whole container
    self.restore = "ct"
def get_cts(self, name=None, uuid=None):
    '''
    Returns a list of hashes containing found container: name,uuid,status
    One may call it with optional parameter "pattern=name" or pattern="uuid"

    On any prlctl failure an empty list is returned.
    '''
    ct_list = []
    # Either name or uuid may serve as the filter pattern; empty matches all.
    if name:
        pattern = name
    elif uuid:
        pattern = uuid
    else:
        pattern = ""
    try:
        ct_list = subprocess.check_output([
            '/usr/bin/prlctl', 'list', '-o', 'name,uuid,status',
            '--vmtype', 'ct', '--all', pattern
        ], universal_newlines=True)
    except subprocess.CalledProcessError:
        return []
    cts = ct_list.split("\n")
    # Drop the trailing empty line and the header row.
    # NOTE(review): assumes prlctl output always ends with a newline and
    # starts with a header -- confirm for the deployed prlctl version.
    cts.pop(-1)
    cts.pop(0)
    ct_list = []
    for record in cts:
        # Each record is whitespace-separated: name, uuid, status.
        cname, cuuid, status = record.split()
        ct_list.append({'uuid': cuuid, 'status': status, 'name': cname})
    bareosfd.DebugMessage(
        100, "Function get_cts() returns {} \n".format(str(ct_list)))
    return ct_list
def start_backup_file(self, context, savepkt):
    """
    Base method, we do not add anything, overload this method with your
    implementation to add files to backup fileset
    """
    debug_text = "start_backup called\n"
    bareosfd.DebugMessage(context, 100, debug_text)
    return bRCs["bRC_Skip"]
def start_backup_file(self, savepkt):
    """
    Defines the file to backup and creates the savepkt.

    Delegates to the LDAP helper, which fills the savepkt.
    """
    debug_text = "BareosFdPluginLDAP:start_backup_file() called\n"
    bareosfd.DebugMessage(100, debug_text)
    return self.ldap.get_next_file_to_backup(savepkt)
def get_vm_disk_cbt(self, context):
    '''
    Get CBT Information.

    For Diff/Inc backups the changeId saved in the restore object of the
    previous backup is used; otherwise '*' requests all allocated areas.
    Stores the result in self.changed_disk_areas and serializes it via
    cbt2json(). Returns True on success, False on error.
    '''
    cbt_changeId = '*'
    if self.disk_device_to_backup[
            'fileNameRoot'] in self.restore_objects_by_diskpath:
        # Exactly one previous CBT restore object per disk is expected.
        if len(self.restore_objects_by_diskpath[
                self.disk_device_to_backup['fileNameRoot']]) > 1:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "ERROR: more then one CBT info for Diff/Inc exists\n")
            return False

        cbt_changeId = self.restore_objects_by_diskpath[
            self.disk_device_to_backup['fileNameRoot']][0]['data'][
                'DiskParams']['changeId']
        bareosfd.DebugMessage(
            context, 100,
            "get_vm_disk_cbt(): using changeId %s from restore object\n"
            % (cbt_changeId))
    # Query vSphere for the areas changed since cbt_changeId.
    self.changed_disk_areas = self.vm.QueryChangedDiskAreas(
        snapshot=self.create_snap_result,
        deviceKey=self.disk_device_to_backup['deviceKey'],
        startOffset=0,
        changeId=cbt_changeId)
    self.cbt2json(context)

    return True
def checkForWalFiles(self):
    """
    Look for new WAL files and back them up.

    Backup start time is timezone aware, so we add the local timezone
    offset to each file's mtime to make them comparable. WAL files newer
    than the backup start time are appended to self.files_to_backup.

    Returns bRC_More while files remain, bRC_OK otherwise.
    """
    walArchive = self.options["walArchive"]
    self.files_to_backup.append(walArchive)
    # Hoisted out of the loop: the offset is the same for every file.
    localTz = dateutil.tz.tzoffset(None, self.tzOffset)
    for fileName in os.listdir(walArchive):
        fullPath = os.path.join(walArchive, fileName)
        try:
            st = os.stat(fullPath)
        except Exception as e:
            # Fixed message typo: "net" -> "not".
            bareosfd.JobMessage(
                bareosfd.M_ERROR,
                "Could not get stat-info for file %s: %s\n" % (fullPath, e),
            )
            continue
        fileMtime = datetime.datetime.fromtimestamp(st.st_mtime)
        if fileMtime.replace(tzinfo=localTz) > self.backupStartTime:
            bareosfd.DebugMessage(
                150, "Adding WAL file %s for backup\n" % fileName)
            self.files_to_backup.append(fullPath)
    if self.files_to_backup:
        return bareosfd.bRC_More
    else:
        return bareosfd.bRC_OK
def start_restore_file(self, cmd):
    """Log the restore command and report success; nothing else to do."""
    message = "start_restore_file() entry point in Python called with %s\n"
    bareosfd.DebugMessage(100, message % (cmd))
    return bRC_OK
def __init__(self, plugindef, mandatory_options=None):
    """Log construction and delegate to the base fileset plugin."""
    message = "Constructor called in module %s with plugindef=%s\n"
    bareosfd.DebugMessage(100, message % (__name__, plugindef))
    super(BareosFdPluginLocalFileset, self).__init__(
        plugindef, mandatory_options)
def check_file(self, context, fname):
    """Accept every file; log the name for debugging."""
    message = "check_file() entry point in Python called with %s\n"
    bareosfd.DebugMessage(context, 100, message % (fname))
    return bRCs["bRC_OK"]
def set_file_attributes(self, restorepkt):
    """Log the restorepkt and report success; attributes are left to the core."""
    message = "set_file_attributes() entry point in Python called with %s\n"
    bareosfd.DebugMessage(100, message % (str(restorepkt)))
    return bRC_OK
def filename_is_allowed(self, context, filename, allowregex, denyregex):
    """
    Check, if filename is allowed.
    True, if matches allowregex and not denyregex.
    If allowregex is None, filename always matches.
    If denyregex is None, it never matches.
    """
    allowed = allowregex is None or bool(allowregex.search(filename))
    denied = not (denyregex is None or not denyregex.search(filename))
    if allowed and not denied:
        return True
    # Rejected: report both as debug and as a job error.
    bareosfd.DebugMessage(
        context, 100, "File %s denied by configuration\n" % (filename)
    )
    bareosfd.JobMessage(
        context,
        bJobMessageType["M_ERROR"],
        "File %s denied by configuration\n" % (filename),
    )
    return False
def set_file_attributes(self, context, restorepkt):
    """
    Restore owner, mode and timestamps from restorepkt.statp.

    Failures are downgraded to job warnings so the restore continues.
    Returns bRC_OK in all cases.
    """
    # Python attribute setting does not work properly with links
    if restorepkt.type == bFileType["FT_LNK"]:
        return bRCs["bRC_OK"]
    file_name = restorepkt.ofname
    file_attr = restorepkt.statp
    bareosfd.DebugMessage(
        context,
        150,
        "Set file attributes " + file_name + " with stat "
        + str(file_attr) + "\n",
    )
    try:
        os.chown(file_name, file_attr.uid, file_attr.gid)
        os.chmod(file_name, file_attr.mode)
        os.utime(file_name, (file_attr.atime, file_attr.mtime))
    except Exception as e:
        # Fixed NameError: the message referenced the undefined
        # 'file_to_backup' (and Python-2-only e.message).
        bareosfd.JobMessage(
            context,
            bJobMessageType["M_WARNING"],
            "Could not set attributes for file %s: \"%s\"" % (file_name, e),
        )
    return bRCs["bRC_OK"]