def test_StatPacket(self):
    """Verify StatPacket default timestamps and its string representation."""
    now = time.time()
    packet = bareosfd.StatPacket()
    # A freshly created packet must carry the current time in all three
    # timestamp fields (allow one second of slack for slow test hosts).
    for attr in ("st_atime", "st_mtime", "st_ctime"):
        self.assertAlmostEqual(getattr(packet, attr), now, delta=1)
    # Pin the timestamps so the repr comparison below is deterministic.
    packet.st_atime = 999
    packet.st_mtime = 1000
    packet.st_ctime = 1001
    self.assertEqual(
        "StatPacket(dev=0, ino=0, mode=0700, nlink=0, uid=0, gid=0, rdev=0, size=-1, atime=999, mtime=1000, ctime=1001, blksize=4096, blocks=1)",
        str(packet),
    )
    # Keyword construction must round-trip through str() as well.
    sp2 = bareosfd.StatPacket(
        dev=0, ino=0, mode=0o0700, nlink=0, uid=0, gid=0, rdev=0,
        size=-1, atime=1, mtime=1, ctime=1, blksize=4096, blocks=1,
    )
    self.assertEqual(
        'StatPacket(dev=0, ino=0, mode=0700, nlink=0, uid=0, gid=0, rdev=0, size=-1, atime=1, mtime=1, ctime=1, blksize=4096, blocks=1)',
        str(sp2),
    )
def start_backup_file(self, context, savepkt):
    '''
    Called when Bareos is ready to start the backup of a file.

    Pops the next (bucket, key) tuple, fills savepkt with a StatPacket
    built from the key's size and mtime, and starts a chunked download
    whose stream is consumed later by the plugin's read path.
    Returns bRC_Skip when no buckets are left, bRC_OK otherwise.
    '''
    bareosfd.DebugMessage(context, 100, "start_backup_called\n".replace("_", " "))
    bareosfd.DebugMessage(context, 100, "start_backup called\n")
    if not self.bucket_key_tuple:
        # BUGFIX: was 'baoreosfd.DebugMessage(...)' -- a misspelled module
        # name that raised NameError instead of reporting "no buckets".
        bareosfd.DebugMessage(context, 100, "No buckets to backup")
        bareosfd.JobMessage(context, bJobMessageType['M_ERROR'],
                            "No buckets to backup.\n")
        return bRCs['bRC_Skip']
    bucketname, keyname = self.bucket_key_tuple.pop()
    keysize = get_key_size(self.conn, bucketname, keyname)
    keymtime = get_key_mtime(self.conn, bucketname, keyname)
    statp = bareosfd.StatPacket()
    if not keysize == "NULL\n":
        try:
            statp.size = int(keysize)
        except ValueError:
            # Size could not be parsed; keep the StatPacket default.
            pass
    statp.atime = 0
    statp.ctime = 0
    statp.mtime = keymtime
    savepkt.statp = statp
    savepkt.fname = "/{bname}/{kname}".format(bname=bucketname, kname=keyname)
    savepkt.type = bFileType['FT_REG']
    bareosfd.DebugMessage(
        context, 100,
        "Attempting to download key: '" + bucketname + "/" + keyname + "\n")
    self.stream = chunky_download(self.conn, bucketname, keyname)
    bareosfd.JobMessage(context, bJobMessageType['M_INFO'],
                        "Starting backup of " + savepkt.fname + "\n")
    return bRCs['bRC_OK']
def start_backup_file(self, context, savepkt):
    '''
    Prepare savepkt for the next entry of the backup list.

    In this example only files (no directories) are allowed.
    '''
    bareosfd.DebugMessage(context, 100, "start_backup called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(context, 100, "No files to backup\n")
        return bRCs['bRC_Skip']

    # Consume the list from the front so the original order is kept.
    current = self.files_to_backup.pop(0)
    bareosfd.DebugMessage(context, 100, 'file: ' + current['name'] + "\n")

    # Stat information comes from the pre-collected file metadata.
    statp = bareosfd.StatPacket()
    statp.mtime = current['timestamp']
    statp.ctime = current['timestamp']
    statp.size = current['size']
    savepkt.statp = statp
    savepkt.fname = str(current['name'])
    savepkt.type = bFileType['FT_REG']
    return bRCs['bRC_OK']
def start_backup_file(self, context, savepkt):
    """
    Defines the file to backup and creates the savepkt.
    In this example only files (no directories) are allowed
    """
    bareosfd.DebugMessage(context, 100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(context, 100, "No files to backup\n")
        return bRCs["bRC_Skip"]

    fname = self.files_to_backup.pop()
    bareosfd.DebugMessage(context, 100, "file: " + fname + "\n")
    savepkt.statp = bareosfd.StatPacket()
    stem = os.path.splitext(fname)[0]

    def publish_restore_object(payload):
        # Shared setup for every FT_RESTORE_FIRST variant below.
        savepkt.type = bFileType["FT_RESTORE_FIRST"]
        savepkt.fname = fname
        savepkt.object_name = fname
        savepkt.object = payload
        savepkt.object_len = len(payload)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1

    if fname.endswith(".sha256sum"):
        publish_restore_object(bytearray(self.get_sha256sum(context, stem)))
    elif fname.endswith(".abspath"):
        publish_restore_object(bytearray(stem))
    elif fname.endswith(".longrestoreobject"):
        publish_restore_object(bytearray("a" * int(stem)))
    else:
        savepkt.fname = fname
        savepkt.type = bFileType["FT_REG"]

    bareosfd.JobMessage(
        context,
        bJobMessageType["M_INFO"],
        "Starting backup of %s\n" % (fname),
    )
    return bRCs["bRC_OK"]
def start_backup_file(self, savepkt):
    """
    Defines the file to backup and creates the savepkt.
    In this example only files (no directories) are allowed
    """
    bareosfd.DebugMessage(100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(100, "No files to backup\n")
        return bRC_Skip

    fname = self.files_to_backup.pop()
    savepkt.statp = bareosfd.StatPacket()
    stem = os.path.splitext(fname)[0]

    def publish_restore_object(payload):
        # Shared setup for every FT_RESTORE_FIRST variant below.
        savepkt.type = FT_RESTORE_FIRST
        savepkt.fname = fname
        savepkt.object_name = fname
        savepkt.object = payload
        savepkt.object_len = len(payload)
        savepkt.object_index = self.object_index_seq
        self.object_index_seq += 1

    if fname.endswith(".sha256sum"):
        publish_restore_object(
            bytearray(self.get_sha256sum(stem).encode("utf-8")))
    elif fname.endswith(".abspath"):
        publish_restore_object(bytearray(stem.encode("utf-8")))
    elif fname.endswith(".longrestoreobject"):
        publish_restore_object(bytearray(b"a" * int(stem)))
    else:
        savepkt.fname = fname
        savepkt.type = FT_REG

    bareosfd.JobMessage(
        M_INFO,
        "Starting backup of {}\n".format(fname),
    )
    return bRC_OK
def start_backup_file(self, context, savepkt):
    # Pull the next job from the shared multiprocessing queue; spin with a
    # short sleep while the queue is momentarily empty (the prefetcher
    # processes may still be filling it).
    try:
        while True:
            try:
                self.job = self.plugin_todo_queue.get_nowait()
                break
            except:  # presumably Queue.Empty -- TODO confirm and narrow
                size = self.plugin_todo_queue.qsize()
                log('start_backup_file: queue is near empty : %s' % (size, ))
                time.sleep(0.1)
    except TypeError:
        # NOTE(review): looks like a sentinel placed on the queue triggers
        # this TypeError to signal end-of-queue -- verify against producer.
        self.job = None
    if self.job is None:
        # End of work: join all helper processes, then hand Bareos a dummy
        # savepkt and tell it to skip.
        log('End of queue found, backup is completed')
        for i in self.prefetchers:
            log('join() for a prefetcher (pid %s)' % (i.pid, ))
            i.join()
        log('Ok, all prefetchers are dead')
        try:
            self.manager.shutdown()
        except OSError:
            # manager already dead, somehow ?!
            pass
        log('self.manager.shutdown()')
        log('Join() for the writer (pid %s)' % (self.writer.pid, ))
        self.writer.join()
        log('writer is dead')
        # savepkt is always checked, so we fill it with a dummy value
        savepkt.fname = 'empty'
        return bRCs['bRC_Skip']
    # Announce the object as "<bucket>/<name>" with size/mtime taken from
    # the job description.
    filename = '%s/%s' % (self.job['bucket'], self.job['name'])
    log('Backuping %s' % (filename, ))
    statp = bareosfd.StatPacket()
    statp.size = self.job['size']
    statp.mtime = self.job['mtime']
    statp.atime = 0
    statp.ctime = 0
    savepkt.statp = statp
    savepkt.fname = filename
    savepkt.type = bareos_fd_consts.bFileType['FT_REG']
    return bRCs['bRC_OK']
def start_backup_file(self, savepkt):
    '''
    Defines the file to backup and creates the savepkt.

    Handles symlinks, directories and regular files; anything else is
    announced as FT_DELETED. Returns bRC_Skip when there is nothing to
    back up or the path cannot be stat'ed, bRC_OK otherwise.
    '''
    if not self.files:
        bareosfd.DebugMessage(100, "No files to backup\n")
        # BUGFIX: was "return bareosfd.bRCs['bareosfd.bRC_Skip']" -- the
        # rest of this method uses the module-constant API
        # (bareosfd.bRC_OK, bareosfd.FT_*), and no bRCs dict ever had a
        # key with the module prefix, so this line raised instead of
        # skipping. Same fix in the OSError branch below.
        return bareosfd.bRC_Skip
    # reading file list from beginning to ensure dirs are created before files
    path_to_backup = self.files.pop(0)
    possible_link_to_dir = path_to_backup.rstrip('/')
    try:
        osstat = os.lstat(path_to_backup)
    except OSError:
        bareosfd.JobMessage(
            bareosfd.M_ERROR,
            "Cannot backup file '{}'\n".format(path_to_backup))
        return bareosfd.bRC_Skip
    if os.path.islink(possible_link_to_dir):
        savepkt.type = bareosfd.FT_LNK
        savepkt.link = os.readlink(possible_link_to_dir)
        savepkt.no_read = True
        savepkt.fname = possible_link_to_dir
    elif os.path.isdir(path_to_backup):
        # do not try to read a directory as a file
        savepkt.type = bareosfd.FT_DIREND
        savepkt.no_read = True
        savepkt.link = os.path.join(path_to_backup, "")
        savepkt.fname = path_to_backup
    elif stat.S_ISREG(osstat.st_mode):
        savepkt.type = bareosfd.FT_REG
        savepkt.fname = path_to_backup
    else:
        # Sockets, FIFOs etc. are reported as deleted (not backed up).
        savepkt.type = bareosfd.FT_DELETED
        savepkt.no_read = True
        savepkt.fname = path_to_backup
    statpacket = bareosfd.StatPacket()
    statpacket.st_mode = osstat.st_mode
    statpacket.st_uid = osstat.st_uid
    statpacket.st_gid = osstat.st_gid
    statpacket.st_atime = osstat.st_atime
    statpacket.st_mtime = osstat.st_mtime
    statpacket.st_ctime = osstat.st_ctime
    savepkt.statp = statpacket
    return bareosfd.bRC_OK
def start_backup_file(self, context, savepkt):
    '''
    Called when Bareos is ready to start the backup of a file.

    Pops the next database name, asks PostgreSQL for its size (to
    pre-fill the stat packet) and spawns a dump subprocess whose stdout
    pipe is consumed later by the plugin's read path.
    '''
    bareosfd.DebugMessage(context, 100, "start_backup called\n")
    if not self.databases:
        bareosfd.DebugMessage(context, 100, "No databases to backup")
        bareosfd.JobMessage(context, bJobMessageType['M_ERROR'],
                            "No databases to backup.\n")
        return bRCs['bRC_Skip']
    db = self.databases.pop()
    # NOTE(review): the database name is interpolated into a shell command
    # (shell=True); names are assumed to come from trusted configuration
    # only -- confirm.
    sizeDbCommand = 'sudo -u postgres /usr/bin/psql -At -c "select pg_database_size(\'{database_name}\')"'.format(
        database_name=db)
    sizeDb = Popen(sizeDbCommand, shell=True, stdout=PIPE, stderr=PIPE)
    size_curr_db = sizeDb.stdout.read()
    sizeDb.wait()
    sizereturnCode = sizeDb.poll()
    statp = bareosfd.StatPacket()
    # presumably psql prints "NULL\n" for a missing database; note that on
    # Python 3 stdout.read() returns bytes, so this str comparison would
    # always be True -- TODO confirm the targeted Python version.
    if not size_curr_db == "NULL\n":
        try:
            statp.size = int(size_curr_db)
        except ValueError:
            # Size output was not numeric; keep the StatPacket defaults.
            pass
    savepkt.statp = statp
    savepkt.fname = "/_pgsqlbackups_/" + db + ".sql"
    savepkt.type = bFileType['FT_REG']
    dumpcommand = (
        "sudo -u postgres {dumpcmd} {connectopts} {dumpopts} {database}".
        format(dumpcmd=self.dumpbinary,
               connectopts=self.pgsqlconnect,
               database=db,
               dumpopts=self.dumpoptions))
    bareosfd.DebugMessage(context, 100, "Dumper: '" + dumpcommand + "'\n")
    self.stream = Popen(dumpcommand, shell=True, stdout=PIPE, stderr=PIPE)
    bareosfd.JobMessage(context, bJobMessageType['M_INFO'],
                        "Starting backup of " + savepkt.fname + "\n")
    return bRCs['bRC_OK']
def start_backup_file(self, context, savepkt):
    '''
    Defines the file to backup and creates the savepkt.
    In this example only files (no directories) are allowed
    '''
    bareosfd.DebugMessage(context, 100, "start_backup called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(context, 100, "No files to backup\n")
        return bRCs['bRC_Skip']

    current_file = self.files_to_backup.pop()
    bareosfd.DebugMessage(context, 100, "file: " + current_file + "\n")

    # A default StatPacket is sufficient here; the core fills in details.
    savepkt.statp = bareosfd.StatPacket()
    savepkt.fname = current_file
    savepkt.type = bFileType['FT_REG']
    bareosfd.JobMessage(context, bJobMessageType['M_INFO'],
                        "Starting backup of %s\n" % (current_file))
    return bRCs['bRC_OK']
def start_backup_file(self, savepkt):
    """
    For normal files we call the super method
    Special objects are treated here
    """
    if not self.files_to_backup:
        bareosfd.DebugMessage(100, "No files to backup\n")
        return bareosfd.bRC_Skip

    # Anything but the special "ROP" marker is a plain file and is
    # handled entirely by the base class.
    if self.files_to_backup[-1] != "ROP":
        return super(BareosFdPluginPostgres, self).start_backup_file(savepkt)

    self.file_to_backup = self.files_to_backup.pop()
    bareosfd.DebugMessage(100, "file: " + self.file_to_backup + "\n")
    savepkt.statp = bareosfd.StatPacket()
    if self.file_to_backup == "ROP":
        # Build a restore object carrying the plugin's metadata (stop time
        # and last LSN) so the next incremental knows where to resume.
        self.rop_data["lastBackupStopTime"] = self.lastBackupStopTime
        self.rop_data["lastLSN"] = self.lastLSN
        savepkt.fname = "/_bareos_postgres_plugin/metadata"
        savepkt.type = bareosfd.FT_RESTORE_FIRST
        savepkt.object_name = savepkt.fname
        bareosfd.DebugMessage(150, "fname: " + savepkt.fname + "\n")
        bareosfd.DebugMessage(150, "rop " + str(self.rop_data) + "\n")
        savepkt.object = bytearray(json.dumps(self.rop_data), "utf-8")
        savepkt.object_len = len(savepkt.object)
        savepkt.object_index = int(time.time())
    else:
        # should not happen
        bareosfd.JobMessage(
            bareosfd.M_FATAL,
            "Unknown error. Don't know how to handle %s\n" % self.file_to_backup,
        )
    return bareosfd.bRC_OK
def get_next_file_to_backup(self, savepkt):
    """
    Find out the next file that should be backed up.

    Alternates between two states: when self.file_to_backup is set, the
    pending LDAP entry is serialized to LDIF and announced as a regular
    file; otherwise the next DN is fetched from the LDAP result set and
    announced as a directory. Returns bRC_Error when the result set is
    exhausted or the base object is missing, bRC_OK otherwise.
    """
    # When file_to_backup is not None we should return the LDIF.
    if self.file_to_backup:
        # Remove some attributes from entry before creating the LDIF.
        ignore_attribute = ["createTimestamp", "modifyTimestamp"]
        # BUGFIX: iterate over a snapshot of the keys; deleting from the
        # dict while iterating its live keys() view raises RuntimeError
        # on Python 3.
        for value in list(self.entry.keys()):
            if value in ignore_attribute:
                del self.entry[value]

        # Dump the content of the LDAP entry as LDIF text.
        ldif_dump = StringIO()
        ldif_out = ldif.LDIFWriter(ldif_dump)
        try:
            ldif_out.unparse(self.dn, self.entry)
        except UnicodeDecodeError:
            # DN may arrive as bytes; retry with a decoded DN.
            ldif_out.unparse(self.dn.decode("utf-8"), self.entry)
        self.ldif = ldif_dump.getvalue()
        self.ldif_len = len(self.ldif)
        ldif_dump.close()

        statp = bareosfd.StatPacket()
        statp.st_mode = S_IRWXU | S_IFREG
        statp.st_size = self.ldif_len
        if self.unix_create_time:
            statp.st_ctime = self.unix_create_time
        if self.unix_modify_time:
            statp.st_mtime = self.unix_modify_time

        savepkt.statp = statp
        savepkt.type = bareosfd.bFileType["FT_REG"]
        savepkt.fname = self.file_to_backup + "/data.ldif"
        # Read the content of a file.
        savepkt.no_read = False

        # On next run we need to get next entry from result set.
        self.file_to_backup = None
    else:
        # If we have no result set get what the LDAP search returned as
        # resultset.
        if self.resultset is None:
            self.resultset = self.ld.allresults(self.msg_id)

        # Try to get the first result set from the query,
        # if there is nothing return an error.
        try:
            # BUGFIX: use the next() builtin; generator objects have no
            # .next() method on Python 3.
            res_type, res_data, res_msgid, res_controls = next(self.resultset)
            self.ldap_entries = res_data
        except ldap.NO_SUCH_OBJECT:
            return bareosfd.bRC_Error
        except StopIteration:
            return bareosfd.bRC_Error

        # Get the next entry from the result set.
        if self.ldap_entries:
            self.dn, self.entry = self.ldap_entries.pop(0)
            if self.dn:
                # Extract the createTimestamp and modifyTimestamp and
                # convert it to an UNIX timestamp.
                self.unix_create_time = None
                try:
                    createTimestamp = self.entry["createTimestamp"][0]
                except KeyError:
                    pass
                else:
                    self.unix_create_time = self.to_unix_timestamp(
                        createTimestamp)

                self.unix_modify_time = None
                try:
                    modifyTimestamp = self.entry["modifyTimestamp"][0]
                except KeyError:
                    pass
                else:
                    self.unix_modify_time = self.to_unix_timestamp(
                        modifyTimestamp)

                # Convert the DN into a PATH e.g. reverse the elements.
                dn_sliced = self.dn.split(",")
                self.file_to_backup = "@LDAP" + "".join(
                    ["/" + element for element in reversed(dn_sliced)])

                statp = bareosfd.StatPacket()
                statp.st_mode = S_IRWXU | S_IFDIR
                if self.unix_create_time:
                    statp.st_ctime = self.unix_create_time
                if self.unix_modify_time:
                    statp.st_mtime = self.unix_modify_time

                savepkt.statp = statp
                savepkt.type = bareosfd.bFileType["FT_DIREND"]
                savepkt.fname = self.file_to_backup
                # A directory has a link field which contains
                # the fname + a trailing '/'.
                savepkt.link = self.file_to_backup + "/"
                # Don't read the content of a directory.
                savepkt.no_read = True

                if "/" in self.dn:
                    bareosfd.JobMessage(
                        bareosfd.bJobMessageType["M_ERROR"],
                        "Slashes (/) in DN not supported. Skipping %s" % self.dn,
                    )
                    # Set to None, so the object will not be picked up.
                    self.file_to_backup = None

    return bareosfd.bRC_OK
def get_next_file_to_backup(self, context, savepkt):
    '''
    Determine the next LDAP object to back up and fill savepkt.

    Alternates between two states: when self.file_to_backup is set, the
    pending entry is serialized to LDIF and announced as a regular file;
    otherwise the next DN is fetched from the LDAP result set and
    announced as a directory.
    '''
    # When file_to_backup is not None we should return the LDIF.
    if self.file_to_backup:
        # Remove some attributes from entry before creating the LDIF.
        ignore_attribute = [
            'createTimestamp',
            'modifyTimestamp',
        ]
        # NOTE(review): deleting from the dict while iterating keys() is
        # only safe on Python 2 where keys() returns a list -- confirm
        # this module targets Python 2 only.
        keys = self.entry.keys()
        for value in keys:
            if value in ignore_attribute:
                del self.entry[value]
        # Dump the content of the LDAP entry as LDIF text
        ldif_dump = StringIO()
        ldif_out = ldif.LDIFWriter(ldif_dump)
        ldif_out.unparse(self.dn, self.entry)
        self.ldif = ldif_dump.getvalue()
        self.ldif_len = len(self.ldif)
        ldif_dump.close()
        statp = bareosfd.StatPacket()
        statp.mode = S_IRWXU | S_IFREG
        statp.size = self.ldif_len
        if self.unix_create_time:
            statp.ctime = self.unix_create_time
        if self.unix_modify_time:
            statp.mtime = self.unix_modify_time
        savepkt.statp = statp
        savepkt.type = bFileType['FT_REG']
        savepkt.fname = self.file_to_backup + "/data.ldif"
        # Read the content of a file
        savepkt.no_read = False
        # On next run we need to get next entry from result set.
        self.file_to_backup = None
    else:
        # If we have no result set get what the LDAP search returned as resultset.
        if self.resultset is None:
            self.resultset = self.ld.allresults(self.msg_id)
        # Try to get the first result set from the query,
        # if there is nothing return an error.
        try:
            res_type, res_data, res_msgid, res_controls = self.resultset.next(
            )
            self.ldap_entries = res_data
        except ldap.NO_SUCH_OBJECT:
            return bRCs['bRC_Error']
        except StopIteration:
            return bRCs['bRC_Error']
        # Get the next entry from the result set.
        if self.ldap_entries:
            self.dn, self.entry = self.ldap_entries.pop(0)
            if self.dn:
                # Extract the createTimestamp and modifyTimestamp and
                # convert it to an UNIX timestamp
                self.unix_create_time = None
                try:
                    createTimestamp = self.entry['createTimestamp'][0]
                except KeyError:
                    pass
                else:
                    self.unix_create_time = self.to_unix_timestamp(
                        context, createTimestamp)
                self.unix_modify_time = None
                try:
                    modifyTimestamp = self.entry['modifyTimestamp'][0]
                except KeyError:
                    pass
                else:
                    self.unix_modify_time = self.to_unix_timestamp(
                        context, modifyTimestamp)
                # Convert the DN into a PATH e.g. reverse the elements.
                dn_sliced = self.dn.split(',')
                self.file_to_backup = '@LDAP' + ''.join(
                    ['/' + element for element in reversed(dn_sliced)])
                statp = bareosfd.StatPacket()
                statp.mode = S_IRWXU | S_IFDIR
                if self.unix_create_time:
                    statp.ctime = self.unix_create_time
                if self.unix_modify_time:
                    statp.mtime = self.unix_modify_time
                savepkt.statp = statp
                savepkt.type = bFileType['FT_DIREND']
                savepkt.fname = self.file_to_backup
                # A directory has a link field which contains the fname + a trailing '/'
                savepkt.link = self.file_to_backup + '/'
                # Don't read the content of a directory
                savepkt.no_read = True
    return bRCs['bRC_OK']
def start_backup_file(self, savepkt):
    """
    Wait for the next backup task from the worker pool and fill savepkt.

    Polls the worker message queue until a task is available, a worker
    reports ERROR/ABORT, or all workers are done. Depending on the task
    type the file content is served from memory, a temporary file, or a
    download stream.
    """
    error = False
    # Poll until we either get a task or the workers are finished/failed.
    while self.active:
        worker_result = self.api.check_worker_messages()
        if worker_result == ERROR:
            if self.options["treat_download_errors_as_warnings"]:
                pass
            else:
                self.active = False
                error = True
        elif worker_result == ABORT:
            self.active = False
            error = True
        else:
            self.current_backup_task = self.api.get_next_task()
            if self.current_backup_task != None:
                break
            elif self.api.worker_ready():
                self.active = False
            else:
                sleep(0.01)
    if not self.active:
        # No more work (or a fatal worker error): shut everything down and
        # report either cancel or skip.
        self.__shutdown()
        savepkt.fname = ""  # dummy value
        if error:
            jobmessage(M_FATAL, "Shutdown after worker error")
            return bRC_Cancel
        else:
            return bRC_Skip
    filename = FilenameConverter.BucketToBackup(
        "%s/%s"
        % (self.current_backup_task["bucket"],
           self.current_backup_task["name"],)
    )
    debugmessage(100, "Backup file: %s" % (filename,))
    statp = bareosfd.StatPacket()
    # statp.size = self.current_backup_task["size"]
    # statp.mtime = self.current_backup_task["mtime"]
    # statp.atime = 0
    # statp.ctime = 0
    savepkt.statp = statp
    savepkt.fname = StringCodec.encode_for_backup(filename)
    savepkt.type = FT_REG
    # Choose the data source for the subsequent reads based on task type.
    if self.current_backup_task["type"] == TASK_TYPE.DOWNLOADED:
        self.FILE = self.current_backup_task["data"]
    elif self.current_backup_task["type"] == TASK_TYPE.TEMP_FILE:
        try:
            self.FILE = io.open(self.current_backup_task["tmpfile"], "rb")
        except Exception as e:
            jobmessage(M_FATAL, "Could not open temporary file for reading.")
            self.__shutdown()
            return bRC_Error
    elif self.current_backup_task["type"] == TASK_TYPE.STREAM:
        try:
            self.FILE = IterStringIO(self.current_backup_task["data"].as_stream())
        except ObjectDoesNotExistError:
            # The object vanished between listing and download.
            if self.options["treat_download_errors_as_warnings"]:
                jobmessage(
                    M_WARNING,
                    "Skipped file %s because it does not exist anymore"
                    % (self.current_backup_task["name"]),
                )
                return bRC_Skip
            else:
                jobmessage(
                    M_ERROR,
                    "File %s does not exist anymore"
                    % (self.current_backup_task["name"]),
                )
                return bRC_Error
    else:
        raise Exception(value='Wrong argument for current_backup_task["type"]')
    return bRC_OK
def start_backup_file(self, context, savepkt):
    """
    Defines the file to backup and creates the savepkt.

    Handles regular files, directories and symlinks; anything else is
    reported with a warning and skipped. Returns bRC_Skip when there is
    nothing usable to back up, bRC_OK otherwise.
    """
    bareosfd.DebugMessage(context, 100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(context, 100, "No files to backup\n")
        return bRCs["bRC_Skip"]
    file_to_backup = self.files_to_backup.pop()
    bareosfd.DebugMessage(context, 100, "file: " + file_to_backup + "\n")
    mystatp = bareosfd.StatPacket()
    try:
        statp = os.stat(file_to_backup)
    except Exception as e:
        # BUGFIX: was 'e.message', which does not exist on Python 3 and
        # raised AttributeError instead of reporting the real problem.
        bareosfd.JobMessage(
            context,
            bJobMessageType["M_ERROR"],
            'Could not get stat-info for file %s: "%s"' % (file_to_backup, e),
        )
        # BUGFIX: without this return, 'statp' is unbound below and the
        # translation code crashed with NameError.
        return bRCs["bRC_Skip"]
    # As of Bareos 19.2.7 attribute names in bareosfd.StatPacket differ from
    # os.stat; in this case we have to translate names. For future releases
    # consistent names are planned, allowing to assign the complete stat
    # object in one rush.
    if hasattr(mystatp, "st_uid"):
        mystatp = statp
    else:
        mystatp.mode = statp.st_mode
        mystatp.ino = statp.st_ino
        mystatp.dev = statp.st_dev
        mystatp.nlink = statp.st_nlink
        mystatp.uid = statp.st_uid
        mystatp.gid = statp.st_gid
        mystatp.size = statp.st_size
        mystatp.atime = statp.st_atime
        mystatp.mtime = statp.st_mtime
        mystatp.ctime = statp.st_ctime
    savepkt.fname = file_to_backup
    # os.islink will detect links to directories only when
    # there is no trailing slash - we need to perform checks
    # on the stripped name but use it with trailing / for the backup itself
    if os.path.islink(file_to_backup.rstrip("/")):
        savepkt.type = bFileType["FT_LNK"]
        savepkt.link = os.readlink(file_to_backup.rstrip("/"))
        bareosfd.DebugMessage(context, 150, "file type is: FT_LNK\n")
    elif os.path.isfile(file_to_backup):
        savepkt.type = bFileType["FT_REG"]
        bareosfd.DebugMessage(context, 150, "file type is: FT_REG\n")
    elif os.path.isdir(file_to_backup):
        savepkt.type = bFileType["FT_DIREND"]
        savepkt.link = file_to_backup
        bareosfd.DebugMessage(
            context, 150, "file %s type is: FT_DIREND\n" % file_to_backup)
    else:
        bareosfd.JobMessage(
            context,
            bJobMessageType["M_WARNING"],
            "File %s of unknown type" % (file_to_backup),
        )
        return bRCs["bRC_Skip"]
    savepkt.statp = mystatp
    bareosfd.DebugMessage(context, 150, "file statpx " + str(savepkt.statp) + "\n")
    return bRCs["bRC_OK"]
def start_backup_file(self, savepkt):
    """
    Defines the file to backup and creates the savepkt.

    Handles regular files, directories, symlinks and FIFOs; anything
    else is reported with a warning and skipped. Returns bRC_Error when
    the path cannot be stat'ed, bRC_Skip when there is nothing to back
    up, bRC_OK otherwise.
    """
    bareosfd.DebugMessage(100, "start_backup_file() called\n")
    if not self.files_to_backup:
        bareosfd.DebugMessage(100, "No files to backup\n")
        return bareosfd.bRC_Skip
    self.file_to_backup = self.files_to_backup.pop()
    bareosfd.DebugMessage(100, "file: " + self.file_to_backup + "\n")
    mystatp = bareosfd.StatPacket()
    try:
        # lstat for symlinks so we record the link itself, not its target.
        if os.path.islink(self.file_to_backup):
            statp = os.lstat(self.file_to_backup)
        else:
            statp = os.stat(self.file_to_backup)
    except Exception as e:
        bareosfd.JobMessage(
            bareosfd.M_ERROR,
            'Could not get stat-info for file %s: "%s"' % (self.file_to_backup, e),
        )
        # BUGFIX: without this return 'statp' is unbound below and the
        # method crashed with NameError instead of reporting the error.
        return bareosfd.bRC_Error
    # As of Bareos 19.2.7 attribute names in bareosfd.StatPacket differ from
    # os.stat, so translate field by field.
    mystatp.st_mode = statp.st_mode
    mystatp.st_ino = statp.st_ino
    mystatp.st_dev = statp.st_dev
    mystatp.st_nlink = statp.st_nlink
    mystatp.st_uid = statp.st_uid
    mystatp.st_gid = statp.st_gid
    mystatp.st_size = statp.st_size
    mystatp.st_atime = statp.st_atime
    mystatp.st_mtime = statp.st_mtime
    mystatp.st_ctime = statp.st_ctime
    savepkt.fname = self.file_to_backup
    # os.islink will detect links to directories only when
    # there is no trailing slash - we need to perform checks
    # on the stripped name but use it with trailing / for the backup itself
    if os.path.islink(self.file_to_backup.rstrip("/")):
        savepkt.type = bareosfd.FT_LNK
        savepkt.link = os.readlink(self.file_to_backup.rstrip("/"))
        bareosfd.DebugMessage(150, "file type is: FT_LNK\n")
    elif os.path.isfile(self.file_to_backup):
        savepkt.type = bareosfd.FT_REG
        bareosfd.DebugMessage(150, "file type is: FT_REG\n")
    elif os.path.isdir(self.file_to_backup):
        savepkt.type = bareosfd.FT_DIREND
        savepkt.link = self.file_to_backup
        bareosfd.DebugMessage(
            150, "file %s type is: FT_DIREND\n" % self.file_to_backup)
    elif stat.S_ISFIFO(os.stat(self.file_to_backup).st_mode):
        savepkt.type = bareosfd.FT_FIFO
        bareosfd.DebugMessage(150, "file type is: FT_FIFO\n")
    else:
        bareosfd.JobMessage(
            bareosfd.M_WARNING,
            "File %s of unknown type" % (self.file_to_backup),
        )
        return bareosfd.bRC_Skip
    savepkt.statp = mystatp
    bareosfd.DebugMessage(150, "file statpx " + str(savepkt.statp) + "\n")
    return bareosfd.bRC_OK
def start_backup_file(self, context, savepkt):
    '''
    Defines the file to backup and creates the savepkt.
    '''
    bareosfd.DebugMessage(
        context, 100,
        "BareosFdPluginVMware:start_backup_file() called\n")
    if not self.vadp.files_to_backup:
        # No pending file names: take the next disk device and derive its
        # backup paths (the data file plus a *_cbt.json metadata file).
        self.vadp.disk_device_to_backup = self.vadp.disk_devices.pop(0)
        self.vadp.files_to_backup = []
        if 'uuid' in self.options:
            self.vadp.files_to_backup.append(
                '/VMS/%s/%s' %
                (self.options['uuid'],
                 self.vadp.disk_device_to_backup['fileNameRoot']))
        else:
            self.vadp.files_to_backup.append(
                '/VMS/%s%s/%s/%s' %
                (self.options['dc'],
                 self.options['folder'].rstrip('/'),
                 self.options['vmname'],
                 self.vadp.disk_device_to_backup['fileNameRoot']))
        # The CBT metadata file is backed up before the disk data itself.
        self.vadp.files_to_backup.insert(
            0, self.vadp.files_to_backup[0] + '_cbt.json')
    self.vadp.file_to_backup = self.vadp.files_to_backup.pop(0)
    bareosfd.DebugMessage(context, 100,
                          "file: %s\n" % (self.vadp.file_to_backup))
    if self.vadp.file_to_backup.endswith('_cbt.json'):
        if not self.vadp.get_vm_disk_cbt(context):
            return bRCs['bRC_Error']
        # create a stat packet for a restore object
        statp = bareosfd.StatPacket()
        savepkt.statp = statp
        # see src/filed/backup.c how this is done in c
        savepkt.type = bFileType['FT_RESTORE_FIRST']
        # set the fname of the restore object to the vmdk name
        # by stripping of the _cbt.json suffix
        v_fname = self.vadp.file_to_backup[:-len('_cbt.json')]
        if chr(self.level) != 'F':
            # add level and timestamp to fname in savepkt
            savepkt.fname = "%s+%s+%s" % (v_fname, chr(
                self.level), repr(self.vadp.create_snap_tstamp))
        else:
            savepkt.fname = v_fname
        savepkt.object_name = savepkt.fname
        savepkt.object = bytearray(self.vadp.changed_disk_areas_json)
        savepkt.object_len = len(savepkt.object)
        savepkt.object_index = int(time.time())
    else:
        # start bareos_vadp_dumper
        self.vadp.start_dumper(context, 'dump')
        # create a regular stat packet
        statp = bareosfd.StatPacket()
        savepkt.statp = statp
        savepkt.fname = self.vadp.file_to_backup
        if chr(self.level) != 'F':
            # add level and timestamp to fname in savepkt
            savepkt.fname = "%s+%s+%s" % (
                self.vadp.file_to_backup,
                chr(self.level),
                repr(self.vadp.create_snap_tstamp))
        savepkt.type = bFileType['FT_REG']
    bareosfd.JobMessage(
        context,
        bJobMessageType['M_INFO'],
        "Starting backup of %s\n" % self.vadp.file_to_backup)
    return bRCs['bRC_OK']