Example #1
 def end_backup_job(self, ):
     '''
     Finish the backup: Unmount the snapshot and merge it
     '''
     bareosfd.DebugMessage(
         100, "end_backup_job() entry point in Python called\n")
     if self.mounted:
         try:
             subprocess.check_output([
                 '/usr/sbin/ploop', 'umount', '-c',
                 self.base_image['fname'], '-m', self.job_mount_point,
                 self.disk_descriptor
             ],
                                     universal_newlines=True)
         except subprocess.CalledProcessError:
             bareosfd.JobMessage(
                 bareosfd.M_WARNING,
                 "Cannot unmount base image '{}'\n".format(
                     self.base_image['fname']))
     if self.snapshot_created:
         try:
             # we delete the base_snapshot, which results in merging and deleting the delta file
             subprocess.check_call([
                 'ploop', 'snapshot-delete', '-u', self.snapshot_uuid,
                 self.disk_descriptor
             ])
         except subprocess.CalledProcessError:
             bareosfd.JobMessage(
                 bareosfd.M_WARNING,
                 "Cannot merge snapshot for CT '{}'\n".format(
                     self.options['name']))
     if self.lock:
         self.lock.release()
     return bareosfd.bRC_OK
def load_bareos_plugin(plugindef):
    """
    This function is called by the Bareos-FD to load the plugin
    We use it to instantiate the plugin class
    """
    if version_info.major >= 3 and version_info.minor >= 8:
        bareosfd.JobMessage(
            M_FATAL,
            "Need Python version < 3.8 (current version: {}.{}.{})\n".format(
                version_info.major, version_info.minor, version_info.micro
            ),
        )
        return bRC_Error

    # Check for needed python modules
    try:
        import psycopg2
    except Exception as e:
        bareosfd.JobMessage(
            M_FATAL,
            "could not import Python module: %s\n" % e,
        )
        return bRC_Error

    # This module contains the used plugin class
    import BareosFdPluginPostgres

    # BareosFdWrapper.bareos_fd_plugin_object is the module attribute that
    # holds the plugin class object
    BareosFdWrapper.bareos_fd_plugin_object = (
        BareosFdPluginPostgres.BareosFdPluginPostgres(plugindef)
    )
    return bRC_OK
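
A side note on the version gate above: comparing major and minor separately is fragile (a hypothetical Python 4.0 would not trigger the error even though it is newer than 3.8). A minimal sketch of an equivalent but more robust test, assuming the same version_info import from sys:

from sys import version_info

# version_info compares like a tuple, so one comparison covers 3.8, 3.9, 4.x, ...
if version_info >= (3, 8):
    pass  # emit the same M_FATAL JobMessage and return bRC_Error as above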
    def restore_object_data(self, ROP):
        """
        Called on restore and on diff/inc jobs.
        """
        # Improve: sanity / consistency check of restore object
        # ROP.object is of type bytearray.
        self.row_rop_raw = ROP.object.decode("UTF-8")
        try:
            self.rop_data[ROP.jobid] = json.loads(self.row_rop_raw)
        except Exception as e:
            bareosfd.JobMessage(
                bareosfd.M_FATAL,
                'Could not parse restore object json-data "%s" / "%s"\n' %
                (self.row_rop_raw, e),
            )

        if "lastBackupStopTime" in self.rop_data[ROP.jobid]:
            self.lastBackupStopTime = int(
                self.rop_data[ROP.jobid]["lastBackupStopTime"])
            bareosfd.JobMessage(
                bareosfd.M_INFO,
                "Got lastBackupStopTime %d from restore object of job %d\n" %
                (self.lastBackupStopTime, ROP.jobid),
            )
        if "lastLSN" in self.rop_data[ROP.jobid]:
            self.lastLSN = self.rop_data[ROP.jobid]["lastLSN"]
            bareosfd.JobMessage(
                bareosfd.M_INFO,
                "Got lastLSN %s from restore object of job %d\n" %
                (self.lastLSN, ROP.jobid),
            )
        return bareosfd.bRC_OK
    def start_backup_file(self, context, savepkt):
        '''
        This method is called when Bareos is ready to start backing up a file.
        For each bucket key to back up we start a chunked download and read
        from the resulting stream in self.stream
        '''
        bareosfd.DebugMessage(context, 100, "start_backup called\n")
        if not self.bucket_key_tuple:
            bareosfd.DebugMessage(context, 100, "No buckets to backup\n")
            bareosfd.JobMessage(context, bJobMessageType['M_ERROR'], "No buckets to backup.\n")
            return bRCs['bRC_Skip']

        bucketname, keyname = self.bucket_key_tuple.pop()
        keysize = get_key_size(self.conn, bucketname, keyname)
        keymtime = get_key_mtime(self.conn, bucketname, keyname)


        statp = bareosfd.StatPacket()
        if not keysize == "NULL\n":
            try:
                statp.size = int(keysize)
            except ValueError:
                pass
        statp.atime = 0
        statp.ctime = 0
        statp.mtime = keymtime
        savepkt.statp = statp
        savepkt.fname = "/{bname}/{kname}".format(bname=bucketname, kname=keyname)
        savepkt.type = bFileType['FT_REG']
        bareosfd.DebugMessage(context, 100, "Attempting to download key: '" + bucketname + "/" + keyname + "'\n")
        self.stream = chunky_download(self.conn, bucketname, keyname)

        bareosfd.JobMessage(context, bJobMessageType['M_INFO'], "Starting backup of " + savepkt.fname + "\n")
        return bRCs['bRC_OK']
Example #5
    def end_backup_file(self, context):
        '''
        Check if the dump was successful.
        '''
        # Usually the dump process should have terminated here, but on some
        # servers it has not always done so.
        self.stream.wait()
        returnCode = self.stream.poll()
        if returnCode is None:
            bareosfd.JobMessage(
                context, bJobMessageType['M_ERROR'],
                "Dump command not finished properly for unknown reason\n")
            returnCode = -99
        else:
            bareosfd.DebugMessage(
                context, 100,
                "end_backup_file() entry point in Python called. Returncode: %d\n"
                % self.stream.returncode)
            if returnCode != 0:
                (stdOut, stdError) = self.stream.communicate()
                if stdError is None:
                    stdError = ''
                bareosfd.JobMessage(
                    context, bJobMessageType['M_ERROR'],
                    "Dump command returned non-zero value: %d, message: %s\n" %
                    (returnCode, stdError))

        if self.databases:
            return bRCs['bRC_More']
        else:
            if returnCode == 0:
                return bRCs['bRC_OK']
            else:
                return bRCs['bRC_Error']
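
The comment in the example above notes that the dump process does not always terminate on its own. A possible hardening, not part of the original plugin, is to bound the wait with a timeout (Python 3's Popen.wait accepts one, and subprocess is assumed to be imported); the 300 second value below is an arbitrary assumption:

        try:
            returnCode = self.stream.wait(timeout=300)
        except subprocess.TimeoutExpired:
            # the dump is considered hung: kill it and collect the exit status
            self.stream.kill()
            returnCode = self.stream.wait()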
Example #6
 def end_restore_ct_job(self, ):
     '''
     Unmount the ploop image and register the CT
     '''
     bareosfd.DebugMessage(
         100, "end_restore_job() entry point in Python called\n")
     # unmounting ploop
     try:
         ploop_umount = subprocess.check_output(
             ['ploop', 'umount', self.disk_descriptor],
             universal_newlines=True)
     # fixme: need to parse output
     except subprocess.CalledProcessError:
         bareosfd.JobMessage(
             bareosfd.M_ERROR,
             "Cannot umount ploop image of CT '{}'\n".format(
                 self.options['name']))
         return bareosfd.bRC_Error
     # register CT
     try:
         register = subprocess.check_output(
             ['prlctl', 'register', self.config_path, '--preserve-uuid'],
             universal_newlines=True)
     # fixme: need to parse output
     except subprocess.CalledProcessError:
         bareosfd.JobMessage(
             bareosfd.M_ERROR,
             "Cannot register CT '{}'\n".format(self.options['name']))
         return bareosfd.bRC_Error
     bareosfd.DebugMessage(100, "LEAVING end_restore_ct_job () \n")
     return bareosfd.bRC_OK
    def restore_entry(self, context):
        # Restore the entry
        if self.ldif:
            # Parse the LDIF back to an attribute list
            ldif_dump = StringIO(str(self.ldif))
            ldif_parser = ldif.LDIFRecordList(ldif_dump, max_entries=1)
            ldif_parser.parse()
            dn, entry = ldif_parser.all_records[0]
            ldif_dump.close()

            if self.dn != dn:
                bareosfd.JobMessage(
                    context,
                    bJobMessageType["M_INFO"],
                    "Restoring original DN %s as %s\n" % (dn, self.dn),
                )

            if dn:
                if self.ld:
                    # Try adding the entry
                    add_ldif = ldap.modlist.addModlist(entry)
                    try:
                        self.ld.add_s(self.dn, add_ldif)
                    except ldap.LDAPError as e:
                        # Delete the original DN
                        try:
                            self.ld.delete_s(self.dn)
                            self.ld.add_s(self.dn, add_ldif)
                        except ldap.LDAPError as e:
                            if type(e.message) == dict and "desc" in e.message:
                                bareosfd.JobMessage(
                                    context,
                                    bJobMessageType["M_ERROR"],
                                    "Failed to restore LDAP DN %s: %s\n" %
                                    (self.dn, e.message["desc"]),
                                )
                            else:
                                bareosfd.JobMessage(
                                    context,
                                    bJobMessageType["M_ERROR"],
                                    "Failed to restore LDAP DN %s: %s\n" %
                                    (self.dn, e),
                                )
                            self.ldif = None
                            return bRCs["bRC_Error"]
                else:
                    bareosfd.JobMessage(
                        context,
                        bJobMessageType["M_ERROR"],
                        "Failed to restore LDAP DN %s no writable binding to LDAP exists\n"
                        % (self.dn),
                    )
                    self.ldif = None
                    return bRCs["bRC_Error"]

            # Processed ldif
            self.ldif = None

        return bRCs["bRC_OK"]
Example #8
 def create_file(self, restorepkt):
     """
     Creates the file to be restored and directory structure, if needed.
     Adapt this in your derived class, if you need modifications for
     virtual files or similar
     """
     bareosfd.DebugMessage(
         100,
         "create_file() entry point in Python called with %s\n" %
         (restorepkt),
     )
     FNAME = restorepkt.ofname
     if not FNAME:
         return bareosfd.bRC_Error
     dirname = os.path.dirname(FNAME.rstrip("/"))
     if not os.path.exists(dirname):
         bareosfd.DebugMessage(
             200,
             "Directory %s does not exist, creating it now\n" % dirname)
         os.makedirs(dirname)
     # open creates the file if it does not exist yet; we close it again
     # right away, as it will be opened again in plugin_io.
     if restorepkt.type == bareosfd.FT_REG:
         open(FNAME, "wb").close()
         restorepkt.create_status = bareosfd.CF_EXTRACT
     elif restorepkt.type == bareosfd.FT_LNK:
         linkNameEnc = restorepkt.olname
         linkNameClear = linkNameEnc
         if not os.path.islink(FNAME.rstrip("/")):
             # if not os.path.exists(linkNameClear):
             os.symlink(linkNameClear, FNAME.rstrip("/"))
         restorepkt.create_status = bareosfd.CF_CREATED
     elif restorepkt.type == bareosfd.FT_LNKSAVED:
         linkNameEnc = restorepkt.olname
         linkNameClear = linkNameEnc
         if not os.path.exists(linkNameClear):
             os.link(linkNameClear, FNAME.rstrip("/"))
         restorepkt.create_status = bareosfd.CF_CREATED
     elif restorepkt.type == bareosfd.FT_DIREND:
         if not os.path.exists(FNAME):
             os.makedirs(FNAME)
         restorepkt.create_status = bareosfd.CF_CREATED
     elif restorepkt.type == bareosfd.FT_FIFO:
         if not os.path.exists(FNAME):
             try:
                 os.mkfifo(FNAME, 0o600)
             except Exception as e:
                 bareosfd.JobMessage(
                     bareosfd.M_ERROR,
                     'Could not create fifo %s: "%s"' % (FNAME, e),
                 )
         restorepkt.create_status = bareosfd.CF_CREATED
     else:
         bareosfd.JobMessage(
             bareosfd.M_ERROR,
             "Unknown type %s of file %s" % (restorepkt.type, FNAME),
         )
     return bareosfd.bRC_OK
Example #9
    def prepare_vm_restore(self, context):
        '''
        prepare VM restore:
        - get vm details
        - ensure vm is powered off
        - get disk devices
        '''
        if 'uuid' in self.options:
            vmname = self.options['uuid']
            if not self.get_vm_details_by_uuid(context):
                bareosfd.DebugMessage(
                    context, 100,
                    "Error getting details for VM %s\n" % (vmname))
                return bRCs['bRC_Error']
        else:
            vmname = self.options['vmname']
            if not self.get_vm_details_dc_folder_vmname(context):
                bareosfd.DebugMessage(
                    context, 100,
                    "Error getting details for VM %s\n" % (vmname))
                return bRCs['bRC_Error']

        bareosfd.DebugMessage(
            context, 100, "Successfully got details for VM %s\n" % (vmname))

        vm_power_state = self.vm.summary.runtime.powerState
        if vm_power_state != 'poweredOff':
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "Error VM %s must be poweredOff for restore, but is %s\n" %
                (vmname, vm_power_state))
            return bRCs['bRC_Error']

        if self.vm.snapshot is not None:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "Error VM %s must not have any snapshots before restore\n" %
                (vmname))
            return bRCs['bRC_Error']

        bareosfd.DebugMessage(context, 100,
                              "Getting Disk Devices on VM %s\n" % (vmname))
        self.get_vm_disk_devices(context)
        if not self.disk_devices:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "Error getting Disk Devices on VM %s\n" % (vmname))
            return bRCs['bRC_Error']

        # make sure backed up disks match VM disks
        if not self.check_vm_disks_match(context):
            return bRCs['bRC_Error']

        return bRCs['bRC_OK']
    def parse_plugin_definition(self, context, plugindef):
        BareosFdPluginBaseclass.BareosFdPluginBaseclass.parse_plugin_definition(self, context, plugindef)
        if 'configfile' in self.options:
            config_path = self.options['configfile']
            if os.path.exists(config_path):
                try:
                    self.config = json.load(open(config_path, 'r'))
                except ValueError as e:
                    bareosfd.JobMessage(context, bJobMessageType['M_FATAL'],
                            "s3 config file ({config_path}) is not valid json: "
                            "{error}\n".format(config_path=config_path, error=e))
                    return bRCs['bRC_Error']
            else:
                bareosfd.JobMessage(context, bJobMessageType['M_FATAL'],
                        "The config file mentioned can't be found: "
                        "{config_path}\n".format(config_path=config_path))
                return bRCs['bRC_Error']
            self.s3config = { "access_key": self.config['access_key'],
                              "secret_key": self.config['secret_key'],
                              "host": self.config['host'],
                              "is_secure": self.config['is_secure'],
                              "signature": self.config['signature'],
                              "astyle": self.config['astyle']
                            }
            try:
                self.conn = conngen(self.s3config)
            except Exception as e:
                bareosfd.JobMessage(context, bJobMessageType['M_FATAL'],
                        "We can't connect to S3 with the current config: "
                        "{error}\n".format(error=e))
                return bRCs['bRC_Error']
            # Generate a list of keys for each bucket and pair them together
            if 'bucket_list' in self.config:
                self.bucket_list = self.config['bucket_list']
            else:
                self.bucket_list = []
            if self.bucket_list:
                for bucket in self.bucket_list:
                    if check_bucket_exists(self.conn, bucket):
                        for key in list_objects(self.conn, bucket):
                            self.bucket_key_tuple.append((bucket, key))
            # If no buckets are specified, we add them all, except the ones in
            # the exclude_buckets list
            else:
                for bucket in list_buckets(self.conn):
                    if 'exclude_buckets' in self.config and \
                            bucket in self.config['exclude_buckets']:
                        continue
                    self.bucket_list.append(bucket)
                    for key in list_objects(self.conn, bucket):
                        self.bucket_key_tuple.append((bucket, key))
        return bRCs['bRC_OK']
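
For reference, a configuration file that the method above can parse would look roughly like this; only the keys are taken from the code, all values are placeholders:

# {
#     "access_key": "AKIAEXAMPLE",
#     "secret_key": "examplesecret",
#     "host": "s3.example.com",
#     "is_secure": true,
#     "signature": "s3",
#     "astyle": "path",
#     "bucket_list": ["bucket-a", "bucket-b"],
#     "exclude_buckets": ["tmp-bucket"]
# }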
Example #11
    def prepare_backup(self, context, options):
        """
        Prepare a LDAP backup
        """
        connect_bRC = self.connect_and_bind(context, options, True)
        if connect_bRC != bRCs["bRC_OK"]:
            return connect_bRC

        bareosfd.DebugMessage(
            context,
            100,
            "Creating search filter and attribute filter to perform LDAP search\n",
        )

        # Get a subtree
        searchScope = ldap.SCOPE_SUBTREE

        # See if there is a specific search filter otherwise use the all object filter.
        if "search_filter" in options:
            searchFilter = options["search_filter"]
        else:
            searchFilter = r"(objectclass=*)"

        # Get all user attributes and createTimestamp + modifyTimestamp
        attributeFilter = ["*", "createTimestamp", "modifyTimestamp"]

        try:
            # Asynchronous search method
            self.msg_id = self.ld.search(options["basedn"], searchScope,
                                         searchFilter, attributeFilter)
        except ldap.LDAPError as e:
            if type(e.message) == dict and "desc" in e.message:
                bareosfd.JobMessage(
                    context,
                    bJobMessageType["M_FATAL"],
                    "Failed to execute LDAP search on LDAP uri %s: %s\n" %
                    (options["uri"], e.message["desc"]),
                )
            else:
                bareosfd.JobMessage(
                    context,
                    bJobMessageType["M_FATAL"],
                    "Failed to execute LDAP search on LDAP uri %s: %s\n" %
                    (options["uri"], e),
                )

            return bRCs["bRC_Error"]

        bareosfd.DebugMessage(context, 100,
                              "Successfully performed LDAP search\n")

        return bRCs["bRC_OK"]
    def wait_for_wal_archiving(self, LSN):
        """
        Wait for wal archiving to be finished by checking if the wal file
        for the given LSN is present in the filesystem.
        """

        pgMajorVersion = self.pgVersion // 10000
        if pgMajorVersion >= 10:
            wal_filename_func = "pg_walfile_name"
        else:
            wal_filename_func = "pg_xlogfile_name"

        walfile_stmt = "SELECT %s('%s')" % (wal_filename_func, LSN)

        try:
            result = self.dbCon.run(walfile_stmt)
            wal_filename = result[0][0]

            bareosfd.DebugMessage(
                100,
                "wait_for_wal_archiving(%s): wal filename=%s\n" %
                (LSN, wal_filename),
            )

        except Exception as e:
            bareosfd.JobMessage(
                bareosfd.M_FATAL,
                "Error getting WAL filename for LSN %s\n" % (LSN, e),
            )
            return False

        wal_file_path = self.options["walArchive"] + wal_filename

        # To finish as quickly as possible but with low impact on a heavily loaded
        # system, we use increasing sleep times here, starting with a small value
        sleep_time = 0.01
        slept_sum = 0.0
        while slept_sum <= self.switchWalTimeout:
            if os.path.exists(wal_file_path):
                return True
            time.sleep(sleep_time)
            slept_sum += sleep_time
            sleep_time *= 1.2

        bareosfd.JobMessage(
            bareosfd.M_FATAL,
            "Timeout waiting %s s for wal file %s to be archived\n" %
            (self.switchWalTimeout, wal_filename),
        )
        return False
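
The polling loop above backs off geometrically. A small standalone sketch of the resulting wait pattern, using the same constants and an assumed 60 second switchWalTimeout:

sleep_time = 0.01
slept_sum = 0.0
polls = 0
while slept_sum <= 60:
    polls += 1
    slept_sum += sleep_time
    sleep_time *= 1.2
# polls ends up around 39: the sleep grows from 10 ms to roughly 10 s, so the loop
# reacts quickly at first without hammering a loaded system towards the timeout.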
Example #13
    def connect_and_bind(self, options, bulk=False):
        """
        Bind to LDAP URI using the given authentication tokens
        """
        if bulk:
            try:
                self.ld = BulkLDAP(options["uri"], bytes_mode=True)
            except TypeError:
                self.ld = BulkLDAP(options["uri"])
        else:
            try:
                self.ld = ldap.initialize(options["uri"], bytes_mode=True)
            except TypeError:
                self.ld = ldap.initialize(options["uri"])

        try:
            self.ld.protocol_version = ldap.VERSION3
            if "bind_dn" in options and "password" in options:
                self.ld.simple_bind_s(options["bind_dn"], options["password"])
            else:
                self.ld.simple_bind_s("", "")
        except ldap.INVALID_CREDENTIALS:
            bareosfd.JobMessage(
                bareosfd.bJobMessageType["M_FATAL"],
                "Failed to bind to LDAP uri due to invalid credentials %s\n" %
                (options["uri"]),
            )

            return bareosfd.bRC_Error
        except ldap.LDAPError as e:
            if type(e.message) == dict and "desc" in e.message:
                bareosfd.JobMessage(
                    bareosfd.bJobMessageType["M_FATAL"],
                    "Failed to bind to LDAP uri %s: %s %s\n" %
                    (options["uri"], e.message["desc"], e.message["info"]),
                )
            else:
                bareosfd.JobMessage(
                    bareosfd.bJobMessageType["M_FATAL"],
                    "Failed to bind to LDAP uri %s: %s\n" %
                    (options["uri"], e),
                )

            return bareosfd.bRC_Error

        bareosfd.DebugMessage(100, "connected to LDAP server\n")

        return bareosfd.bRC_OK
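
For orientation, the options dictionary consumed by connect_and_bind() looks roughly like this; the keys are taken from the code, the values are placeholders, and bind_dn/password may be omitted for an anonymous bind:

options = {
    "uri": "ldap://ldap.example.com",
    "bind_dn": "cn=admin,dc=example,dc=com",
    "password": "secret",
}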
Example #14
    def get_vm_details_dc_folder_vmname(self, context):
        '''
        Get details of VM given by plugin options dc, folder, vmname
        and save result in self.vm
        Returns True on success, False otherwise
        '''
        content = self.si.content
        dcView = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.Datacenter], False)
        vmListWithFolder = {}
        dcList = dcView.view
        dcView.Destroy()
        for dc in dcList:
            if dc.name == self.options['dc']:
                folder = ''
                self._get_dcftree(vmListWithFolder, folder, dc.vmFolder)

        if self.options['folder'].endswith('/'):
            vm_path = "%s%s" % (self.options['folder'], self.options['vmname'])
        else:
            vm_path = "%s/%s" % (self.options['folder'],
                                 self.options['vmname'])

        if vm_path not in vmListWithFolder:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "No VM with Folder/Name %s found in DC %s\n" %
                (vm_path, self.options['dc']))
            return False

        self.vm = vmListWithFolder[vm_path]
        return True
Example #15
    def get_vm_disk_cbt(self, context):
        '''
        Get CBT Information
        '''
        cbt_changeId = '*'
        if self.disk_device_to_backup[
                'fileNameRoot'] in self.restore_objects_by_diskpath:
            if len(self.restore_objects_by_diskpath[
                    self.disk_device_to_backup['fileNameRoot']]) > 1:
                bareosfd.JobMessage(
                    context, bJobMessageType['M_FATAL'],
                    "ERROR: more then one CBT info for Diff/Inc exists\n")
                return False

            cbt_changeId = self.restore_objects_by_diskpath[
                self.disk_device_to_backup['fileNameRoot']][0]['data'][
                    'DiskParams']['changeId']
            bareosfd.DebugMessage(
                context, 100,
                "get_vm_disk_cbt(): using changeId %s from restore object\n" %
                (cbt_changeId))
        self.changed_disk_areas = self.vm.QueryChangedDiskAreas(
            snapshot=self.create_snap_result,
            deviceKey=self.disk_device_to_backup['deviceKey'],
            startOffset=0,
            changeId=cbt_changeId)
        self.cbt2json(context)
        return True
Example #16
    def writeStringToFile(self, context, filename, data_string):
        """
        Write a String to the given file.
        """
        bareosfd.DebugMessage(
            context, 100,
            "writeStringToFile(): writing String to file %s\n" % (filename))
        # ensure the directory for writing the file exists
        self.mkdir(os.path.dirname(filename))
        try:
            with open(filename, 'w') as out:
                out.write(data_string)
            bareosfd.DebugMessage(
                context, 100,
                "writeStringToFile(): successfully wrote String to file %s\n" %
                (filename))

        except IOError as io_error:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                ("writeStingToFile(): failed to write String to file %s,"
                 " reason: %s\n") % (filename, io_error.strerror))

        # the following path must be passed to bareos_vadp_dumper as parameter
        self.cbt_json_local_file_path = filename
    def set_file_attributes(self, context, restorepkt):
        # Python attribute setting does not work properly with links
        if restorepkt.type == bFileType["FT_LNK"]:
            return bRCs["bRC_OK"]
        file_name = restorepkt.ofname
        file_attr = restorepkt.statp
        bareosfd.DebugMessage(
            context,
            150,
            "Set file attributes " + file_name + " with stat " +
            str(file_attr) + "\n",
        )
        try:
            os.chown(file_name, file_attr.uid, file_attr.gid)
            os.chmod(file_name, file_attr.mode)
            os.utime(file_name, (file_attr.atime, file_attr.mtime))
        except Exception as e:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_WARNING"],
                "Could net set attributes for file %s: \"%s\"" %
                (file_to_backup, e.message),
            )

        return bRCs["bRC_OK"]
Example #18
 def end_restore_file(self):
     bareosfd.DebugMessage(
         100,
         "end_restore_file() entry point in Python called FNAME: %s\n" %
         self.FNAME,
     )
     bareosfd.DebugMessage(
         150,
         "end_restore_file set file attributes " + self.FNAME +
         " with stat " + str(self.statp[self.FNAME]) + "\n",
     )
     try:
         os.chown(self.FNAME, self.statp[self.FNAME].st_uid,
                  self.statp[self.FNAME].st_gid)
         os.chmod(self.FNAME, self.statp[self.FNAME].st_mode)
         os.utime(self.FNAME, (self.statp[self.FNAME].st_atime,
                               self.statp[self.FNAME].st_mtime))
         # del sometimes leads to no-key errors; it seems that end_restore_file
         # is sometimes called multiple times.
         # del self.statp[self.FNAME]
     except Exception as e:
         bareosfd.JobMessage(
             bareosfd.M_WARNING,
             'Could not set attributes for file %s: "%s"' % (self.FNAME, e),
         )
     return bareosfd.bRC_OK
    def checkForWalFiles(self):
        """
        Look for new WAL files and back them up.
        Backup start time is timezone-aware; we need to add a timezone
        to the files' mtime to make them comparable
        """
        # We have to add local timezone to the file's timestamp in order
        # to compare them with the backup starttime, which has a timezone
        walArchive = self.options["walArchive"]
        self.files_to_backup.append(walArchive)
        for fileName in os.listdir(walArchive):
            fullPath = os.path.join(walArchive, fileName)
            try:
                st = os.stat(fullPath)
            except Exception as e:
                bareosfd.JobMessage(
                    bareosfd.M_ERROR,
                    "Could net get stat-info for file %s: %s\n" %
                    (fullPath, e),
                )
                continue
            fileMtime = datetime.datetime.fromtimestamp(st.st_mtime)
            if (fileMtime.replace(
                    tzinfo=dateutil.tz.tzoffset(None, self.tzOffset)) >
                    self.backupStartTime):
                bareosfd.DebugMessage(
                    150, "Adding WAL file %s for backup\n" % fileName)
                self.files_to_backup.append(fullPath)

        if self.files_to_backup:
            return bareosfd.bRC_More
        else:
            return bareosfd.bRC_OK
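
The timezone juggling above is needed because Python refuses to compare naive and timezone-aware datetimes. A minimal illustration of the pitfall (values are arbitrary):

import datetime
import dateutil.tz

naive = datetime.datetime.fromtimestamp(0)            # what we get from a file's st_mtime
aware = datetime.datetime.now(dateutil.tz.tzlocal())  # e.g. a timezone-aware backup start time

# naive > aware                                       # raises TypeError in Python 3
naive.replace(tzinfo=dateutil.tz.tzlocal()) > aware   # works once a tzinfo is attached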
    def set_file_attributes(self, restorepkt):
        bareosfd.DebugMessage(
            100,
            "set_file_attributes() entry point in Python called with %s\n" %
            (str(restorepkt)),
        )

        orig_fname = "/" + os.path.relpath(restorepkt.ofname, restorepkt.where)
        bareosfd.DebugMessage(
            100, "set_file_attributes() orig_fname: {} \n".format(orig_fname))
        restoreobject_sha256sum = self.sha256sums_by_filename[orig_fname]

        file_sha256sum = self.get_sha256sum(orig_fname).encode('utf-8')
        bareosfd.DebugMessage(
            100,
            "set_file_attributes() orig_fname: %s restoreobject_sha256sum: %s file_sha256sum: %s\n"
            %
            (orig_fname, repr(restoreobject_sha256sum), repr(file_sha256sum)),
        )
        if file_sha256sum != restoreobject_sha256sum:
            bareosfd.JobMessage(
                M_ERROR,
                "bad restoreobject orig_fname: %s restoreobject_sha256sum: %s file_sha256sum: %s\n"
                % (orig_fname, repr(restoreobject_sha256sum),
                   repr(file_sha256sum)),
            )

        return bRC_OK
 def plugin_io_read(self, IOP):
     if self.fileType == "FT_REG":
         bareosfd.DebugMessage(
             200, "Reading %d from file %s\n" % (IOP.count, self.FNAME)
         )
         IOP.buf = bytearray(IOP.count)
         try:
             IOP.status = self.file.readinto(IOP.buf)
             IOP.io_errno = 0
         except Exception as e:
             bareosfd.JobMessage(
                 M_ERROR,
                 'Could not read %d bytes from file %s. "%s"'
                 % (IOP.count, self.FNAME, e),
             )
             IOP.io_errno = e.errno
             return bRC_Error
     else:
         bareosfd.DebugMessage(
             100,
             "Did not read from file %s of type %s\n" % (self.FNAME, self.fileType),
         )
         IOP.buf = bytearray()
         IOP.status = 0
         IOP.io_errno = 0
     return bRC_OK
Example #22
    def get_disk_devices(self, context, devicespec):
        '''
        Get disk devices from a devicespec
        '''
        self.disk_devices = []
        for hw_device in devicespec:
            if type(hw_device) == vim.vm.device.VirtualDisk:
                if hw_device.backing.diskMode in self.skip_disk_modes:
                    bareosfd.JobMessage(
                        context, bJobMessageType['M_INFO'],
                        "Skipping Disk %s because mode is %s\n" %
                        (self.get_vm_disk_root_filename(
                            hw_device.backing), hw_device.backing.diskMode))
                    continue

                self.disk_devices.append({
                    'deviceKey':
                    hw_device.key,
                    'fileName':
                    hw_device.backing.fileName,
                    'fileNameRoot':
                    self.get_vm_disk_root_filename(hw_device.backing),
                    'changeId':
                    hw_device.backing.changeId
                })
Example #23
    def create_file(self, context, restorepkt):
        """
        Directories are placeholders only we use the data.ldif files
        to get the actual DN of the LDAP record
        """
        bareosfd.DebugMessage(
            context,
            100,
            "BareosFdPluginVMware:create_file() called with %s\n" %
            (restorepkt),
        )
        if restorepkt.type == bFileType["FT_DIREND"]:
            restorepkt.create_status = bCFs["CF_SKIP"]
        elif restorepkt.type == bFileType["FT_REG"]:
            self.ldap.set_new_dn(restorepkt.ofname)
            restorepkt.create_status = bCFs["CF_EXTRACT"]
        else:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_FATAL"],
                "Request to restore illegal filetype %s\n" % (restorepkt.type),
            )
            return bRCs["bRC_Error"]

        return bRCs["bRC_OK"]
 def filename_is_allowed(self, context, filename, allowregex, denyregex):
     """
     Check, if filename is allowed.
     True, if matches allowreg and not denyregex.
     If allowreg is None, filename always matches
     If denyreg is None, it never matches
     """
     if allowregex is None or allowregex.search(filename):
         allowed = True
     else:
         allowed = False
     if denyregex is None or not denyregex.search(filename):
         denied = False
     else:
         denied = True
     if not allowed or denied:
         bareosfd.DebugMessage(
             context, 100, "File %s denied by configuration\n" % (filename)
         )
         bareosfd.JobMessage(
             context,
             bJobMessageType["M_ERROR"],
             "File %s denied by configuration\n" % (filename),
         )
         return False
     else:
         return True
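
A short usage sketch for the helper above; the regular expressions and paths are made-up examples:

allow = re.compile(r"\.conf$")      # only back up *.conf files
deny = re.compile(r"/secret/")      # ... but nothing below a /secret/ directory

self.filename_is_allowed(context, "/etc/nginx/nginx.conf", allow, deny)  # True
self.filename_is_allowed(context, "/etc/secret/key.conf", allow, deny)   # False (denied)
self.filename_is_allowed(context, "/var/log/messages", None, deny)       # True (no allow regex)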
Example #25
    def connect_vmware(self, context):
        # this prevents repeating the connection setup on a second call
        if self.si:
            bareosfd.DebugMessage(
                context, 100,
                "connect_vmware(): connection to server %s already exists\n" %
                (self.options['vcserver']))
            return True

        bareosfd.DebugMessage(
            context, 100, "connect_vmware(): connecting server %s\n" %
            (self.options['vcserver']))
        try:
            self.si = SmartConnect(host=self.options['vcserver'],
                                   user=self.options['vcuser'],
                                   pwd=self.options['vcpass'],
                                   port=443)
            self.si_last_keepalive = int(time.time())

        except IOError:
            pass
        if not self.si:
            bareosfd.JobMessage(
                context, bJobMessageType['M_FATAL'],
                "Cannot connect to host %s with user %s and password\n" %
                (self.options['vcserver'], self.options['vcuser']))
            return False

        bareosfd.DebugMessage(
            context, 100,
            ("Successfully connected to VSphere API on host %s with"
             " user %s\n") %
            (self.options['vcserver'], self.options['vcuser']))

        return True
    def set_file_attributes(self, context, restorepkt):
        bareosfd.DebugMessage(
            context,
            100,
            "set_file_attributes() entry point in Python called with %s\n"
            % (str(restorepkt)),
        )

        orig_fname = "/" + os.path.relpath(restorepkt.ofname, restorepkt.where)
        restoreobject_sha256sum = self.sha256sums_by_filename[orig_fname]

        file_sha256sum = self.get_sha256sum(context, orig_fname)
        bareosfd.DebugMessage(
            context,
            100,
            "set_file_attributes() orig_fname: %s restoreobject_sha256sum: %s file_sha256sum: %s\n"
            % (orig_fname, repr(restoreobject_sha256sum), repr(file_sha256sum)),
        )
        if file_sha256sum != restoreobject_sha256sum:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_ERROR"],
                "bad restoreobject orig_fname: %s restoreobject_sha256sum: %s file_sha256sum: %s\n"
                % (orig_fname, repr(restoreobject_sha256sum), repr(file_sha256sum)),
            )

        return bRCs["bRC_OK"]
Example #27
    def check_options(self, context, mandatory_options=None):
        """
        Check Plugin options
        Here we just verify that eventual mandatory options are set.
        If you have more to veriy, just overwrite ths method in your class
        """

        if mandatory_options is None:
            return bRCs["bRC_OK"]

        for option in mandatory_options:
            if option not in self.options:
                bareosfd.DebugMessage(
                    context, 100,
                    "Mandatory option '%s' not defined.\n" % option)
                bareosfd.JobMessage(
                    context,
                    bJobMessageType["M_FATAL"],
                    "Mandatory option '%s' not defined.\n" % (option),
                )
                return bRCs["bRC_Error"]

            bareosfd.DebugMessage(
                context, 100,
                "Using Option %s=%s\n" % (option, self.options[option]))

        return bRCs["bRC_OK"]
Example #28
    def prepare_backup(self, context, options):
        '''
        Prepare a LDAP backup
        '''
        connect_bRC = self.connect_and_bind(context, options, True)
        if connect_bRC != bRCs['bRC_OK']:
            return connect_bRC

        bareosfd.DebugMessage(
            context, 100,
            "Creating search filter and attribute filter to perform LDAP search\n"
        )

        # Get a subtree
        searchScope = ldap.SCOPE_SUBTREE

        # See if there is a specific search filter otherwise use the all object filter.
        if 'search_filter' in options:
            searchFilter = options['search_filter']
        else:
            searchFilter = r'(objectclass=*)'

        # Get all user attributes and createTimestamp + modifyTimestamp
        attributeFilter = ['*', 'createTimestamp', 'modifyTimestamp']

        try:
            # Asynchronous search method
            self.msg_id = self.ld.search(options['basedn'], searchScope,
                                         searchFilter, attributeFilter)
        except ldap.LDAPError as e:
            if type(e.message) == dict and 'desc' in e.message:
                bareosfd.JobMessage(
                    context, bJobMessageType['M_FATAL'],
                    "Failed to execute LDAP search on LDAP uri %s: %s\n" %
                    (options['uri'], e.message['desc']))
            else:
                bareosfd.JobMessage(
                    context, bJobMessageType['M_FATAL'],
                    "Failed to execute LDAP search on LDAP uri %s: %s\n" %
                    (options['uri'], e))

            return bRCs['bRC_Error']

        bareosfd.DebugMessage(context, 100,
                              "Successfully performed LDAP search\n")

        return bRCs['bRC_OK']
Example #29
    def start_backup_job(self, context):
        """
        At this point, plugin options were passed and checked already.
        We try to read from filename and set up the list of files to back up
        in self.files_to_backup
        """

        bareosfd.DebugMessage(
            context,
            100,
            "Using %s to search for local files\n" %
            (self.options["filename"]),
        )
        if os.path.exists(self.options["filename"]):
            try:
                config_file = open(self.options["filename"], "rb")
            except:
                bareosfd.DebugMessage(
                    context,
                    100,
                    "Could not open file %s\n" % (self.options["filename"]),
                )
                return bRCs["bRC_Error"]
        else:
            bareosfd.DebugMessage(
                context, 100,
                "File %s does not exist\n" % (self.options["filename"]))
            return bRCs["bRC_Error"]
        # Check, if we have allow or deny regular expressions defined
        if "allow" in self.options:
            self.allow = re.compile(self.options["allow"])
        if "deny" in self.options:
            self.deny = re.compile(self.options["deny"])

        for listItem in config_file.read().splitlines():
            if os.path.isfile(listItem) and self.filename_is_allowed(
                    context, listItem, self.allow, self.deny):
                self.files_to_backup.append(listItem)
            if os.path.isdir(listItem):
                for topdir, dirNames, fileNames in os.walk(listItem):
                    for fileName in fileNames:
                        if self.filename_is_allowed(
                                context,
                                os.path.join(topdir, fileName),
                                self.allow,
                                self.deny,
                        ):
                            self.files_to_backup.append(
                                os.path.join(topdir, fileName))
        if not self.files_to_backup:
            bareosfd.JobMessage(
                context,
                bJobMessageType["M_ERROR"],
                "No (allowed) files to backup found\n",
            )
            return bRCs["bRC_Error"]
        else:
            return bRCs["bRC_Cancel"]
Example #30
    def start_backup_job(self):
        """
        At this point, plugin options were passed and checked already.
        We try to read from filename and set up the list of files to back up
        in self.files_to_backup
        """
        bareosfd.DebugMessage(
            100, "Using %s to search for local files\n" % self.options["filename"],
        )
        if os.path.exists(self.options["filename"]):
            try:
                config_file = open(self.options["filename"], "r")
            except:
                bareosfd.DebugMessage(
                    100, "Could not open file %s\n" % (self.options["filename"]),
                )
                return bareosfd.bRC_Error
        else:
            bareosfd.DebugMessage(
                100, "File %s does not exist\n" % (self.options["filename"])
            )
            return bareosfd.bRC_Error
        # Check, if we have allow or deny regular expressions defined
        if "allow" in self.options:
            self.allow = re.compile(self.options["allow"])
        if "deny" in self.options:
            self.deny = re.compile(self.options["deny"])

        for listItem in config_file.read().splitlines():
            if os.path.isfile(listItem) and self.filename_is_allowed(
                listItem, self.allow, self.deny
            ):
                self.append_file_to_backup(listItem)
            if os.path.isdir(listItem):
                fullDirName = listItem
                # FD requires / at the end of a directory name
                if not fullDirName.endswith("/"):
                    fullDirName += "/"
                self.append_file_to_backup(fullDirName)
                for topdir, dirNames, fileNames in os.walk(listItem):
                    for fileName in fileNames:
                        if self.filename_is_allowed(
                            os.path.join(topdir, fileName), self.allow, self.deny,
                        ):
                            self.append_file_to_backup(os.path.join(topdir, fileName))
                    for dirName in dirNames:
                        fullDirName = os.path.join(topdir, dirName) + "/"
                        self.append_file_to_backup(fullDirName)
        bareosfd.DebugMessage(150, "Filelist: %s\n" % (self.files_to_backup))

        if not self.files_to_backup:
            bareosfd.JobMessage(
                bareosfd.M_ERROR, "No (allowed) files to backup found\n",
            )
            return bareosfd.bRC_Error
        else:
            return bareosfd.bRC_OK