Code example #1
0
File: driver.py  Project: DPaaS-Raksha/raksha
    def backup_execute(self, backupjob, backupjobrun, backupjobrun_vm, vault_service, db, context, update_task_state=None):
        """Incremental backup of the instance specified in backupjobrun_vm.

        Takes an external qcow2 snapshot of every disk of the VM, uploads the
        delta since the previous backup (the backing file of each fresh
        snapshot) to the vault service while recording it in the database,
        then collapses the uploaded delta back into its base (block-commit for
        a running VM, commit+rebase otherwise) so the backing chain stays
        short.

        :param backupjob: backup job object (provides the job id)
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM (within this run) to back up
        :param vault_service: service that stores the disk backups
        :param db: database API used to record backup resources
        :param context: request context passed to db/vault calls
        :param update_task_state: optional callback to report progress
        """
        #TODO(gbasava): Check if the previous backup exists by calling vm_recent_backupjobrun_get

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_START)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path, backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)

        snapshot_name = uuid.uuid4().hex
        # Bug fix: the original concatenation was missing a space and produced
        # "BackupJobRun <id>of BackupJob <id>".
        snapshot_description = ("BackupJobRun " + backupjobrun.id +
                                " of BackupJob " + backupjob.id)
        # Maps device name (e.g. 'vda') -> path of the new snapshot disk file.
        dev_snapshot_disk_paths = {}
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device,
                os.path.join(snapshot_directory,
                             snapshot_name + '_' + device + '.qcow2'))

        #TODO(gbasava): snapshot_create_as is failing with permissions issue while the VM is running
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        #TODO(gbasava): Handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        vm_recent_backupjobrun = db.vm_recent_backupjobrun_get(context, backupjobrun_vm.vm_id)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            # Find the top backup of the previous run for this device so the
            # new incremental backup can be chained on top of it.
            previous_backupjobrun_vm_resource = db.backupjobrun_vm_resource_get(
                context,
                backupjobrun_vm.vm_id,
                vm_recent_backupjobrun.backupjobrun_id,
                dev)
            previous_vm_resource_backup = db.vm_resource_backup_get_top(
                context, previous_backupjobrun_vm_resource.id)

            # The backing file of the fresh snapshot holds the delta since the
            # previous backup -- that is what gets uploaded.
            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {'id': str(uuid.uuid4()),
                                               'vm_id': backupjobrun_vm.vm_id,
                                               'backupjobrun_id': backupjobrun.id,
                                               'resource_type': 'disk',
                                               'resource_name': dev,
                                               'status': 'creating'}
            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)

            # create an entry in the vm_resource_backups table, chained to the
            # previous run's top backup
            vm_resource_backup_backing_id = previous_vm_resource_backup.id
            vm_resource_backup_id = str(uuid.uuid4())
            vm_resource_backup_metadata = {'disk_format': 'qcow2'}
            vm_resource_backup_values = {'id': vm_resource_backup_id,
                                         'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                         'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                                         'metadata': vm_resource_backup_metadata,
                                         'top': True,
                                         'vault_service_id': '1',
                                         'status': 'creating'}

            vm_resource_backup = db.vm_resource_backup_create(
                context, vm_resource_backup_values)

            # upload to vault service
            vault_service_url = None
            with utils.temporary_chown(src_backing_path):
                vault_metadata = {'metadata': vm_resource_backup_metadata,
                                  'vm_resource_backup_id': vm_resource_backup_id,
                                  'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                  'resource_name': dev,
                                  'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                                  'backupjobrun_id': backupjobrun.id}
                vault_service_url = vault_service.backup(vault_metadata, src_backing_path)

            # update the entry in the vm_resource_backup table
            vm_resource_backup_values = {'vault_service_url': vault_service_url,
                                         'vault_service_metadata': 'None',
                                         'status': 'completed'}
            vm_resource_backup.update(vm_resource_backup_values)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)

        # do a block commit.
        # TODO(gbasava): Consider the case of a base image shared by multiple instances
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_INPROGRESS)

        state = self.get_info(instance_name)['state']

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)
            # Shorten the chain: fold the just-uploaded delta (backing_file)
            # into its own base (backing_file_backing).
            if (backing_file_backing is not None and
                    backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing, backing_file)
                    utils.delete_if_exists(backing_file)
                elif state in (power_state.SHUTDOWN, power_state.SUSPENDED):
                    # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
Code example #2
0
    def backup_execute(self,
                       backupjob,
                       backupjobrun,
                       backupjobrun_vm,
                       vault_service,
                       db,
                       context,
                       update_task_state=None):
        """Incremental backup of the instance specified in backupjobrun_vm.

        Takes an external qcow2 snapshot of every disk of the VM, uploads the
        delta since the previous backup (the backing file of each fresh
        snapshot) to the vault service while recording it in the database,
        then collapses the uploaded delta back into its base (block-commit for
        a running VM, commit+rebase otherwise) so the backing chain stays
        short.

        :param backupjob: backup job object (provides the job id)
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM (within this run) to back up
        :param vault_service: service that stores the disk backups
        :param db: database API used to record backup resources
        :param context: request context passed to db/vault calls
        :param update_task_state: optional callback to report progress
        """
        #TODO(gbasava): Check if the previous backup exists by calling vm_recent_backupjobrun_get

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_START)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path,
                                          backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)

        snapshot_name = uuid.uuid4().hex
        # Bug fix: the original concatenation was missing a space and produced
        # "BackupJobRun <id>of BackupJob <id>".
        snapshot_description = ("BackupJobRun " + backupjobrun.id +
                                " of BackupJob " + backupjob.id)
        # Maps device name (e.g. 'vda') -> path of the new snapshot disk file.
        dev_snapshot_disk_paths = {}
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device,
                os.path.join(snapshot_directory,
                             snapshot_name + '_' + device + '.qcow2'))

        #TODO(gbasava): snapshot_create_as is failing with permissions issue while the VM is running
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        #TODO(gbasava): Handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        vm_recent_backupjobrun = db.vm_recent_backupjobrun_get(
            context, backupjobrun_vm.vm_id)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            # Find the top backup of the previous run for this device so the
            # new incremental backup can be chained on top of it.
            previous_backupjobrun_vm_resource = db.backupjobrun_vm_resource_get(
                context, backupjobrun_vm.vm_id,
                vm_recent_backupjobrun.backupjobrun_id, dev)
            previous_vm_resource_backup = db.vm_resource_backup_get_top(
                context, previous_backupjobrun_vm_resource.id)

            # The backing file of the fresh snapshot holds the delta since the
            # previous backup -- that is what gets uploaded.
            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {
                'id': str(uuid.uuid4()),
                'vm_id': backupjobrun_vm.vm_id,
                'backupjobrun_id': backupjobrun.id,
                'resource_type': 'disk',
                'resource_name': dev,
                'status': 'creating'
            }
            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)

            # create an entry in the vm_resource_backups table, chained to the
            # previous run's top backup
            vm_resource_backup_backing_id = previous_vm_resource_backup.id
            vm_resource_backup_id = str(uuid.uuid4())
            vm_resource_backup_metadata = {'disk_format': 'qcow2'}
            vm_resource_backup_values = {
                'id': vm_resource_backup_id,
                'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                'metadata': vm_resource_backup_metadata,
                'top': True,
                'vault_service_id': '1',
                'status': 'creating'
            }

            vm_resource_backup = db.vm_resource_backup_create(
                context, vm_resource_backup_values)

            # upload to vault service
            vault_service_url = None
            with utils.temporary_chown(src_backing_path):
                vault_metadata = {
                    'metadata': vm_resource_backup_metadata,
                    'vm_resource_backup_id': vm_resource_backup_id,
                    'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                    'resource_name': dev,
                    'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                    'backupjobrun_id': backupjobrun.id
                }
                vault_service_url = vault_service.backup(
                    vault_metadata, src_backing_path)

            # update the entry in the vm_resource_backup table
            vm_resource_backup_values = {
                'vault_service_url': vault_service_url,
                'vault_service_metadata': 'None',
                'status': 'completed'
            }
            vm_resource_backup.update(vm_resource_backup_values)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)

        # do a block commit.
        # TODO(gbasava): Consider the case of a base image shared by multiple instances
        if update_task_state:
            update_task_state(
                task_state=task_states.BACKUP_BLOCKCOMMIT_INPROGRESS)

        state = self.get_info(instance_name)['state']

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)
            # Shorten the chain: fold the just-uploaded delta (backing_file)
            # into its own base (backing_file_backing).
            if (backing_file_backing is not None
                    and backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing,
                                     backing_file)
                    utils.delete_if_exists(backing_file)
                elif state in (power_state.SHUTDOWN, power_state.SUSPENDED):
                    # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
Code example #3
0
File: driver.py  Project: DPaaS-Raksha/raksha
    def backup_prepare(self, backupjob, backupjobrun, backupjobrun_vm, vault_service, db, context, update_task_state=None):
        """Full (initial) backup of the instance specified in backupjobrun_vm.

        Takes an external qcow2 snapshot of every disk of the VM, walks the
        whole backing chain of each disk, uploads every file of the chain to
        the vault service (base first) while recording the chain in the
        vm_resource_backups table, and finally collapses the old delta into
        its base (block-commit or commit+rebase, depending on power state) for
        every disk except the root disk 'vda'.

        :param backupjob: backup job object (provides the job id)
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM (within this run) to back up
        :param vault_service: service that stores the disk backups
        :param db: database API used to record backup resources
        :param context: request context passed to db/vault calls
        :param update_task_state: optional callback to report progress
        """
        # Todo - Check the min supported version of the QEMU and Libvirt
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_PREPARE)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path, backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)
        snapshot_name = uuid.uuid4().hex
        # Bug fix: the original concatenation was missing a space and produced
        # "BackupJobRun <id>of BackupJob <id>".
        snapshot_description = ("BackupJobRun " + backupjobrun.id +
                                " of BackupJob " + backupjob.id)
        # Maps device name (e.g. 'vda') -> path of the new snapshot disk file.
        dev_snapshot_disk_paths = {}
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device,
                os.path.join(snapshot_directory,
                             snapshot_name + '_' + device + '.qcow2'))

        # we may have to powerdown/suspend until the permissions issue is resolved
        #self.suspend(instance_name)
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        # Todo - handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        # stream the backing files of the new snapshots
        if update_task_state:
            # NOTE(review): "INPROGESS" spelling matches the constant declared
            # in task_states -- do not "fix" it here alone.
            update_task_state(task_state=task_states.BACKUP_UPLOAD_INPROGESS)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {'id': str(uuid.uuid4()),
                                               'vm_id': backupjobrun_vm.vm_id,
                                               'backupjobrun_id': backupjobrun.id,
                                               'resource_type': 'disk',
                                               'resource_name': dev,
                                               'status': 'creating'}
            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)

            # Collect the full backing chain, top-most first; used as a stack
            # so the base image ends up on top.
            src_backings = []
            while src_backing_path is not None:
                src_backings.append(src_backing_path)
                mode = os.stat(src_backing_path).st_mode
                if S_ISREG(mode):
                    src_backing_path = libvirt_utils.get_disk_backing_file(
                        src_backing_path, basename=False)
                else:
                    # a non-regular file (e.g. block device) ends the chain
                    src_backing_path = None

            # Upload the chain from the base towards the top, chaining each
            # vm_resource_backup record to the previously created one.
            base_backing_path = None
            vm_resource_backup_id = None
            if src_backings:
                base_backing_path = src_backings.pop()
            while base_backing_path is not None:
                top_backing_path = None
                if src_backings:
                    top_backing_path = src_backings.pop()

                # create an entry in the vm_resource_backups table
                vm_resource_backup_backing_id = vm_resource_backup_id
                vm_resource_backup_id = str(uuid.uuid4())
                vm_resource_backup_metadata = {}
                if dev == 'vda' and top_backing_path is None:
                    # bottom of the root-disk chain is the glance base image
                    vm_resource_backup_metadata.setdefault('base_image_ref', 'TODO')
                vm_resource_backup_metadata.setdefault('disk_format', 'qcow2')
                vm_resource_backup_values = {'id': vm_resource_backup_id,
                                             'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                             'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                                             'metadata': vm_resource_backup_metadata,
                                             'top': top_backing_path is None,
                                             'vault_service_id': '1',
                                             'status': 'creating'}

                vm_resource_backup = db.vm_resource_backup_create(
                    context, vm_resource_backup_values)
                # upload to vault service
                vault_service_url = None
                with utils.temporary_chown(base_backing_path):
                    vault_metadata = {'metadata': vm_resource_backup_metadata,
                                      'vm_resource_backup_id': vm_resource_backup_id,
                                      'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                      'resource_name': dev,
                                      'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                                      'backupjobrun_id': backupjobrun.id}
                    vault_service_url = vault_service.backup(vault_metadata, base_backing_path)
                # update the entry in the vm_resource_backup table
                vm_resource_backup_values = {'vault_service_url': vault_service_url,
                                             'vault_service_metadata': 'None',
                                             'status': 'completed'}
                vm_resource_backup.update(vm_resource_backup_values)
                base_backing_path = top_backing_path

            if dev == 'vda':
                #TODO(gbasava): Base image can be shared by multiple instances...should leave a minimum of
                # two qcow2 files in front of the base image
                continue

            state = self.get_info(instance_name)['state']
            #TODO(gbasava): Walk the qcow2 for each disk device and commit and intermediate qcow2 files into base
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)

            # Shorten the chain: fold the just-uploaded delta (backing_file)
            # into its own base (backing_file_backing).
            if (backing_file_backing is not None and
                    backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing, backing_file)
                    utils.delete_if_exists(backing_file)
                elif state in (power_state.SHUTDOWN, power_state.SUSPENDED):
                    # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
Code example #4
0
    def backup_prepare(self,
                       backupjob,
                       backupjobrun,
                       backupjobrun_vm,
                       vault_service,
                       db,
                       context,
                       update_task_state=None):
        """Full (initial) backup of the instance specified in backupjobrun_vm.

        Takes an external qcow2 snapshot of every disk of the VM, walks the
        whole backing chain of each disk, uploads every file of the chain to
        the vault service (base first) while recording the chain in the
        vm_resource_backups table, and finally collapses the old delta into
        its base (block-commit or commit+rebase, depending on power state) for
        every disk except the root disk 'vda'.

        :param backupjob: backup job object (provides the job id)
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM (within this run) to back up
        :param vault_service: service that stores the disk backups
        :param db: database API used to record backup resources
        :param context: request context passed to db/vault calls
        :param update_task_state: optional callback to report progress
        """
        # Todo - Check the min supported version of the QEMU and Libvirt
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_PREPARE)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path,
                                          backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)
        snapshot_name = uuid.uuid4().hex
        # Bug fix: the original concatenation was missing a space and produced
        # "BackupJobRun <id>of BackupJob <id>".
        snapshot_description = ("BackupJobRun " + backupjobrun.id +
                                " of BackupJob " + backupjob.id)
        # Maps device name (e.g. 'vda') -> path of the new snapshot disk file.
        dev_snapshot_disk_paths = {}
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device,
                os.path.join(snapshot_directory,
                             snapshot_name + '_' + device + '.qcow2'))

        # we may have to powerdown/suspend until the permissions issue is resolved
        #self.suspend(instance_name)
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        # Todo - handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        # stream the backing files of the new snapshots
        if update_task_state:
            # NOTE(review): "INPROGESS" spelling matches the constant declared
            # in task_states -- do not "fix" it here alone.
            update_task_state(task_state=task_states.BACKUP_UPLOAD_INPROGESS)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.items():
            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {
                'id': str(uuid.uuid4()),
                'vm_id': backupjobrun_vm.vm_id,
                'backupjobrun_id': backupjobrun.id,
                'resource_type': 'disk',
                'resource_name': dev,
                'status': 'creating'
            }

            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)

            # Collect the full backing chain, top-most first; used as a stack
            # so the base image ends up on top.
            src_backings = []
            while src_backing_path is not None:
                src_backings.append(src_backing_path)
                mode = os.stat(src_backing_path).st_mode
                if S_ISREG(mode):
                    src_backing_path = libvirt_utils.get_disk_backing_file(
                        src_backing_path, basename=False)
                else:
                    # a non-regular file (e.g. block device) ends the chain
                    src_backing_path = None

            # Upload the chain from the base towards the top, chaining each
            # vm_resource_backup record to the previously created one.
            base_backing_path = None
            vm_resource_backup_id = None
            if src_backings:
                base_backing_path = src_backings.pop()
            while base_backing_path is not None:
                top_backing_path = None
                if src_backings:
                    top_backing_path = src_backings.pop()

                # create an entry in the vm_resource_backups table
                vm_resource_backup_backing_id = vm_resource_backup_id
                vm_resource_backup_id = str(uuid.uuid4())
                vm_resource_backup_metadata = {}
                if dev == 'vda' and top_backing_path is None:
                    # bottom of the root-disk chain is the glance base image
                    vm_resource_backup_metadata.setdefault(
                        'base_image_ref', 'TODO')
                vm_resource_backup_metadata.setdefault('disk_format', 'qcow2')
                vm_resource_backup_values = {
                    'id': vm_resource_backup_id,
                    'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                    'vm_resource_backup_backing_id':
                    vm_resource_backup_backing_id,
                    'metadata': vm_resource_backup_metadata,
                    'top': top_backing_path is None,
                    'vault_service_id': '1',
                    'status': 'creating'
                }

                vm_resource_backup = db.vm_resource_backup_create(
                    context, vm_resource_backup_values)
                # upload to vault service
                vault_service_url = None
                with utils.temporary_chown(base_backing_path):
                    vault_metadata = {
                        'metadata': vm_resource_backup_metadata,
                        'vm_resource_backup_id': vm_resource_backup_id,
                        'backupjobrun_vm_resource_id':
                        backupjobrun_vm_resource.id,
                        'resource_name': dev,
                        'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                        'backupjobrun_id': backupjobrun.id
                    }
                    vault_service_url = vault_service.backup(
                        vault_metadata, base_backing_path)
                # update the entry in the vm_resource_backup table
                vm_resource_backup_values = {
                    'vault_service_url': vault_service_url,
                    'vault_service_metadata': 'None',
                    'status': 'completed'
                }
                vm_resource_backup.update(vm_resource_backup_values)
                base_backing_path = top_backing_path

            if dev == 'vda':
                #TODO(gbasava): Base image can be shared by multiple instances...should leave a minimum of
                # two qcow2 files in front of the base image
                continue

            state = self.get_info(instance_name)['state']
            #TODO(gbasava): Walk the qcow2 for each disk device and commit and intermediate qcow2 files into base
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)

            # Shorten the chain: fold the just-uploaded delta (backing_file)
            # into its own base (backing_file_backing).
            if (backing_file_backing is not None
                    and backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing,
                                     backing_file)
                    utils.delete_if_exists(backing_file)
                elif state in (power_state.SHUTDOWN, power_state.SUSPENDED):
                    # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)