Example #1
    def restore_instance(self, backupjob, backupjobrun, backupjobrun_vm, vault_service, db, context, update_task_state=None):
        """
        Restores the specified instance from a backupjobrun
        """
        restored_image = None
        device_restored_volumes = {}  # Dictionary that holds dev and restored volumes
        temp_directory = "/tmp"
        fileutils.ensure_tree(temp_directory)
        backupjobrun_vm_resources = db.backupjobrun_vm_resources_get(context, backupjobrun_vm.vm_id, backupjobrun.id)
         
        #restore, rebase, commit & upload
        for backupjobrun_vm_resource in backupjobrun_vm_resources:
            vm_resource_backup = db.vm_resource_backup_get_top(context, backupjobrun_vm_resource.id)
            restored_file_path = temp_directory + '/' + vm_resource_backup.id + '_' + backupjobrun_vm_resource.resource_name + '.qcow2'
            vault_metadata = {'vault_service_url' : vm_resource_backup.vault_service_url,
                              'vault_service_metadata' : vm_resource_backup.vault_service_metadata,
                              'vm_resource_backup_id' : vm_resource_backup.id,
                              'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                              'resource_name':  backupjobrun_vm_resource.resource_name,
                              'backupjobrun_vm_id': backupjobrun_vm_resource.vm_id,
                              'backupjobrun_id': backupjobrun_vm_resource.backupjobrun_id}
            vault_service.restore(vault_metadata, restored_file_path)                            
            while vm_resource_backup.vm_resource_backup_backing_id is not None:
                vm_resource_backup_backing = db.vm_resource_backup_get(context, vm_resource_backup.vm_resource_backup_backing_id)
                backupjobrun_vm_resource_backing = db.backupjobrun_vm_resource_get2(context, vm_resource_backup_backing.backupjobrun_vm_resource_id)
                restored_file_path_backing = temp_directory + '/' + vm_resource_backup_backing.id + '_' + backupjobrun_vm_resource_backing.resource_name + '.qcow2'
                vault_metadata = {'vault_service_url' : vm_resource_backup_backing.vault_service_url,
                                  'vault_service_metadata' : vm_resource_backup_backing.vault_service_metadata,
                                  'vm_resource_backup_id' : vm_resource_backup_backing.id,
                                  'backupjobrun_vm_resource_id': backupjobrun_vm_resource_backing.id,
                                  'resource_name':  backupjobrun_vm_resource_backing.resource_name,
                                  'backupjobrun_vm_id': backupjobrun_vm_resource_backing.vm_id,
                                  'backupjobrun_id': backupjobrun_vm_resource_backing.backupjobrun_id}
                vault_service.restore(vault_metadata, restored_file_path_backing)                                 
                #rebase
                self.rebase(restored_file_path_backing, restored_file_path)
                #commit
                self.commit(restored_file_path)
                utils.delete_if_exists(restored_file_path)
                vm_resource_backup = vm_resource_backup_backing
                restored_file_path = restored_file_path_backing

            #upload to glance
            with open(restored_file_path, 'rb') as image_file:
                image_metadata = {'is_public': False,
                                  'status': 'active',
                                  'name': backupjobrun_vm_resource.id,
                                  'disk_format' : 'ami',
                                  'properties': {
                                               'image_location': 'TODO',
                                               'image_state': 'available',
                                               'owner_id': context.project_id
                                               }
                                  }
                #if 'architecture' in base.get('properties', {}):
                #    arch = base['properties']['architecture']
                #    image_metadata['properties']['architecture'] = arch
                
                image_service = glance.get_default_image_service()
                if backupjobrun_vm_resource.resource_name == 'vda':
                    restored_image = image_service.create(context, image_metadata, image_file)
                else:
                    #TODO(gbasava): Request a feature in cinder to create a volume from a file.
                    #As a workaround we will create the image and convert that to a cinder volume

                    restored_volume_image = image_service.create(context, image_metadata, image_file)
                    restored_volume_name = uuid.uuid4().hex
                    volume_service = cinder.API()
                    restored_volume = volume_service.create(context, max(restored_volume_image['size']/(1024*1024*1024), 1), restored_volume_name, 
                                                        'from raksha', None, restored_volume_image['id'], None, None, None)
                    device_restored_volumes.setdefault(backupjobrun_vm_resource.resource_name, restored_volume)
                   
                    #delete the image...it is not needed anymore
                    #TODO(gbasava): Cinder takes a while to create the volume from image... so we need to verify the volume creation is complete.
                    time.sleep(30)
                    image_service.delete(context, restored_volume_image['id'])
            utils.delete_if_exists(restored_file_path)
                    
        #create nova instance
        restored_instance_name = uuid.uuid4().hex
        compute_service = nova.API()
        restored_compute_image = compute_service.get_image(context, restored_image['id'])
        restored_compute_flavor = compute_service.get_flavor(context, 'm1.small')
        restored_instance = compute_service.create_server(context, restored_instance_name, restored_compute_image, restored_compute_flavor)
        #attach volumes 
        for device, restored_volume in device_restored_volumes.iteritems():
            compute_service.attach_volume(context, restored_instance.id, restored_volume['id'], ('/dev/' + device))
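
The restore loop above leans on the driver's rebase and commit helpers to collapse each restored backing chain into a single file before it is uploaded. Those helpers are not shown in this example; a minimal sketch of what they might look like, assuming they simply shell out to qemu-img (an illustration under that assumption, not the project's actual implementation), is:

    def rebase(self, backing_file_path, image_path):
        """Point image_path at backing_file_path without copying data (-u)."""
        import subprocess
        subprocess.check_call(['qemu-img', 'rebase', '-u',
                               '-b', backing_file_path, image_path])

    def commit(self, image_path):
        """Merge the changes held in image_path into its backing file."""
        import subprocess
        subprocess.check_call(['qemu-img', 'commit', image_path])
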
Example #2
    def backup_execute(self, backupjob, backupjobrun, backupjobrun_vm, vault_service, db, context, update_task_state=None):
        """
        Incremental backup of the instance specified in backupjobrun_vm

        :param backupjob: backup job that owns this run
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM within the run to back up
        """
        
        #TODO(gbasava): Check if the previous backup exists by calling vm_recent_backupjobrun_get
        
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_START)    
            
        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path, backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)
 
        snapshot_name = uuid.uuid4().hex
        snapshot_description = "BackupJobRun " + backupjobrun.id + "of BackupJob " + backupjob.id
        dev_snapshot_disk_paths = {} # Dictionary that holds dev and snapshot_disk_path
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(device, 
                        snapshot_directory + '/' + snapshot_name + '_' + device + '.qcow2' )

        #TODO(gbasava): snapshot_create_as is failing with a permissions issue
        #while the VM is running; needs further investigation
        self.snapshot_create_as(instance_name, snapshot_name, 
                                snapshot_description, dev_snapshot_disk_paths)
        #TODO(gbasava): Handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)
        
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        vm_recent_backupjobrun = db.vm_recent_backupjobrun_get(context, backupjobrun_vm.vm_id)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():
            previous_backupjobrun_vm_resource = db.backupjobrun_vm_resource_get(
                                                            context, 
                                                            backupjobrun_vm.vm_id, 
                                                            vm_recent_backupjobrun.backupjobrun_id, 
                                                            dev)
            previous_vm_resource_backup = db.vm_resource_backup_get_top(context,
                                                                        previous_backupjobrun_vm_resource.id)

            src_backing_path = libvirt_utils.get_disk_backing_file(snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {'id': str(uuid.uuid4()),
                                               'vm_id': backupjobrun_vm.vm_id,
                                               'backupjobrun_id': backupjobrun.id,       
                                               'resource_type': 'disk',
                                               'resource_name':  dev,
                                               'status': 'creating'}

            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(context, 
                                                backupjobrun_vm_resource_values)                                                
            # create an entry in the vm_resource_backups table
            vm_resource_backup_backing_id = previous_vm_resource_backup.id
            vm_resource_backup_id = str(uuid.uuid4())
            vm_resource_backup_metadata = {} # Dictionary to hold the metadata
            vm_resource_backup_metadata.setdefault('disk_format','qcow2')
            vm_resource_backup_values = {'id': vm_resource_backup_id,
                                         'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                         'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                                         'metadata': vm_resource_backup_metadata,       
                                         'top':  True,
                                         'vault_service_id' : '1',
                                         'status': 'creating'}     
                                                         
            vm_resource_backup = db.vm_resource_backup_create(context, vm_resource_backup_values)                
            #upload to vault service
            vault_service_url = None
            with utils.temporary_chown(src_backing_path):
                vault_metadata = {'metadata': vm_resource_backup_metadata,
                                  'vm_resource_backup_id' : vm_resource_backup_id,
                                  'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                  'resource_name':  dev,
                                  'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                                  'backupjobrun_id': backupjobrun.id}
                vault_service_url = vault_service.backup(vault_metadata, src_backing_path)
                
            # update the entry in the vm_resource_backup table
            vm_resource_backup_values = {'vault_service_url': vault_service_url,
                                         'vault_service_metadata': 'None',
                                         'status': 'completed'}
            vm_resource_backup.update(vm_resource_backup_values)

                
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)

        # do a block commit. 
        # TODO(gbasava): Consider the case of a base image shared by multiple instances
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_INPROGRESS)

        state = self.get_info(instance_name)['state']
        
        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():    
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(backing_file, basename=False)
            #with utils.temporary_chown(backing_file_backing):
            # if the instance is running we will do a blockcommit
            if backing_file_backing is not None and backing_file_backing != backing_file:
                if state == power_state.RUNNING:
                    self.blockcommit(instance_name, dev, backing_file_backing, backing_file)
                    utils.delete_if_exists(backing_file)
                elif state == power_state.SHUTDOWN or state == power_state.SUSPENDED:  # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
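
When the guest is running, the block-commit pass above relies on a blockcommit helper to live-merge the intermediate qcow2 into its backing file. That helper is outside this example; a hypothetical sketch, assuming it shells out to virsh (the real driver could equally use libvirt's virDomainBlockCommit API), might look like:

    def blockcommit(self, instance_name, dev, base_path, top_path):
        """Live-commit top_path into base_path for one disk of a running domain."""
        import subprocess
        subprocess.check_call(['virsh', 'blockcommit', instance_name, dev,
                               '--base', base_path, '--top', top_path,
                               '--wait'])
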
Example #3
    def backup_prepare(self, backupjob, backupjobrun, backupjobrun_vm, vault_service, db, context, update_task_state=None):
        """
        Prepares the backup for the instance specified in backupjobrun_vm

        :param backupjob: backup job that owns this run
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM within the run to back up
        """
        # TODO: Check the minimum supported versions of QEMU and libvirt
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_PREPARE)    
            
        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path, backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)
        snapshot_name = uuid.uuid4().hex
        snapshot_description = "BackupJobRun " + backupjobrun.id + "of BackupJob " + backupjob.id
        dev_snapshot_disk_paths = {} # Dictionary that holds dev and snapshot_disk_path
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(device, 
                        snapshot_directory + '/' + snapshot_name + '_' + device + '.qcow2' )

        # we may have to powerdown/suspend until the permissions issue is resolved
        #self.suspend(instance_name)
        self.snapshot_create_as(instance_name, snapshot_name, 
                                snapshot_description, dev_snapshot_disk_paths)
        # TODO: handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)
        
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        # stream the backing files of the new snapshots
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOAD_INPROGESS)
        
        
        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():    
            src_backing_path = libvirt_utils.get_disk_backing_file(snapshot_disk_path, basename=False)        
            backupjobrun_vm_resource_values = {'id': str(uuid.uuid4()),
                                               'vm_id': backupjobrun_vm.vm_id,
                                               'backupjobrun_id': backupjobrun.id,       
                                               'resource_type': 'disk',
                                               'resource_name':  dev,
                                               'status': 'creating'}

            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(context, 
                                                backupjobrun_vm_resource_values)                                                
            
            src_backings = []  # using list as a stack for the disk backings
            while src_backing_path is not None:
                src_backings.append(src_backing_path)
                mode = os.stat(src_backing_path).st_mode
                if S_ISREG(mode):
                    src_backing_path = libvirt_utils.get_disk_backing_file(src_backing_path, basename=False)
                else:
                    src_backing_path = None

            base_backing_path = None
            vm_resource_backup_id = None
            if len(src_backings) > 0:
                base_backing_path = src_backings.pop()
            while base_backing_path is not None:
                top_backing_path = None
                if len(src_backings) > 0:
                    top_backing_path = src_backings.pop()
                    
                # create an entry in the vm_resource_backups table
                vm_resource_backup_backing_id = vm_resource_backup_id
                vm_resource_backup_id = str(uuid.uuid4())
                vm_resource_backup_metadata = {} # Dictionary to hold the metadata
                if dev == 'vda' and top_backing_path is None:
                    vm_resource_backup_metadata.setdefault('base_image_ref','TODO')                    
                vm_resource_backup_metadata.setdefault('disk_format','qcow2')
                vm_resource_backup_values = {'id': vm_resource_backup_id,
                                             'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                             'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                                             'metadata': vm_resource_backup_metadata,       
                                             'top': (top_backing_path is None),
                                             'vault_service_id' : '1',
                                             'status': 'creating'}     
                                                             
                vm_resource_backup = db.vm_resource_backup_create(context, vm_resource_backup_values)                
                #upload to vault service
                vault_service_url = None
                with utils.temporary_chown(base_backing_path):
                    vault_metadata = {'metadata': vm_resource_backup_metadata,
                                      'vm_resource_backup_id' : vm_resource_backup_id,
                                      'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                                      'resource_name':  dev,
                                      'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                                      'backupjobrun_id': backupjobrun.id}
                    vault_service_url = vault_service.backup(vault_metadata, base_backing_path)
                # update the entry in the vm_resource_backup table
                vm_resource_backup_values = {'vault_service_url': vault_service_url,
                                             'vault_service_metadata': 'None',
                                             'status': 'completed'}
                vm_resource_backup.update(vm_resource_backup_values)
                base_backing_path = top_backing_path

            if dev == 'vda': 
                #TODO(gbasava): Base image can be shared by multiple instances...should leave a minimum of 
                # two qcow2 files in front of the base image
                continue
            
            state = self.get_info(instance_name)['state']    
            #TODO(gbasava): Walk the qcow2 chain for each disk device and commit any intermediate qcow2 files into the base
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(backing_file, basename=False)
            #with utils.temporary_chown(backing_file_backing):
            
            if backing_file_backing is not None and backing_file_backing != backing_file:
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing, backing_file)
                    utils.delete_if_exists(backing_file)
                elif state == power_state.SHUTDOWN or state == power_state.SUSPENDED:  # commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
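
Both backup_prepare and backup_execute depend on a snapshot_create_as helper that takes an external, disk-only snapshot of every disk attached to the instance. Its body is not part of these examples; a hypothetical sketch, assuming it drives virsh snapshot-create-as with one --diskspec entry per device, could look like:

    def snapshot_create_as(self, instance_name, snapshot_name,
                           snapshot_description, dev_snapshot_disk_paths):
        """Create an external, disk-only snapshot covering every disk of the VM."""
        import subprocess
        cmd = ['virsh', 'snapshot-create-as', instance_name, snapshot_name,
               snapshot_description, '--disk-only', '--atomic']
        for dev, disk_path in dev_snapshot_disk_paths.iteritems():
            # e.g. --diskspec vda,file=/path/to/<snapshot_name>_vda.qcow2,snapshot=external
            cmd.append('--diskspec')
            cmd.append('%s,file=%s,snapshot=external' % (dev, disk_path))
        subprocess.check_call(cmd)
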
Example #4
    def restore_instance(self,
                         backupjob,
                         backupjobrun,
                         backupjobrun_vm,
                         vault_service,
                         db,
                         context,
                         update_task_state=None):
        """
        Restores the specified instance from a backupjobrun
        """
        restored_image = None
        device_restored_volumes = {
        }  # Dictionary that holds dev and restored volumes
        temp_directory = "/tmp"
        fileutils.ensure_tree(temp_directory)
        backupjobrun_vm_resources = db.backupjobrun_vm_resources_get(
            context, backupjobrun_vm.vm_id, backupjobrun.id)

        #restore, rebase, commit & upload
        for backupjobrun_vm_resource in backupjobrun_vm_resources:
            vm_resource_backup = db.vm_resource_backup_get_top(
                context, backupjobrun_vm_resource.id)
            restored_file_path = temp_directory + '/' + vm_resource_backup.id + '_' + backupjobrun_vm_resource.resource_name + '.qcow2'
            vault_metadata = {
                'vault_service_url': vm_resource_backup.vault_service_url,
                'vault_service_metadata':
                vm_resource_backup.vault_service_metadata,
                'vm_resource_backup_id': vm_resource_backup.id,
                'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                'resource_name': backupjobrun_vm_resource.resource_name,
                'backupjobrun_vm_id': backupjobrun_vm_resource.vm_id,
                'backupjobrun_id': backupjobrun_vm_resource.backupjobrun_id
            }
            vault_service.restore(vault_metadata, restored_file_path)
            while vm_resource_backup.vm_resource_backup_backing_id is not None:
                vm_resource_backup_backing = db.vm_resource_backup_get(
                    context, vm_resource_backup.vm_resource_backup_backing_id)
                backupjobrun_vm_resource_backing = db.backupjobrun_vm_resource_get2(
                    context,
                    vm_resource_backup_backing.backupjobrun_vm_resource_id)
                restored_file_path_backing = temp_directory + '/' + vm_resource_backup_backing.id + '_' + backupjobrun_vm_resource_backing.resource_name + '.qcow2'
                vault_metadata = {
                    'vault_service_url':
                    vm_resource_backup_backing.vault_service_url,
                    'vault_service_metadata':
                    vm_resource_backup_backing.vault_service_metadata,
                    'vm_resource_backup_id':
                    vm_resource_backup_backing.id,
                    'backupjobrun_vm_resource_id':
                    backupjobrun_vm_resource_backing.id,
                    'resource_name':
                    backupjobrun_vm_resource_backing.resource_name,
                    'backupjobrun_vm_id':
                    backupjobrun_vm_resource_backing.vm_id,
                    'backupjobrun_id':
                    backupjobrun_vm_resource_backing.backupjobrun_id
                }
                vault_service.restore(vault_metadata,
                                      restored_file_path_backing)
                #rebase
                self.rebase(restored_file_path_backing, restored_file_path)
                #commit
                self.commit(restored_file_path)
                utils.delete_if_exists(restored_file_path)
                vm_resource_backup = vm_resource_backup_backing
                restored_file_path = restored_file_path_backing

            #upload to glance
            with open(restored_file_path, 'rb') as image_file:
                image_metadata = {
                    'is_public': False,
                    'status': 'active',
                    'name': backupjobrun_vm_resource.id,
                    'disk_format': 'ami',
                    'properties': {
                        'image_location': 'TODO',
                        'image_state': 'available',
                        'owner_id': context.project_id
                    }
                }
                #if 'architecture' in base.get('properties', {}):
                #    arch = base['properties']['architecture']
                #    image_metadata['properties']['architecture'] = arch

                image_service = glance.get_default_image_service()
                if backupjobrun_vm_resource.resource_name == 'vda':
                    restored_image = image_service.create(
                        context, image_metadata, image_file)
                else:
                    #TODO(gbasava): Request a feature in cinder to create a volume from a file.
                    #As a workaround we will create the image and convert that to a cinder volume

                    restored_volume_image = image_service.create(
                        context, image_metadata, image_file)
                    restored_volume_name = uuid.uuid4().hex
                    volume_service = cinder.API()
                    restored_volume = volume_service.create(
                        context,
                        max(
                            restored_volume_image['size'] /
                            (1024 * 1024 * 1024), 1), restored_volume_name,
                        'from raksha', None, restored_volume_image['id'], None,
                        None, None)
                    device_restored_volumes.setdefault(
                        backupjobrun_vm_resource.resource_name,
                        restored_volume)

                    #delete the image...it is not needed anymore
                    #TODO(gbasava): Cinder takes a while to create the volume from image... so we need to verify the volume creation is complete.
                    time.sleep(30)
                    image_service.delete(context, restored_volume_image['id'])
            utils.delete_if_exists(restored_file_path)

        #create nova instance
        restored_instance_name = uuid.uuid4().hex
        compute_service = nova.API()
        restored_compute_image = compute_service.get_image(
            context, restored_image['id'])
        restored_compute_flavor = compute_service.get_flavor(
            context, 'm1.small')
        restored_instance = compute_service.create_server(
            context, restored_instance_name, restored_compute_image,
            restored_compute_flavor)
        #attach volumes
        for device, restored_volume in device_restored_volumes.iteritems():
            compute_service.attach_volume(context, restored_instance.id,
                                          restored_volume['id'],
                                          ('/dev/' + device))
Example #5
    def backup_execute(self,
                       backupjob,
                       backupjobrun,
                       backupjobrun_vm,
                       vault_service,
                       db,
                       context,
                       update_task_state=None):
        """
        Incremental backup of the instance specified in backupjobrun_vm

        :param backupjob: backup job that owns this run
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM within the run to back up
        """

        #TODO(gbasava): Check if the previous backup exists by calling vm_recent_backupjobrun_get

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_START)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path,
                                          backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)

        snapshot_name = uuid.uuid4().hex
        snapshot_description = "BackupJobRun " + backupjobrun.id + "of BackupJob " + backupjob.id
        dev_snapshot_disk_paths = {
        }  # Dictionary that holds dev and snapshot_disk_path
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device, snapshot_directory + '/' + snapshot_name + '_' +
                device + '.qcow2')

        #TODO(gbasava): snapshot_create_as is failing with a permissions issue
        #while the VM is running; needs further investigation
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        #TODO(gbasava): Handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        vm_recent_backupjobrun = db.vm_recent_backupjobrun_get(
            context, backupjobrun_vm.vm_id)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():
            previous_backupjobrun_vm_resource = db.backupjobrun_vm_resource_get(
                context, backupjobrun_vm.vm_id,
                vm_recent_backupjobrun.backupjobrun_id, dev)
            previous_vm_resource_backup = db.vm_resource_backup_get_top(
                context, previous_backupjobrun_vm_resource.id)

            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {
                'id': str(uuid.uuid4()),
                'vm_id': backupjobrun_vm.vm_id,
                'backupjobrun_id': backupjobrun.id,
                'resource_type': 'disk',
                'resource_name': dev,
                'status': 'creating'
            }

            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)
            # create an entry in the vm_resource_backups table
            vm_resource_backup_backing_id = previous_vm_resource_backup.id
            vm_resource_backup_id = str(uuid.uuid4())
            vm_resource_backup_metadata = {}  # Dictionary to hold the metadata
            vm_resource_backup_metadata.setdefault('disk_format', 'qcow2')
            vm_resource_backup_values = {
                'id': vm_resource_backup_id,
                'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                'vm_resource_backup_backing_id': vm_resource_backup_backing_id,
                'metadata': vm_resource_backup_metadata,
                'top': True,
                'vault_service_id': '1',
                'status': 'creating'
            }

            vm_resource_backup = db.vm_resource_backup_create(
                context, vm_resource_backup_values)
            #upload to vault service
            vault_service_url = None
            with utils.temporary_chown(src_backing_path):
                vault_metadata = {
                    'metadata': vm_resource_backup_metadata,
                    'vm_resource_backup_id': vm_resource_backup_id,
                    'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                    'resource_name': dev,
                    'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                    'backupjobrun_id': backupjobrun.id
                }
                vault_service_url = vault_service.backup(
                    vault_metadata, src_backing_path)

            # update the entry in the vm_resource_backup table
            vm_resource_backup_values = {
                'vault_service_url': vault_service_url,
                'vault_service_metadata': 'None',
                'status': 'completed'
            }
            vm_resource_backup.update(vm_resource_backup_values)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)

        # do a block commit.
        # TODO(gbasava): Consider the case of a base image shared by multiple instances
        if update_task_state:
            update_task_state(
                task_state=task_states.BACKUP_BLOCKCOMMIT_INPROGRESS)

        state = self.get_info(instance_name)['state']

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)
            #with utils.temporary_chown(backing_file_backing):
            # if the instance is running we will do a blockcommit
            if (backing_file_backing is not None
                    and backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    self.blockcommit(instance_name, dev, backing_file_backing,
                                     backing_file)
                    utils.delete_if_exists(backing_file)
                elif (state == power_state.SHUTDOWN
                      or state == power_state.SUSPENDED):  #commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_BLOCKCOMMIT_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)
Example #6
    def backup_prepare(self,
                       backupjob,
                       backupjobrun,
                       backupjobrun_vm,
                       vault_service,
                       db,
                       context,
                       update_task_state=None):
        """
        Prepares the backup for the instance specified in backupjobrun_vm

        :param backupjob: backup job that owns this run
        :param backupjobrun: current run of the backup job
        :param backupjobrun_vm: VM within the run to back up
        """
        # TODO: Check the minimum supported versions of QEMU and libvirt
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_PREPARE)

        instance_name = self.get_instance_name_by_uuid(backupjobrun_vm.vm_id)
        snapshot_directory = os.path.join(CONF.instances_path,
                                          backupjobrun_vm.vm_id)
        fileutils.ensure_tree(snapshot_directory)
        snapshot_name = uuid.uuid4().hex
        snapshot_description = "BackupJobRun " + backupjobrun.id + "of BackupJob " + backupjob.id
        dev_snapshot_disk_paths = {
        }  # Dictionary that holds dev and snapshot_disk_path
        devices = self.get_disks(instance_name)
        for device in devices:
            dev_snapshot_disk_paths.setdefault(
                device, snapshot_directory + '/' + snapshot_name + '_' +
                device + '.qcow2')

        # we may have to powerdown/suspend until the permissions issue is resolved
        #self.suspend(instance_name)
        self.snapshot_create_as(instance_name, snapshot_name,
                                snapshot_description, dev_snapshot_disk_paths)
        # TODO: handle the failure of snapshot_create_as
        self.snapshot_delete(instance_name, snapshot_name, True)

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_SNAPSHOT_CREATED)

        # stream the backing files of the new snapshots
        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOAD_INPROGESS)

        for dev, snapshot_disk_path in dev_snapshot_disk_paths.iteritems():
            src_backing_path = libvirt_utils.get_disk_backing_file(
                snapshot_disk_path, basename=False)
            backupjobrun_vm_resource_values = {
                'id': str(uuid.uuid4()),
                'vm_id': backupjobrun_vm.vm_id,
                'backupjobrun_id': backupjobrun.id,
                'resource_type': 'disk',
                'resource_name': dev,
                'status': 'creating'
            }

            backupjobrun_vm_resource = db.backupjobrun_vm_resource_create(
                context, backupjobrun_vm_resource_values)

            src_backings = []  # using list as a stack for the disk backings
            while src_backing_path is not None:
                src_backings.append(src_backing_path)
                mode = os.stat(src_backing_path).st_mode
                if S_ISREG(mode):
                    src_backing_path = libvirt_utils.get_disk_backing_file(
                        src_backing_path, basename=False)
                else:
                    src_backing_path = None

            base_backing_path = None
            vm_resource_backup_id = None
            if len(src_backings) > 0:
                base_backing_path = src_backings.pop()
            while base_backing_path is not None:
                top_backing_path = None
                if len(src_backings) > 0:
                    top_backing_path = src_backings.pop()

                # create an entry in the vm_resource_backups table
                vm_resource_backup_backing_id = vm_resource_backup_id
                vm_resource_backup_id = str(uuid.uuid4())
                vm_resource_backup_metadata = {
                }  # Dictionary to hold the metadata
                if dev == 'vda' and top_backing_path is None:
                    vm_resource_backup_metadata.setdefault(
                        'base_image_ref', 'TODO')
                vm_resource_backup_metadata.setdefault('disk_format', 'qcow2')
                vm_resource_backup_values = {
                    'id': vm_resource_backup_id,
                    'backupjobrun_vm_resource_id': backupjobrun_vm_resource.id,
                    'vm_resource_backup_backing_id':
                    vm_resource_backup_backing_id,
                    'metadata': vm_resource_backup_metadata,
                    'top': (top_backing_path is None),
                    'vault_service_id': '1',
                    'status': 'creating'
                }

                vm_resource_backup = db.vm_resource_backup_create(
                    context, vm_resource_backup_values)
                #upload to vault service
                vault_service_url = None
                with utils.temporary_chown(base_backing_path):
                    vault_metadata = {
                        'metadata': vm_resource_backup_metadata,
                        'vm_resource_backup_id': vm_resource_backup_id,
                        'backupjobrun_vm_resource_id':
                        backupjobrun_vm_resource.id,
                        'resource_name': dev,
                        'backupjobrun_vm_id': backupjobrun_vm.vm_id,
                        'backupjobrun_id': backupjobrun.id
                    }
                    vault_service_url = vault_service.backup(
                        vault_metadata, base_backing_path)
                # update the entry in the vm_resource_backup table
                vm_resource_backup_values = {
                    'vault_service_url': vault_service_url,
                    'vault_service_metadata': 'None',
                    'status': 'completed'
                }
                vm_resource_backup.update(vm_resource_backup_values)
                base_backing_path = top_backing_path

            if dev == 'vda':
                #TODO(gbasava): Base image can be shared by multiple instances...should leave a minimum of
                # two qcow2 files in front of the base image
                continue

            state = self.get_info(instance_name)['state']
            #TODO(gbasava): Walk the qcow2 chain for each disk device and commit any intermediate qcow2 files into the base
            with utils.temporary_chown(snapshot_disk_path):
                backing_file = libvirt_utils.get_disk_backing_file(
                    snapshot_disk_path, basename=False)
            with utils.temporary_chown(backing_file):
                backing_file_backing = libvirt_utils.get_disk_backing_file(
                    backing_file, basename=False)
            #with utils.temporary_chown(backing_file_backing):

            if (backing_file_backing is not None
                    and backing_file_backing != backing_file):
                if state == power_state.RUNNING:
                    # if the instance is running we will do a blockcommit
                    self.blockcommit(instance_name, dev, backing_file_backing,
                                     backing_file)
                    utils.delete_if_exists(backing_file)
                elif (state == power_state.SHUTDOWN
                      or state == power_state.SUSPENDED):  #commit and rebase
                    self.commit(backing_file)
                    utils.delete_if_exists(backing_file)
                    self.rebase(backing_file_backing, snapshot_disk_path)
                #else: TODO(gbasava): investigate and handle other powerstates

        if update_task_state:
            update_task_state(task_state=task_states.BACKUP_UPLOADING_FINISH)
            update_task_state(task_state=task_states.BACKUP_COMPLETE)