Code example #1
File: tasks.py  Project: cgvarela/esdc-ce
def vm_migrate_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.migrate.views.vm_migrate.
    """
    ghost_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg and 'Successfully migrated' in msg:
        # Save node and delete placeholder VM first
        node = ghost_vm.vm.node
        nss = set(ghost_vm.vm.get_node_storages())
        ghost_vm.delete()  # post_delete signal will update node and storage resources
        # Fetch VM after ghost_vm is deleted, because it updates vm.slave_vms array
        vm = Vm.objects.select_related('node', 'dc').get(uuid=vm_uuid)
        changing_node = vm.node != ghost_vm.vm.node
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from vm_migrate(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        nss.update(list(vm.get_node_storages()))
        # Revert status and set new node (should trigger node resource update)
        vm.revert_notready(save=False)
        if changing_node:
            vm.set_node(node)
        vm.save(update_node_resources=True, update_storage_resources=nss)
        SlaveVm.switch_vm_snapshots_node_storages(vm, nss=nss)
        vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!

    else:
        vm = Vm.objects.get(uuid=vm_uuid)
        _vm_migrate_cb_failed(result, task_id, vm, ghost_vm)
        logger.error(
            'Found nonzero returncode in result from vm_migrate(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    if vm.json_changed():
        vm_update(vm)

    return result
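
Note: the task framework that invokes these callbacks is not part of this excerpt. As a minimal, self-contained sketch of the callback contract the functions on this page follow (inspect result['returncode'], commit on success, raise TaskException to mark the task failed), with a simplified stand-in TaskException and result shape (both assumptions, not the esdc-ce implementations):

class TaskException(Exception):
    """Stand-in for the esdc-ce TaskException; carries the task result."""
    def __init__(self, result, message):
        super(TaskException, self).__init__(message)
        self.result = result

def sample_cb(result, task_id):
    msg = result.get('message', '')
    if result['returncode'] == 0 and 'Successfully migrated' in msg:
        # Success path: commit side effects (DB saves, signals) and return
        return result
    # Failure path: raising marks the callback (and thus the task) as failed
    raise TaskException(result, 'Got bad return code (%s). Error: %s'
                        % (result['returncode'], msg))

print(sample_cb({'returncode': 0, 'message': 'Successfully migrated'}, 'task-1'))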
Code example #2
File: tasks.py  Project: zetacloud/esdc-ce
def vm_replica_cb_failed_cleanup(sender, apiview, result, task_id, status, obj,
                                 **kwargs):
    """Signal receiver emitted after task is revoked."""
    if sender == 'vm_replica':
        slave_vm = SlaveVm.get_by_uuid(apiview['slave_vm_uuid'])
        _vm_replica_cb_failed(result, task_id, obj, slave_vm,
                              apiview['method'])
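
The cleanup signal this receiver subscribes to is the fallback task_cleanup_signal sent at the end of code example #6. A hypothetical minimal wiring with Django's signal machinery (the signal's real import path in esdc-ce is not shown here and is an assumption):

from django.dispatch import Signal, receiver

task_cleanup_signal = Signal()  # declared here only for the sketch

@receiver(task_cleanup_signal)
def replica_cleanup(sender, apiview=None, result=None, task_id=None,
                    status=None, obj=None, **kwargs):
    if sender == 'vm_replica':
        print('cleaning up revoked replica task', task_id)

# Mirrors the send() call at the end of code example #6:
task_cleanup_signal.send(sender='vm_replica', apiview={}, result={},
                         task_id='task-1', status=None, obj=None)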
Code example #3
File: tasks.py  Project: zetacloud/esdc-ce
def vm_replica_reinit_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.replica.views.vm_replica_reinit.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    result, jsons = _parse_vm_replica_result(result,
                                             vm,
                                             slave_vm,
                                             action,
                                             key_json_idx=0,
                                             cb_name='vm_replica_reinit')

    if result['returncode'] != 0:
        if jsons and jsons[0].get('success', False):  # Successfully reversed replication
            slave_vm.last_sync = _parse_last_sync(jsons[0])
            slave_vm.rep_reinit_required = False
            slave_vm.save()

        msg = result['detail']
        logger.error(
            'Found nonzero returncode in result from %s vm_replica_reinit(%s, %s). Error: %s',
            action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    slave_vm.rep_reinit_required = False
    slave_vm.last_sync = _parse_last_sync(jsons[0])
    _save_svc_state(slave_vm, jsons)
    _save_svc_params(slave_vm, jsons)
    slave_vm.save()
    msg = 'Server replica was successfully reinitialized'
    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    return result
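
_parse_last_sync is not included in this excerpt. A plausible stand-in, assuming the esrep json carries a unix timestamp of the last sync (the field name below is a guess, not taken from esdc-ce):

from datetime import datetime, timezone

def parse_last_sync(esrep_json):
    ts = esrep_json.get('timestamp')  # field name is an assumption
    if ts is None:
        return None
    return datetime.fromtimestamp(int(ts), tz=timezone.utc)

print(parse_last_sync({'timestamp': 1700000000}))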
Code example #4
File: tasks.py  Project: zetacloud/esdc-ce
def vm_replica_failover_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.replica.views.vm_replica_failover.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid,
                                   sr=('vm', 'master_vm', 'vm__node', 'vm__dc'))
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    force = result['meta']['apiview']['force']
    result, jsons = _parse_vm_replica_result(result,
                                             vm,
                                             slave_vm,
                                             action,
                                             key_json_idx=-1,
                                             cb_name='vm_replica_failover')
    sync_status = _save_svc_state(slave_vm, jsons)

    if result['returncode'] != 0:
        if sync_status is not None:
            slave_vm.save(update_fields=('sync_status', ))

        vm.revert_notready()
        msg = result['detail']
        logger.error(
            'Found nonzero returncode in result from %s vm_replica_failover(%s, %s). Error: %s',
            action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    # New master VM was born
    # Delete tasks for old master
    if force:
        tasks = list(vm.tasks.keys())
        try:
            tasks.remove(task_id)
        except ValueError:
            pass
        _delete_tasks(vm, tasks)

    old_vm_status = result['meta']['apiview']['orig_status']
    # Create internal shutdown task of old master VM
    _vm_shutdown(vm)

    # Save new master, degrade old master
    slave_vm.master_vm.revert_notready(save=False)
    new_vm = slave_vm.fail_over()

    # Re-check status of old master (current degraded slave) because it was shut down,
    # but the state wasn't saved (it was notready back then)
    vm_status_one(task_id, vm)

    # Continue with promotion of the new master and degradation of the old one
    SlaveVm.switch_vm_snapshots_node_storages(new_vm,
                                              nss=vm.get_node_storages())
    # Force update of zabbix
    vm_json_active_changed.send(task_id,
                                vm=new_vm,
                                old_json_active={},
                                force_update=True)  # Signal!

    if new_vm.node != vm.node:
        vm_node_changed.send(task_id, vm=new_vm, force_update=True)  # Signal!

    msg = 'Server replica was successfully promoted to master'
    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=new_vm, **result['meta'])
    request = get_dummy_request(vm.dc, method='PUT', system_user=True)

    # Mark pending backups as "lost" :(  TODO: implement vm_backup_sync
    new_vm.backup_set.filter(status=Backup.PENDING).update(status=Backup.LOST)

    # Sync snapshots on new master VM (mark missing snapshots as "lost")
    for disk_id, _ in enumerate(new_vm.json_active_get_disks(), start=1):
        call_api_view(request,
                      'PUT',
                      vm_snapshot_list,
                      new_vm.hostname,
                      data={'disk_id': disk_id},
                      log_response=True)

    if old_vm_status == Vm.RUNNING:
        # Start new master VM
        call_api_view(request,
                      'PUT',
                      vm_status,
                      new_vm.hostname,
                      action='start',
                      log_response=True)

    return result
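
One detail worth noting in the snapshot-sync loop above: the snapshot API uses 1-based disk IDs while the json disk list is 0-based (code example #6 confirms this by storing vm_disk_id=apiview['disk_id'] - 1), hence enumerate(..., start=1):

disks = [{'path': 'disk0'}, {'path': 'disk1'}]  # stand-in for json_active_get_disks()
for disk_id, disk in enumerate(disks, start=1):
    print(disk_id, disk['path'])  # -> 1 disk0 / 2 disk1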
Code example #5
File: tasks.py  Project: zetacloud/esdc-ce
def vm_replica_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.replica.views.vm_replica.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    result, jsons = _parse_vm_replica_result(result, vm, slave_vm, action)

    if action == 'POST':
        vm.revert_notready()

        if jsons and jsons[0].get('success', False):
            esrep_init = jsons[0]
            # New slave VM was successfully created on target node
            # noinspection PyTypeChecker
            json_active = pickle.loads(
                base64.decodestring(esrep_init.pop('slave_json')))
            slave_vm.vm.json = slave_vm.vm.json_active = json_active
            slave_vm.vm.status = Vm.STOPPED
            slave_vm.vm.save(update_fields=('status', 'status_change',
                                            'enc_json', 'enc_json_active',
                                            'changed'))
            slave_vm.last_sync = _parse_last_sync(esrep_init)
        else:
            slave_vm.delete()

    sync_status = _save_svc_state(slave_vm, jsons)
    msg = result['detail']

    if result['returncode'] == 0 and jsons:
        if action == 'POST':
            _save_svc_params(slave_vm, jsons)
            slave_vm.save()
            msg = 'Server replica was successfully initialized'
        elif action == 'PUT':
            _save_svc_params(slave_vm, jsons)
            slave_vm.save()
            msg = 'Server replication service was successfully updated'
        elif action == 'DELETE':
            slave_vm.delete()
            msg = 'Server replica was successfully destroyed'

            # noinspection PyTypeChecker
            if len(jsons[-1]['master_cleaned_disks']) != len(
                    vm.json_active_get_disks()):
                warning = "WARNING: Master server's disks were not cleaned properly"
                result['detail'] += ' msg=' + warning
                msg += '; ' + warning
    else:
        if sync_status is not None:
            slave_vm.save(update_fields=('sync_status', ))
        logger.error(
            'Found nonzero returncode in result from %s vm_replica(%s, %s). Error: %s',
            action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
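
Note that base64.decodestring, used in the POST branch above, was deprecated in Python 3.1 and removed in Python 3.9; on current Python the same deserialization step would read:

import base64
import pickle

def load_slave_json(encoded_blob):
    # b64decode is the modern replacement for the removed decodestring
    return pickle.loads(base64.b64decode(encoded_blob))

blob = base64.b64encode(pickle.dumps({'uuid': 'abc'}))
print(load_slave_json(blob))

As in the original, this unpickles data produced by the platform itself; pickle must never be fed untrusted input.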
Code example #6
def _task_cleanup(result, task_id, task_status, obj, **kwargs):
    """
    Cleanup after task is revoked.
    """
    apiview = result['meta']['apiview']
    view = apiview['view']

    if view == 'vm_snapshot':
        from vms.models import Snapshot
        from api.vm.snapshot.tasks import _vm_snapshot_cb_failed

        snap = Snapshot.objects.get(vm=obj,
                                    disk_id=Snapshot.get_disk_id(
                                        obj, apiview['disk_id']),
                                    name=apiview['snapname'])
        _vm_snapshot_cb_failed(result, task_id, snap, apiview['method'])

    elif view == 'vm_snapshot_list':
        from vms.models import Snapshot
        from api.vm.snapshot.tasks import _vm_snapshot_list_cb_failed

        snaps = Snapshot.objects.filter(vm=obj,
                                        disk_id=Snapshot.get_disk_id(
                                            obj, apiview['disk_id']),
                                        name__in=apiview['snapnames'])
        _vm_snapshot_list_cb_failed(result, task_id, snaps, apiview['method'])

    elif view == 'vm_backup':
        from vms.models import Backup
        from api.vm.backup.tasks import _vm_backup_cb_failed

        bkp = Backup.objects.get(vm_hostname=apiview['hostname'],
                                 vm_disk_id=apiview['disk_id'] - 1,
                                 name=apiview['bkpname'])
        _vm_backup_cb_failed(result, task_id, bkp, apiview['method'], vm=obj)

    elif view == 'vm_backup_list':
        from vms.models import Backup
        from api.vm.backup.tasks import _vm_backup_list_cb_failed

        bkps = Backup.objects.filter(vm_hostname=apiview['hostname'],
                                     vm_disk_id=apiview['disk_id'] - 1,
                                     name__in=apiview['bkpnames'])
        _vm_backup_list_cb_failed(result, task_id, bkps, apiview['method'])

    elif view == 'vm_manage':
        if apiview['method'] == 'POST':
            from api.vm.base.tasks import _vm_create_cb_failed
            result['message'] = ''
            _vm_create_cb_failed(result, task_id, obj)
        elif apiview['method'] == 'DELETE':
            from api.vm.base.tasks import _vm_delete_cb_failed
            _vm_delete_cb_failed(result, task_id, obj)
        elif apiview['method'] == 'PUT':
            from api.vm.base.tasks import _vm_update_cb_done
            _vm_update_cb_done(result, task_id, obj)

    elif view == 'vm_status':
        from api.vm.status.tasks import _vm_status_cb_failed

        if apiview['method'] == 'PUT':
            _vm_status_cb_failed(result, task_id, obj)

    elif view == 'vm_migrate':
        from vms.models import SlaveVm
        from api.vm.migrate.tasks import _vm_migrate_cb_failed

        ghost_vm = SlaveVm.get_by_uuid(apiview['slave_vm_uuid'])
        _vm_migrate_cb_failed(result, task_id, obj, ghost_vm)

    elif view == 'image_manage' or view == 'image_snapshot':
        # obj = Image
        from vms.models import Snapshot
        from api.image.base.tasks import _image_manage_cb_failed

        method = apiview['method']
        snap_id = obj.src_snap_id

        if method == 'POST' and snap_id:
            snap = Snapshot.objects.get(id=snap_id)
        else:
            snap = None

        _image_manage_cb_failed(result, task_id, obj, method, snap=snap)

    elif view == 'node_image':
        # obj = NodeStorage
        from vms.models import Image
        from api.node.image.tasks import _node_image_cb_failed

        img = Image.objects.get(name=apiview['name'])
        _node_image_cb_failed(result, task_id, obj, img)

    else:
        task_cleanup_signal.send(sender=view,
                                 apiview=apiview,
                                 result=result,
                                 task_id=task_id,
                                 status=task_status,
                                 obj=obj)
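
The function above is one long if/elif dispatch on the view name, with the final else delegating to task_cleanup_signal so that other apps can hook in. A hypothetical table-driven equivalent of the dispatch part (a refactoring sketch, not how esdc-ce is actually structured):

CLEANUP_HANDLERS = {}

def cleanup_handler(view_name):
    """Register a cleanup handler for a given apiview name."""
    def register(fn):
        CLEANUP_HANDLERS[view_name] = fn
        return fn
    return register

@cleanup_handler('vm_migrate')
def _cleanup_vm_migrate(result, task_id, obj, apiview):
    pass  # would mirror the vm_migrate branch above

def task_cleanup(result, task_id, task_status, obj, **kwargs):
    apiview = result['meta']['apiview']
    handler = CLEANUP_HANDLERS.get(apiview['view'])
    if handler is not None:
        handler(result, task_id, obj, apiview)
    # else: fall back to sending task_cleanup_signal, as in the original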
Code example #7
    def validate(self, attrs):
        vm = self.vm
        node = attrs.get('node', vm.node)
        changing_node = node != vm.node
        # A ghost VM is a copy of a VM used to take up its place in the DB.
        # When the node is changing, all disks have to be in the ghost VM.
        # When changing only disk pools, only the changed disks have to be in the ghost VM.
        ghost_vm = SlaveVm(_master_vm=vm)
        ghost_vm.reserve_resources = changing_node
        ghost_vm.set_migration_hostname()
        ghost_vm.node = node
        ghost_vm_define = SlaveVmDefine(ghost_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        # Every pool must be validated when changing node
        try:
            root_zpool = ghost_vm_define.save_root_zpool(
                root_zpool, save_same_zpool=changing_node)
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate disk_zpools (we can do this after we know the new node)
        if ghost_vm.vm.is_kvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = ghost_vm_define.save_disk_zpools(
                    disk_zpools, save_same_zpool=changing_node)
            except APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return attrs
        else:
            disk_zpools = {}

        # Nothing changed, eh?
        if not changing_node and not (root_zpool or disk_zpools):
            raise s.ValidationError(_('Nothing to do.'))

        # Validate dc_node resources
        try:
            ghost_vm_define.validate_node_resources(
                ignore_cpu_ram=not changing_node)
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate storage resources
        try:
            ghost_vm_define.validate_storage_resources()
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate images
        self.img_required = ghost_vm_define.check_required_images()

        # Save params
        # noinspection PyAttributeOutsideInit
        self._root_zpool = root_zpool
        # noinspection PyAttributeOutsideInit
        self._disk_zpools = disk_zpools
        # noinspection PyAttributeOutsideInit
        self.ghost_vm_define = ghost_vm_define
        # noinspection PyAttributeOutsideInit
        self.changing_node = changing_node

        return attrs
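
This validate() follows the DRF serializer idiom of recording field-level errors in self._errors and returning attrs early instead of raising, so each failure is attached to the 'node' field. A stripped-down sketch of that idiom, using simplified stand-ins rather than the esdc-ce classes:

class MiniSerializer(object):
    def __init__(self):
        self._errors = {}

    def validate(self, attrs):
        try:
            self._check_node(attrs.get('node'))
        except ValueError as exc:
            # Attach the problem to a specific field and bail out early
            self._errors['node'] = [str(exc)]
            return attrs
        return attrs

    @staticmethod
    def _check_node(node):
        if node is None:
            raise ValueError('node is required')

s = MiniSerializer()
s.validate({})
print(s._errors)  # -> {'node': ['node is required']}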
Code example #8
File: tasks.py  Project: hafo821/esdc-ce
def vm_migrate_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.migrate.views.vm_migrate.
    """
    ghost_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg and 'Successfully migrated' in msg:
        # Save node and delete placeholder VM first
        node = ghost_vm.vm.node
        nss = set(ghost_vm.vm.get_node_storages())
        ghost_vm.delete()  # post_delete signal will update node and storage resources
        # Fetch VM after ghost_vm is deleted, because it updates vm.slave_vms array
        vm = Vm.objects.select_related('node', 'dc').get(uuid=vm_uuid)
        changing_node = vm.node != ghost_vm.vm.node
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from vm_migrate(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        nss.update(list(vm.get_node_storages()))
        # Revert status and set new node (should trigger node resource update)
        vm.revert_notready(save=False)
        if changing_node:
            vm.set_node(node)
        vm.save(update_node_resources=True, update_storage_resources=nss)
        SlaveVm.switch_vm_snapshots_node_storages(vm, nss=nss)
        vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!

    else:
        vm = Vm.objects.get(uuid=vm_uuid)
        _vm_migrate_cb_failed(result, task_id, vm, ghost_vm)
        logger.error(
            'Found nonzero returncode in result from vm_migrate(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    if vm.json_changed():
        logger.info(
            'Running PUT vm_manage(%s), because something (vnc port?) has changed',
            vm)
        from api.vm.base.views import vm_manage
        from api.utils.request import get_dummy_request
        from api.utils.views import call_api_view
        request = get_dummy_request(vm.dc, method='PUT', system_user=True)
        res = call_api_view(request, 'PUT', vm_manage, vm.hostname)

        if res.status_code == 201:
            logger.warn('PUT vm_manage(%s) was successful: %s', vm, res.data)
        else:
            logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm,
                         res.status_code, res.status_text, res.data)

    return result
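
Unlike code example #1, which delegates to a vm_update() helper, this fork inlines the follow-up as an internal PUT vm_manage API call. One small portability note: logger.warn is a deprecated alias for logger.warning on Python 3. A minimal sketch of logging such an internal response, with a hypothetical stand-in response object:

import logging

logger = logging.getLogger(__name__)

def log_internal_response(vm_hostname, res):
    # logger.warning is the non-deprecated spelling of logger.warn
    if res.status_code == 201:
        logger.warning('PUT vm_manage(%s) was successful: %s', vm_hostname, res.data)
    else:
        logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm_hostname,
                     res.status_code, res.status_text, res.data)

class _Res(object):  # stand-in for the DRF response used above
    status_code, status_text, data = 201, 'CREATED', {'ok': True}

log_internal_response('vm1.example.com', _Res())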