Example #1
def vm_migrate_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.migrate.views.vm_migrate.
    """
    ghost_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg and 'Successfully migrated' in msg:
        # Save node and delete placeholder VM first
        node = ghost_vm.vm.node
        nss = set(ghost_vm.vm.get_node_storages())
        # The post_delete signal will update node and storage resources
        ghost_vm.delete()
        # Fetch VM after ghost_vm is deleted, because it updates vm.slave_vms array
        vm = Vm.objects.select_related('node', 'dc').get(uuid=vm_uuid)
        changing_node = vm.node != ghost_vm.vm.node
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from vm_migrate(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        nss.update(list(vm.get_node_storages()))
        # Revert status and set new node (should trigger node resource update)
        vm.revert_notready(save=False)
        if changing_node:
            vm.set_node(node)
        vm.save(update_node_resources=True, update_storage_resources=nss)
        SlaveVm.switch_vm_snapshots_node_storages(vm, nss=nss)
        vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!

    else:
        vm = Vm.objects.get(uuid=vm_uuid)
        _vm_migrate_cb_failed(result, task_id, vm, ghost_vm)
        logger.error(
            'Found nonzero returncode in result from vm_migrate(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    if vm.json_changed():
        vm_update(vm)

    return result
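
The callbacks in these examples all consume a result dictionary produced by the task backend. Its exact payload is not shown here, so the sketch below only mirrors the success check at the top of vm_migrate_cb against an assumed minimal shape (returncode, message, json, meta); the keys and example values are inferred from the code above, not taken from the project's task layer.

# Minimal standalone sketch (not part of the project): the result shape assumed by the callback above.
def looks_like_successful_migration(result):
    """Mirror the success check at the top of vm_migrate_cb."""
    msg = result.get('message', '')
    return result['returncode'] == 0 and bool(msg) and 'Successfully migrated' in msg


example_result = {
    'returncode': 0,                            # exit code reported by the remote task
    'message': 'Successfully migrated ...',     # message text is truncated in this sketch
    'json': '{}',                               # raw VM JSON from the hypervisor (placeholder)
    'meta': {'apiview': {}},                    # passed through to task_log_cb_success
}
assert looks_like_successful_migration(example_result)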
Example #2
def vm_replica_failover_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.replica.views.vm_replica_failover.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid,
                                   sr=('vm', 'master_vm', 'vm__node', 'vm__dc'))
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    force = result['meta']['apiview']['force']
    result, jsons = _parse_vm_replica_result(result,
                                             vm,
                                             slave_vm,
                                             action,
                                             key_json_idx=-1,
                                             cb_name='vm_replica_failover')
    sync_status = _save_svc_state(slave_vm, jsons)

    if result['returncode'] != 0:
        if sync_status is not None:
            slave_vm.save(update_fields=('sync_status', ))

        vm.revert_notready()
        msg = result['detail']
        logger.error(
            'Found nonzero returncode in result from %s vm_replica_failover(%s, %s). Error: %s',
            action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    # New master VM was born
    # Delete tasks for old master
    if force:
        tasks = list(vm.tasks.keys())
        try:
            tasks.remove(task_id)
        except ValueError:
            pass
        _delete_tasks(vm, tasks)

    # Create internal shutdown task of old master VM
    old_vm_status = result['meta']['apiview']['orig_status']
    _vm_shutdown(vm)

    # Save new master, degrade old master
    slave_vm.master_vm.revert_notready(save=False)
    new_vm = slave_vm.fail_over()

    # Re-check the status of the old master (now a degraded slave) because it was shut down,
    # but its state wasn't saved (it was in notready state back then)
    vm_status_one(task_id, vm)

    # Continue with promotion of the new master and degradation of the old one
    SlaveVm.switch_vm_snapshots_node_storages(new_vm,
                                              nss=vm.get_node_storages())
    # Force update of zabbix
    vm_json_active_changed.send(task_id,
                                vm=new_vm,
                                old_json_active={},
                                force_update=True)  # Signal!

    if new_vm.node != vm.node:
        vm_node_changed.send(task_id, vm=new_vm, force_update=True)  # Signal!

    msg = 'Server replica was successfully promoted to master'
    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=new_vm, **result['meta'])
    request = get_dummy_request(vm.dc, method='PUT', system_user=True)

    # Mark pending backups as "lost" :(  TODO: implement vm_backup_sync
    new_vm.backup_set.filter(status=Backup.PENDING).update(status=Backup.LOST)

    # Sync snapshots on new master VM (mark missing snapshots as "lost")
    for disk_id, _ in enumerate(new_vm.json_active_get_disks(), start=1):
        call_api_view(request,
                      'PUT',
                      vm_snapshot_list,
                      new_vm.hostname,
                      data={'disk_id': disk_id},
                      log_response=True)

    if old_vm_status == Vm.RUNNING:
        # Start new master VM
        call_api_view(request,
                      'PUT',
                      vm_status,
                      new_vm.hostname,
                      action='start',
                      log_response=True)

    return result
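
In the force branch above, every pending task of the old master is cancelled except the callback's own task_id. The snippet below is a standalone sketch of just that selection step; vm.tasks and _delete_tasks are project internals, so only the plain-Python part is reproduced here.

# Standalone sketch of the task clean-up selection used in the force branch above.
def tasks_to_delete(pending_task_ids, current_task_id):
    """Return task IDs to cancel while keeping the currently running callback alive."""
    tasks = list(pending_task_ids)
    try:
        tasks.remove(current_task_id)
    except ValueError:
        pass  # the current task may not be tracked on the VM at all
    return tasks


assert tasks_to_delete(['t1', 't2', 't3'], 't2') == ['t1', 't3']
assert tasks_to_delete(['t1'], 'missing') == ['t1']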
Example #3
def vm_update_cb(result, task_id, vm_uuid=None, new_node_uuid=None):
    """
    A callback function for api.vm.base.views.vm_manage.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or 'Successfully updated' in msg):
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from PUT vm_manage(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        # Do this before updating json and json_active
        vm_delete_snapshots_of_removed_disks(vm)
        vm.json = json_active
        update_fields = ['enc_json', 'enc_json_active', 'changed']
        ignored_changed_vm_attrs = (
            'set_customer_metadata',
            'remove_customer_metadata',
            'create_timestamp',
            'boot_timestamp',
            'autoboot',
            'vnc_port',
            'update_disks',
        )

        if new_node_uuid:
            update_dict = vm.json_update()

            for i in ignored_changed_vm_attrs:
                update_dict.pop(i, None)

            if update_dict:
                raise TaskException(
                    result,
                    'VM definition on compute node differs from definition in DB in '
                    'following attributes: %s' % ','.join(update_dict.keys()))
            update_fields.append('node_id')

        old_json_active = vm.json_active
        vm.json_active = json_active

        if new_node_uuid:
            node = Node.objects.get(uuid=new_node_uuid)
            vm.set_node(node)

        with transaction.atomic():
            vm.save(update_node_resources=True,
                    update_storage_resources=True,
                    update_fields=update_fields)
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(
                task_id, vm=vm, old_json_active=old_json_active)  # Signal!

        if new_node_uuid:
            vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
            result['message'] = 'Node association successfully changed on VM %s' % vm.hostname
            if vm.json_changed():
                vm_update(vm)

    else:
        logger.error(
            'Found nonzero returncode in result from PUT vm_manage(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
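
When the callback is asked to change the node association (new_node_uuid), it refuses to proceed if the definition on the compute node differs from the one in the DB in any attribute other than a few that are expected to change. The sketch below isolates that filter; it assumes vm.json_update() returns a plain dict of changed attributes, which is an inference from the code above.

# Sketch of the definition-drift filter used in the new_node_uuid branch above
# (assumption: the diff is a plain dict of changed attributes).
IGNORED_CHANGED_VM_ATTRS = (
    'set_customer_metadata',
    'remove_customer_metadata',
    'create_timestamp',
    'boot_timestamp',
    'autoboot',
    'vnc_port',
    'update_disks',
)


def unexpected_definition_changes(update_dict):
    """Return only the attributes that indicate real drift between the DB and the node."""
    return {k: v for k, v in update_dict.items() if k not in IGNORED_CHANGED_VM_ATTRS}


diff = {'vnc_port': 5901, 'ram': 2048}
assert unexpected_definition_changes(diff) == {'ram': 2048}  # vnc_port changes are expected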
Example #4
def vm_migrate_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.migrate.views.vm_migrate.
    """
    ghost_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg and 'Successfully migrated' in msg:
        # Save node and delete placeholder VM first
        node = ghost_vm.vm.node
        nss = set(ghost_vm.vm.get_node_storages())
        # The post_delete signal will update node and storage resources
        ghost_vm.delete()
        # Fetch VM after ghost_vm is deleted, because it updates vm.slave_vms array
        vm = Vm.objects.select_related('node', 'dc').get(uuid=vm_uuid)
        changing_node = vm.node != ghost_vm.vm.node
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from vm_migrate(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        nss.update(list(vm.get_node_storages()))
        # Revert status and set new node (should trigger node resource update)
        vm.revert_notready(save=False)
        if changing_node:
            vm.set_node(node)
        vm.save(update_node_resources=True, update_storage_resources=nss)
        SlaveVm.switch_vm_snapshots_node_storages(vm, nss=nss)
        vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!

    else:
        vm = Vm.objects.get(uuid=vm_uuid)
        _vm_migrate_cb_failed(result, task_id, vm, ghost_vm)
        logger.error(
            'Found nonzero returncode in result from vm_migrate(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    if vm.json_changed():
        logger.info(
            'Running PUT vm_manage(%s), because something (vnc port?) has changed',
            vm)
        from api.vm.base.views import vm_manage
        from api.utils.request import get_dummy_request
        from api.utils.views import call_api_view
        request = get_dummy_request(vm.dc, method='PUT', system_user=True)
        res = call_api_view(request, 'PUT', vm_manage, vm.hostname)

        if res.status_code == 201:
            logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
        else:
            logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm,
                         res.status_code, res.status_text, res.data)

    return result
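
Example #1 ends the same success path with a vm_update(vm) helper, while this version inlines the equivalent PUT vm_manage call. The sketch below shows how the inlined block could be factored into such a helper; it reuses only the calls that already appear above and is an illustration under that assumption, not the project's actual vm_update implementation.

# Hypothetical helper factored out of the inlined block above (illustration only).
def _resubmit_vm_definition(vm):
    """Re-run PUT vm_manage for a VM whose JSON has drifted (e.g. a changed VNC port)."""
    from api.vm.base.views import vm_manage
    from api.utils.request import get_dummy_request
    from api.utils.views import call_api_view

    request = get_dummy_request(vm.dc, method='PUT', system_user=True)
    res = call_api_view(request, 'PUT', vm_manage, vm.hostname)

    if res.status_code == 201:
        logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
    else:
        logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm,
                     res.status_code, res.status_text, res.data)
    return res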