Example 1
def vm_update_cb(result, task_id, vm_uuid=None):
    """
    A callback function for api.vm.base.views.vm_manage.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or
                                      msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)
        try:  # save json from smartos
            json_active = vm.json.load(json)
        # Do this before updating json and json_active:
        vm_delete_snapshots_of_removed_disks(vm)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from PUT vm_manage(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')
        else:
            vm.save(update_node_resources=True,
                    update_storage_resources=True,
                    update_fields=('enc_json', 'enc_json_active', 'changed'))
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(task_id, vm=vm)  # Signal!

    else:
        logger.error(
            'Found nonzero returncode in result from PUT vm_manage(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
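
For orientation, this is the rough shape of the result payload the callback consumes, inferred from the keys it reads above; the real dict is produced by the task layer and carries more fields, and all values here are placeholders.

# Hypothetical illustration of the payload vm_update_cb expects (not real task
# output; values are invented):
result = {
    'returncode': 0,                           # nonzero raises TaskException
    'message': 'Successfully updated VM ...',  # checked unless force is set
    'json': '{"uuid": "..."}',                 # raw VM JSON from SmartOS, popped and parsed
    'meta': {'apiview': {'force': False}},     # force skips the message check
}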
Example 2
def vm_replica_failover_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """
    A callback function for api.vm.replica.views.vm_replica_failover.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid,
                                   sr=('vm', 'master_vm', 'vm__node', 'vm__dc'))
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    force = result['meta']['apiview']['force']
    result, jsons = _parse_vm_replica_result(result,
                                             vm,
                                             slave_vm,
                                             action,
                                             key_json_idx=-1,
                                             cb_name='vm_replica_failover')
    sync_status = _save_svc_state(slave_vm, jsons)

    if result['returncode'] != 0:
        if sync_status is not None:
            slave_vm.save(update_fields=('sync_status', ))

        vm.revert_notready()
        msg = result['detail']
        logger.error(
            'Found nonzero returncode in result from %s vm_replica_failover(%s, %s). Error: %s',
            action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    # New master VM was born
    # Delete tasks for old master
    if force:
        tasks = list(vm.tasks.keys())
        try:
            tasks.remove(task_id)
        except ValueError:
            pass
        _delete_tasks(vm, tasks)

    old_vm_status = result['meta']['apiview']['orig_status']
    # Create internal shutdown task of old master VM
    _vm_shutdown(vm)

    # Save new master, degrade old master
    slave_vm.master_vm.revert_notready(save=False)
    new_vm = slave_vm.fail_over()

    # Re-check status of old master (current degraded slave) because it was shut down,
    # but the state wasn't saved (the VM was in notready state back then)
    vm_status_one(task_id, vm)

    # Continue with promotion of the new master and degradation of the old one
    SlaveVm.switch_vm_snapshots_node_storages(new_vm, nss=vm.get_node_storages())
    # Force update of zabbix
    vm_json_active_changed.send(task_id,
                                vm=new_vm,
                                old_json_active={},
                                force_update=True)  # Signal!

    if new_vm.node != vm.node:
        vm_node_changed.send(task_id, vm=new_vm, force_update=True)  # Signal!

    msg = 'Server replica was successfully promoted to master'
    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=new_vm, **result['meta'])
    request = get_dummy_request(vm.dc, method='PUT', system_user=True)

    # Mark pending backups as "lost" :(  TODO: implement vm_backup_sync
    new_vm.backup_set.filter(status=Backup.PENDING).update(status=Backup.LOST)

    # Sync snapshots on new master VM (mark missing snapshots as "lost")
    for disk_id, _ in enumerate(new_vm.json_active_get_disks(), start=1):
        call_api_view(request,
                      'PUT',
                      vm_snapshot_list,
                      new_vm.hostname,
                      data={'disk_id': disk_id},
                      log_response=True)

    if old_vm_status == Vm.RUNNING:
        # Start new master VM
        call_api_view(request,
                      'PUT',
                      vm_status,
                      new_vm.hostname,
                      action='start',
                      log_response=True)

    return result
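
The failover callback drives its control flow from a few apiview fields prepared by the API view. A minimal, hypothetical illustration of that metadata (values are placeholders):

# Hypothetical apiview metadata read by vm_replica_failover_cb:
apiview = {
    'method': 'PUT',            # logged as the action on errors
    'force': True,              # True => pending tasks of the old master are deleted
    'orig_status': Vm.RUNNING,  # new master is started only if the old master was running
}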
Example 3
def vm_status_cb(result, task_id, vm_uuid=None):
    """
    A callback function for PUT api.vm.status.views.vm_status.
    Always updates the VM's status in DB.
    """
    vm = Vm.objects.get(uuid=vm_uuid)
    msg = result.get('message', '')
    json = result.pop('json', None)

    if result['returncode'] == 0 and msg and msg.find('Successfully') == 0:
        # json was updated
        if result['meta']['apiview']['update'] and msg.find(
                'Successfully updated') == 0:
            try:  # save json from smartos
                json_active = vm.json.load(json)
                # Do this before updating json and json_active:
                vm_delete_snapshots_of_removed_disks(vm)
                vm.json_active = json_active
                vm.json = json_active
            except Exception as e:
                logger.exception(e)
                logger.error(
                    'Could not parse json output from vm_status(%s). Error: %s',
                    vm_uuid, e)
            else:
                vm.save(update_node_resources=True,
                        update_storage_resources=True,
                        update_fields=('enc_json', 'enc_json_active',
                                       'changed'))
                vm_update_ipaddress_usage(vm)
                vm_json_active_changed.send(task_id, vm=vm)  # Signal!

        change_time = _get_task_time(result, 'exec_time')

        if msg.find('Successfully started') >= 0:
            new_status = Vm.RUNNING
        elif msg.find('Successfully completed stop') >= 0:
            if result['meta']['apiview']['freeze']:
                new_status = Vm.FROZEN
                change_time = _get_task_time(result, 'finish_time')  # Force status save
            else:
                new_status = Vm.STOPPED
        elif msg.find('Successfully completed reboot') >= 0:
            new_status = Vm.RUNNING
        else:
            logger.error(
                'Did not find successful status change in result from vm_status(%s). Error: %s',
                vm_uuid, msg)
            raise TaskException(result, 'Unknown status (%s)' % msg)

    else:
        logger.error(
            'Found nonzero returncode in result from vm_status(%s). Error: %s',
            vm_uuid, msg)

        if is_vm_missing(vm, msg):
            logger.critical('VM %s has vanished from compute node!', vm_uuid)

            if vm.status == Vm.STOPPING:
                _save_vm_status(task_id, vm, Vm.STOPPED,
                                change_time=_get_task_time(result, 'finish_time'))
        else:
            _vm_status_cb_failed(result, task_id, vm)

        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    _save_vm_status(task_id, vm, new_status, change_time=change_time)
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    return result
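
To summarize the if/elif chain above: the success message determines the new VM status. An illustrative mapping (not part of the original module):

# Message prefix -> resulting status, as handled by vm_status_cb:
STATUS_BY_MESSAGE = {
    'Successfully started': Vm.RUNNING,
    'Successfully completed stop': Vm.STOPPED,  # Vm.FROZEN when apiview['freeze'] is set
    'Successfully completed reboot': Vm.RUNNING,
}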
Example 4
def vm_update_cb(result, task_id, vm_uuid=None, new_node_uuid=None):
    """
    A callback function for api.vm.base.views.vm_manage.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or
                                      msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
        except Exception as e:
            logger.exception(e)
            logger.error(
                'Could not parse json output from PUT vm_manage(%s). Error: %s',
                vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        # Do this before updating json and json_active:
        vm_delete_snapshots_of_removed_disks(vm)
        vm.json = json_active
        update_fields = ['enc_json', 'enc_json_active', 'changed']
        ignored_changed_vm_attrs = (
            'set_customer_metadata',
            'remove_customer_metadata',
            'create_timestamp',
            'boot_timestamp',
            'autoboot',
            'vnc_port',
            'update_disks',
        )

        if new_node_uuid:
            update_dict = vm.json_update()

            for i in ignored_changed_vm_attrs:
                update_dict.pop(i, None)

            if update_dict:
                raise TaskException(
                    result,
                    'VM definition on compute node differs from the definition in DB '
                    'in the following attributes: %s' % ', '.join(update_dict.keys()))
            update_fields.append('node_id')

        old_json_active = vm.json_active
        vm.json_active = json_active

        if new_node_uuid:
            node = Node.objects.get(uuid=new_node_uuid)
            vm.set_node(node)

        with transaction.atomic():
            vm.save(update_node_resources=True,
                    update_storage_resources=True,
                    update_fields=update_fields)
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(
                task_id, vm=vm, old_json_active=old_json_active)  # Signal!

        if new_node_uuid:
            vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
            result['message'] = ('Node association successfully changed on VM %s'
                                 % vm.hostname)
            if vm.json_changed():
                vm_update(vm)

    else:
        logger.error(
            'Found nonzero returncode in result from PUT vm_manage(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
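
The node-migration branch refuses to change the node association while the VM definition on the compute node still differs from the DB in any attribute outside the ignore list. A small worked example of that filtering, with a hypothetical diff:

# Hypothetical diff as vm.json_update() might return it (values invented):
update_dict = {'vnc_port': 5905, 'ram': 2048}

for i in ignored_changed_vm_attrs:
    update_dict.pop(i, None)  # 'vnc_port' is on the ignore list and is dropped

# update_dict == {'ram': 2048} -> a real difference remains, so the callback
# raises TaskException instead of switching the node association.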