Example 1
    def put(self):
        request, vm = self.request, self.vm

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.RUNNING, vm.NOTCREATED):
            raise VmIsNotOperational(
                'VM is not stopped, running or notcreated')

        if vm.json_changed():
            raise PreconditionRequired(
                'VM definition has changed; Update first')

        ser = VmDcSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        old_dc = vm.dc
        dc = ser.dc
        # Change the DC of this one VM (the operation is per-VM) and recalculate
        # node & storage resources in both the source and target DC
        vm.dc = dc
        vm.save(update_node_resources=True, update_storage_resources=True)
        # Change task log entries DC for target VM
        TaskLogEntry.objects.filter(object_pk=vm.uuid).update(dc=dc)
        # Change related VM backup's DC
        Backup.objects.filter(vm=vm).update(dc=dc)

        for ns in ser.nss:  # Issue #chili-885
            for i in (dc, old_dc):
                Backup.update_resources(ns, vm, i)
                Snapshot.update_resources(ns, vm, i)

        detail = 'Successfully migrated VM %s from datacenter %s to datacenter %s' % (
            vm.hostname, old_dc.name, dc.name)
        # Will create task log entry in old DC
        res = SuccessTaskResponse(request,
                                  detail,
                                  vm=vm,
                                  msg=LOG_MIGRATE_DC,
                                  detail=detail)
        # Create task log entry in new DC too
        task_log_success(task_id_from_task_id(res.data.get('task_id'),
                                              dc_id=dc.id),
                         LOG_MIGRATE_DC,
                         obj=vm,
                         detail=detail,
                         update_user_tasks=False)

        return res
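
Note on the double logging above: SuccessTaskResponse writes the task log entry under the request's (old) DC, so a second entry is created for the target DC by rewriting the DC part of the task ID. A minimal sketch of what task_id_from_task_id might do, assuming a hypothetical task ID layout in which the DC id is a single dash-separated field (the project's real encoding may differ):

def task_id_from_task_id(task_id, dc_id):
    # Hypothetical layout '<prefix>-<dc_id>-<suffix>'; swap the embedded DC id.
    parts = task_id.split('-')
    parts[1] = str(dc_id)
    return '-'.join(parts)
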
Example 2
def vm_snapshot_sync_cb(result, task_id, vm_uuid=None, disk_id=None):
    """
    A callback function for PUT api.vm.snapshot.views.vm_snapshot_list a.k.a. vm_snapshot_sync.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    data = result.pop('data', '')

    if result['returncode'] != 0:
        msg = result.get('message', '') or data
        logger.error(
            'Found nonzero returncode in result from PUT vm_snapshot_list(%s). Error: %s',
            vm_uuid, msg)
        raise TaskException(
            result, 'Got bad return code (%s). Error: %s' %
            (result['returncode'], msg))

    node_snaps = parse_node_snaps(data)
    logger.info('Found %d snapshots for VM %s on disk ID %s', len(node_snaps),
                vm, disk_id)
    lost = sync_snapshots(
        vm.snapshot_set.select_related('vm').filter(disk_id=disk_id).all(),
        node_snaps)

    # Snapshots remaining on the compute node are either internal snapshots or old lost
    # snapshots that do not exist in the DB. Remaining es- and as- snapshots must be
    # created in the DB; some is- and rs- snapshots could probably be removed, but these
    # are hard to identify, so we ignore them.
    snap_prefix = Snapshot.USER_PREFIX
    new_snaps = {
        snap: node_snaps.pop(snap)
        for snap in tuple(node_snaps.keys()) if snap.startswith(snap_prefix)
    }
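    # Illustrative node_snaps layout, assumed from how ``info`` is indexed below (not
    # verified against parse_node_snaps):
    #   {'es-aaaa': ('1520000000', '10240', 'daily'), 'is-42': (...), ...}
    # i.e. info[0] = creation timestamp, info[1] = size, info[2] = optional snapshot name.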

    ns = vm.get_node_storage(disk_id)

    if new_snaps:
        logger.warning(
            'VM %s has the following snapshots on disk ID %s that are not defined in the DB: %s',
            vm, disk_id, new_snaps.keys())

        for zfs_name, info in new_snaps.items():
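            # The display name should be in info[2]; fall back to the timestamp string
            # in info[0] when it is missing or empty.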
            try:
                name = info[2]
                if not name:
                    raise IndexError
            except IndexError:
                name = info[0]

            try:
                Snapshot.create_from_zfs_name(zfs_name,
                                              name=name,
                                              timestamp=int(info[0]),
                                              vm=vm,
                                              disk_id=disk_id,
                                              zpool=ns,
                                              size=t_long(info[1]),
                                              note='Found by snapshot sync')
            except Exception as exc:
                logger.error(
                    'Could not recreate snapshot %s (vm=%s, disk_id=%s). Error: %s',
                    zfs_name, vm, disk_id, exc)
            else:
                logger.warning('Recreated snapshot %s (vm=%s, disk_id=%s)',
                               zfs_name, vm, disk_id)

    logger.info(
        'VM %s has the following internal/service snapshots on disk ID %s: %s',
        vm, disk_id, node_snaps.keys())
    # Update node storage snapshot size counters
    Snapshot.update_resources(ns, vm)

    try:
        # Update last flag on dataset backups
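        # Each dataset backup is assumed to keep an 'is-<backup_id>' snapshot on the node,
        # so a backup keeps last=True only while its snapshot is still present in node_snaps.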
        bkp_ids = [snap[3:] for snap in node_snaps if snap.startswith('is-')]
        if bkp_ids:
            vm.backup_set.filter(disk_id=disk_id,
                                 id__in=bkp_ids).update(last=True)
            vm.backup_set.filter(
                disk_id=disk_id,
                last=True).exclude(id__in=bkp_ids).update(last=False)
        else:
            vm.backup_set.filter(disk_id=disk_id, last=True).update(last=False)
    except Exception as exc:
        logger.exception(exc)

    msg = 'Snapshots successfully synced'
    if lost:
        msg += '; WARNING: %d snapshot(s) lost' % lost
    if new_snaps:
        msg += '; WARNING: %d snapshot(s) found' % len(new_snaps)

    result['message'] = msg
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
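
parse_node_snaps is referenced above but not shown. A minimal sketch of one possible implementation, assuming the compute node returns one snapshot per line as tab-separated 'dataset@snapname<TAB>timestamp<TAB>size<TAB>label' (the separator and field layout are assumptions, not the project's actual wire format):

def parse_node_snaps(data):
    """Parse a raw snapshot listing into {zfs_snapshot_name: (timestamp, size, name)}."""
    snaps = {}
    for line in data.strip().splitlines():
        fields = line.split('\t')
        # Keep only the snapshot part of 'pool/dataset@snapname'.
        zfs_name = fields[0].split('@')[-1]
        snaps[zfs_name] = tuple(fields[1:])
    return snaps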