Example #1
0
    def init_classes(self):
        """Instantiate and wire together the helper classes used for a
        VMware -> KVM migration run.

        Creates the CloudStackOps ("cosmic"), vmware, kvm and SQL helper
        objects, propagates the DEBUG/DRYRUN flags to each of them, opens
        the MySQL connection and initialises the Cosmic/CloudStack API
        client from the configured credentials profile.
        """
        # Init CloudStackOps class
        self.cosmic = cloudstackops.CloudStackOps(self.DEBUG, self.DRYRUN)
        self.cosmic.task = "VMware -> KVM migration"
        self.cosmic.slack_custom_title = "Migration details"

        # Init VMware class
        # NOTE(review): connects as 'root'; presumably required for ESXi
        # access -- confirm against the vmware helper's expectations.
        v = vmware.vmware('root', self.threads)
        v.DEBUG = self.DEBUG
        v.DRYRUN = self.DRYRUN
        self.cosmic.vmware = v

        # Init KVM class
        # KVM side uses the current local user for SSH, unlike the
        # VMware side above which is hard-coded to 'root'.
        k = kvm.Kvm(ssh_user=getpass.getuser(),
                    threads=self.threads,
                    helper_scripts_path=self.helperScriptsPath)
        k.DEBUG = self.DEBUG
        k.DRYRUN = self.DRYRUN
        self.cosmic.kvm = k

        # Init SQL class
        self.sql = cloudstacksql.CloudStackSQL(self.DEBUG, self.DRYRUN)

        self.connect_mysql()

        # make credentials file known to our class
        self.cosmic.configProfileName = self.configProfileName

        # Init the Cosmic API
        self.cosmic.initCloudStackAPI()

        # NOTE(review): Python 2 print statements below -- this method
        # requires a Python 2 interpreter, unlike other parts of this
        # codebase that use print().
        if self.DEBUG == 1:
            print "API address: " + self.cosmic.apiurl
            print "ApiKey: " + self.cosmic.apikey
            print "SecretKey: " + self.cosmic.secretkey

        # Check cloudstack IDs
        if self.DEBUG == 1:
            print "Debug: Checking CloudStack IDs of provided input.."

        # Overwrite the generic title set above with a vmx-specific one.
        self.cosmic.slack_custom_title = "Migration details for vmx %s" % self.vmxPath
Example #2
0
            DEBUG = 1
        elif opt in ("--exec"):
            DRYRUN = 0

    # We need at least these vars
    if len(mysqlHost) == 0:
        print help
        sys.exit()


# Parse arguments
# NOTE(review): only the argument parsing is guarded by __main__; the
# class construction and MySQL connection below run unconditionally,
# i.e. also when this module is imported -- confirm this is intended.
if __name__ == "__main__":
    handleArguments(sys.argv[1:])

# Init our class
# DEBUG / DRYRUN / mysqlHost / mysqlPasswd are module globals set by
# handleArguments (defined earlier in the file, outside this excerpt).
s = cloudstacksql.CloudStackSQL(DEBUG, DRYRUN)

if DEBUG == 1:
    print "Warning: Debug mode is enabled!"

if DRYRUN == 1:
    print "Warning: dry-run mode is enabled, not running any commands!"

# Connect MySQL
# connectMySQL returns a value > 0 on failure (inferred from the check
# below); on success the live connection is exposed as s.conn.
result = s.connectMySQL(mysqlHost, mysqlPasswd)
if result > 0:
    print "Error: MySQL connection failed"
    sys.exit(1)
elif DEBUG == 1:
    print "DEBUG: MySQL connection successful"
    print s.conn
def liveMigrateVirtualMachine(c=None,
                              DEBUG=0,
                              DRYRUN=1,
                              vmname='',
                              toCluster='',
                              configProfileName='',
                              isProjectVm=0,
                              force=0,
                              zwps2cwps=False,
                              destination_dc_name='',
                              affinityGroupToAdd='',
                              multirun=False):
    """Live-migrate a running VM (and, when needed, its volumes) to
    another cluster.

    Parameters:
        c: initialised CloudStackOps-like client used for all API calls
           and Slack reporting (required despite the None default).
        DEBUG: 1 enables extra diagnostics.
        DRYRUN: 1 prints what would happen and exits before mutating
           anything; also suppresses Slack messages.
        vmname: display name of the VM to migrate.
        toCluster: name of the destination cluster.
        configProfileName: credentials profile passed to connectMySQL.
        isProjectVm: 1 if the VM belongs to a project (affects listing).
        force: accepted but not used in this function body.
        zwps2cwps: convert ZWPS data-disk offerings to CWPS first.
        destination_dc_name: if set, rewrite the service offering name
           to the destination datacenter before migrating.
        affinityGroupToAdd: affinity group added (via SQL) on success.
        multirun: when True, return True on failure instead of calling
           sys.exit(1), so a batch caller can continue with other VMs.

    Returns:
        True on any failure path when multirun is True; otherwise falls
        through (implicit None) or terminates the process via sys.exit.

    Side effects: CloudStack API calls, direct SQL updates to the
    CloudStack database, libvirt queries on the hypervisor, Slack
    notifications, and process exit on error when not in multirun mode.
    """
    # Start time
    print("Note: Starting @ %s" % time.strftime("%Y-%m-%d %H:%M"))
    start_time = datetime.now()

    # Check cloudstack IDs
    if DEBUG == 1:
        print("Note: Checking CloudStack IDs of provided input..")

    if isProjectVm == 1:
        projectParam = "true"
    else:
        projectParam = "false"

    # Slack reporting is suppressed for dry runs.
    to_slack = True
    if DRYRUN == 1:
        to_slack = False

    # Resolve the VM and cluster names to CloudStack IDs.
    # checkCloudStackName appears to return 1 or None on failure
    # (inferred from the toClusterID check below) -- note that vmID is
    # not validated here, only the cluster ID is.
    vmID = c.checkCloudStackName({
        'csname': vmname,
        'csApiCall': 'listVirtualMachines',
        'listAll': 'true',
        'isProjectVm': projectParam
    })

    toClusterID = c.checkCloudStackName({
        'csname': toCluster,
        'csApiCall': 'listClusters'
    })

    if toClusterID == 1 or toClusterID is None:
        print("Error: Cluster with name '" + toCluster +
              "' can not be found! Halting!")
        if multirun:
            return True
        sys.exit(1)

    # Get data from vm
    vmdata = c.getVirtualmachineData(vmID)
    if vmdata is None:
        print("Error: Could not find vm " + vmname + "!")
        if multirun:
            return True
        sys.exit(1)
    vm = vmdata[0]
    # Stash identifying info on the client for message/Slack context.
    c.instance_name = vm.instancename
    c.slack_custom_value = vm.domain
    c.vm_name = vm.name
    c.zone_name = vm.zonename

    # VM snapshots block live migration, so abort if any exist.
    snapshotData = c.listVMSnapshot(vm.id)
    snapshot_found = False
    if snapshotData == 1:
        print("Error: Could not list VM snapshots")
    elif snapshotData is None:
        print("Note: No VM snapshots found for this vm.")
    else:
        for snapshot in snapshotData:
            print(
                "Note: Found VM snapshot %s, unable to live migrate. Please remove VM snapshots first. "
                % snapshot.displayname)
            snapshot_found = True

    if snapshot_found:
        message = "VM %s has VM snapshots, unable to live migrate. Please remove VM snapshots!" % vmname
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Live migration is only possible for a Running VM.
    if vm.state != "Running":
        message = "VM %s is in state %s, can only live migrate when in state Running. Skipping this vm!" % (
            vmname, vm.state)
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Determine the cluster the VM currently runs on.
    hostData = c.getHostData({'hostid': vm.hostid})[0]
    clusterData = c.listClusters({'clusterid': hostData.clusterid})
    c.cluster = clusterData[0].name

    if hostData.clusterid == toClusterID:
        message = "VM %s is already running on cluster %s. Skipping this vm!" % (
            vmname, toCluster)
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Init SQL class
    s = cloudstacksql.CloudStackSQL(DEBUG, DRYRUN)

    # Connect MySQL
    result = s.connectMySQL(configProfileName)
    if result > 0:
        message = "MySQL connection failed"
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)
    elif DEBUG == 1:
        print("DEBUG: MySQL connection successful")
        print(s.conn)

    # Init KVM class
    k = kvm.Kvm(ssh_user=getpass.getuser())
    k.DRYRUN = DRYRUN
    k.PREPARE = False
    c.kvm = k

    # Libvirt disk info
    # Cross-check libvirt-reported disk sizes against the DB and grow
    # the DB value when libvirt reports a larger size; a mismatch the
    # other way is considered OK.
    try:
        libvirt_disk_info = c.kvm.libvirt_get_disks(
            vmname=vm.instancename, hypervisor_fqdn=vm.hostname)

        # NOTE(review): iteritems() is Python 2 only, but this function
        # uses print() elsewhere -- under Python 3 this line raises
        # AttributeError (caught by the except below and reported as a
        # hypervisor read failure). Confirm the target interpreter.
        for path, disk_info in libvirt_disk_info.iteritems():
            print("Note: Disk %s on pool %s has size %s" %
                  (disk_info['path'], disk_info['pool'], disk_info['size']))

            # NOTE(review): 'path' loop variable is immediately shadowed
            # by the DB value here.
            name, path, uuid, voltype, size = s.get_volume_size(
                path=disk_info['path'])

            if int(size) < int(disk_info['size']):
                print(
                    "Warning: looks like size in DB (%s) is less than libvirt reports (%s)"
                    % (size, disk_info['size']))
                print("Note: Setting size of disk %s to %s" %
                      (path, int(disk_info['size'])))
                s.update_volume_size(instance_name=vm.instancename,
                                     path=path,
                                     size=disk_info['size'])
            else:
                print(
                    "OK: looks like size in DB (%s) is >= libvirt reports (%s)"
                    % (size, disk_info['size']))
    except Exception as e:
        message = "Error: Unable to read disk sizes from hypervisor. Is VM running?: %s" % str(
            e)
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Do ZWPS to CWPS conversion before finding migration hosts or else it will return none
    if zwps2cwps:
        message = "Switching any ZWPS volume of vm %s to CWPS so they will move along with the VM" % vm.name
        c.print_message(message=message,
                        message_type="Note",
                        to_slack=to_slack)

        if not s.update_zwps_to_cwps(instance_name=vm.instancename,
                                     disk_offering_name="MCC_v1.CWPS"):
            message = "Switching disk offerings to CWPS failed. Halting"
            c.print_message(message=message,
                            message_type="Error",
                            to_slack=to_slack)
            if multirun:
                return True
            sys.exit(1)

    # Do DC offering migrate before finding migration hosts or else it will return none
    if len(destination_dc_name) > 0:
        # Known datacenter tokens embedded in service offering names.
        datacenters = ["SBP1", "EQXAMS2", "EVO"]
        current_offering_name = vm.serviceofferingname

        if destination_dc_name not in datacenters:
            print("Unknown DC %s. Should be one of: %s" %
                  (destination_dc_name, str(datacenters)))
            # NOTE(review): exits even in multirun mode, unlike the
            # other failure paths in this function -- confirm intended.
            sys.exit(1)

        # Replace the first other-DC token found in the offering name
        # with the destination DC and update the offering via SQL.
        for dc_name in datacenters:
            if dc_name == destination_dc_name:
                continue
            if dc_name in current_offering_name:
                print("Note: replacing %s DC with %s" %
                      (dc_name, destination_dc_name))
                print("Note: current offering: %s" % current_offering_name)
                new_offering_name = current_offering_name.replace(
                    dc_name, destination_dc_name)
                print("Note: new offering: %s" % new_offering_name)
                s.update_service_offering_of_vm(
                    instance_name=vm.instancename,
                    service_offering_name=new_offering_name)
                break

    # Quick scan
    # Classify the VM's volumes by storage-pool scope:
    # CWPS = cluster-wide, ZWPS = zone-wide, HWPS = host-local.
    zwps_found = False
    zwps_name = None
    root_disk = None
    hwps_name = None
    cwps_found = False
    hwps_found = False
    voldata = c.getVirtualmachineVolumes(vm.id, projectParam)
    for vol in voldata:
        if vol.type == 'DATADISK':
            storagePoolData = c.getStoragePoolData(
                storagepoolID=vol.storageid)[0]
            if storagePoolData.scope == 'CLUSTER':
                cwps_found = True
            if storagePoolData.scope == 'ZONE':
                zwps_found = True
                zwps_name = vol.storage
            if storagePoolData.scope == 'HOST':
                hwps_found = True
                hwps_name = vol.storage
        elif vol.type == 'ROOT':
            root_disk = vol

    if hwps_found:
        message = "This VM has HWPS data disks attached. That is not currently handled by this script."
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    if cwps_found and zwps_found:
        message = "This VM has both ZWPS and CWPS data disks attached. That is not currently handled by this script."
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Make sure we have no ZWPS and CWPS combi or else migrating will fail

    # Root disk is also CWPS.
    # With ZWPS data disks attached, the ROOT disk must be moved to the
    # same ZWPS pool first so the VM migration does not drag the data
    # disks along; it is moved back to CWPS after the VM migration.
    if zwps_found:
        print("Note: ZWPS data disk attached!")
        print(
            "Note: For migration to work we need to migrate root disk %s to ZWPS pool %s first,"
            % (root_disk.name, zwps_name))
        print(
            "Note: then migrate the VM and its disks and finally move ROOT disk to a CWPS pool at %s"
            % toCluster)

        # Check if volume is already on correct storage
        if root_disk.storage == zwps_name:
            print(
                "Warning: No need to migrate volume %s -- already on the desired storage pool. Skipping."
                % root_disk.name)
        else:
            target_storage_pool_data = c.getStoragePoolByName(
                poolName=zwps_name)

            # Check if file exists
            # A leftover file at the destination path (from an earlier
            # failed cleanup) would make the volume migration fail, so
            # detect and rename it out of the way first.
            volume_path = "/mnt/%s/%s" % (target_storage_pool_data[0].id,
                                          root_disk.path)
            destination_file_exists, destination_file_details = k.does_file_exist(
                kvmhost=hostData, volume_path=volume_path)
            if destination_file_exists:
                # Last-changed timestamp is taken from fixed positions
                # near the end of the file-details listing (presumably
                # ls-style output -- confirm against does_file_exist).
                last_changed = "%s %s %s" % (destination_file_details[-4],
                                             destination_file_details[-3],
                                             destination_file_details[-2])
                message = 'Cannot migrate: Disk %s already exists at destination pool %s. Last changed: %s' % (
                    root_disk.name, volume_path, last_changed)
                c.print_message(message=message,
                                message_type="Error",
                                to_slack=False)
                if DRYRUN == 1:
                    sys.exit(1)
                else:
                    message = 'Moving away existing disk %s at destination pool %s.' % (
                        root_disk.name, volume_path)
                    c.print_message(message=message,
                                    message_type="Warning",
                                    to_slack=False)
                    move_result = k.rename_existing_destination_file(
                        kvmhost=hostData, volume_path=volume_path)
                    if move_result:
                        message = 'Successfully moved away existing disk'
                        c.print_message(message=message,
                                        message_type="Note",
                                        to_slack=False)
                    else:
                        message = 'Cannot move away existing disk %s at destination pool %s.' % (
                            root_disk.name, volume_path)
                        c.print_message(message=message,
                                        message_type="Error",
                                        to_slack=False)
                        sys.exit(1)

            else:
                message = 'Disk %s does not yet exist at destination pool %s' % (
                    root_disk.name, volume_path)
                c.print_message(message=message,
                                message_type="Note",
                                to_slack=False)

            if DRYRUN == 1:
                message = "Would have migrated ROOT disk %s of VM %s to ZWPS pool %s" % \
                          (root_disk.name, vm.instancename, zwps_name)
                c.print_message(message=message,
                                message_type="Note",
                                to_slack=to_slack)
                if multirun:
                    return True
                sys.exit(1)

            # NOTE(review): format args read as "ROOT disk of <disk> VM
            # <instance>" -- the first two arguments look swapped
            # relative to the wording. Message only; behavior unaffected.
            message = "Migrating ROOT disk of %s VM %s to ZWPS pool %s" % (
                root_disk.name, vm.instancename, zwps_name)
            c.print_message(message=message,
                            message_type="Note",
                            to_slack=to_slack)

            result = c.migrateVolume(volid=root_disk.id,
                                     storageid=target_storage_pool_data[0].id,
                                     live=True)
            if result == 1:
                message = "Migrate volume %s (%s) failed -- exiting." % (
                    root_disk.name, root_disk.id)
                c.print_message(message=message,
                                message_type="Error",
                                to_slack=to_slack)
                if multirun:
                    return True
                sys.exit(1)

    # VM now runs with root disk at ZWPS

    # Detach any isos
    # An attached ISO would block the migration.
    if vm.isoid is not None:
        print("Note: Detaching any connected ISO from vm %s" % vm.name)
        c.detach_iso(vm.id)
    else:
        print("Note: No ISOs connected to detach")

    # Get hosts that belong to toCluster
    # NOTE(review): toClusterHostsData is fetched but never used below.
    toClusterHostsData = c.getHostsFromCluster(toClusterID)
    migrationHost = c.findBestMigrationHost(toClusterID, vm.hostname,
                                            vm.memory)
    # Remember where the VM started so success can be verified later.
    currentHostname = vm.hostname

    if not migrationHost:
        message = "No hosts with enough capacity to migrate %s to. Please migrate manually to another cluster." % vm.name
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Do we need liveMigrate or liveMigrateWithVolume
    migrate_with_volume_required = True
    voldata = c.getVirtualmachineVolumes(vm.id, projectParam)
    for vol in voldata:
        # ROOT disk has no offering info so we cannot detect ZWPS
        # NOTE(review): hard-coded pod names identify ZWPS storage here
        # -- environment-specific; confirm these still match production.
        if 'POD012' in vol.storage or 'POD022' in vol.storage:
            # Migrate ZWPS without disks
            migrate_with_volume_required = False

    migration_method = 'live'
    if migrate_with_volume_required:
        migration_method = 'liveWithVolume'

    if DRYRUN == 1:
        message = "Would have migrated (%s) %s to %s on cluster %s"\
                  % (migration_method, vm.name, migrationHost.name, toCluster)
        c.print_message(message=message, message_type="Note", to_slack=False)
        if multirun:
            return True
        sys.exit(1)

    message = "Starting migration (%s) of %s to %s on cluster %s"\
              % (migration_method, vm.name, migrationHost.name, toCluster)
    c.print_message(message=message, message_type="Note", to_slack=to_slack)

    if migrate_with_volume_required:
        result = c.migrateVirtualMachineWithVolume(vm.id, migrationHost.id)
    else:
        result = c.migrateVirtualMachine(vm.id, migrationHost.id)

    if not result:
        message = "Migrate (%s) vm %s failed -- exiting." % (migration_method,
                                                             vm.name)
        c.print_message(message=message,
                        message_type="Error",
                        to_slack=to_slack)
        if multirun:
            return True
        sys.exit(1)

    # Hack -- Is this still needed?
    # Poll until the VM reports Running again after the migration.
    # NOTE(review): the sleep happens before the print, and there is no
    # upper bound on this loop -- it can wait forever if the VM never
    # returns to Running.
    while True:
        vmdata = c.getVirtualmachineData(vmID)
        if vmdata is None:
            print("Error: Could not find vm " + vmname + "!")
            if multirun:
                return True
            sys.exit(1)
        vm = vmdata[0]

        if vm.state == "Running":
            break
        time.sleep(60)
        print("Vm %s is in %s state and not Running. Sleeping." %
              (vm.name, vm.state))

    # Second leg of the ZWPS workaround: move the ROOT disk back from
    # the ZWPS pool to a CWPS pool on the destination cluster, retrying
    # once on the paired pool (CS01 <-> CS02) if the first attempt fails.
    if zwps_found:
        message = "Note: Making sure root disk %s is on CWPS" % root_disk.name
        c.print_message(message=message,
                        message_type="Note",
                        to_slack=to_slack)
        # Select storage pool
        target_storage = c.getStoragePoolWithMostFreeSpace(toClusterID)
        if target_storage == 1 or target_storage is None:
            # NOTE(review): string concatenation with toClusterID will
            # raise TypeError if the ID is not a str -- confirm the type
            # returned by checkCloudStackName.
            print("Error: Storage Pool with id '" + toClusterID +
                  "' can not be found! Halting!")
            sys.exit(1)

        # Migrate
        result = c.migrateVolume(volid=root_disk.id,
                                 storageid=target_storage.id,
                                 live=True)
        if result == 1:
            message = "Migrate volume %s (%s) failed -- retrying." % (
                root_disk.name, root_disk.id)
            c.print_message(message=message,
                            message_type="Warning",
                            to_slack=to_slack)

            target_storage_name = target_storage.name
            retry_target_storage_name = None
            if 'CS01' in target_storage_name:
                retry_target_storage_name = target_storage_name.replace(
                    'CS01', 'CS02')
            if 'CS02' in target_storage_name:
                retry_target_storage_name = target_storage_name.replace(
                    'CS02', 'CS01')
            if retry_target_storage_name is not None:
                target_storage = c.getStoragePoolByName(
                    poolName=retry_target_storage_name)
                if target_storage == 1 or target_storage is None:
                    print("Error: Storage Pool with name '" +
                          retry_target_storage_name +
                          "' can not be found! Halting!")
                    if multirun:
                        return True
                    sys.exit(1)
                # Migrate
                result = c.migrateVolume(volid=root_disk.id,
                                         storageid=target_storage.id,
                                         live=True)
                if result == 1:
                    message = "Migrate volume %s (%s) failed again -- halting." % (
                        root_disk.name, root_disk.id)
                    c.print_message(message=message,
                                    message_type="Error",
                                    to_slack=to_slack)
                    if multirun:
                        return True
                    sys.exit(1)
        message = "Note: Root disk %s migrated to pool %s" % (
            root_disk.name, target_storage.name)
        c.print_message(message=message,
                        message_type="Note",
                        to_slack=to_slack)

    # Success is judged by the VM having actually changed host.
    result = True
    if currentHostname == vm.hostname:
        result = False

    # End time
    message = "Finished @ " + time.strftime("%Y-%m-%d %H:%M")
    c.print_message(message=message, message_type="Note", to_slack=False)
    elapsed_time = datetime.now() - start_time

    if result:
        # Affinity group is applied only after a confirmed host change.
        if len(affinityGroupToAdd) > 0:
            message = "Adding affinity group %s to VM %s" % (
                affinityGroupToAdd, vm.name)
            s.add_vm_to_affinity_group(affinity_group_name=affinityGroupToAdd,
                                       instance_name=vm.instancename)

        message = "VM %s is successfully migrated to %s on cluster %s in %s seconds" % (
            vm.name, migrationHost.name, toCluster,
            elapsed_time.total_seconds())
        c.print_message(message=message,
                        message_type="Note",
                        to_slack=to_slack)
    else:
        message = "VM %s is failed to migrate to %s on cluster %s in %s seconds" % (
            vm.name, migrationHost.name, toCluster,
            elapsed_time.total_seconds())
        c.print_message(message=message,
                        message_type="Warning",
                        to_slack=to_slack)
        print(
            "Hint: Due to a known cleanup issue this might be because the disk already exists at the destination."
        )