import logging
import sys
from random import choice

import click_log

# Assumed import path for the CosmicOps/CosmicSQL helpers used below
from cosmicops import CosmicOps, CosmicSQL


def main(profile, dry_run, ignore_volumes, zwps_to_cwps, skip_disk_offerings,
         only_project, source_cluster_name, destination_cluster_name):
    """Migrate offline volumes from SOURCE_CLUSTER to DESTINATION_CLUSTER"""

    click_log.basic_config()

    if source_cluster_name == destination_cluster_name:
        logging.error('Destination cluster cannot be the source cluster!')
        sys.exit(1)

    if dry_run:
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run)
    cs = CosmicSQL(server=profile, dry_run=dry_run)

    source_cluster = co.get_cluster(name=source_cluster_name)
    # The source may also be a storage pool name rather than a cluster name
    source_storage_pools = co.get_all_storage_pools(name=source_cluster_name)
    if not source_cluster and not source_storage_pools:
        logging.error(f"Source cluster not found:'{source_cluster_name}'!")
        sys.exit(1)

    destination_cluster = co.get_cluster(name=destination_cluster_name)
    if not destination_cluster:
        logging.error(
            f"Destination cluster not found: '{destination_cluster_name}'!")
        sys.exit(1)

    if source_cluster:
        try:
            source_storage_pools = source_cluster.get_storage_pools(
                scope='CLUSTER')
        except IndexError:
            logging.error(
                f"No storage pools found for cluster '{source_cluster['name']}'"
            )
            sys.exit(1)

    logging.info('Source storage pools found:')
    for source_storage_pool in source_storage_pools:
        logging.info(f" - '{source_storage_pool['name']}'")

    try:
        destination_storage_pools = destination_cluster.get_storage_pools(
            scope='CLUSTER')
    except IndexError:
        logging.error(
            f"No storage pools found for cluster '{destination_cluster['name']}'"
        )
        sys.exit(1)
    logging.info('Destination storage pools found:')
    for destination_storage_pool in destination_storage_pools:
        logging.info(f" - '{destination_storage_pool['name']}'")

    if ignore_volumes:
        ignore_volumes = ignore_volumes.replace(' ', '').split(',')
        logging.info(f"Ignoring volumes: {ignore_volumes}")

    if skip_disk_offerings:
        skip_disk_offerings = skip_disk_offerings.replace(' ', '').split(',')
        logging.info(f"Skipping disk offerings: {skip_disk_offerings}")

    for source_storage_pool in source_storage_pools:
        # Pick a random destination pool for this source pool's volumes
        destination_storage_pool = choice(destination_storage_pools)
        volumes = source_storage_pool.get_volumes(only_project)

        for volume in volumes:
            if ignore_volumes and volume['id'] in ignore_volumes:
                continue

            if skip_disk_offerings and volume.get(
                    'diskofferingname') in skip_disk_offerings:
                logging.warning(
                    f"Volume '{volume['name']}' has offering '{volume['diskofferingname']}', skipping..."
                )
                continue

            if 'storage' not in volume:
                logging.warning(
                    f"No storage attribute found for volume '{volume['name']}' ({volume['id']}), skipping..."
                )
                continue

            if volume['storage'] in [
                    pool['name'] for pool in destination_storage_pools
            ]:
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) already on cluster '{destination_cluster['name']}', skipping..."
                )
                continue

            if volume['state'] != 'Ready':
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) is in state '{volume['state']}', skipping..."
                )
                continue

            if 'vmstate' in volume and volume['vmstate'] != 'Stopped':
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) is attached to {volume['vmstate']} VM '{volume['vmname']}', skipping..."
                )
                continue

            if zwps_to_cwps:
                if not dry_run:
                    logging.info(
                        f"Converting ZWPS volume '{volume['name']}' to CWPS before starting the migration"
                    )
                    if not cs.update_zwps_to_cwps('MCC_v1.CWPS',
                                                  volume_id=volume['id']):
                        logging.error(
                            f"Failed to apply CWPS disk offering to volume '{volume['name']}'"
                        )
                        return False
                else:
                    logging.info(
                        f"Would have changed the diskoffering for volume '{volume['name']}' to CWPS before starting the migration"
                    )

            if source_cluster:
                logging.info(
                    f"Volume '{volume['name']}' will be migrated from cluster '{source_cluster['name']}' to '{destination_cluster['name']}'"
                )
            else:
                logging.info(
                    f"Volume '{volume['name']}' will be migrated from storage pool '{source_storage_pool['name']}' to '{destination_cluster['name']}'"
                )

            if not volume.migrate(destination_storage_pool):
                # Migration failed; carry on with the next volume
                continue
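
Neither snippet shows the Click decorators that feed `main` its parameters, but the `click_log.basic_config()` call implies them. Below is a minimal sketch, assuming Click and click-log, of how this first entry point could be wired up; all option names, defaults, and help texts are illustrative assumptions, not taken from the original script.

import click

@click.command()
@click_log.simple_verbosity_option(logging.getLogger())
@click.option('--profile', '-p', required=True,
              help='Name of the configuration profile with the API credentials')
@click.option('--dry-run/--exec', default=True,
              help='Only show changes (default) or actually migrate')
@click.option('--ignore-volumes', default='',
              help='Comma-separated list of volume IDs to skip')
@click.option('--zwps-to-cwps', is_flag=True,
              help='Convert ZWPS volumes to CWPS before migrating')
@click.option('--skip-disk-offerings', default='',
              help='Comma-separated list of disk offering names to skip')
@click.option('--only-project', is_flag=True,
              help='Only migrate volumes that belong to a project')
@click.argument('source_cluster_name')
@click.argument('destination_cluster_name')
def cli(**kwargs):
    # Click passes each option/argument by name, matching main's signature
    main(**kwargs)

if __name__ == '__main__':
    cli()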
Example 2

import datetime
import random
import re
import sys

import click_log
import pytz

# Assumed import paths: 'logging' here is the project's Slack-aware wrapper,
# not the stdlib module (note the log_to_slack kwarg and the .task and
# .slack_title attributes used below), and the live-migration helpers are
# assumed to come from the sibling migration scripts
from cosmicops import CosmicOps, CosmicSQL, logging
from live_migrate_virtual_machine import live_migrate
from live_migrate_virtual_machine_volumes import live_migrate_volumes

def main(dry_run, zwps_cluster, destination_cluster, virtual_machines,
         force_end_hour):
    """Empty ZWPS by migrating VMs and/or it's volumes to the destination cluster."""

    click_log.basic_config()

    if force_end_hour:
        try:
            force_end_hour = int(force_end_hour)
        except ValueError as e:
            logging.error(
                f"Specified time '{force_end_hour}' is not a valid integer: '{e}'"
            )
            sys.exit(1)
        if force_end_hour >= 24:
            logging.error(f"Specified time '{force_end_hour}' should be < 24")
            sys.exit(1)

    profile = 'nl2'

    log_to_slack = True
    logging.task = 'Live Migrate VM Volumes'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    if not dry_run:
        cs = CosmicSQL(server=profile, dry_run=dry_run)
    else:
        cs = None

    zwps_storage_pools = []
    for storage_pool in co.get_all_storage_pools():
        if zwps_cluster.upper() in storage_pool['name']:
            zwps_storage_pools.append(storage_pool)

    logging.info('ZWPS storage pools found:')
    for zwps_storage_pool in zwps_storage_pools:
        logging.info(f" - '{zwps_storage_pool['name']}'")

    target_cluster = co.get_cluster(name=destination_cluster)
    if not target_cluster:
        logging.error(
            f"Destination cluster not found: '{destination_cluster}'!")
        sys.exit(1)

    try:
        destination_storage_pools = target_cluster.get_storage_pools(
            scope='CLUSTER')
    except IndexError:
        logging.error(
            f"No storage pools found for cluster '{target_cluster['name']}'")
        sys.exit(1)
    logging.info('Destination storage pools found:')
    for target_storage_pool in destination_storage_pools:
        logging.info(f" - '{target_storage_pool['name']}'")

    target_storage_pool = random.choice(destination_storage_pools)

    volumes = []
    for zwps_storage_pool in zwps_storage_pools:
        vols = co.get_all_volumes(list_all=True,
                                  storageid=zwps_storage_pool['id'])
        if vols:
            volumes += vols

    vm_ids = []
    logging.info('Volumes found:')
    for volume in volumes:
        for virtual_machine in virtual_machines:
            # Detached volumes may have no 'vmname', so match defensively
            if re.search(virtual_machine, volume.get('vmname', ''),
                         re.IGNORECASE):
                logging.info(
                    f" - '{volume['name']}' on VM '{volume['vmname']}'")
                if volume['virtualmachineid'] not in vm_ids:
                    vm_ids.append(volume['virtualmachineid'])

    vms = []
    for vm_id in vm_ids:
        vm = co.get_vm(id=vm_id)
        # Skip VMs pinned to dedicated hardware via a 'DedicatedGrp' affinity
        # group (a continue inside a nested loop would not skip the append)
        if any('DedicatedGrp' in affinitygroup['name']
               for affinitygroup in vm.get('affinitygroup', [])):
            logging.warning(
                f"Skipping VM '{vm['name']}' because of 'DedicatedGrp' affinity group"
            )
            continue
        vms.append(vm)

    logging.info('Virtual machines found:')
    for vm in vms:
        logging.info(f" - '{vm['name']}'")

    logging.info(
        f"Starting live migration of volumes and/or virtual machines from the ZWPS storage pools to cluster '{target_cluster['name']}'"
    )

    for vm in vms:
        """ Can we start a new migration? """
        if force_end_hour:
            now = datetime.datetime.now(pytz.timezone('CET'))
            if now.hour >= force_end_hour:
                logging.info(
                    f"Stopping migration batch. We are not starting new migrations after '{force_end_hour}':00",
                    log_to_slack=log_to_slack)
                sys.exit(0)

        source_host = co.get_host(id=vm['hostid'])
        source_cluster = co.get_cluster(zone='nl2',
                                        id=source_host['clusterid'])
        if source_cluster['name'] == target_cluster['name']:
            # VM is already on the destination cluster, so we only need to
            # migrate its volumes to the target storage pool
            logging.info(
                f"Starting live migration of volumes of VM '{vm['name']}' to storage pool '{target_storage_pool['name']}' ({target_storage_pool['id']})",
                log_to_slack=log_to_slack)
            live_migrate_volumes(target_storage_pool['name'], co, cs, dry_run,
                                 False, log_to_slack, 0, vm['name'], True)
        else:
            # VM needs to be live migrated to the destination cluster,
            # volumes included
            live_migrate(co=co,
                         cs=cs,
                         cluster=target_cluster['name'],
                         vm_name=vm['name'],
                         destination_dc=None,
                         add_affinity_group=None,
                         is_project_vm=None,
                         zwps_to_cwps=True,
                         log_to_slack=log_to_slack,
                         dry_run=dry_run)
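
The second entry point takes multiple VM name patterns (each is used as a regex against the volumes' vmname field), so its Click wiring would most naturally use a variadic argument. A sketch under the same assumptions as above; the option names and defaults are again illustrative:

import click

@click.command()
@click.option('--dry-run/--exec', default=True,
              help='Only show changes (default) or actually migrate')
@click.option('--force-end-hour', default=None,
              help='Do not start new migrations at or after this hour (0-23, CET)')
@click.argument('zwps_cluster')
@click.argument('destination_cluster')
@click.argument('virtual_machines', nargs=-1, required=True)
def cli(**kwargs):
    # virtual_machines arrives as a tuple of patterns, which the re.search
    # loop in main iterates over directly
    main(**kwargs)

if __name__ == '__main__':
    cli()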