def main(profile, dry_run, skip_version, older_then, skip_zone, only_zone, restart_agent):
    """Destroy SVM per zone and waits for a new one"""

    click_log.basic_config()

    if dry_run:
        logging.info('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run)
    if older_then:
        older_then = datetime.strptime(f"{older_then}T00:00:00+0200", '%Y%m%dT%H:%M:%S%z')

    svms = co.get_all_systemvms()
    zones = defaultdict(list)
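    # Apply the zone/version/age filters and group the remaining system VMs by zone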
    for svm in svms:
        if only_zone and co.get_host(name=svm['name']).get('zonename') != only_zone:
            continue
        if skip_zone and co.get_host(name=svm['name']).get('zonename') == skip_zone:
            continue
        if skip_version and co.get_host(name=svm['name']).get('version') == skip_version:
            continue
        if older_then and datetime.strptime(svm['created'], '%Y-%m-%dT%H:%M:%S%z') > older_then:
            continue
        zones[svm['zonename']].append(svm)

    for zone in zones:
        logging.info(f"Processing zone: {zone}")
        for vm in zones[zone]:
            if not vm.destroy():
                sys.exit(1)

            up = list()
            down = list(zones[zone])
            retries = 60
            zone_id = vm['zoneid']
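            # Poll until every system VM host in this zone reports Up and Enabled again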
            while len(up) < len(zones[zone]) or len(down) > 0:
                if not dry_run:
                    time.sleep(5)

                try:
                    systemvms = {x['name']: x for x in co.get_all_systemvms(zoneid=zone_id)}
                    host_status = {k: co.get_host(name=k) for k in systemvms}
                    up = list(filter(lambda x: x and x['state'] == 'Up' and x['resourcestate'] == 'Enabled', host_status.values()))
                    down = list(filter(lambda x: x and x['state'] != 'Up' and x['resourcestate'] == 'Enabled', host_status.values()))
                    retries -= 1
                    if retries == 0:
                        break
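                    # If requested, restart the agent on system VMs whose host is still down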
                    if down and restart_agent:
                        for d in down:
                            svm_object = list(filter(lambda x: x and x['name'].lower() == d['name'], systemvms.values()))[0]
                            svm_object.restart_agent()
                except KeyError:
                    # Ignore KeyError; the system VM is not yet available as a host
                    pass

            if retries == 0:
                logging.error("Exceeded retry count waiting for new systemvm")
                sys.exit(1)
# Example 2
def main(profile, destination_dc, dry_run, host, cluster):
    """Migrate all VMs on HOST to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate HV to new POD'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    host = co.get_host(name=host)
    if not host:
        sys.exit(1)

    for vm in host.get_all_vms() + host.get_all_project_vms():
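        # Live migrate every regular and project VM on this host to the target cluster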
        live_migrate(co=co,
                     cs=cs,
                     cluster=cluster,
                     vm_name=vm['name'],
                     destination_dc=destination_dc,
                     add_affinity_group=None,
                     is_project_vm=None,
                     zwps_to_cwps=None,
                     log_to_slack=log_to_slack,
                     dry_run=dry_run)
# Example 3
def main(profile, dry_run, router):
    """Live migrate ROUTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate HV to new POD'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    router = co.get_system_vm(name=router)
    if not router:
        sys.exit(1)

    source_host = co.get_host(id=router['hostid'])
    if not source_host:
        sys.exit(1)

    cluster = co.get_cluster(id=source_host['clusterid'])
    if not cluster:
        sys.exit(1)

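    # Pick a destination host in the same cluster to migrate the router to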
    destination_host = cluster.find_migration_host(router)
    if not destination_host:
        sys.exit(1)

    if not router.migrate(destination_host):
        sys.exit(1)
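# Example 4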
def main(profile, zwps_to_cwps, add_affinity_group, destination_dc, is_project_vm,
         skip_within_cluster, dry_run, vm, cluster):
    """Live migrate VM to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate VM'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    # Work around a migration issue: migrate within the same pod first to limit possible hiccups
    vm_instance = co.get_vm(name=vm, is_project_vm=is_project_vm)

    if not vm_instance:
        logging.error(f"Cannot migrate, VM '{vm}' not found!")
        sys.exit(1)

    if vm_instance['state'] != 'Running':
        logging.error(f"Cannot migrate, VM is in state '{vm_instance['state']}'")
        sys.exit(1)

    source_host = co.get_host(id=vm_instance['hostid'])
    source_cluster = co.get_cluster(id=source_host['clusterid'])
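    # Unless skipped, first attempt a migration within the source cluster to limit impact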
    if not skip_within_cluster:
        if not vm_instance.migrate_within_cluster(vm=vm_instance, source_cluster=source_cluster,
                                                  source_host=source_host, instancename=vm_instance):
            logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
            sys.exit(1)

    if not live_migrate(co, cs, cluster, vm, destination_dc, add_affinity_group, is_project_vm, zwps_to_cwps,
                        log_to_slack, dry_run):
        logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
        sys.exit(1)
    logging.info(f"VM Migration completed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
# Example 5
def empty_host(profile, shutdown, skip_disable, dry_run, host):
    co = CosmicOps(profile=profile, dry_run=dry_run)

    host_name = host
    host = co.get_host(name=host_name)
    if not host:
        raise RuntimeError(f"Host '{host_name}' not found")

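    # Disable the host first unless it is already disabled or --skip-disable was given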
    if not skip_disable and host['resourcestate'] != 'Disabled':
        if not host.disable():
            raise RuntimeError(f"Failed to disable host '{host['name']}'")

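    # host.empty() migrates all VMs off the host and returns (total, success, failed) counts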
    (total, success, failed) = host.empty()
    result_message = f"Result: {success} successful, {failed} failed out of {total} total VMs"

    if not failed and shutdown:
        host.set_uid_led(True)
        if not host.reboot(RebootAction.HALT):
            raise RuntimeError(f"Failed to shutdown host '{host['name']}'")
        host.wait_until_offline()
        result_message = f"{result_message}\nHost '{host['name']}' has shutdown, UID led is turned on"
    elif failed and shutdown:
        result_message = f"{result_message}\nNot shutting down host '{host['name']}' because migration completed with failed VMs"

    return result_message
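# Example 6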
def main(profile, is_project_router, only_when_required, cleanup, dry_run,
         router):
    """Router restart and upgrade script"""

    click_log.basic_config()

    log_to_slack = True

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    router = co.get_router(name=router, is_project_router=is_project_router)
    if not router:
        sys.exit(1)

    logging.instance_name = router['name']
    logging.slack_title = 'Domain'
    logging.slack_value = router['domain']

    host = co.get_host(id=router['hostid'])
    if not host:
        sys.exit(1)

    cluster = co.get_cluster(id=host['clusterid'])
    if not cluster:
        sys.exit(1)

    logging.cluster = cluster['name']

    if only_when_required and not router['requiresupgrade']:
        logging.info(
            f"Router '{router['name']}' does not need to be upgraded. Will not reboot because --only-when-required was specified."
        )
        sys.exit(0)

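    # With --cleanup, restart the router's VPC with cleanup; otherwise just reboot the router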
    if cleanup:
        if not router['vpcid']:
            logging.error(
                f"Cleanup specified but no VPC ID found for router '{router['name']}'"
            )
            sys.exit(1)

        logging.task = 'Restart VPC with clean up'

        vpc = co.get_vpc(id=router['vpcid'])
        if not vpc:
            sys.exit(1)

        if not vpc.restart():
            sys.exit(1)

        logging.info(
            f"Successfully restarted VPC '{vpc['name']}' with cleanup for router '{router['name']}'"
        )
    else:
        logging.task = 'Reboot virtual router'

        if not router.reboot():
            sys.exit(1)

        logging.info(f"Successfully rebooted router '{router['name']}'",
                     log_to_slack)
# Example 7
def main(dry_run, zwps_cluster, destination_cluster, virtual_machines,
         force_end_hour):
    """Empty ZWPS by migrating VMs and/or it's volumes to the destination cluster."""

    click_log.basic_config()

    if force_end_hour:
        try:
            force_end_hour = int(force_end_hour)
        except ValueError as e:
            logging.error(
                f"Specified time:'{force_end_hour}' is not a valid integer due to: '{e}'"
            )
            sys.exit(1)
        if force_end_hour >= 24:
            logging.error(f"Specified time:'{force_end_hour}' should be < 24")
            sys.exit(1)

    profile = 'nl2'

    log_to_slack = True
    logging.task = 'Live Migrate VM Volumes'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    if not dry_run:
        cs = CosmicSQL(server=profile, dry_run=dry_run)
    else:
        cs = None

    zwps_storage_pools = []
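    # Collect all storage pools that belong to the ZWPS cluster (matched on name)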
    for storage_pool in co.get_all_storage_pools():
        if zwps_cluster.upper() in storage_pool['name']:
            zwps_storage_pools.append(storage_pool)

    logging.info('ZWPS storage pools found:')
    for zwps_storage_pool in zwps_storage_pools:
        logging.info(f" - '{zwps_storage_pool['name']}'")

    target_cluster = co.get_cluster(name=destination_cluster)
    if not target_cluster:
        logging.error(
            f"Destination cluster '{destination_cluster}' not found!")
        sys.exit(1)

    try:
        destination_storage_pools = target_cluster.get_storage_pools(
            scope='CLUSTER')
    except IndexError:
        logging.error(
            f"No storage pools  found for cluster '{target_cluster['name']}'")
        sys.exit(1)
    logging.info('Destination storage pools found:')
    for target_storage_pool in destination_storage_pools:
        logging.info(f" - '{target_storage_pool['name']}'")

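    # Pick a random storage pool on the destination cluster as the migration target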
    target_storage_pool = random.choice(destination_storage_pools)

    volumes = []
    for zwps_storage_pool in zwps_storage_pools:
        vols = co.get_all_volumes(list_all=True,
                                  storageid=zwps_storage_pool['id'])
        if vols:
            volumes += vols

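    # Match the volumes against the requested virtual machine patterns and collect the owning VM IDs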
    vm_ids = []
    logging.info('Volumes found:')
    for volume in volumes:
        for virtual_machine in virtual_machines:
            if re.search(virtual_machine, volume['vmname'], re.IGNORECASE):
                logging.info(
                    f" - '{volume['name']}' on VM '{volume['vmname']}'")
                if volume['virtualmachineid'] not in vm_ids:
                    vm_ids.append(volume['virtualmachineid'])

    vms = []
    for vm_id in vm_ids:
        vm = co.get_vm(id=vm_id)
        # Skip VMs pinned to dedicated hardware via a 'DedicatedGrp' affinity group
        dedicated = False
        if vm['affinitygroup']:
            for affinitygroup in vm['affinitygroup']:
                if 'DedicatedGrp' in affinitygroup['name']:
                    logging.warning(
                        f"Skipping VM '{vm['name']}' because of 'DedicatedGrp' affinity group"
                    )
                    dedicated = True
                    break
        if dedicated:
            continue
        vms.append(vm)

    logging.info('Virtual machines found:')
    for vm in vms:
        logging.info(f" - '{vm['name']}'")

    logging.info(
        f"Starting live migration of volumes and/or virtual machines from the ZWPS storage pools to cluster '{target_cluster['name']}'"
    )

    for vm in vms:
        """ Can we start a new migration? """
        if force_end_hour:
            now = datetime.datetime.now(pytz.timezone('CET'))
            if now.hour >= force_end_hour:
                logging.info(
                    f"Stopping migration batch. We are not starting new migrations after '{force_end_hour}':00",
                    log_to_slack=log_to_slack)
                sys.exit(0)

        source_host = co.get_host(id=vm['hostid'])
        source_cluster = co.get_cluster(zone='nl2',
                                        id=source_host['clusterid'])
        if source_cluster['name'] == target_cluster['name']:
            """ VM is already on the destination cluster, so we only need to migrate the volumes to this storage pool """
            logging.info(
                f"Starting live migration of volumes of VM '{vm['name']}' to storage pool '{target_storage_pool['name']}' ({target_storage_pool['id']})",
                log_to_slack=log_to_slack)
            live_migrate_volumes(target_storage_pool['name'], co, cs, dry_run,
                                 False, log_to_slack, 0, vm['name'], True)
        else:
            """ VM needs to be migrated live to the destination cluster, including volumes """
            live_migrate(co=co,
                         cs=cs,
                         cluster=target_cluster['name'],
                         vm_name=vm['name'],
                         destination_dc=None,
                         add_affinity_group=None,
                         is_project_vm=None,
                         zwps_to_cwps=True,
                         log_to_slack=log_to_slack,
                         dry_run=dry_run)