Example #1
def main():
    with open('/etc/instance-dumper.yaml') as f:
        config = yaml.safe_load(f)

    clients = mwopenstackclients.Clients(envfile='/etc/novaobserver.yaml')
    servers = clients.allinstances()

    data = {}
    for s in servers:
        server_info = {
            'name': s.name,
            'created_by': s.user_id,
            'created_at': s.created,
            'status': s.status,
            'project': s.tenant_id,
            'ips': s.networks['public'],
            'image': get_image_name(clients, s.image['id']),
        }
        server_info.update(get_enc_info(config['enc_host'], s.tenant_id, s.name))
        if s.tenant_id in data:
            data[s.tenant_id].append(server_info)
        else:
            data[s.tenant_id] = [server_info]

    with open(config['output_path'], 'w') as f:
        json.dump(data, f)
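The script above writes a JSON document mapping each tenant ID to a list of instance records. A hypothetical illustration of that output shape (values are made up, and any extra fields merged in by get_enc_info() are omitted because they are not shown in this excerpt):

# Illustrative only: the structure json.dump(data, f) serializes above.
example_output = {
    "project-foo": [
        {
            "name": "web-01",
            "created_by": "someuser",
            "created_at": "2021-01-01T00:00:00Z",
            "status": "ACTIVE",
            "project": "project-foo",
            "ips": ["203.0.113.10"],
            "image": "debian-11.0-bullseye",
        }
    ]
}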
Example #2
def list_proxies(args):
    """List proxies for a tenant."""
    client = mwopenstackclients.Clients(envfile=args.envfile)
    base_url = url_template(client).replace('$(tenant_id)s', args.project)

    resp = requests.get('{}/mapping'.format(base_url))
    if resp.status_code == 400 and resp.text == 'No such project':
        raise Exception('Unknown project {}'.format(args.project))
    data = resp.json()
    row = "{:<48} {}"
    print(row.format('domain', 'backend'))
    print(row.format('='*48, '='*24))
    for route in sorted(data['routes'], key=operator.itemgetter('domain')):
        print(row.format(route['domain'], route['backends'][0]))
Example #3
def main():
    parser = argparse.ArgumentParser(description='Instance distribution check')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=0,
                        dest='loglevel',
                        help='Increase logging verbosity')
    parser.add_argument('--envfile',
                        default='/etc/novaobserver.yaml',
                        help='Path to OpenStack authentication YAML file')
    parser.add_argument('--config',
                        type=argparse.FileType('r'),
                        help='Path to yaml config file')

    args = parser.parse_args()

    logging.basicConfig(
        level=max(logging.DEBUG, logging.WARNING - (10 * args.loglevel)),
        format='%(asctime)s %(name)-12s %(levelname)-8s: %(message)s',
        datefmt='%Y-%m-%dT%H:%M:%SZ')
    logging.captureWarnings(True)
    # Quiet some noisy 3rd-party logger channels
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    config = yaml.safe_load(args.config)
    client = mwopenstackclients.Clients(envfile=args.envfile)

    classification = classify_instances(project=config['project'],
                                        servers=client.allinstances(
                                            projectid=config['project'],
                                            allregions=True),
                                        classifier=config['classifier'])
    failed_classes = get_failed_classes(classification)

    if failed_classes:
        print("CRITICAL: {} class instances not spread out enough".format(
            ','.join(failed_classes)))
        return 2
    else:
        print("OK: All critical toolforge instances are spread out enough")
        return 0  # OK exit status for a monitoring-style check
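Because main() returns a Nagios-style exit code (0 for OK, 2 for CRITICAL), a typical entry point passes that value to sys.exit(). The original excerpt does not show this wiring; the snippet below is a minimal sketch of it:

import sys

if __name__ == '__main__':
    # Propagate the check result as the process exit code.
    sys.exit(main())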
Example #4
def delete_proxy(args):
    """Delete a proxy."""
    client = mwopenstackclients.Clients(envfile=args.envfile)
    base_url = url_template(client).replace('$(tenant_id)s', args.project)

    dns = mwopenstackclients.DnsManager(client, tenant=TENANT)
    z = dns.zones(name=ZONE)[0]  # blow up if zone doesn't exist
    zone_id = z['id']
    fqdn = '{}.{}'.format(args.host, ZONE)

    # Remove proxy
    resp = requests.delete('{}/mapping/{}'.format(base_url, fqdn.rstrip('.')))
    if resp:
        # Remove DNS
        rs = dns.recordsets(zone_id, name=fqdn)[0]
        dns.delete_recordset(zone_id, rs['id'])
    else:
        raise Exception(
            'HTTP {} response from dynamicproxy: {}'.format(
                resp.status_code, resp.text))
Example #5
def add_proxy(args):
    """Setup DNS and dynamicproxy mapping from a host to a URL."""
    client = mwopenstackclients.Clients(envfile=args.envfile)
    base_url = url_template(client).replace('$(tenant_id)s', args.project)

    dns = mwopenstackclients.DnsManager(client, tenant=TENANT)
    proxyip = socket.gethostbyname(urlparse.urlparse(base_url).hostname)
    z = dns.zones(name=ZONE)[0]  # blow up if zone doesn't exist
    zone_id = z['id']
    fqdn = '{}.{}'.format(args.host, ZONE)
    dns.ensure_recordset(zone_id, fqdn, 'A', [proxyip])

    resp = requests.put(
        '{}/mapping'.format(base_url),
        data=json.dumps({
            'backends': [args.target_url],
            'domain': fqdn.rstrip('.')
        }))
    if not resp:
        raise Exception(
            'HTTP {} response from dynamicproxy: {}'.format(
                resp.status_code, resp.text))
Example #6
def update_proxies(args):
    """List proxies for a tenant."""
    client = mwopenstackclients.Clients(envfile=args.envfile)

    dns = mwopenstackclients.DnsManager(client, tenant=TENANT)

    allprojects = client.allprojects()
    allprojectslist = [project.name for project in allprojects]

    for projectid in allprojectslist:
        print(" ----  project: %s" % projectid)
        base_url = url_template(client).replace('$(tenant_id)s', projectid)

        resp = requests.get('{}/mapping'.format(base_url))
        if resp.status_code != 400:
            data = resp.json()
            for route in sorted(data['routes'],
                                key=operator.itemgetter('domain')):
                z = dns.zones(name=ZONE)[0]  # blow up if zone doesn't exist
                zone_id = z['id']
                fqdn = route['domain']
                if not fqdn.endswith('.'):
                    fqdn += "."
                recordset = dns.recordsets(zone_id, fqdn)
                if not recordset:
                    print("Bad news! Can't find %s in zone %s" %
                          (fqdn, zone_id))
                elif len(recordset) > 1:
                    print("Bad news! Multiple recordsets for %s." % fqdn)
                elif len(recordset[0]['records']) > 1:
                    print("Bad news! Multiple records for %s." % fqdn)
                elif recordset[0]['records'][0] != args.ip:
                    print("Updating recordset %s from %s to %s" %
                          (fqdn, recordset[0]['records'][0], args.ip))
                    if not args.dryrun:
                        dns.ensure_recordset(zone_id, fqdn, 'A', [args.ip])
                else:
                    print("This one (%s) is already good." % fqdn)
Example #7
def update(config, envfile):
    floating_ip_ptr_fqdn_matching_regex = re.compile(
        config['floating_ip_ptr_fqdn_matching_regex'])

    client = mwopenstackclients.Clients(envfile=envfile)

    project_main_zone_ids = {}
    public_addrs = {}
    existing_As = []
    # Go through every tenant
    for tenant in client.keystoneclient().projects.list():
        logger.debug("Checking project %s", tenant.name)
        if tenant.name == 'admin':
            continue

        server_addresses = {}
        nova_client = client.novaclient(tenant.name)
        # Go through every instance
        for server in nova_client.servers.list():
            for network_name, addresses in server.addresses.items():
                public = [
                    str(ip['addr']) for ip in addresses
                    if ip['OS-EXT-IPS:type'] == 'floating'
                ]
                # If the instance has a public IP...
                if public:
                    # Record their public IPs and generate their public name
                    # according to FQDN_TEMPLATE. Technically there can be more
                    # than one floating (and/or fixed) IP, although in practice
                    # that is never the case...
                    server_addresses[server.name] = public
                    A_FQDN = FQDN_TEMPLATE.format(server=server.name,
                                                  project=tenant.name)
                    public_addrs[A_FQDN, tenant.name] = True, public
                    logger.debug("Found public IP %s -> %s", public, A_FQDN)

        dns = mwopenstackclients.DnsManager(client, tenant=tenant.name)
        existing_match_regex = re.compile(
            FQDN_REGEX.format(project=tenant.name))
        # Now go through every zone the project controls
        for zone in dns.zones():
            logger.debug("Checking zone %s", zone['name'])
            # If this is their main zone, record the ID for later use
            if zone['name'] == PROJECT_ZONE_TEMPLATE.format(
                    project=tenant.name):
                project_main_zone_ids[tenant.name] = zone['id']

            # Go through every recordset in the zone
            for recordset in dns.recordsets(zone['id']):
                logger.debug("Found recordset %s %s", recordset['name'],
                             recordset['type'])
                existing_As.append(recordset['name'])
                # No IPv6 support in labs so no AAAAs
                if recordset['type'] != 'A':
                    continue

                match = existing_match_regex.match(recordset['name'])
                if match:
                    # Matches instances for this project, managed by this script
                    if (match.group(1) in server_addresses
                            and set(recordset['records']) != set(
                                server_addresses[match.group(1)])):
                        # ... But instance has a different set of IPs. Update!
                        if recordset['description'] == MANAGED_DESCRIPTION:
                            new_records = server_addresses[match.group(1)]
                            logger.info(
                                "Updating type A record for %s"
                                " - instance has different IPs - correct: %s"
                                " vs. current: %s",
                                recordset['name'],
                                str(new_records),
                                str(recordset['records']),
                            )
                            try:
                                dns.update_recordset(
                                    zone['id'],
                                    recordset['id'],
                                    new_records,
                                )
                            except Exception:
                                logger.exception('Failed to update %s',
                                                 recordset['name'])
                        else:
                            managed_description_error('update', 'A',
                                                      recordset['name'])
                    elif match.group(1) not in server_addresses:
                        # ... But instance does not actually exist. Delete!
                        if recordset['description'] == MANAGED_DESCRIPTION:
                            logger.info(
                                "Deleting type A record for %s "
                                " - instance does not exist",
                                recordset['name'])
                            try:
                                dns.delete_recordset(zone['id'],
                                                     recordset['id'])
                            except Exception:
                                logger.exception('Failed to delete %s',
                                                 recordset['name'])
                        else:
                            managed_description_error('delete', 'A',
                                                      recordset['name'])
                elif '*' not in recordset['name']:
                    # Recordset is not one of our FQDN_TEMPLATE ones, so just
                    # store it so we can reflect its existence in PTR records
                    # where appropriate.
                    public_addrs[recordset['name'],
                                 tenant.name] = (False, recordset['records'])

    # Now we go through all the A record data we have stored
    public_PTRs = {}
    for (A_FQDN, project), (managed_here, IPs) in public_addrs.items():
        # Set up any that need to be and don't already exist
        if managed_here and A_FQDN not in existing_As:
            dns = mwopenstackclients.DnsManager(client, tenant=project)
            # Create instance-$instance.$project.wmflabs.org 120 IN A $IP
            # No IPv6 support in labs so no AAAAs
            logger.info("Creating A record for %s", A_FQDN)
            if project in project_main_zone_ids:
                try:
                    dns.create_recordset(project_main_zone_ids[project],
                                         A_FQDN,
                                         'A',
                                         IPs,
                                         description=MANAGED_DESCRIPTION)
                except Exception:
                    logger.exception('Failed to create %s', A_FQDN)
            else:
                logger.warning("Oops! No main zone for project %s.", project)

        # Generate PTR record data, handling rewriting for RFC 2317 delegation as
        # configured
        for IP in IPs:
            PTR_FQDN = ipaddress.ip_address(IP).reverse_pointer + '.'
            delegated_PTR_FQDN = floating_ip_ptr_fqdn_matching_regex.sub(
                config['floating_ip_ptr_fqdn_replacement_pattern'], PTR_FQDN)
            if delegated_PTR_FQDN.endswith(config['floating_ip_ptr_zone']):
                if delegated_PTR_FQDN in public_PTRs:
                    public_PTRs[delegated_PTR_FQDN].append(A_FQDN)
                else:
                    public_PTRs[delegated_PTR_FQDN] = [A_FQDN]
            else:
                logger.warning(
                    "Not handling %s" + " because it doesn't end with %s",
                    delegated_PTR_FQDN, config['floating_ip_ptr_zone'])

    # Clean up reverse proxies. We don't want to generate PTR records for dozens
    # or hundreds of hostnames that are sharing a single reverse proxy like
    # project-proxy handles. If any IP has more than 10 reverse mappings then we
    # will try to figure out a reasonable truncated list.
    proxies = (k for k in public_PTRs if len(public_PTRs[k]) > 10)
    proxy_fqdn_re = re.compile(
        FQDN_TEMPLATE.replace(r'.', r'\.').format(server='(.*)',
                                                  project='(.*)'))
    for ptr in proxies:
        logger.info("Trimming FQDN list for %s", ptr)
        # Usually there will be an FQDN_TEMPLATE host in there somewhere
        fqdns = [h for h in public_PTRs[ptr] if proxy_fqdn_re.match(h)]
        if not fqdns:
            # If for some reason there are no FQDN_TEMPLATE hosts take the whole
            # giant list, but sorted just for fun
            fqdns = sorted(public_PTRs[ptr])
        # Only use the first 10 no matter how many ended up being found
        public_PTRs[ptr] = fqdns[:10]
        logger.debug("Trimmed FQDN list for %s is %s", ptr, public_PTRs[ptr])

    # Set up designate client to write recordsets with
    dns = mwopenstackclients.DnsManager(client, tenant='wmflabsdotorg')
    # Find the correct zone ID for the floating IP zone
    floating_ip_ptr_zone_id = None
    for zone in dns.zones():
        if zone['name'] == config['floating_ip_ptr_zone']:
            floating_ip_ptr_zone_id = zone['id']
            break

    # Zone should already exist!
    assert floating_ip_ptr_zone_id is not None

    existing_public_PTRs = {}
    # Go through each record in the delegated PTR zone, deleting any recordset
    # with our managed description that no longer corresponds to a public
    # address, and updating any whose records don't match our public_PTRs data.
    for recordset in dns.recordsets(floating_ip_ptr_zone_id):
        existing_public_PTRs[recordset['name']] = recordset
        if recordset['type'] == 'PTR':
            if recordset['name'] not in public_PTRs:
                if recordset['description'] == MANAGED_DESCRIPTION:
                    # Delete whole recordset, it shouldn't exist anymore.
                    logger.info("Deleting PTR record %s", recordset['name'])
                    try:
                        dns.delete_recordset(floating_ip_ptr_zone_id,
                                             recordset['id'])
                    except Exception:
                        logger.exception('Failed to delete %s',
                                         recordset['name'])
                else:
                    managed_description_error('delete', 'PTR',
                                              recordset['name'])
                continue
            new_records = set(public_PTRs[recordset['name']])
            if new_records != set(recordset['records']):
                if recordset['description'] == MANAGED_DESCRIPTION:
                    # Update the recordset to have the correct IPs
                    logger.info("Updating PTR record %s", recordset['name'])
                    try:
                        dns.update_recordset(
                            floating_ip_ptr_zone_id,
                            recordset['id'],
                            list(new_records),
                        )
                    except Exception:
                        logger.exception('Failed to update %s',
                                         recordset['name'])
                else:
                    managed_description_error('update', 'PTR',
                                              recordset['name'])

    # Create PTRs in delegated PTR zone
    for delegated_PTR_FQDN, records in public_PTRs.items():
        # We already dealt with updating existing PTRs above.
        if delegated_PTR_FQDN not in existing_public_PTRs:
            logger.info("Creating PTR record %s pointing to %s",
                        delegated_PTR_FQDN, str(records))
            try:
                dns.create_recordset(floating_ip_ptr_zone_id,
                                     delegated_PTR_FQDN,
                                     'PTR',
                                     records,
                                     description=MANAGED_DESCRIPTION)
            except Exception:
                logger.exception('Failed to create %s', delegated_PTR_FQDN)
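The function above relies on several module-level names (FQDN_TEMPLATE, FQDN_REGEX, PROJECT_ZONE_TEMPLATE, MANAGED_DESCRIPTION and managed_description_error) that are defined elsewhere in the original module. The sketch below is a hypothetical reconstruction of their shape, inferred only from how they are used here; the concrete values are illustrative, not the real ones:

import logging

logger = logging.getLogger(__name__)

# Inferred from the comment "Create instance-$instance.$project.wmflabs.org ..."
FQDN_TEMPLATE = 'instance-{server}.{project}.wmflabs.org.'
# One capture group for the instance name, formatted per project.
FQDN_REGEX = r'instance-(.*)\.{project}\.wmflabs\.org\.'
PROJECT_ZONE_TEMPLATE = '{project}.wmflabs.org.'
# Marker stored in the recordset description so the script only touches
# records it created itself; the exact wording is an assumption.
MANAGED_DESCRIPTION = 'MANAGED BY dns-floating-ip-updater -- DO NOT EDIT'

def managed_description_error(action, record_type, name):
    # Refuse to touch records that were not created by this script.
    logger.warning(
        "Did not %s %s record for %s because it is not managed by this script",
        action, record_type, name)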
Example #8
import argparse
import mwopenstackclients

parser = argparse.ArgumentParser(description='Learn about image usage.')
parser.add_argument('--project',
                    dest='project',
                    help='limit stats to a single project',
                    default=None)
parser.add_argument('--imageid',
                    dest='imageid',
                    help='Check usage of the specified image',
                    default=None)
args = parser.parse_args()

allinstances = mwopenstackclients.Clients().allinstances(args.project)
bigDict = {instance.id: instance for instance in allinstances}

usedimages = set()
activeimages = set()
usinginstances = []

imagelist = mwopenstackclients.Clients().globalimages()
images = {image.id: image for image in imagelist}
allimages = set(images.keys())

for ID in bigDict.keys():
    instance = bigDict[ID]

    imageid = instance.image['id']
Example #9
def main():
    """Manage Designate DNS records for Wiki Replicas."""
    parser = argparse.ArgumentParser(description='Wiki Replica DNS Manager')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=0,
                        dest='loglevel',
                        help='Increase logging verbosity')
    parser.add_argument('--config',
                        default='/etc/wikireplica_dns.yaml',
                        help='Path to YAML config file')
    parser.add_argument('--envfile',
                        default='/etc/novaadmin.yaml',
                        help='Path to OpenStack authentication YAML file')
    parser.add_argument('--zone', help='limit changes to the given zone')
    parser.add_argument('--aliases',
                        action='store_true',
                        help='Update per-wiki CNAME records')
    parser.add_argument('--shard', help='limit changes to the given shard')
    args = parser.parse_args()

    logging.basicConfig(
        level=max(logging.DEBUG, logging.WARNING - (10 * args.loglevel)),
        format='%(asctime)s %(name)-12s %(levelname)-8s: %(message)s',
        datefmt='%Y-%m-%dT%H:%M:%SZ')
    logging.captureWarnings(True)
    # Quiet some noisy 3rd-party loggers
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)
    logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    all_zones = [z for z in config['zones']]
    if args.zone:
        if args.zone not in all_zones:
            parser.error('Unknown zone "{}". Expected one of:\n\t- {}'.format(
                args.zone, '\n\t- '.join(all_zones)))
        zones = [args.zone]
    else:
        zones = all_zones

    all_shards = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8']
    if args.shard:
        if args.shard not in all_shards:
            parser.error('Unknown shard "{}". Expected one of:\n\t- {}'.format(
                args.shard, '\n\t- '.join(all_shards)))
        shards = [args.shard]
    else:
        shards = all_shards

    dns = mwopenstackclients.DnsManager(
        mwopenstackclients.Clients(envfile=args.envfile), 'noauth-project')
    for zone in zones:
        r = dns.zones(name=zone)
        if not r:
            logger.warning('Creating zone %s', zone)
            z = dns.create_zone(zone, email='*****@*****.**', ttl=60)
        else:
            z = r[0]
        zone_id = z['id']

        for svc, ips in config['zones'][zone].items():
            # Goofy, but true -- Designate needs FQDNs for names.
            fqdn = '{}.{}'.format(svc, zone)
            dns.ensure_recordset(zone_id, fqdn, 'A', ips)

            if args.aliases and svc in shards:
                # Ensure that there are wikidb aliases for shards
                dblist = requests.get(
                    'https://noc.wikimedia.org/conf/dblists/{}.dblist'.format(
                        svc))
                try:
                    dblist.raise_for_status()
                except requests.exceptions.HTTPError:
                    logger.warning('DBList "%s" not found', svc)
                else:
                    for wikidb in dblist.text.splitlines():
                        if wikidb.startswith('#'):
                            continue
                        db_fqdn = '{}.{}'.format(wikidb, zone)
                        dns.ensure_recordset(zone_id, db_fqdn, 'CNAME', [fqdn])
                        # Take a small break to be nicer to Designate
                        time.sleep(0.25)

            if fqdn in config['cnames']:
                # Add additional aliases for this fqdn
                for cname in config['cnames'][fqdn]:
                    cname_zone = find_zone_for_fqdn(dns, cname)
                    if cname_zone:
                        dns.ensure_recordset(cname_zone['id'], cname, 'CNAME',
                                             [fqdn])
                    else:
                        logger.warning('Failed to find zone for %s', cname)
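The wikireplica_dns.yaml file read above is expected to contain a zones mapping (zone -> service -> list of IPs) and a cnames mapping (service FQDN -> list of aliases). A hypothetical illustration of the parsed structure; the zone names and addresses are made up:

# What yaml.safe_load(f) might return for this script; values are illustrative.
config = {
    'zones': {
        'analytics.db.svc.eqiad.wmflabs.': {
            's1': ['10.64.37.14'],
            's2': ['10.64.37.15'],
        },
    },
    'cnames': {
        's1.analytics.db.svc.eqiad.wmflabs.': [
            'enwiki.analytics.db.svc.eqiad.wmflabs.',
        ],
    },
}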
Example #10
logging.basicConfig(
    level=max(logging.DEBUG, logging.WARNING - (10 * args.loglevel)),
    format='%(asctime)s %(name)-12s %(levelname)-8s: %(message)s',
    datefmt='%Y-%m-%dT%H:%M:%SZ'
)
logging.captureWarnings(True)
# Quiet some noisy 3rd-party loggers
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)

config = yaml.safe_load(args.config_file)

floating_ip_ptr_fqdn_matching_regex = re.compile(
    config['floating_ip_ptr_fqdn_matching_regex'])

client = mwopenstackclients.Clients(envfile=args.envfile)

project_main_zone_ids = {}
public_addrs = {}
existing_As = []
# Go through every tenant
for tenant in client.keystoneclient().projects.list():
    logger.debug("Checking project %s", tenant.name)
    if tenant.name == 'admin':
        continue

    server_addresses = {}
    nova_client = client.novaclient(tenant.name)
    # Go through every instance
    for server in nova_client.servers.list():
        for network_name, addresses in server.addresses.items():
Example #11
#!/usr/bin/python3

import logging
import yaml

import mwopenstackclients
import rbd2backy2

with open("/etc/wmcs_backup_images.yaml") as f:
    config = yaml.safe_load(f)

clients = mwopenstackclients.Clients(envfile="/etc/novaadmin.yaml")
glance = clients.glanceclient()
images = glance.images.list()

for image in images:
    logging.info("Backing up %s (%s)" % (image.id, image.name))
    rbd2backy2.backup_volume(config["ceph_pool"], image.id,
                             config["live_for_days"])
Example #12
def backup_this_project_on_this_host(config, project):
    # This should return the short hostname, e.g. 'cloudvirt1024'
    hostname = socket.gethostname()

    if config["project_assignments"].get(project, "").lower() == "ignore":
        return False

    if project in config["project_assignments"]:
        return config["project_assignments"][project] == hostname

    return config["project_assignments"].get("ALLOTHERS", "") == hostname


with open("/etc/wmcs_backup_instances.yaml") as f:
    config = yaml.safe_load(f)

openstackclients = mwopenstackclients.Clients(envfile="/etc/novaobserver.yaml")
ceph_servers = rbd2backy2.ceph_vms(config["ceph_pool"])

for project in openstackclients.allprojects():
    if not backup_this_project_on_this_host(config, project.id):
        continue

    servers = openstackclients.allinstances(projectid=project.id)

    not_in_ceph = []
    for server in servers:
        if exclude_server(config, project.id, server.name):
            continue

        if server.id in ceph_servers:
            logging.info("Backing up %s:%s" % (project, server.name))
Example #13
#!/usr/bin/python3

import argparse
import mwopenstackclients

parser = argparse.ArgumentParser(description="Learn about image usage.")
parser.add_argument(
    "--project", dest="project", help="limit stats to a single project", default=None
)
parser.add_argument(
    "--imageid", dest="imageid", help="Check usage of the specified image", default=None
)
args = parser.parse_args()

allinstances = mwopenstackclients.Clients().allinstances(args.project, allregions=True)
bigDict = {instance.id: instance for instance in allinstances}

usedimages = set()
activeimages = set()
usinginstances = []

imagelist = mwopenstackclients.Clients().globalimages()
images = {image.id: image for image in imagelist}
allimages = set(images.keys())

for ID in list(bigDict.keys()):
    instance = bigDict[ID]

    imageid = instance.image["id"]

    usedimages.add(imageid)
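The excerpt is truncated here. A plausible continuation (not taken from the original) would compare the set of image IDs referenced by instances against the full image list to report unused images:

# A sketch of what the remaining logic might do with the sets built above.
unusedimages = allimages - usedimages
for imageid in sorted(unusedimages):
    print("Image %s (%s) is not used by any instance" %
          (imageid, images[imageid].name))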
Example #14
def main():
    """Manage Designate DNS records for Wiki Replicas."""
    parser = argparse.ArgumentParser(description="Wiki Replica DNS Manager")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        dest="loglevel",
        help="Increase logging verbosity",
    )
    parser.add_argument("--config",
                        default="/etc/wikireplica_dns.yaml",
                        help="Path to YAML config file")
    parser.add_argument(
        "--envfile",
        default="/etc/novaadmin.yaml",
        help="Path to OpenStack authentication YAML file",
    )
    parser.add_argument("--zone", help="limit changes to the given zone")
    parser.add_argument("--aliases",
                        action="store_true",
                        help="Update per-wiki CNAME records")
    parser.add_argument("--shard", help="limit changes to the given shard")
    args = parser.parse_args()

    logging.basicConfig(
        level=max(logging.DEBUG, logging.WARNING - (10 * args.loglevel)),
        format="%(asctime)s %(name)-12s %(levelname)-8s: %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%SZ",
    )
    logging.captureWarnings(True)
    # Quiet some noisy 3rd-party loggers
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("iso8601.iso8601").setLevel(logging.WARNING)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    all_zones = [z for z in config["zones"]]
    if args.zone:
        if args.zone not in all_zones:
            parser.error('Unknown zone "{}". Expected one of:\n\t- {}'.format(
                args.zone, "\n\t- ".join(all_zones)))
        zones = [args.zone]
    else:
        zones = all_zones

    all_shards = ["s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8"]
    if args.shard:
        if args.shard not in all_shards:
            parser.error('Unknown shard "{}". Expected one of:\n\t- {}'.format(
                args.shard, "\n\t- ".join(all_shards)))
        shards = [args.shard]
    else:
        shards = all_shards

    for zone in zones:
        logger.warning("Ensuring %s" % zone)

        if zone.endswith("wmflabs."):
            dns = mwopenstackclients.DnsManager(
                mwopenstackclients.Clients(envfile=args.envfile),
                "noauth-project")
        elif zone.endswith("db.svc.wikimedia.cloud."):
            dns = mwopenstackclients.DnsManager(
                mwopenstackclients.Clients(envfile=args.envfile),
                "clouddb-services")
        else:
            logging.error(
                "This zone is in an unknown tld; supported are wmflabs and "
                "wikimedia.cloud")
            continue

        r = dns.zones(name=zone)
        if not r:
            logging.error(
                "Zone %s does not exist.  Please create it and re-run.\n"
                "Example: \n\n"
                "openstack zone create --sudo-project-id "
                "clouddb-services --email [email protected] %s\n" %
                (zone, zone))
            continue
        else:
            z = r[0]
        zone_id = z["id"]

        for svc, ips in config["zones"][zone].items():
            # Goofy, but true -- Designate needs FQDNs for names.
            fqdn = "{}.{}".format(svc, zone)
            dns.ensure_recordset(zone_id, fqdn, "A", ips)

            if args.aliases and svc in shards:
                # Ensure that there are wikidb aliases for shards
                dblist = requests.get(
                    "https://noc.wikimedia.org/conf/dblists/{}.dblist".format(
                        svc))
                try:
                    dblist.raise_for_status()
                except requests.exceptions.HTTPError:
                    logger.warning('DBList "%s" not found', svc)
                else:
                    for wikidb in dblist.text.splitlines():
                        if wikidb.startswith("#"):
                            continue
                        db_fqdn = "{}.{}".format(wikidb, zone)
                        dns.ensure_recordset(zone_id, db_fqdn, "CNAME", [fqdn])
                        # Take a small break to be nicer to Designate
                        time.sleep(0.25)

            if fqdn in config["cnames"]:
                # Add additional aliases for this fqdn
                for cname in config["cnames"][fqdn]:
                    cname_zone = find_zone_for_fqdn(dns, cname)
                    if cname_zone:
                        dns.ensure_recordset(cname_zone["id"], cname, "CNAME",
                                             [fqdn])
                    else:
                        logger.warning("Failed to find zone for %s", cname)