Example #1
 def test_inject_opendaylight(self, mock_customize, mock_add_repo,
                              mock_git_archive):
     mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
     archive = '/dummytmp/puppet-opendaylight.tar'
     test_virt_ops = [{
         con.VIRT_UPLOAD:
         "{}:/etc/puppet/modules/".format(archive)
     }, {
         con.VIRT_RUN_CMD:
         'rm -rf /etc/puppet/modules/opendaylight'
     }, {
         con.VIRT_RUN_CMD:
         "cd /etc/puppet/modules/ && tar xvf "
         "puppet-opendaylight.tar"
     }, {
         con.VIRT_INSTALL: 'opendaylight'
     }]
     oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION,
                                    'dummy.qcow2',
                                    '/dummytmp/',
                                    uc_ip='192.0.2.2',
                                    os_version=con.DEFAULT_OS_VERSION)
     assert mock_git_archive.called
     assert mock_add_repo.called
     mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')

 def test_inject_opendaylight_docker(self, mock_customize, mock_add_repo,
                                     mock_git_archive, mock_build_docker):
     mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
     archive = '/dummytmp/puppet-opendaylight.tar'
     test_virt_ops = [
         {
             con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)
         },
         {
             con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'
         },
         {
             con.VIRT_RUN_CMD:
             "cd /etc/puppet/modules/ && tar xvf "
             "puppet-opendaylight.tar"
         },
         {
             con.VIRT_INSTALL: "java-1.8.0-openjdk"
         },
     ]
     oc_builder.inject_opendaylight('oxygen',
                                    'dummy.qcow2',
                                    '/dummytmp/',
                                    uc_ip='192.0.2.2',
                                    os_version=con.DEFAULT_OS_VERSION,
                                    docker_tag='latest')
     odl_url = "https://nexus.opendaylight.org/content/repositories" \
               "/opendaylight-oxygen-epel-7-x86_64-devel/"
     docker_cmds = [
         "RUN yum remove opendaylight -y", "RUN echo $'[opendaylight]\\n\\",
         "baseurl={}\\n\\".format(odl_url), "gpgcheck=0\\n\\",
         "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
         "RUN yum -y install opendaylight"
     ]
     src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
     assert mock_git_archive.called
     assert mock_add_repo.called
     # called_once_with is not a real Mock method (it silently returns a
     # truthy Mock); use assert_called_once_with so the check actually runs
     mock_build_docker.assert_called_once_with('opendaylight', '/dummytmp',
                                               docker_cmds, src_img_uri)
     mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
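
The two methods above read like members of a test class in which the mocks are injected bottom-up by stacked @mock.patch decorators. A minimal sketch of such a harness, assuming hypothetical patch targets (the real dotted paths live in the apex package and are not shown in this listing):

import unittest
from unittest import mock


class TestOvercloudBuilder(unittest.TestCase):

    # Decorators apply bottom-up: the lowest @mock.patch supplies the
    # first mock argument after self (mock_customize), and so on.
    @mock.patch('apex.builders.common_builder.create_git_archive')
    @mock.patch('apex.builders.common_builder.add_repo')
    @mock.patch('apex.virtual.utils.virt_customize')
    def test_inject_opendaylight(self, mock_customize, mock_add_repo,
                                 mock_git_archive):
        ...

    @mock.patch('apex.builders.overcloud_builder.build_dockerfile')
    @mock.patch('apex.builders.common_builder.create_git_archive')
    @mock.patch('apex.builders.common_builder.add_repo')
    @mock.patch('apex.virtual.utils.virt_customize')
    def test_inject_opendaylight_docker(self, mock_customize, mock_add_repo,
                                        mock_git_archive, mock_build_docker):
        ...
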
Example #3
def prep_image(ds,
               ns,
               img,
               tmp_dir,
               root_pw=None,
               docker_tag=None,
               patches=None,
               upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "rm -f /etc/systemd/system/multi-user.target.wants/"
            "neutron-openvswitch-agent.service"
        }, {
            con.VIRT_RUN_CMD:
            "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
            ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'http_proxy={}' >> /etc/environment".format(ns['http_proxy'])
        })

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'https_proxy={}' >> /etc/environment".format(
                ns['https_proxy'])
        })

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
            "/opt/quagga/etc/init.d/zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "chmod +x /opt/quagga/etc/init.d/"
            "zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.local "
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"
        })
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([{
                con.VIRT_UPLOAD:
                "{}:/etc/sysconfig/modules/".format(mod_file)
            }, {
                con.VIRT_RUN_CMD:
                "chmod 0755 /etc/sysconfig/modules/"
                "{}".format(os.path.basename(mod_file))
            }])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "yum -y install "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_KMOD_RPM)
        }, {
            con.VIRT_RUN_CMD:
            "yum downgrade -y "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_RPM)
        }])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "cd /usr/lib/python2.7/site-packages && patch "
            "-p1 < neutron-patch-NSDriver.patch"
        })
        if sdn is False:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum remove -y vpp-lib"
            }, {
                con.VIRT_RUN_CMD:
                "yum install -y "
                "/root/nosdn_vpp_rpms/*.rpm"
            }])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug(
        "Temporary overcloud image stored as: {}".format(tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "rm -rf /etc/puppet/modules/opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "cd /etc/puppet/modules && tar xzf "
                "/root/puppet-opendaylight-"
                "{}.tar.gz".format(ds_opts['odl_version'])
            }])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])
                }])
            else:
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "yum -y install /root/{}/*".format(ds_opts['odl_version'])
                }])

        elif ds_opts.get('odl_vpp_netvirt'):
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "yum -y install /root/{}/*".format(ODL_NETVIRT_VPP_RPM)
            }])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][
            con.ADMIN_NETWORK]['installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if sdn == 'ovn':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum update -y "
            "*openvswitch*"
        }, {
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum downgrade -y "
            "*openvswitch*"
        }])

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        # undercloud_admin_ip is otherwise only bound in the OpenDaylight
        # branch above; resolve it here so patching works for any SDN
        undercloud_admin_ip = ns['networks'][
            con.ADMIN_NETWORK]['installer_vm']['ip']
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image,
                                           tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # for container deployments with Ceph but no ceph_device set, use a
    # persistent loop device for the Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {
                con.VIRT_UPLOAD:
                "{}:/usr/lib/systemd/system/".format(tmp_losetup)
            },
            {
                con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'
            },
            {
                con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'
            },
            {
                con.VIRT_RUN_CMD: 'systemctl daemon-reload'
            },
            {
                con.VIRT_RUN_CMD: 'systemctl enable losetup.service'
            },
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
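
A hedged usage sketch of the function above: prep_image expects parsed deploy and network settings plus a source qcow2 and a scratch directory, and returns the (possibly empty) set of patched container names. The import paths and file locations below are illustrative assumptions, not the project's documented API:

import tempfile

# Hypothetical import paths for the settings parsers used by apex
from apex.settings.deploy_settings import DeploySettings
from apex.settings.network_settings import NetworkSettings

ds = DeploySettings('/etc/opnfv-apex/deploy_settings.yaml')   # illustrative
ns = NetworkSettings('/etc/opnfv-apex/network_settings.yaml')  # illustrative

with tempfile.TemporaryDirectory() as tmp_dir:
    # Writes the customized copy to <tmp_dir>/overcloud-full.qcow2
    patched = prep_image(ds, ns,
                         img='/var/opt/opnfv/images/overcloud-full.qcow2',
                         tmp_dir=tmp_dir,
                         root_pw=None,
                         docker_tag=None,
                         patches=None,
                         upstream=False)
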
Example #4
def main():
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(
        pprint.pformat(deploy_settings)))
    net_settings = NetworkSettings(args.network_settings_file)
    logging.info("Network settings are:\n {}".format(
        pprint.pformat(net_settings)))
    os_version = deploy_settings['deploy_options']['os_version']
    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
    net_env = NetworkEnvironment(net_settings,
                                 net_env_file,
                                 os_version=os_version)
    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
    utils.dump_yaml(dict(net_env), net_env_target)
    ha_enabled = deploy_settings['global_params']['ha_enabled']
    if args.virtual:
        if args.virt_compute_ram is None:
            compute_ram = args.virt_default_ram
        else:
            compute_ram = args.virt_compute_ram
        if deploy_settings['deploy_options']['sdn_controller'] == \
                'opendaylight' and args.virt_default_ram < 12:
            control_ram = 12
            logging.warning('RAM per controller is too low.  OpenDaylight '
                            'requires at least 12GB per controller.')
            logging.info('Increasing RAM per controller to 12GB')
        elif args.virt_default_ram < 10:
            control_ram = 10
            logging.warning('RAM per controller is too low.  nosdn '
                            'requires at least 10GB per controller.')
            logging.info('Increasing RAM per controller to 10GB')
        else:
            control_ram = args.virt_default_ram
        if ha_enabled and args.virt_compute_nodes < 2:
            logging.debug('HA enabled, bumping number of compute nodes to 2')
            args.virt_compute_nodes = 2
        virt_utils.generate_inventory(args.inventory_file,
                                      ha_enabled,
                                      num_computes=args.virt_compute_nodes,
                                      controller_ram=control_ram * 1024,
                                      compute_ram=compute_ram * 1024,
                                      vcpus=args.virt_cpus)
    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)

    validate_cross_settings(deploy_settings, net_settings, inventory)
    ds_opts = deploy_settings['deploy_options']
    if args.quickstart:
        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
                                            'apex_deploy_settings.yaml')
        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
                        deploy_settings_file)
        logging.info("File created: {}".format(deploy_settings_file))
        network_settings_file = os.path.join(APEX_TEMP_DIR,
                                             'apex_network_settings.yaml')
        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
                        network_settings_file)
        logging.info("File created: {}".format(network_settings_file))
        deploy_quickstart(args, deploy_settings_file, network_settings_file,
                          args.inventory_file)
    else:
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        ansible_args = {
            'virsh_enabled_networks': net_settings.enabled_network_list
        }
        utils.run_ansible(
            ansible_args,
            os.path.join(args.lib_dir, ANSIBLE_PATH,
                         'deploy_dependencies.yml'))
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

        upstream = (os_version != constants.DEFAULT_OS_VERSION
                    or args.upstream)
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)
        if upstream:
            logging.info("Deploying with upstream artifacts for OpenStack "
                         "{}".format(os_version))
            args.image_dir = os.path.join(args.image_dir, os_version)
            upstream_url = constants.UPSTREAM_RDO.replace(
                constants.DEFAULT_OS_VERSION, os_version)
            upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
            utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
                                            upstream_targets)
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
            if ds_opts['sdn_controller'] == 'opendaylight':
                logging.info("Preparing upstream image with OpenDaylight")
                oc_builder.inject_opendaylight(
                    odl_version=ds_opts['odl_version'],
                    image=sdn_image,
                    tmp_dir=APEX_TEMP_DIR)
            # copy undercloud so we don't taint upstream fetch
            uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
            uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
            shutil.copyfile(uc_fetch_img, uc_image)
            # prep undercloud with required packages
            uc_builder.add_upstream_packages(uc_image)
            # add patches from upstream to undercloud and overcloud
            logging.info('Adding patches to undercloud')
            patches = deploy_settings['global_params']['patches']
            c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                           APEX_TEMP_DIR, branch)
            logging.info('Adding patches to overcloud')
            c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
                                           APEX_TEMP_DIR, branch)
        else:
            sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
            uc_image = 'undercloud.qcow2'
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image))
        undercloud.start()

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Install Undercloud
        undercloud.configure(
            net_settings,
            os.path.join(args.lib_dir, ANSIBLE_PATH,
                         'configure_undercloud.yml'), APEX_TEMP_DIR)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(
                net_settings, net_data_file)
        else:
            net_data = False
        if upstream and args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        if not upstream:
            oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                               opnfv_env, net_env_target, APEX_TEMP_DIR)
            oc_deploy.prep_image(deploy_settings,
                                 sdn_image,
                                 APEX_TEMP_DIR,
                                 root_pw=root_pw)
        else:
            shutil.copyfile(
                sdn_image, os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
            shutil.copyfile(
                opnfv_env,
                os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env)))

        oc_deploy.create_deploy_cmd(deploy_settings,
                                    net_settings,
                                    inventory,
                                    APEX_TEMP_DIR,
                                    args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        for p_env in virt_env, bm_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['upstream'] = upstream
        deploy_vars['os_version'] = os_version
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars,
                              deploy_playbook,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check log")
            raise
        finally:
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
            'installer_vm']['ip']
        # TODO(trozet): this is probably redundant with getting external
        # network info from undercloud.py
        if 'external' in net_settings.enabled_network_list:
            ext_cidr = net_settings['networks']['external'][0]['cidr']
        else:
            ext_cidr = net_settings['networks']['admin']['cidr']
        deploy_vars['external_cidr'] = str(ext_cidr)
        if ext_cidr.version == 6:
            deploy_vars['external_network_ipv6'] = True
        else:
            deploy_vars['external_network_ipv6'] = False
        post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud playbook")
        try:
            utils.run_ansible(deploy_vars,
                              post_undercloud,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise
        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars,
                                  post_overcloud,
                                  host=ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))