Example #1
0
 def test_create_networks_invalid_cache(self, mock_deploy_snap,
                                        mock_libvirt_open, mock_pull_snap,
                                        mock_oc_node):
     """create_networks raises when the snapshot cache dir does not exist."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     # Point the deployment at a path that cannot exist.
     deployment.snap_cache_dir = '/doesnotexist/'
     self.assertRaises(exc.SnapshotDeployException,
                       deployment.create_networks)
Example #2
0
 def test_create_networks(self, mock_deploy_snap, mock_libvirt_open,
                          mock_pull_snap, mock_oc_node):
     """Networks found in the snapshot cache are created through libvirt."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     libvirt_conn = mock_libvirt_open('qemu:///system')
     deployment.create_networks()
     libvirt_conn.networkCreateXML.assert_called()
Example #3
0
 def test_get_controllers_none(self, mock_deploy_snap, mock_libvirt_open,
                               mock_pull_snap, mock_oc_node):
     """get_controllers returns an empty list when no node has that role."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     # The only overcloud node is a compute, so no controllers match.
     compute_node = mock_oc_node()
     compute_node.role = 'compute'
     deployment.oc_nodes = [compute_node]
     self.assertListEqual(deployment.get_controllers(), [])
Example #4
0
 def test_parse_and_create_nodes(self, mock_deploy_snap, mock_libvirt_open,
                                 mock_pull_snap, mock_oc_node):
     """parse_and_create_nodes starts each node and records it in oc_nodes."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     oc_node = mock_oc_node()
     deployment.parse_and_create_nodes()
     oc_node.start.assert_called()
     self.assertListEqual([oc_node], deployment.oc_nodes)
Example #5
0
 def test_pull_snapshot_is_latest(self, mock_fetch_props,
                                  mock_fetch_artifact):
     """No artifact download happens when the cached snapshot is current."""
     # Properties reported by the remote mirror for the latest snapshot.
     snap_props = {
         'OPNFV_SNAP_URL':
         'artifacts.opnfv.org/apex/master/noha/'
         'apex-csit-snap-2018-08-05.tar.gz',
         'OPNFV_SNAP_SHA512SUM':
         'bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
         '6e36a74d05ee813584f3e5bb92aa23dec77584631'
         '7b75d574f8c86186c666f78a299c24fb68849897b'
         'dd4bc'
     }
     mock_fetch_props.return_value = snap_props
     SnapshotDeployment.pull_snapshot('http://dummy_url', TEST_DUMMY_CONFIG)
     mock_fetch_artifact.assert_not_called()
Example #6
0
 def test_is_opendaylight_up(self, mock_deploy_snap, mock_libvirt_open,
                             mock_pull_snap, mock_oc_node, mock_utils,
                             mock_time, mock_get_ctrls):
     """is_service_up('opendaylight') is True when the ODL page responds."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     controller = mock_oc_node()
     controller.ip = '123.123.123.123'
     controller.name = 'dummy-controller-0'
     mock_get_ctrls.return_value = [controller]
     # open_webpage returning 0 indicates the controller web page loaded.
     mock_utils.open_webpage.return_value = 0
     self.assertTrue(deployment.is_service_up('opendaylight'))
Example #7
0
 def test_is_openstack_up_false(self, mock_deploy_snap, mock_libvirt_open,
                                mock_pull_snap, mock_oc_node, mock_socket,
                                mock_time, mock_get_ctrls):
     """is_service_up('openstack') is False when the TCP connect fails."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     controller = mock_oc_node()
     controller.ip = '123.123.123.123'
     controller.name = 'dummy-controller-0'
     mock_get_ctrls.return_value = [controller]
     # Non-zero connect_ex result simulates an unreachable API endpoint.
     sock = mock_socket.socket(mock_socket.AF_INET, mock_socket.SOCK_STREAM)
     sock.connect_ex.return_value = 1
     self.assertFalse(deployment.is_service_up('openstack'))
Example #8
0
 def test_parse_and_create_nodes_invalid_node_yaml(self, mock_deploy_snap,
                                                   mock_libvirt_open,
                                                   mock_pull_snap,
                                                   mock_oc_node,
                                                   mock_parse_yaml):
     """Malformed node YAML aborts creation before any node is started."""
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     deployment = SnapshotDeployment(deploy_settings=settings,
                                     snap_cache_dir=DUMMY_SNAP_DIR,
                                     fetch=False,
                                     all_in_one=False)
     deployment.snap_cache_dir = TEST_DUMMY_CONFIG
     oc_node = mock_oc_node()
     # YAML lacking the expected node definition keys.
     mock_parse_yaml.return_value = {'blah': 'dummy'}
     self.assertRaises(exc.SnapshotDeployException,
                       deployment.parse_and_create_nodes)
     oc_node.start.assert_not_called()
Example #9
0
    def test_init(self, mock_deploy_snap, mock_libvirt_open, mock_pull_snap):
        """Constructor resolves the cache dir and triggers pull + deploy."""
        settings = DeploySettings(
            os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
        deployment = SnapshotDeployment(deploy_settings=settings,
                                        snap_cache_dir=DUMMY_SNAP_DIR,
                                        fetch=True,
                                        all_in_one=False)
        # Cache dir is expanded to <cache>/<os version>/<ha extension>.
        expected_cache = os.path.join(DUMMY_SNAP_DIR, 'queens', 'noha')
        self.assertEqual(deployment.snap_cache_dir, expected_cache)
        mock_pull_snap.assert_called()
        mock_deploy_snap.assert_called()
        self.assertEqual(deployment.ha_ext, 'noha')
Example #10
0
 def test_deploy_snapshot(self, mock_create_networks, mock_libvirt_open,
                          mock_pull_snap, mock_parse_create,
                          mock_service_up, mock_is_file):
     """Constructing the deployment drives the full snapshot deploy flow."""
     mock_is_file.return_value = True
     settings = DeploySettings(
         os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml'))
     # The constructor itself performs the deployment; no handle needed.
     SnapshotDeployment(deploy_settings=settings,
                        snap_cache_dir=DUMMY_SNAP_DIR,
                        fetch=False,
                        all_in_one=False)
     mock_parse_create.assert_called()
     mock_create_networks.assert_called()
     mock_service_up.assert_called()
Example #11
0
File: deploy.py  Project: dimitris76/apex
def main():
    """Entry point for an Apex deployment.

    Parses CLI arguments, configures file + console logging, then runs one
    of two paths:

    * snapshot deployment (``args.snapshot``): only the admin network is
      prepared and ``SnapshotDeployment`` does the rest, or
    * full TripleO deployment: validate settings/inventory, build or attach
      node infrastructure, create and configure the undercloud VM, prepare
      images/containers, deploy the overcloud via ansible playbooks,
      optionally provision kubernetes with kubespray, and finish with
      post-deploy configuration of the undercloud and every overcloud node.

    Raises whatever the underlying ansible/image-prep helpers raise; several
    failure paths clean up temp disk images before re-raising.
    """
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    # Log to file, and mirror everything to the console at the same level.
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    utils.install_ansible()
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(
        pprint.pformat(deploy_settings)))

    if not args.snapshot:
        net_settings = NetworkSettings(args.network_settings_file)
        logging.info("Network settings are:\n {}".format(
            pprint.pformat(net_settings)))
        os_version = deploy_settings['deploy_options']['os_version']
        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
        net_env = NetworkEnvironment(net_settings,
                                     net_env_file,
                                     os_version=os_version)
        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
        utils.dump_yaml(dict(net_env), net_env_target)

        # get global deploy params
        ha_enabled = deploy_settings['global_params']['ha_enabled']
        introspect = deploy_settings['global_params'].get('introspect', True)
        net_list = net_settings.enabled_network_list
        if args.virtual:
            # Derive VM sizing for virtual deployments; controller RAM is
            # bumped to a per-SDN/per-arch minimum when set too low.
            if args.virt_compute_ram is None:
                compute_ram = args.virt_default_ram
            else:
                compute_ram = args.virt_compute_ram
            if (deploy_settings['deploy_options']['sdn_controller']
                    == 'opendaylight' and args.virt_default_ram < 12):
                control_ram = 12
                logging.warning('RAM per controller is too low.  OpenDaylight '
                                'requires at least 12GB per controller.')
                logging.info('Increasing RAM per controller to 12GB')
            elif args.virt_default_ram < 10:
                if platform.machine() == 'aarch64':
                    control_ram = 16
                    logging.warning('RAM per controller is too low for '
                                    'aarch64 ')
                    logging.info('Increasing RAM per controller to 16GB')
                else:
                    control_ram = 10
                    logging.warning('RAM per controller is too low.  nosdn '
                                    'requires at least 10GB per controller.')
                    logging.info('Increasing RAM per controller to 10GB')
            else:
                control_ram = args.virt_default_ram
            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
                vcpus = 16
                logging.warning('aarch64 requires at least 16 vCPUS per '
                                'target VM. Increasing to 16.')
            else:
                vcpus = args.virt_cpus
            if ha_enabled and args.virt_compute_nodes < 2:
                logging.debug(
                    'HA enabled, bumping number of compute nodes to 2')
                args.virt_compute_nodes = 2
            # RAM values above are GB; generate_inventory expects MB.
            virt_utils.generate_inventory(args.inventory_file,
                                          ha_enabled,
                                          num_computes=args.virt_compute_nodes,
                                          controller_ram=control_ram * 1024,
                                          compute_ram=compute_ram * 1024,
                                          vcpus=vcpus)
        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
        logging.info("Inventory is:\n {}".format(pprint.pformat(inventory)))

        validate_cross_settings(deploy_settings, net_settings, inventory)
    else:
        # only one network with snapshots
        net_list = [constants.ADMIN_NETWORK]

    ds_opts = deploy_settings['deploy_options']
    # Host-level dependencies (virsh networks etc.) are installed via ansible
    # regardless of deployment type.
    ansible_args = {
        'virsh_enabled_networks': net_list,
        'snapshot': args.snapshot
    }
    utils.run_ansible(
        ansible_args,
        os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                     'deploy_dependencies.yml'))
    # No compute nodes requested means an all-in-one deployment.
    all_in_one = not bool(args.virt_compute_nodes)
    if args.snapshot:
        # Start snapshot Deployment
        logging.info('Executing Snapshot Deployment...')
        SnapshotDeployment(deploy_settings=deploy_settings,
                           snap_cache_dir=args.snap_cache,
                           fetch=not args.no_fetch,
                           all_in_one=all_in_one)
    else:
        # Start Standard TripleO Deployment
        deployment = ApexDeployment(deploy_settings, args.patches_file,
                                    args.deploy_settings_file)
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            # Known password eases debugging of a failed deploy.
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

        if not args.upstream:
            # NOTE: --upstream is no longer optional; warn and proceed as if
            # it had been set.
            logging.warning("Using upstream is now required for Apex. "
                            "Forcing upstream to true")
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)

        logging.info("Deploying with upstream artifacts for OpenStack "
                     "{}".format(os_version))
        args.image_dir = os.path.join(args.image_dir, os_version)
        upstream_url = constants.UPSTREAM_RDO.replace(
            constants.DEFAULT_OS_VERSION, os_version)

        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
        if platform.machine() == 'aarch64':
            upstream_targets.append('undercloud.qcow2')
        utils.fetch_upstream_and_unpack(args.image_dir,
                                        upstream_url,
                                        upstream_targets,
                                        fetch=not args.no_fetch)
        # Copy ironic files and overcloud ramdisk and kernel into temp dir
        # to be copied by ansible into undercloud /home/stack
        # Note the overcloud disk does not need to be copied here as it will
        # be modified and copied later
        for tmp_file in UC_DISK_FILES:
            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
                            os.path.join(APEX_TEMP_DIR, tmp_file))
        if platform.machine() == 'aarch64':
            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
        else:
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        # copy undercloud so we don't taint upstream fetch
        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
        uc_fetch_img = sdn_image
        shutil.copyfile(uc_fetch_img, uc_image)
        # prep undercloud with required packages
        if platform.machine() != 'aarch64':
            uc_builder.update_repos(image=uc_image,
                                    branch=branch.replace('stable/', ''))
        uc_builder.add_upstream_packages(uc_image)
        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
        # add patches from upstream to undercloud and overcloud
        logging.info('Adding patches to undercloud')
        patches = deployment.determine_patches()
        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                       APEX_TEMP_DIR, branch)

        # Create/Start Undercloud VM
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image),
                                       os_version=os_version)
        undercloud.start()
        undercloud_admin_ip = net_settings['networks'][
            constants.ADMIN_NETWORK]['installer_vm']['ip']

        if ds_opts['containers']:
            tag = constants.DOCKER_TAG
        else:
            tag = None

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Prepare/Upload docker images
        docker_env = 'containers-prepare-parameter.yaml'
        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
                        os.path.join(APEX_TEMP_DIR, docker_env))
        # Upload extra ansible.cfg
        if platform.machine() == 'aarch64':
            ansible_env = 'ansible.cfg'
            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
                            os.path.join(APEX_TEMP_DIR, ansible_env))

        c_builder.prepare_container_images(
            os.path.join(APEX_TEMP_DIR, docker_env),
            branch=branch.replace('stable/', ''),
            neutron_driver=c_builder.get_neutron_driver(ds_opts))
        # Install Undercloud
        undercloud.configure(net_settings,
                             deploy_settings,
                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                          'configure_undercloud.yml'),
                             APEX_TEMP_DIR,
                             virtual_oc=args.virtual)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(
                net_settings, net_data_file)
        else:
            net_data = False

        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))

        # TODO(trozet): Either fix opnfv env or default to use upstream env
        if args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        oc_deploy.prep_env(deploy_settings, net_settings, inventory, opnfv_env,
                           net_env_target, APEX_TEMP_DIR)
        if not args.virtual:
            oc_deploy.LOOP_DEVICE_SIZE = "50G"
        if platform.machine() == 'aarch64':
            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        else:
            oc_image = sdn_image
        patched_containers = oc_deploy.prep_image(deploy_settings,
                                                  net_settings,
                                                  oc_image,
                                                  APEX_TEMP_DIR,
                                                  root_pw=root_pw,
                                                  docker_tag=tag,
                                                  patches=patches['overcloud'])

        oc_deploy.create_deploy_cmd(deploy_settings,
                                    net_settings,
                                    inventory,
                                    APEX_TEMP_DIR,
                                    args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        # Prepare undercloud with containers
        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'prepare_overcloud_containers.yml')
        if ds_opts['containers']:
            logging.info("Preparing Undercloud with Docker containers")
            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
            sdn_env_files = str()
            for sdn_file in sdn_env:
                sdn_env_files += " -e {}".format(sdn_file)
            if patched_containers:
                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
            container_vars = dict()
            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
            container_vars['patched_docker_services'] = list(
                patched_containers)
            container_vars['container_tag'] = constants.DOCKER_TAG
            container_vars['stackrc'] = 'source /home/stack/stackrc'
            container_vars['sdn'] = ds_opts['sdn_controller']
            container_vars['undercloud_ip'] = undercloud_admin_ip
            container_vars['os_version'] = os_version
            container_vars['aarch64'] = platform.machine() == 'aarch64'
            container_vars['sdn_env_file'] = sdn_env_files
            try:
                utils.run_ansible(container_vars,
                                  docker_playbook,
                                  host=undercloud.ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Container preparation complete")
            except Exception:
                # Clean up staged disk images before propagating the failure.
                logging.error("Unable to complete container prep on "
                              "Undercloud")
                for tmp_file in UC_DISK_FILES:
                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                raise

        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        k8s_env = 'kubernetes-environment.yaml'
        for p_env in virt_env, bm_env, k8s_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
        # Introspection is skipped for virtual/aarch64 deploys or when
        # disabled in global_params.
        deploy_vars['introspect'] = not (args.virtual or deploy_vars['aarch64']
                                         or not introspect)
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['undercloud_ip'] = undercloud_admin_ip
        deploy_vars['ha_enabled'] = ha_enabled
        deploy_vars['os_version'] = os_version
        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
        deploy_vars['vim'] = ds_opts['vim']
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars,
                              deploy_playbook,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check deploy log as "
                          "well as mistral logs in "
                          "{}".format(
                              os.path.join(APEX_TEMP_DIR,
                                           'mistral_logs.tar.gz')))
            raise
        finally:
            # Staged disk images are removed whether the deploy succeeded
            # or not.
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
            for tmp_file in UC_DISK_FILES:
                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = [
                'overcloudrc', 'overcloudrc.v3'
            ]
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars,
                              post_undercloud,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory', 'apex'))
            # Kubespray inventory skeleton: masters/etcd on controllers,
            # workers on computes (filled in below).
            k8s_node_inventory = {
                'all': {
                    'hosts': {},
                    'children': {
                        'k8s-cluster': {
                            'children': {
                                'kube-master': {
                                    'hosts': {}
                                },
                                'kube-node': {
                                    'hosts': {}
                                }
                            }
                        },
                        'etcd': {
                            'hosts': {}
                        }
                    }
                }
            }
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': '******',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd']['hosts'][
                        node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            with open(
                    os.path.join(kubespray_dir, 'inventory', 'apex',
                                 'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory,
                          invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars,
                                  k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            try:
                utils.run_ansible(k8s_deploy_vars,
                                  k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed."
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        deploy_vars['all_in_one'] = all_in_one
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars,
                                  post_overcloud,
                                  host=ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))