Example #1
    def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
                               mock_prep_sriov):
        mock_sdn_list.return_value = []
        ds = {
            'deploy_options': {
                'ha_enabled': True,
                'congress': True,
                'tacker': True,
                'containers': False,
                'barometer': True,
                'ceph': False,
                'sriov': False
            },
            'global_params': MagicMock()
        }

        ns = {'ntp': ['ntp']}
        inv = MagicMock()
        inv.get_node_counts.return_value = (3, 2)
        virt = True
        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
        assert_in('--ntp-server ntp', result_cmd)
        assert_in('enable_tacker.yaml', result_cmd)
        assert_in('enable_congress.yaml', result_cmd)
        assert_in('enable_barometer.yaml', result_cmd)
        assert_in('virtual-environment.yaml', result_cmd)
        assert_in('--control-scale 3', result_cmd)
        assert_in('--compute-scale 2', result_cmd)
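
The three mock parameters imply @patch decorators that this listing strips. A plausible reconstruction, assuming the helpers are patched where create_deploy_cmd looks them up (the dotted targets below are guesses from the mock names, not confirmed Apex paths):

    from unittest.mock import patch

    # Decorators apply bottom-up: the one closest to the function supplies
    # the first mock argument (mock_sdn_list).
    @patch('apex.overcloud.deploy.prep_sriov_env')        # -> mock_prep_sriov
    @patch('apex.overcloud.deploy.prep_storage_env')      # -> mock_prep_storage
    @patch('apex.overcloud.deploy.get_docker_sdn_files')  # -> mock_sdn_list
    def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
                               mock_prep_sriov):
        ...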
Example #2
    def test_create_deploy_cmd_containers_sdn(self, mock_prep_storage,
                                              mock_prep_sriov):
        ds = {'deploy_options':
              {'ha_enabled': True,
               'congress': False,
               'tacker': False,
               'containers': True,
               'barometer': False,
               'vpn': False,
               'ceph': True,
               'sdn_controller': 'opendaylight',
               'sriov': False,
               'os_version': 'queens',
               'vim': 'openstack'
               },
              'global_params': MagicMock()}

        ns = {'ntp': ['ntp']}
        inv = MagicMock()
        inv.get_node_counts.return_value = (3, 2)
        virt = True
        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
        assert_in('--ntp-server ntp', result_cmd)
        assert_not_in('enable_tacker.yaml', result_cmd)
        assert_not_in('enable_congress.yaml', result_cmd)
        assert_not_in('enable_barometer.yaml', result_cmd)
        assert_in('virtual-environment.yaml', result_cmd)
        assert_in('--control-scale 3', result_cmd)
        assert_in('--compute-scale 2', result_cmd)
        assert_in('docker-images.yaml', result_cmd)
        assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
                  '/docker.yaml', result_cmd)
        assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
                  'storage-environment.yaml', result_cmd)
        assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
                  '/services/neutron-opendaylight.yaml', result_cmd)
        ds['deploy_options']['os_version'] = 'master'
        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
        assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
                  '/services/neutron-opendaylight.yaml', result_cmd)
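
Mutating ds['deploy_options']['os_version'] and calling create_deploy_cmd again works, but the two cases then share state. A parametrized variant keeps them independent; this is a sketch only, where build_cmd_fixtures is a hypothetical helper returning the ds/ns/inv trio assembled above:

    import pytest

    @pytest.mark.parametrize('os_version', ['queens', 'master'])
    def test_odl_env_file_selected(os_version):
        ds, ns, inv = build_cmd_fixtures()  # hypothetical helper
        ds['deploy_options']['os_version'] = os_version
        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', True)
        assert 'neutron-opendaylight.yaml' in result_cmd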
Example #3
    def test_create_deploy_cmd_no_ha_bm(self, mock_sdn_list, mock_prep_storage,
                                        mock_prep_sriov):
        mock_sdn_list.return_value = []
        ds = {'deploy_options': MagicMock(), 'global_params': MagicMock()}
        ds['global_params'].__getitem__.side_effect = \
            lambda i: False if i == 'ha_enabled' else MagicMock()
        ns = {'ntp': ['ntp']}
        inv = MagicMock()
        inv.get_node_counts.return_value = (3, 2)
        virt = False
        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
        assert_in('--ntp-server ntp', result_cmd)
        assert_in('--control-scale 1', result_cmd)
        assert_in('--compute-scale 2', result_cmd)
        assert_in('baremetal-environment.yaml', result_cmd)
        assert_not_in('enable_tacker.yaml', result_cmd)
        assert_not_in('enable_congress.yaml', result_cmd)
        assert_not_in('enable_barometer.yaml', result_cmd)
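
The side_effect wired onto global_params.__getitem__ is what turns HA off while every other lookup still returns a permissive mock. A self-contained illustration of the trick:

    from unittest.mock import MagicMock

    # Indexing a MagicMock routes through its __getitem__, so a side_effect
    # can answer one key specially and hand back fresh mocks for the rest.
    params = MagicMock()
    params.__getitem__.side_effect = \
        lambda key: False if key == 'ha_enabled' else MagicMock()

    assert params['ha_enabled'] is False
    assert isinstance(params['anything_else'], MagicMock)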
Example #4
File: deploy.py Project: dimitris76/apex
def main():
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    utils.install_ansible()
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(
        pprint.pformat(deploy_settings)))

    if not args.snapshot:
        net_settings = NetworkSettings(args.network_settings_file)
        logging.info("Network settings are:\n {}".format(
            pprint.pformat(net_settings)))
        os_version = deploy_settings['deploy_options']['os_version']
        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
        net_env = NetworkEnvironment(net_settings,
                                     net_env_file,
                                     os_version=os_version)
        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
        utils.dump_yaml(dict(net_env), net_env_target)

        # get global deploy params
        ha_enabled = deploy_settings['global_params']['ha_enabled']
        introspect = deploy_settings['global_params'].get('introspect', True)
        net_list = net_settings.enabled_network_list
        if args.virtual:
            if args.virt_compute_ram is None:
                compute_ram = args.virt_default_ram
            else:
                compute_ram = args.virt_compute_ram
            if (deploy_settings['deploy_options']['sdn_controller']
                    == 'opendaylight' and args.virt_default_ram < 12):
                control_ram = 12
                logging.warning('RAM per controller is too low.  OpenDaylight '
                                'requires at least 12GB per controller.')
                logging.info('Increasing RAM per controller to 12GB')
            elif args.virt_default_ram < 10:
                if platform.machine() == 'aarch64':
                    control_ram = 16
                    logging.warning('RAM per controller is too low for '
                                    'aarch64 ')
                    logging.info('Increasing RAM per controller to 16GB')
                else:
                    control_ram = 10
                    logging.warning('RAM per controller is too low.  nosdn '
                                    'requires at least 10GB per controller.')
                    logging.info('Increasing RAM per controller to 10GB')
            else:
                control_ram = args.virt_default_ram
            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
                vcpus = 16
                logging.warning('aarch64 requires at least 16 vCPUs per '
                                'target VM. Increasing to 16.')
            else:
                vcpus = args.virt_cpus
            if ha_enabled and args.virt_compute_nodes < 2:
                logging.debug(
                    'HA enabled, bumping number of compute nodes to 2')
                args.virt_compute_nodes = 2
            virt_utils.generate_inventory(args.inventory_file,
                                          ha_enabled,
                                          num_computes=args.virt_compute_nodes,
                                          controller_ram=control_ram * 1024,
                                          compute_ram=compute_ram * 1024,
                                          vcpus=vcpus)
        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
        logging.info("Inventory is:\n {}".format(pprint.pformat(inventory)))

        validate_cross_settings(deploy_settings, net_settings, inventory)
    else:
        # only one network with snapshots
        net_list = [constants.ADMIN_NETWORK]

    ds_opts = deploy_settings['deploy_options']
    ansible_args = {
        'virsh_enabled_networks': net_list,
        'snapshot': args.snapshot
    }
    utils.run_ansible(
        ansible_args,
        os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                     'deploy_dependencies.yml'))
    all_in_one = not bool(args.virt_compute_nodes)
    if args.snapshot:
        # Start snapshot Deployment
        logging.info('Executing Snapshot Deployment...')
        SnapshotDeployment(deploy_settings=deploy_settings,
                           snap_cache_dir=args.snap_cache,
                           fetch=not args.no_fetch,
                           all_in_one=all_in_one)
    else:
        # Start Standard TripleO Deployment
        deployment = ApexDeployment(deploy_settings, args.patches_file,
                                    args.deploy_settings_file)
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

        if not args.upstream:
            logging.warning("Using upstream is now required for Apex. "
                            "Forcing upstream to true")
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)

        logging.info("Deploying with upstream artifacts for OpenStack "
                     "{}".format(os_version))
        args.image_dir = os.path.join(args.image_dir, os_version)
        upstream_url = constants.UPSTREAM_RDO.replace(
            constants.DEFAULT_OS_VERSION, os_version)

        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
        if platform.machine() == 'aarch64':
            upstream_targets.append('undercloud.qcow2')
        utils.fetch_upstream_and_unpack(args.image_dir,
                                        upstream_url,
                                        upstream_targets,
                                        fetch=not args.no_fetch)
        # Copy ironic files and overcloud ramdisk and kernel into temp dir
        # to be copied by ansible into undercloud /home/stack
        # Note the overcloud disk does not need to be copied here as it will
        # be modified and copied later
        for tmp_file in UC_DISK_FILES:
            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
                            os.path.join(APEX_TEMP_DIR, tmp_file))
        if platform.machine() == 'aarch64':
            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
        else:
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        # copy undercloud so we don't taint upstream fetch
        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
        uc_fetch_img = sdn_image
        shutil.copyfile(uc_fetch_img, uc_image)
        # prep undercloud with required packages
        if platform.machine() != 'aarch64':
            uc_builder.update_repos(image=uc_image,
                                    branch=branch.replace('stable/', ''))
        uc_builder.add_upstream_packages(uc_image)
        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
        # add patches from upstream to undercloud and overcloud
        logging.info('Adding patches to undercloud')
        patches = deployment.determine_patches()
        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                       APEX_TEMP_DIR, branch)

        # Create/Start Undercloud VM
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image),
                                       os_version=os_version)
        undercloud.start()
        undercloud_admin_ip = net_settings['networks'][
            constants.ADMIN_NETWORK]['installer_vm']['ip']

        if ds_opts['containers']:
            tag = constants.DOCKER_TAG
        else:
            tag = None

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Prepare/Upload docker images
        docker_env = 'containers-prepare-parameter.yaml'
        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
                        os.path.join(APEX_TEMP_DIR, docker_env))
        # Upload extra ansible.cfg
        if platform.machine() == 'aarch64':
            ansible_env = 'ansible.cfg'
            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
                            os.path.join(APEX_TEMP_DIR, ansible_env))

        c_builder.prepare_container_images(
            os.path.join(APEX_TEMP_DIR, docker_env),
            branch=branch.replace('stable/', ''),
            neutron_driver=c_builder.get_neutron_driver(ds_opts))
        # Install Undercloud
        undercloud.configure(net_settings,
                             deploy_settings,
                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                          'configure_undercloud.yml'),
                             APEX_TEMP_DIR,
                             virtual_oc=args.virtual)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(
                net_settings, net_data_file)
        else:
            net_data = False

        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))

        # TODO(trozet): Either fix opnfv env or default to use upstream env
        if args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        oc_deploy.prep_env(deploy_settings, net_settings, inventory, opnfv_env,
                           net_env_target, APEX_TEMP_DIR)
        if not args.virtual:
            oc_deploy.LOOP_DEVICE_SIZE = "50G"
        if platform.machine() == 'aarch64':
            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        else:
            oc_image = sdn_image
        patched_containers = oc_deploy.prep_image(deploy_settings,
                                                  net_settings,
                                                  oc_image,
                                                  APEX_TEMP_DIR,
                                                  root_pw=root_pw,
                                                  docker_tag=tag,
                                                  patches=patches['overcloud'])

        oc_deploy.create_deploy_cmd(deploy_settings,
                                    net_settings,
                                    inventory,
                                    APEX_TEMP_DIR,
                                    args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        # Prepare undercloud with containers
        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'prepare_overcloud_containers.yml')
        if ds_opts['containers']:
            logging.info("Preparing Undercloud with Docker containers")
            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
            sdn_env_files = str()
            for sdn_file in sdn_env:
                sdn_env_files += " -e {}".format(sdn_file)
            if patched_containers:
                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
            container_vars = dict()
            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
            container_vars['patched_docker_services'] = list(
                patched_containers)
            container_vars['container_tag'] = constants.DOCKER_TAG
            container_vars['stackrc'] = 'source /home/stack/stackrc'
            container_vars['sdn'] = ds_opts['sdn_controller']
            container_vars['undercloud_ip'] = undercloud_admin_ip
            container_vars['os_version'] = os_version
            container_vars['aarch64'] = platform.machine() == 'aarch64'
            container_vars['sdn_env_file'] = sdn_env_files
            try:
                utils.run_ansible(container_vars,
                                  docker_playbook,
                                  host=undercloud.ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Container preparation complete")
            except Exception:
                logging.error("Unable to complete container prep on "
                              "Undercloud")
                for tmp_file in UC_DISK_FILES:
                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                raise

        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        k8s_env = 'kubernetes-environment.yaml'
        for p_env in virt_env, bm_env, k8s_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
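        # Introspection only applies to baremetal x86 deployments that have
        # not explicitly disabled it, hence the three-way negation below.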
        deploy_vars['introspect'] = not (args.virtual or deploy_vars['aarch64']
                                         or not introspect)
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['undercloud_ip'] = undercloud_admin_ip
        deploy_vars['ha_enabled'] = ha_enabled
        deploy_vars['os_version'] = os_version
        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
        deploy_vars['vim'] = ds_opts['vim']
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars,
                              deploy_playbook,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check deploy log as "
                          "well as mistral logs in "
                          "{}".format(
                              os.path.join(APEX_TEMP_DIR,
                                           'mistral_logs.tar.gz')))
            raise
        finally:
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
            for tmp_file in UC_DISK_FILES:
                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = [
                'overcloudrc', 'overcloudrc.v3'
            ]
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars,
                              post_undercloud,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory', 'apex'))
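            # Build a kubespray-style inventory: every overcloud node becomes
            # a host; controllers join kube-master and etcd, computes join
            # kube-node (see the loop below).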
            k8s_node_inventory = {
                'all': {
                    'hosts': {},
                    'children': {
                        'k8s-cluster': {
                            'children': {
                                'kube-master': {
                                    'hosts': {}
                                },
                                'kube-node': {
                                    'hosts': {}
                                }
                            }
                        },
                        'etcd': {
                            'hosts': {}
                        }
                    }
                }
            }
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': '******',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd']['hosts'][
                        node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            with open(
                    os.path.join(kubespray_dir, 'inventory', 'apex',
                                 'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory,
                          invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars,
                                  k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            try:
                utils.run_ansible(k8s_deploy_vars,
                                  k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed."
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        deploy_vars['all_in_one'] = all_in_one
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars,
                                  post_overcloud,
                                  host=ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
Example #5
def main():
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    utils.install_ansible()
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(
        pprint.pformat(deploy_settings)))
    net_settings = NetworkSettings(args.network_settings_file)
    logging.info("Network settings are:\n {}".format(
        pprint.pformat(net_settings)))
    os_version = deploy_settings['deploy_options']['os_version']
    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
    net_env = NetworkEnvironment(net_settings,
                                 net_env_file,
                                 os_version=os_version)
    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
    utils.dump_yaml(dict(net_env), net_env_target)

    # get global deploy params
    ha_enabled = deploy_settings['global_params']['ha_enabled']
    introspect = deploy_settings['global_params'].get('introspect', True)

    if args.virtual:
        if args.virt_compute_ram is None:
            compute_ram = args.virt_default_ram
        else:
            compute_ram = args.virt_compute_ram
        if deploy_settings['deploy_options']['sdn_controller'] == \
                'opendaylight' and args.virt_default_ram < 12:
            control_ram = 12
            logging.warning('RAM per controller is too low.  OpenDaylight '
                            'requires at least 12GB per controller.')
            logging.info('Increasing RAM per controller to 12GB')
        elif args.virt_default_ram < 10:
            control_ram = 10
            logging.warning('RAM per controller is too low.  nosdn '
                            'requires at least 10GB per controller.')
            logging.info('Increasing RAM per controller to 10GB')
        else:
            control_ram = args.virt_default_ram
        if ha_enabled and args.virt_compute_nodes < 2:
            logging.debug('HA enabled, bumping number of compute nodes to 2')
            args.virt_compute_nodes = 2
        virt_utils.generate_inventory(args.inventory_file,
                                      ha_enabled,
                                      num_computes=args.virt_compute_nodes,
                                      controller_ram=control_ram * 1024,
                                      compute_ram=compute_ram * 1024,
                                      vcpus=args.virt_cpus)
    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)

    validate_cross_settings(deploy_settings, net_settings, inventory)
    ds_opts = deploy_settings['deploy_options']
    if args.quickstart:
        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
                                            'apex_deploy_settings.yaml')
        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
                        deploy_settings_file)
        logging.info("File created: {}".format(deploy_settings_file))
        network_settings_file = os.path.join(APEX_TEMP_DIR,
                                             'apex_network_settings.yaml')
        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
                        network_settings_file)
        logging.info("File created: {}".format(network_settings_file))
        deploy_quickstart(args, deploy_settings_file, network_settings_file,
                          args.inventory_file)
    else:
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        ansible_args = {
            'virsh_enabled_networks': net_settings.enabled_network_list
        }
        utils.run_ansible(
            ansible_args,
            os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                         'deploy_dependencies.yml'))
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

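        # Use upstream artifacts when the requested OpenStack release differs
        # from the packaged default, or when --upstream is passed explicitly.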
        upstream = (os_version != constants.DEFAULT_OS_VERSION
                    or args.upstream)
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)
        if upstream:
            logging.info("Deploying with upstream artifacts for OpenStack "
                         "{}".format(os_version))
            args.image_dir = os.path.join(args.image_dir, os_version)
            upstream_url = constants.UPSTREAM_RDO.replace(
                constants.DEFAULT_OS_VERSION, os_version)
            upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
            utils.fetch_upstream_and_unpack(args.image_dir,
                                            upstream_url,
                                            upstream_targets,
                                            fetch=not args.no_fetch)
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
            # copy undercloud so we don't taint upstream fetch
            uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
            uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
            shutil.copyfile(uc_fetch_img, uc_image)
            # prep undercloud with required packages
            uc_builder.add_upstream_packages(uc_image)
            # add patches from upstream to undercloud and overcloud
            logging.info('Adding patches to undercloud')
            patches = deploy_settings['global_params']['patches']
            c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                           APEX_TEMP_DIR, branch)
        else:
            sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
            uc_image = 'undercloud.qcow2'
            # patches are ignored in non-upstream deployments
            patches = {'overcloud': [], 'undercloud': []}
        # Create/Start Undercloud VM
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image),
                                       os_version=os_version)
        undercloud.start()
        undercloud_admin_ip = net_settings['networks'][
            constants.ADMIN_NETWORK]['installer_vm']['ip']

        if upstream and ds_opts['containers']:
            tag = constants.DOCKER_TAG
        else:
            tag = None

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Install Undercloud
        undercloud.configure(net_settings,
                             deploy_settings,
                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                          'configure_undercloud.yml'),
                             APEX_TEMP_DIR,
                             virtual_oc=args.virtual)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(
                net_settings, net_data_file)
        else:
            net_data = False
        if upstream and args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        if not upstream:
            # TODO(trozet): Invoke with containers after Fraser migration
            oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                               opnfv_env, net_env_target, APEX_TEMP_DIR)
        else:
            shutil.copyfile(
                opnfv_env,
                os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env)))
        patched_containers = oc_deploy.prep_image(deploy_settings,
                                                  net_settings,
                                                  sdn_image,
                                                  APEX_TEMP_DIR,
                                                  root_pw=root_pw,
                                                  docker_tag=tag,
                                                  patches=patches['overcloud'],
                                                  upstream=upstream)

        oc_deploy.create_deploy_cmd(deploy_settings,
                                    net_settings,
                                    inventory,
                                    APEX_TEMP_DIR,
                                    args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        # Prepare undercloud with containers
        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'prepare_overcloud_containers.yml')
        if ds_opts['containers']:
            ceph_version = constants.CEPH_VERSION_MAP[ds_opts['os_version']]
            ceph_docker_image = "ceph/daemon:tag-build-master-" \
                                "{}-centos-7".format(ceph_version)
            logging.info("Preparing Undercloud with Docker containers")
            if patched_containers:
                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
            container_vars = dict()
            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
            container_vars['patched_docker_services'] = list(
                patched_containers)
            container_vars['container_tag'] = constants.DOCKER_TAG
            container_vars['stackrc'] = 'source /home/stack/stackrc'
            container_vars['upstream'] = upstream
            container_vars['sdn'] = ds_opts['sdn_controller']
            container_vars['undercloud_ip'] = undercloud_admin_ip
            container_vars['os_version'] = os_version
            container_vars['ceph_docker_image'] = ceph_docker_image
            container_vars['sdn_env_file'] = \
                oc_deploy.get_docker_sdn_file(ds_opts)
            try:
                utils.run_ansible(container_vars,
                                  docker_playbook,
                                  host=undercloud.ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Container preparation complete")
            except Exception:
                logging.error("Unable to complete container prep on "
                              "Undercloud")
                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                raise

        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        for p_env in virt_env, bm_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
        deploy_vars['introspect'] = not (args.virtual or deploy_vars['aarch64']
                                         or not introspect)
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['upstream'] = upstream
        deploy_vars['os_version'] = os_version
        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars,
                              deploy_playbook,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check log")
            raise
        finally:
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = [
                'overcloudrc', 'overcloudrc.v3'
            ]
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud playbook")
        try:
            utils.run_ansible(deploy_vars,
                              post_undercloud,
                              host=undercloud.ip,
                              user='******',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise
        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars,
                                  post_overcloud,
                                  host=ip,
                                  user='******',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))