Example #1
    def determine_patches(self):
        patches = self.ds_globals['patches']
        if not os.path.isfile(self.p_file):
            new_file = os.path.join(os.path.dirname(self.ds_file),
                                    'common-patches.yaml')
            if os.path.isfile(new_file):
                logging.warning('Patch file {} not found, falling back to '
                                '{}'.format(self.p_file, new_file))
                self.p_file = new_file
            else:
                logging.error('Unable to find common patch file: '
                              '{}'.format(self.p_file))
                raise ApexDeployException(
                    'Specified common patch file not found: {}'.format(
                        self.p_file))
        logging.info('Loading patches from common patch file {}'.format(
            self.p_file))
        common_patches = utils.parse_yaml(self.p_file)
        logging.debug('Content from common patch file is: {}'.format(
            pprint.pformat(common_patches)))
        os_version = self.ds['deploy_options']['os_version']
        try:
            common_patches = common_patches['patches'][os_version]
        except KeyError:
            logging.error('Error parsing common patches file, wrong format.')
            raise ApexDeployException('Invalid format of common patch file')

        for ptype in ('undercloud', 'overcloud'):
            if ptype in common_patches:
                patches[ptype] = utils.unique(patches[ptype] +
                                              common_patches[ptype])
        return patches
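
The lookups above imply a specific layout for the common patch file. Below is a minimal sketch of the structure utils.parse_yaml() is expected to return, assuming the file is plain YAML; the patch entries themselves are purely illustrative, not taken from the source:

# Hedged sketch: the top-level 'patches' key is indexed by
# deploy_options['os_version'], and each ptype ('undercloud'/'overcloud')
# maps to a list that gets merged into the deploy settings patches.
common_patches = {
    'patches': {
        'queens': {                                    # os_version key (illustrative)
            'undercloud': [
                {'change-id': 'I0123456789abcdef',     # illustrative entry
                 'project': 'openstack/tripleo-common'},
            ],
            'overcloud': [
                {'change-id': 'Ifedcba9876543210',     # illustrative entry
                 'project': 'openstack/neutron'},
            ],
        }
    }
}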
Example #2
def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args:
    :return: None
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        logging.debug('Skipping inventory validation as it is not applicable '
                      'to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif not os.path.isfile(args.inventory_file):
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        if settings_file == args.network_settings_file and args.snapshot:
            continue
        if os.path.isfile(settings_file) is False:
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))
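
A minimal usage sketch, assuming the caller passes an argparse-style namespace; the attribute names mirror what the function reads, the file paths are illustrative, and the call raises ApexDeployException if the referenced files are missing or the combination is invalid:

from argparse import Namespace

args = Namespace(
    snapshot=False,
    virtual=True,
    inventory_file=None,  # auto-filled for virtual deployments
    deploy_settings_file='/etc/opnfv-apex/deploy_settings.yaml',    # illustrative
    network_settings_file='/etc/opnfv-apex/network_settings.yaml',  # illustrative
)
validate_deploy_args(args)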
Example #3
def parse_nova_output(in_file):
    """
    Parses nova list output into a dictionary format for node name and ip
    :param in_file: json format from openstack server list
    :return: dictionary format for {"node name": "node ip"}
    """
    if not os.path.isfile(in_file):
        raise FileNotFoundError(in_file)
    node_dict = dict()
    with open(in_file, 'r') as fh:
        nova_list = json.load(fh)

    for server in nova_list:
        ip_match = re.search(r'([0-9]+\.){3}[0-9]+', server['Networks'])
        if ip_match is None:
            logging.error("Unable to find IP in nova output "
                          "{}".format(pprint.pformat(server, indent=4)))
            raise ApexDeployException("Unable to parse IP from nova output")
        else:
            node_dict[server['Name']] = ip_match.group(0)

    if not node_dict:
        raise ApexDeployException(
            "No overcloud nodes found in: {}".format(in_file))
    return node_dict
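
A short usage sketch, assuming the JSON file was produced by the OpenStack client as the docstring describes (the file path is illustrative):

# openstack server list -f json > /tmp/nova_output.json
nodes = parse_nova_output('/tmp/nova_output.json')
for name, ip in nodes.items():
    print("{}: {}".format(name, ip))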
Example #4
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']))
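
The fileinput.input(..., inplace=True) loop above is what lets the function edit the copied template in place: while the loop runs, stdout is redirected into the file, so every line must either be re-printed unchanged or replaced. A standalone sketch of the same pattern, with an illustrative file name and key:

import fileinput

# Substitute one key in settings.yaml and echo every other line back out.
for line in fileinput.input('settings.yaml', inplace=True):
    line = line.rstrip('\n')
    if 'SomeKey' in line:
        print("  SomeKey: new-value")
    else:
        print(line)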
Example #5
def create_nic_template(network_settings, deploy_settings, role, template_dir,
                        target_dir):
    """
    Creates NIC heat template files
    :param network_settings: Network settings
    :param deploy_settings: Deploy settings
    :param role: controller or compute
    :param template_dir: directory where base templates are stored
    :param target_dir: to store rendered nic template
    :return:
    """
    # TODO(trozet): rather than use Jinja2 to build these files, use with py
    if role not in ['controller', 'compute']:
        raise ApexDeployException("Invalid role for overcloud node: "
                                  "{}".format(role))
    logging.info("Creating template for {}".format(role))
    template_file = 'nics-template.yaml.jinja2'
    nets = network_settings.get('networks')
    env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
    template = env.get_template(template_file)
    ds = deploy_settings.get('deploy_options')
    ext_net = 'br-ex'
    ovs_dpdk_br = ''
    if ds['dataplane'] == 'fdio':
        nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
        if ds['sdn_controller'] == 'opendaylight':
            if role == 'compute':
                nets['external'][0]['nic_mapping'][role]['phys_type'] = \
                    'vpp_interface'
                ext_net = 'vpp_interface'
            if ds.get('dvr') is True:
                nets['admin']['nic_mapping'][role]['phys_type'] = \
                    'linux_bridge'
    elif ds['dataplane'] == 'ovs_dpdk':
        ovs_dpdk_br = 'br-phy'
    if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
            .get('uio-driver')):
        nets['tenant']['nic_mapping'][role]['uio-driver'] =\
            ds['performance'][role.title()]['vpp']['uio-driver']
        if ds['sdn_controller'] == 'opendaylight' and role == 'compute':
            nets['external'][0]['nic_mapping'][role]['uio-driver'] =\
                ds['performance'][role.title()]['vpp']['uio-driver']
    if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
            .get('interface-options')):
        nets['tenant']['nic_mapping'][role]['interface-options'] =\
            ds['performance'][role.title()]['vpp']['interface-options']

    template_output = template.render(
        nets=nets,
        role=role,
        external_net_af=network_settings.get_ip_addr_family(),
        external_net_type=ext_net,
        ovs_dpdk_bridge=ovs_dpdk_br)

    logging.debug("Template output: {}".format(template_output))
    target = os.path.join(target_dir, "{}.yaml".format(role))
    with open(target, "w") as f:
        f.write(template_output)
    logging.info("Wrote template {}".format(target))
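
A hedged usage sketch rendering both NIC templates; network_settings and deploy_settings stand for the parsed settings objects used elsewhere in this module, and the directory paths are illustrative:

for role in ('controller', 'compute'):
    create_nic_template(network_settings, deploy_settings, role,
                        template_dir='/usr/share/opnfv-apex/nics',  # illustrative
                        target_dir='/tmp/apex-nics')                # illustrative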
Example #6
def create_deploy_cmd(ds,
                      ns,
                      inv,
                      tmp_dir,
                      virtual,
                      env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(
            os.path.join(con.THT_ENV_DIR, 'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(
            os.path.join(con.THT_ENV_DIR, 'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
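
For orientation, a hedged illustration of the kind of command string this builds; the timeout, NTP server, node counts, and environment-file selection are placeholders rather than values from the source, and the generated string is a single line (wrapped here for readability):

# openstack overcloud deploy --templates --timeout 90 --libvirt-type kvm \
#   -e network-environment.yaml -e opnfv-environment.yaml \
#   -e virtual-environment.yaml \
#   --ntp-server 0.pool.ntp.org --control-scale 1 --compute-scale 1 \
#   --control-flavor control --compute-flavor compute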
Example #7
def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings file.
    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file
    :return: None
    """

    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")
Example #8
File: deploy.py Project: radez/apex
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\"".format(
                sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
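
The first two branches above effectively uncomment template keys: assuming the source template ships those lines commented out with a leading "  #", line[3:] drops that prefix and the print() re-indents the bare key. A tiny sketch of that slice under the same assumption (the filter list is illustrative):

line = "  #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter']"
print("  {}".format(line[3:]))
# -> "  NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter']"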
Example #9
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "rm -f /etc/systemd/system/multi-user.target.wants/"
            "neutron-openvswitch-agent.service"
        }, {
            con.VIRT_RUN_CMD:
            "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
            ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
            "/opt/quagga/etc/init.d/zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "chmod +x /opt/quagga/etc/init.d/"
            "zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.local "
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"
        })
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([{
                con.VIRT_UPLOAD:
                "{}:/etc/sysconfig/modules/".format(mod_file)
            }, {
                con.VIRT_RUN_CMD:
                "chmod 0755 /etc/sysconfig/modules/"
                "{}".format(os.path.basename(mod_file))
            }])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "yum -y install "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_KMOD_RPM)
        }, {
            con.VIRT_RUN_CMD:
            "yum downgrade -y "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_RPM)
        }])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add a generic Linux namespace interface driver
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "cd /usr/lib/python2.7/site-packages && patch "
            "-p1 < neutron-patch-NSDriver.patch"
        })
        if sdn is False:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum remove -y vpp-lib"
            }, {
                con.VIRT_RUN_CMD:
                "yum install -y "
                "/root/nosdn_vpp_rpms/*.rpm"
            }])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "rm -rf /etc/puppet/modules/opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "cd /etc/puppet/modules && tar xzf "
                "/root/puppet-opendaylight-"
                "{}.tar.gz".format(ds_opts['odl_version'])
            }])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])
                }])
            else:
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "yum -y install /root/{}/*".format(ds_opts['odl_version'])
                }])

        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "yum -y install /root/{}/*".format(ODL_NETVIRT_VPP_RPM)
            }])

    if sdn == 'ovn':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum update -y "
            "*openvswitch*"
        }, {
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum downgrade -y "
            "*openvswitch*"
        }])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug(
        "Temporary overcloud image stored as: {}".format(tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
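
A hedged sketch of the command-list format virt_utils.virt_customize() is fed here: a list of single-key dicts keyed by the con.VIRT_* constants used above, applied to the temporary qcow2 copy. The individual commands and password below are illustrative:

virt_cmds = [
    {con.VIRT_RUN_CMD: "rm -f /usr/lib/systemd/system/"
                       "neutron-openvswitch-agent.service"},
    {con.VIRT_UPLOAD: "/tmp/vfio_pci.modules:/etc/sysconfig/modules/"},
    {con.VIRT_PW: "password:somepassword"},
]
virt_utils.virt_customize(virt_cmds, '/tmp/overcloud-full.qcow2')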
Example #10
File: deploy.py Project: radez/apex
def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][
            con.ADMIN_NETWORK]['installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }
        if not ds['global_params']['ha_enabled']:
            ceph_params['CephPoolDefaultSize'] = 1

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
            ceph_params['CephPoolDefaultPgNum'] = 32
        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
            ceph_device = ds_opts['ceph_device']
        else:
            # TODO(trozet): make this DS default after Fraser
            ceph_device = '/dev/loop3'

        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']))
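
For the containerized path, a hedged sketch of the parameter_defaults that utils.edit_tht_env() merges into storage-environment.yaml for a virtual, non-HA deployment. The values mirror the ceph_params built above; the undercloud IP and the Ceph version string in the image tag are illustrative:

ceph_params = {
    'DockerCephDaemonImage':
        '192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos-7',
    'CephPoolDefaultSize': 1,          # non-HA deployment
    'CephPoolDefaultPgNum': 32,        # virtual deployment
    'CephAnsibleExtraConfig': {
        'centos_package_dependencies': [],
        'ceph_osd_docker_memory_limit': '1g',
        'ceph_mds_docker_memory_limit': '1g',
    },
    'CephAnsibleDisksConfig': {
        'devices': ['/dev/loop3'],
        'journal_size': 512,
        'osd_scenario': 'collocated',
    },
}
utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)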
Example #11
File: deploy.py Project: radez/apex
def prep_image(ds,
               ns,
               img,
               tmp_dir,
               root_pw=None,
               docker_tag=None,
               patches=None,
               upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "rm -f /etc/systemd/system/multi-user.target.wants/"
            "neutron-openvswitch-agent.service"
        }, {
            con.VIRT_RUN_CMD:
            "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
            ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'http_proxy={}' >> /etc/environment".format(ns['http_proxy'])
        })

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'https_proxy={}' >> /etc/environment".format(
                ns['https_proxy'])
        })

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
            "/opt/quagga/etc/init.d/zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "chmod +x /opt/quagga/etc/init.d/"
            "zrpcd_start.sh"
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.local "
        })
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "sed -i '$a /opt/quagga/etc/"
            "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"
        })
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([{
                con.VIRT_UPLOAD:
                "{}:/etc/sysconfig/modules/".format(mod_file)
            }, {
                con.VIRT_RUN_CMD:
                "chmod 0755 /etc/sysconfig/modules/"
                "{}".format(os.path.basename(mod_file))
            }])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "yum -y install "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_KMOD_RPM)
        }, {
            con.VIRT_RUN_CMD:
            "yum downgrade -y "
            "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
            "{}".format(OVS_NSH_RPM)
        }])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add a generic Linux namespace interface driver
        virt_cmds.append({
            con.VIRT_RUN_CMD:
            "cd /usr/lib/python2.7/site-packages && patch "
            "-p1 < neutron-patch-NSDriver.patch"
        })
        if sdn is False:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum remove -y vpp-lib"
            }, {
                con.VIRT_RUN_CMD:
                "yum install -y "
                "/root/nosdn_vpp_rpms/*.rpm"
            }])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug(
        "Temporary overcloud image stored as: {}".format(tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "rm -rf /etc/puppet/modules/opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "cd /etc/puppet/modules && tar xzf "
                "/root/puppet-opendaylight-"
                "{}.tar.gz".format(ds_opts['odl_version'])
            }])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])
                }])
            else:
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                    "yum -y install /root/{}/*".format(ds_opts['odl_version'])
                }])

        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"
            }, {
                con.VIRT_RUN_CMD:
                "yum -y install /root/{}/*".format(ODL_NETVIRT_VPP_RPM)
            }])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][
            con.ADMIN_NETWORK]['installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if sdn == 'ovn':
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum update -y "
            "*openvswitch*"
        }, {
            con.VIRT_RUN_CMD:
            "cd /root/ovs28 && yum downgrade -y "
            "*openvswitch*"
        }])

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image,
                                           tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no ceph device is set, we need
    # to use a persistent loop device for the Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {
                con.VIRT_UPLOAD:
                "{}:/usr/lib/systemd/system/".format(tmp_losetup)
            },
            {
                con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'
            },
            {
                con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'
            },
            {
                con.VIRT_RUN_CMD: 'systemctl daemon-reload'
            },
            {
                con.VIRT_RUN_CMD: 'systemctl enable losetup.service'
            },
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
Example #12
File: deploy.py Project: radez/apex
def create_deploy_cmd(ds,
                      ns,
                      inv,
                      tmp_dir,
                      virtual,
                      env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR, 'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(
                os.path.join(con.THT_ENV_DIR, 'docker-ha.yaml'))
        else:
            deploy_options.append(
                os.path.join(con.THT_ENV_DIR, 'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(
                    os.path.join(con.THT_DOCKER_ENV_DIR, "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(
            os.path.join(con.THT_ENV_DIR, 'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 minutes.
        # It is not clear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS. Either way it will probably improve over time as aarch64
        # support matures in CentOS, so deploy time should be re-tested in
        # the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd