def inject_auth(self):
    """Configure root/stack credentials and SSH access on the volume.

    Optionally sets the root password, installs the local public key as
    root's and stack's authorized_keys, grants stack passwordless sudo
    and disables cloud-init, then applies everything via virt-customize.
    """
    ops = []
    # virt-customize password operation, only when a root password is set.
    if self.root_pw:
        ops.append({constants.VIRT_PW: "password:{}".format(self.root_pw)})
    # Root SSH key setup: create .ssh and upload the local public key.
    ops.append({constants.VIRT_RUN_CMD: 'mkdir -p /root/.ssh'})
    ops.append({
        constants.VIRT_UPLOAD:
            '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'
    })
    # Fix permissions/SELinux labels, create the stack user, copy the key
    # to it, allow passwordless sudo and disable cloud-init.
    setup_cmds = (
        'chmod 600 /root/.ssh/authorized_keys',
        'restorecon -R -v /root/.ssh',
        'id -u stack || useradd -m stack',
        'mkdir -p /home/stack/.ssh',
        'chown stack:stack /home/stack/.ssh',
        'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
        'chown stack:stack /home/stack/.ssh/authorized_keys',
        'chmod 600 /home/stack/.ssh/authorized_keys',
        'echo "stack ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers',
        'touch /etc/cloud/cloud-init.disabled',
    )
    ops.extend({constants.VIRT_RUN_CMD: cmd} for cmd in setup_cmds)
    virt_utils.virt_customize(ops, self.volume)
def inject_opendaylight(odl_version, image, tmp_dir):
    """Install OpenDaylight and puppet-opendaylight into an image.

    Adds the ODL nexus yum repo, downloads puppet-opendaylight as a git
    archive, then uses virt-customize to install the ODL package and
    unpack the puppet module inside the image.
    """
    assert odl_version in con.VALID_ODL_VERSIONS
    # Resolve the package version and puppet-opendaylight branch; there is
    # no 'master' RPM repo, so fall back to the newest released version.
    if odl_version == 'master':
        odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
        branch = odl_version
    else:
        odl_pkg_version = odl_version
        branch = "stable/{}".format(odl_version)
    odl_url = ("https://nexus.opendaylight.org/content/repositories"
               "/opendaylight-{}-epel-7-x86_64-devel/".format(
                   odl_pkg_version))
    repo_name = "opendaylight-{}".format(odl_pkg_version)
    c_builder.add_repo(odl_url, repo_name, image, tmp_dir)
    # Fetch puppet-opendaylight as a tar archive for upload.
    archive = c_builder.create_git_archive(
        repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
        tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
    # Install ODL, replace any stale puppet module, then unpack the fresh one.
    virt_ops = [
        {con.VIRT_INSTALL: 'opendaylight'},
        {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
        {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
        {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
                           "puppet-opendaylight.tar"},
    ]
    virt_utils.virt_customize(virt_ops, image)
    logging.info("OpenDaylight injected into {}".format(image))
def inject_opendaylight(odl_version, image, tmp_dir, uc_ip, os_version, docker_tag=None): assert odl_version in con.VALID_ODL_VERSIONS # add repo if odl_version == 'master': # last version in the constants is "master" so select 2nd to last # odl package version has no "master" version odl_pkg_version = con.VALID_ODL_VERSIONS[-2] # branch will be used to pull puppet-opendaylight. Since puppet-odl # does not pull branch until later, we need to use master version of # that if master ODL version is specified branch = odl_version else: odl_pkg_version = odl_version branch = "stable/{}".format(odl_version) odl_url = "https://nexus.opendaylight.org/content/repositories" \ "/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version) repo_name = "opendaylight-{}".format(odl_pkg_version) apex.builders.common_builder.add_repo(odl_url, repo_name, image, tmp_dir) # download puppet-opendaylight archive = apex.builders.common_builder.create_git_archive( repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight', tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/') # install ODL, puppet-odl virt_ops = [{ con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive) }, { con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight' }, { con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf " "puppet-opendaylight.tar" }, { con.VIRT_INSTALL: "java-1.8.0-openjdk" }] if docker_tag: docker_cmds = [ "RUN yum remove opendaylight -y", "RUN echo $'[opendaylight]\\n\\", "baseurl={}\\n\\".format(odl_url), "gpgcheck=0\\n\\", "enabled=1' > /etc/yum.repos.d/opendaylight.repo", "RUN yum -y install opendaylight" ] src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \ "{}".format(uc_ip, os_version, 'opendaylight', docker_tag) build_dockerfile('opendaylight', tmp_dir, docker_cmds, src_img_uri) else: virt_ops.append({con.VIRT_INSTALL: 'opendaylight'}) virt_utils.virt_customize(virt_ops, image) logging.info("OpenDaylight injected into {}".format(image))
def _update_delorean_repo(self):
    """Refresh the delorean repo file on the Undercloud volume.

    Downloads the current-tripleo delorean.repo for this OS version and
    writes it over /etc/yum.repos.d/delorean.repo inside the volume.
    Best-effort: failure is logged as a warning rather than raised, so
    an offline build keeps using the repo file already in the image.
    """
    if utils.internet_connectivity():
        logging.info('Updating delorean repo on Undercloud')
        delorean_repo = ("https://trunk.rdoproject.org/centos7-{}"
                         "/current-tripleo/delorean.repo".format(
                             self.os_version))
        # Bug fix: the destination was previously misspelled
        # 'deloran.repo', which left the real delorean.repo untouched and
        # added a stray duplicate repo file instead of updating it.
        cmd = ("curl -L -f -o "
               "/etc/yum.repos.d/delorean.repo {}".format(delorean_repo))
        try:
            virt_utils.virt_customize([{
                constants.VIRT_RUN_CMD: cmd
            }], self.volume)
        except Exception:
            # Deliberate broad catch: repo refresh is optional.
            logging.warning("Failed to download and update delorean repo "
                            "for Undercloud")
def inject_calipso_installer(tmp_dir, image):
    """
    Downloads calipso installer script from artifacts.opnfv.org and
    puts it under /root/ for further installation process.
    :return: None
    """
    installer_name = os.path.basename(con.CALIPSO_INSTALLER_URL)
    # Base URL is the installer URL with the filename stripped off.
    base_url = con.CALIPSO_INSTALLER_URL.replace(installer_name, '')
    utils.fetch_upstream_and_unpack(tmp_dir, base_url, [installer_name])
    upload_op = {
        con.VIRT_UPLOAD: "{}/{}:/root/".format(tmp_dir, installer_name)
    }
    virt_utils.virt_customize([upload_op], image)
    logging.info("Calipso injected into {}".format(image))
def update_repos(image, branch):
    """Swap delorean repos on the image for the pinned RDO release.

    Removes existing delorean repo files, adds the tagged delorean repo,
    then uses tripleo-repos to configure the branch plus ceph repos.
    """
    shell_cmds = (
        "rm -f /etc/yum.repos.d/delorean*",
        "yum-config-manager --add-repo "
        "https://trunk.rdoproject.org/centos7/{}"
        "/delorean.repo".format(con.RDO_TAG),
        "yum clean all",
    )
    virt_ops = [{con.VIRT_RUN_CMD: cmd} for cmd in shell_cmds]
    # tripleo-repos is needed inside the image to lay down the final repos.
    virt_ops.append({con.VIRT_INSTALL: "python2-tripleo-repos"})
    virt_ops.append({
        con.VIRT_RUN_CMD: "tripleo-repos -b {} {} ceph".format(branch,
                                                               con.RDO_TAG)
    })
    virt_utils.virt_customize(virt_ops, image)
def add_repo(repo_url, repo_name, image, tmp_dir):
    """Write a yum .repo file for repo_url and upload it into the image.

    :param repo_url: baseurl for the repository
    :param repo_name: repo id, also used as the .repo filename
    :param image: image to customize
    :param tmp_dir: directory where the .repo file is staged
    """
    assert repo_name is not None
    assert repo_url is not None
    repo_file = "{}.repo".format(repo_name)
    repo_file_path = os.path.join(tmp_dir, repo_file)
    # gpgcheck is disabled: these devel/CI repos do not ship signed RPMs.
    repo_lines = [
        "[{}]".format(repo_name),
        "name={}".format(repo_name),
        "baseurl={}".format(repo_url),
        "gpgcheck=0"
    ]
    logging.debug("Creating repo file {}".format(repo_name))
    with open(repo_file_path, 'w') as out:
        for repo_line in repo_lines:
            out.write("{}\n".format(repo_line))
    logging.debug("Adding repo {} to {}".format(repo_file, image))
    upload_op = {
        con.VIRT_UPLOAD: "{}:/etc/yum.repos.d/".format(repo_file_path)
    }
    virt_utils.virt_customize([upload_op], image)
def add_upstream_patches(patches, image, tmp_dir,
                         default_branch=os.path.join('stable',
                                                     con.DEFAULT_OS_VERSION)):
    """
    Adds patches from upstream OpenStack gerrit to Undercloud for deployment
    :param patches: list of patches
    :param image: undercloud image
    :param tmp_dir: to store temporary patch files
    :param default_branch: default branch to fetch commit (if not specified
    in patch)
    :return: None
    """
    # The 'patch' utility must exist in the image before diffs can apply.
    virt_ops = [{con.VIRT_INSTALL: 'patch'}]
    logging.debug("Evaluating upstream patches:\n{}".format(patches))
    for patch in patches:
        assert isinstance(patch, dict)
        assert all(i in patch.keys() for i in ['project', 'change-id'])
        branch = patch['branch'] if 'branch' in patch.keys() \
            else default_branch
        patch_diff = build_utils.get_patch(patch['change-id'],
                                           patch['project'], branch)
        if not patch_diff:
            # Nothing fetched for this change; skip it.
            logging.info("Ignoring patch:\n{}".format(patch))
            continue
        patch_file = "{}.patch".format(patch['change-id'])
        patch_file_path = os.path.join(tmp_dir, patch_file)
        with open(patch_file_path, 'w') as fh:
            fh.write(patch_diff)
        project_path = project_to_path(patch['project'])
        virt_ops.append({
            con.VIRT_UPLOAD: "{}:{}".format(patch_file_path, project_path)
        })
        virt_ops.append({
            con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(project_path,
                                                               patch_file)
        })
        logging.info("Adding patch {} to {}".format(patch_file, image))
    # Only run virt-customize when at least one patch was staged
    # (virt_ops always contains the initial 'patch' install op).
    if len(virt_ops) > 1:
        virt_utils.virt_customize(virt_ops, image)
def inject_ovs_nsh(image, tmp_dir):
    """
    Downloads OpenVswitch, compiles it and installs it on the overcloud
    image on the fly.
    :param image:
    :param tmp_dir:
    :return: None
    """
    ovs_filename = os.path.basename(con.OVS_URL)
    ovs_folder = ovs_filename.replace(".tar.gz", "")
    # Fetch the OVS tarball into tmp_dir from the URL's directory portion.
    utils.fetch_upstream_and_unpack(tmp_dir,
                                    os.path.split(con.OVS_URL)[0] + "/",
                                    [ovs_filename])
    # Folder name is "<dist>-<version>", e.g. "openvswitch-2.9.0".
    (_, ovs_version) = ovs_folder.split("-")
    build_deps = (
        "rpm-build,autoconf,automake,libtool,openssl,"
        "openssl-devel,python,python-twisted-core,python-six,groff,graphviz,"
        "python-zope-interface,desktop-file-utils,procps-ng,PyQt4,"
        "libcap-ng,libcap-ng-devel,selinux-policy-devel,kernel-devel,"
        "kernel-headers,kernel-tools,rpmdevtools,systemd-units,python-devel,"
        "python-sphinx"
    )
    virt_ops = [
        {con.VIRT_UPLOAD: "{}:/root/".format(tmp_dir + "/" + ovs_filename)},
        # Toolchain and headers needed to build the OVS RPMs in the image.
        {con.VIRT_INSTALL: build_deps},
        {con.VIRT_RUN_CMD: "cd /root/ && tar xzf {}".format(ovs_filename)},
        {con.VIRT_UPLOAD: "{}/build_ovs_nsh.sh:/root/{}".format(tmp_dir,
                                                                ovs_folder)},
        # Build and force-install the OVS + kmod RPMs inside the image.
        {con.VIRT_RUN_CMD:
            "cd /root/{0} && chmod -R 777 * && chown -R root:root * && "
            "./build_ovs_nsh.sh && rpm -Uhv --force rpm/rpmbuild/RPMS/x86_64/{0}"
            "-1.el7.x86_64.rpm && rpm -Uhv --force rpm/rpmbuild/RPMS/x86_64"
            "/openvswitch-kmod-{1}-1.el7.x86_64.rpm".format(
                ovs_folder, ovs_version)},
    ]
    virt_utils.virt_customize(virt_ops, image)
    logging.info("OVS injected into {}".format(image))
def add_upstream_packages(image):
    """
    Adds required base upstream packages to Undercloud for deployment
    :param image:
    :return: None
    """
    required_pkgs = (
        'epel-release',
        'openstack-utils',
        'ceph-common',
        'python2-networking-sfc',
        'openstack-ironic-inspector',
        'subunit-filters',
        'docker-distribution',
        'openstack-tripleo-validations',
        'libguestfs-tools',
        'ceph-ansible',
        'python-tripleoclient',
        'openstack-tripleo-heat-templates',
    )
    # Remove incompatible python-docker version before installing packages.
    virt_ops = [{con.VIRT_RUN_CMD: "yum remove -y python-docker-py"}]
    virt_ops.extend({con.VIRT_INSTALL: pkg} for pkg in required_pkgs)
    virt_utils.virt_customize(virt_ops, image)
def add_upstream_packages(image):
    """
    Adds required base upstream packages to Undercloud for deployment
    :param image:
    :return: None
    """
    required_pkgs = (
        'openstack-utils',
        'ceph-common',
        'python2-networking-sfc',
        'openstack-ironic-inspector',
        'subunit-filters',
        'docker-distribution',
        'openstack-tripleo-validations',
        'libguestfs-tools',
    )
    virt_ops = [{con.VIRT_INSTALL: pkg} for pkg in required_pkgs]
    virt_utils.virt_customize(virt_ops, image)
def inject_auth(self):
    """Set root credentials and share root's SSH key with the stack user.

    Optionally sets the root password, installs the local public key as
    root's authorized_keys, then copies it to the existing stack user's
    .ssh with correct ownership and permissions.
    """
    ops = []
    # virt-customize password op, only when a root password was given.
    if self.root_pw:
        ops.append({constants.VIRT_PW: "password:{}".format(self.root_pw)})
    # Root SSH key setup.
    ops.append({constants.VIRT_RUN_CMD: 'mkdir -p /root/.ssh'})
    ops.append({
        constants.VIRT_UPLOAD:
            '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'
    })
    key_cmds = (
        'chmod 600 /root/.ssh/authorized_keys',
        'restorecon /root/.ssh/authorized_keys',
        'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
        'chown stack:stack /home/stack/.ssh/authorized_keys',
        'chmod 600 /home/stack/.ssh/authorized_keys',
    )
    ops.extend({constants.VIRT_RUN_CMD: cmd} for cmd in key_cmds)
    virt_utils.virt_customize(ops, self.volume)
def inject_quagga(image, tmp_dir):
    """
    Downloads quagga tarball from artifacts.opnfv.org and install it on the
    overcloud image on the fly.
    :param image:
    :param tmp_dir:
    :return: None
    """
    quagga_archive = os.path.basename(con.QUAGGA_URL)
    utils.fetch_upstream_and_unpack(tmp_dir,
                                    os.path.split(con.QUAGGA_URL)[0] + "/",
                                    [quagga_archive])
    virt_ops = [
        {con.VIRT_UPLOAD: "{}/quagga-4.tar.gz:/root/".format(tmp_dir)},
        {con.VIRT_RUN_CMD: "cd /root/ && tar xzf quagga-4.tar.gz"},
        # Install every RPM in the unpacked folder except debuginfo/devel/
        # contrib packages.
        {con.VIRT_RUN_CMD: "cd /root/quagga;packages=$(ls |grep -vE 'debug"
                           "info|devel|contrib');yum -y install $packages"},
    ]
    virt_utils.virt_customize(virt_ops, image)
    logging.info("Quagga injected into {}".format(image))
def add_upstream_packages(image):
    """
    Adds required base upstream packages to Undercloud for deployment
    :param image:
    :return: None
    """
    # FIXME(trozet): we have to lock to this beta ceph ansible package because
    # the current RPM versioning is wrong and an older package has a higher
    # version than this package. We should change to just 'ceph-ansible'
    # once the package/repo has been fixed. Note: luminous is fine here
    # because Apex will only support container deployment for Queens and later
    pinned_ceph_ansible = (
        'http://mirror.centos.org/centos/7/storage/x86_64/ceph-luminous'
        '/ceph-ansible-3.1.0-0.beta3.1.el7.noarch.rpm'
    )
    required_pkgs = (
        'openstack-utils',
        'ceph-common',
        'python2-networking-sfc',
        'openstack-ironic-inspector',
        'subunit-filters',
        'docker-distribution',
        'openstack-tripleo-validations',
        'libguestfs-tools',
        pinned_ceph_ansible,
    )
    virt_ops = [{con.VIRT_INSTALL: pkg} for pkg in required_pkgs]
    virt_utils.virt_customize(virt_ops, image)
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # virt_cmds collects virt-customize ops; all are applied in one pass at
    # the end, so the order of appends is the order of execution.
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        # Hook the zrpcd startup script into rc.local so it runs at boot.
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        # Appended to both /etc/rc.local and /etc/rc.d/rc.local; on CentOS
        # one is a symlink to the other — presumably done for safety across
        # image variants (TODO confirm).
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # Write a modprobe wrapper locally, then upload it into the
            # image's /etc/sysconfig/modules so the module loads at boot.
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()  # NOTE(review): redundant inside 'with'
            virt_cmds.extend([{
                con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}])

    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # Install the NSH-enabled OVS kmod and downgrade OVS to the matching
        # prebuilt RPMs staged under /root/ovs in the image.
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "yum -y install "
                              "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                              "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append({
            con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                              "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # No SDN controller: swap vpp-lib for the no-SDN VPP RPMs.
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}])

    if sdn == 'opendaylight':
        # Non-default ODL version: replace the packaged ODL and puppet
        # module with the version-specific artifacts staged under /root.
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD:
                    "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(
                                       ds_opts['odl_version'])}])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}])
            else:
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                        "yum -y install /root/{}/*".format(
                            ds_opts['odl_version'])}])
    # NOTE(review): this elif can never fire — any sdn == 'opendaylight'
    # already matched the branch above. Looks like a latent dead branch;
    # confirm intent before relying on odl_vpp_netvirt handling here.
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD:
                "yum -y install /root/{}/*".format(ODL_NETVIRT_VPP_RPM)}])

    if sdn == 'ovn':
        # Pin OVS to the 2.8 builds staged under /root/ovs28.
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                              "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}])

    # Work on a copy of the image so the original stays pristine.
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug(
        "Temporary overcloud image stored as: {}".format(tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
def add_upstream_patches(patches, image, tmp_dir,
                         default_branch=os.path.join('stable',
                                                     con.DEFAULT_OS_VERSION),
                         uc_ip=None, docker_tag=None):
    """
    Adds patches from upstream OpenStack gerrit to Undercloud for deployment
    :param patches: list of patches
    :param image: undercloud image
    :param tmp_dir: to store temporary patch files
    :param default_branch: default branch to fetch commit (if not specified
    in patch)
    :param uc_ip: undercloud IP (required only for docker patches)
    :param docker_tag: Docker Tag (required only for docker patches)
    :return: Set of docker services patched (if applicable)
    """
    # 'patch' must be installed in the image before any diff can be applied.
    virt_ops = [{con.VIRT_INSTALL: 'patch'}]
    logging.debug("Evaluating upstream patches:\n{}".format(patches))
    docker_services = set()
    for patch in patches:
        assert isinstance(patch, dict)
        assert all(i in patch.keys() for i in ['project', 'change-id'])
        if 'branch' in patch.keys():
            branch = patch['branch']
        else:
            branch = default_branch
        patch_diff = build_utils.get_patch(patch['change-id'],
                                           patch['project'], branch)
        project_path = project_to_path(patch['project'], patch_diff)
        # If docker tag and python we know this patch belongs on docker
        # container for a docker service. Therefore we build the dockerfile
        # and move the patch into the containers directory. We also assume
        # this builder call is for overcloud, because we do not support
        # undercloud containers
        if platform.machine() == 'aarch64':
            docker_url = con.DOCKERHUB_AARCH64
        else:
            docker_url = con.DOCKERHUB_OOO
        if docker_tag and 'python' in project_path:
            # Projects map to multiple THT services, need to check which
            # are supported
            ooo_docker_services = project_to_docker_image(patch['project'],
                                                          docker_url)
            docker_img = ooo_docker_services[0]
        else:
            ooo_docker_services = []
            docker_img = None
        change = build_utils.get_change(con.OPENSTACK_GERRIT,
                                        patch['project'], branch,
                                        patch['change-id'])
        # Skip patches that have already been merged and promoted upstream.
        patch_promoted = is_patch_promoted(change,
                                           branch.replace('stable/', ''),
                                           docker_url, docker_img)
        if patch_diff and not patch_promoted:
            patch_file = "{}.patch".format(patch['change-id'])
            # If we found services, then we treat the patch like it applies
            # to docker only
            if ooo_docker_services:
                os_version = default_branch.replace('stable/', '')
                for service in ooo_docker_services:
                    docker_services = docker_services.union({service})
                    docker_cmds = [
                        "WORKDIR {}".format(project_path),
                        "ADD {} {}".format(patch_file, project_path),
                        "RUN patch -p1 < {}".format(patch_file)
                    ]
                    src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
                                  "{}".format(uc_ip, os_version, service,
                                              docker_tag)
                    oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
                                                src_img_uri)
                # Patch file must live where the Dockerfile's ADD can see it.
                patch_file_path = os.path.join(tmp_dir, 'containers',
                                               patch_file)
            else:
                # Non-docker patch: upload the diff and apply it in-place
                # inside the undercloud image.
                patch_file_path = os.path.join(tmp_dir, patch_file)
                virt_ops.extend([{
                    con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
                                                    project_path)},
                    {con.VIRT_RUN_CMD:
                        "cd {} && patch -p1 < {}".format(project_path,
                                                         patch_file)}])
                logging.info("Adding patch {} to {}".format(patch_file,
                                                            image))
            # The diff is written after the ops are queued; virt-customize
            # only reads the file when virt_customize() runs below.
            with open(patch_file_path, 'w') as fh:
                fh.write(patch_diff)
        else:
            logging.info("Ignoring patch:\n{}".format(patch))
    # Only customize when at least one non-docker patch op was queued
    # (virt_ops always holds the initial 'patch' install op).
    if len(virt_ops) > 1:
        virt_utils.virt_customize(virt_ops, image)
    return docker_services
def test_virt_customize(self, mock_subprocess, mock_os_path):
    """virt_customize should accept an op list without raising when
    subprocess and os.path are mocked out."""
    fake_ops = [{'--operation': 'arg'}]
    virt_customize(fake_ops, 'target')
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: Set of patched container service names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # virt_cmds collects virt-customize ops; all are applied in one pass at
    # the end, so the order of appends is the order of execution.
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    # Propagate any configured proxies into the image's /etc/environment.
    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        # Hook the zrpcd startup script into rc.local so it runs at boot.
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        # Appended to both /etc/rc.local and /etc/rc.d/rc.local; on CentOS
        # one is a symlink to the other — presumably done for safety across
        # image variants (TODO confirm).
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'):
                'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # Write a modprobe wrapper locally, then upload it into the
            # image's /etc/sysconfig/modules so the module loads at boot.
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()  # NOTE(review): redundant inside 'with'
            virt_cmds.extend([{
                con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}])

    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # Install the NSH-enabled OVS kmod and downgrade OVS to the matching
        # prebuilt RPMs staged under /root/ovs in the image.
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "yum -y install "
                              "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                              "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append({
            con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                              "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # No SDN controller: swap vpp-lib for the no-SDN VPP RPMs.
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}])

    # Work on a copy of the image so the original stays pristine.
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug(
        "Temporary overcloud image stored as: {}".format(tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        # Legacy (non-upstream) path: replace packaged ODL + puppet module
        # with version-specific artifacts staged under /root.
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([{
                con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD:
                    "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(
                                       ds_opts['odl_version'])}])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}])
            else:
                virt_cmds.extend([{
                    con.VIRT_RUN_CMD:
                        "yum -y install /root/{}/*".format(
                            ds_opts['odl_version'])}])
    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
            and ds_opts['odl_vpp_netvirt']:
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
            {con.VIRT_RUN_CMD:
                "yum -y install /root/{}/*".format(ODL_NETVIRT_VPP_RPM)}])
    elif sdn == 'opendaylight':
        # Upstream path: delegate ODL injection (RPM or docker) to the
        # overcloud builder.
        undercloud_admin_ip = ns['networks'][
            con.ADMIN_NETWORK]['installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if sdn == 'ovn':
        # Pin OVS to the 2.8 builds staged under /root/ovs28.
        virt_cmds.extend([{
            con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                              "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}])

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        # NOTE(review): undercloud_admin_ip is only assigned in the
        # upstream-opendaylight elif above — this raises NameError when
        # patches are given with any other sdn configuration. Confirm and
        # hoist the assignment if other SDNs need patching.
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD:
                "{}:/usr/lib/systemd/system/".format(tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
def expand_root_fs(self):
    """Grow the XFS root filesystem on /dev/sda to fill the partition."""
    # A lib called vminspect could do this with fewer shell calls, but it
    # has extra dependencies and is not yet available in pip. Consider
    # switching to it later.
    logging.debug("Expanding root filesystem on /dev/sda partition")
    grow_op = {constants.VIRT_RUN_CMD: 'xfs_growfs /dev/sda'}
    virt_utils.virt_customize([grow_op], self.volume)