Code example #1
 def _start_service_if_enabled(self, node, service):
     # rc 3 means service inactive
     # rc 5 means no service available
     status, _ = node.execute('systemctl status {}'.format(service),
                              check_exit_code=[0, 3, 5])
     if 'service; enabled' in status:
         LOG.info('Starting {}'.format(service))
         node.execute('systemctl start {}'.format(service), as_root=True)
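
A standalone sketch of the same pattern using only the standard library; subprocess replaces the project's node.execute() wrapper, and the tolerated exit codes mirror the comments above (an illustration, not project code):

 import subprocess

 def start_service_if_enabled(service):
     # systemctl status exits non-zero for inactive (3) or unavailable (5)
     # units, so tolerate those codes instead of treating them as errors
     result = subprocess.run(['systemctl', 'status', service],
                             capture_output=True, text=True)
     if result.returncode not in (0, 3, 5):
         raise RuntimeError('unexpected exit code %d' % result.returncode)
     if 'service; enabled' in result.stdout:
         subprocess.run(['systemctl', 'start', service], check=True)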
Code example #2
    def reinstall_odl(self, node, odl_artifact):
        # Check for Quagga
        self._start_service_if_enabled(node, 'zrpcd')
        self._start_service_if_enabled(node, 'bgpd')

        # Install odl
        tar_tmp_path = '/tmp/odl-artifact/'
        node.copy('to', odl_artifact, tar_tmp_path + odl_artifact)
        node.execute('rm -rf /opt/opendaylight/', as_root=True)
        node.execute('mkdir -p /opt/opendaylight/', as_root=True)
        if 'tar.gz' in odl_artifact:
            # check if systemd service exists (may not if this was a docker
            # deployment)
            if not node.is_file(ODL_SYSTEMD):
                LOG.info("Creating odl user, group, and systemd file")
                # user/group may already exist so just ignore errors here
                node.execute('groupadd odl', as_root=True,
                             check_exit_code=False)
                node.execute('useradd -g odl odl', as_root=True,
                             check_exit_code=False)
                systemd_file = os.path.join(os.getcwd(),
                                            'opendaylight.service')
                node.copy('to', systemd_file, '/tmp/opendaylight.service',
                          check_exit_code=True)
                node.execute('mv /tmp/opendaylight.service %s' % ODL_SYSTEMD,
                             as_root=True)
                node.execute('systemctl daemon-reload', as_root=True)
            LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
                     % (odl_artifact, node.name))
            node.execute('tar -zxf %s --strip-components=1 -C '
                         '/opt/opendaylight/'
                         % (tar_tmp_path + odl_artifact), as_root=True)
            # The AAA CLI jar used to create the ODL user is missing from the
            # regular netvirt distro; it is only part of the full distro.
            if not node.is_file(ODL_AAA_JAR):
                LOG.info("ODL AAA CLI jar missing, will copy")
                aaa_cli_file = os.path.join(os.getcwd(),
                                            'aaa-cli-jar.jar')
                node.copy('to', aaa_cli_file, ODL_AAA_JAR)
            node.execute('chown -R odl:odl /opt/opendaylight', as_root=True)
        if '.rpm' in odl_artifact:
            LOG.info('Installing %s on node %s'
                     % (odl_artifact, node.name))
            node.execute('yum remove -y opendaylight', as_root=True)
            node.execute('yum install -y %s'
                         % (tar_tmp_path + odl_artifact), as_root=True)
        node.execute('rm -rf ' + tar_tmp_path, as_root=True)
        LOG.info('Starting Opendaylight on node %s' % node.name)
        # we do not want puppet-odl to install the repo or the package, so we
        # use tags to ignore those resources
        node.execute('puppet apply -e "include opendaylight" '
                     '--tags file,concat,file_line,augeas,odl_user,'
                     'odl_keystore,service '
                     '--modulepath=/etc/puppet/modules/ '
                     '--verbose --debug --trace --detailed-exitcodes',
                     check_exit_code=[2], as_root=True)
Code example #3
 def get_ovs_controller(node):
     # find ovs controller and manager
     ovs_controller, _ = node.execute('ovs-vsctl get-controller '
                                      'br-int',
                                      as_root=True)
     ovs_controller = ovs_controller.rstrip()
     if ovs_controller == '':
         LOG.warning("OVS controller for node {} is empty!".format(
             node.address))
     else:
         return ovs_controller
Code example #4
File: tripleo_manager.py Project: rski/sdnvpn
 def get_address_of_node(self, server_name=None, server=None):
     if not (server_name or server):
         raise Exception('Either server_name or server needs to be given')
     if server_name:
         try:
             for server in self.novacl.servers.list():
                 if server.name == server_name:
                     return server.addresses['ctlplane'][0]['addr']
         except Exception as ex:
             LOG.error('Unsupported installer platform.')
             raise ex
     if server:
         return server.addresses['ctlplane'][0]['addr']
Code example #5
 def cleanup(self):
     for i in range(MAX_NODES):
         rv, (_, rc) = execute('virsh destroy %(name)s%(i)s-%(env)s'
                               % {'i': i, 'env': self.env,
                                  'name': self.NODE_NAME},
                               check_exit_code=[0, 1])
         if rc == 0:
             LOG.info(rv)
         rv, (_, rc) = execute('virsh undefine %(name)s%(i)s-%(env)s'
                               % {'i': i, 'env': self.env,
                                  'name': self.NODE_NAME},
                               check_exit_code=[0, 1])
         if rc == 0:
             LOG.info(rv)
     execute('rm -rf ' + self.BUILD_DIR)
Code example #6
File: tripleo_manager.py Project: rski/sdnvpn
 def get_node_name_by_ilo_address(self, ilo_address):
     try:
         node_name = None
         for node in self.ironiccl.node.list():
             nova_uuid = node.instance_uuid
             if ilo_address == self.ironiccl.node.get_by_instance_uuid(
                     nova_uuid).driver_info['ilo_address']:
                 node_name = self.novacl.servers.find(id=nova_uuid).name
                 break
         if not node_name:
             raise Exception('Cannot get nova instance for ilo address %s'
                             % ilo_address)
         return node_name
     except Exception as ex:
         LOG.error('Unsupported installer platform.')
         raise ex
Code example #7
    def run(self, sys_args, config):
        cloner_info_path = sys_args.cloner_info
        SSH_CONFIG['ID_RSA_PATH'] = (cloner_info_path + CONFIG.ID_RSA_PATH +
                                     'id_rsa')
        node_config = utils_yaml.read_dict_from_yaml(
            cloner_info_path + CONFIG.NODE_YAML_PATH)
        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            LOG.info('Disconnecting OpenVSwitch from controller on node %s'
                     % node.name)
            node.execute('ovs-vsctl del-controller br-int', as_root=True)

        for node in self.nodes:
            if 'ODL' in node.config:
                tar_tmp_path = '/tmp/odl-artifact/'
                if node.config['ODL'].get('active'):
                    tarball_name = os.path.basename(sys_args.odl_artifact)
                    node.copy('to', sys_args.odl_artifact,
                              '/tmp/odl-artifact/' + tarball_name)
                    node.execute('rm -rf /opt/opendaylight/', as_root=True)
                    node.execute('mkdir -p /opt/opendaylight/', as_root=True)
                    LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
                             % (tarball_name, node.name))
                    node.execute('tar -zxf %s --strip-components=1 -C '
                                 '/opt/opendaylight/'
                                 % (tar_tmp_path + tarball_name), as_root=True)
                    node.execute('chown -R odl:odl /opt/opendaylight',
                                 as_root=True)
                    node.execute('rm -rf ' + tar_tmp_path, as_root=True)
                    LOG.info('Installing and Starting Opendaylight on node %s'
                             % node.name)
                    node.copy('to', 'odl_reinstaller/install_odl.pp',
                              tar_tmp_path)
                    node.execute('puppet apply --modulepath='
                                 '/etc/puppet/modules/ %sinstall_odl.pp '
                                 '--verbose --debug --trace '
                                 '--detailed-exitcodes'
                                 % tar_tmp_path, check_exit_code=[2],
                                 as_root=True)
        # --detailed-exitcodes: Provide extra information about the run via
        # exit codes. If enabled, 'puppet apply' will use the following exit
        # codes:
        # 0: The run succeeded with no changes or failures; the system was
        #    already in the desired state.
        # 1: The run failed.
        # 2: The run succeeded, and some resources were changed.
        # 4: The run succeeded, and some resources failed.
        # 6: The run succeeded, and included both changes and failures.

        for node in self.nodes:
            LOG.info('Connecting OpenVSwitch to controller on node %s'
                     % node.name)
            ovs_controller = node.config.get('ovs-controller')
            if ovs_controller:
                node.execute('ovs-vsctl set-controller br-int %s'
                             % ovs_controller, as_root=True)
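
The comment block above documents puppet's --detailed-exitcodes contract, which is why these snippets pass check_exit_code=[2]: only "succeeded with changes" is accepted. A minimal hypothetical helper expressing that mapping:

 # with --detailed-exitcodes: 0 = no changes, 2 = changes applied,
 # 1 = run failed, 4/6 = some resources failed
 PUPPET_SUCCESS = {0, 2}

 def puppet_run_ok(rc):
     """Return True when a detailed exit code indicates a clean run."""
     return rc in PUPPET_SUCCESS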
Code example #8
 def reinstall_odl(node, odl_tarball):
     tar_tmp_path = '/tmp/odl-artifact/'
     node.copy('to', odl_tarball, tar_tmp_path + odl_tarball)
     node.execute('rm -rf /opt/opendaylight/', as_root=True)
     node.execute('mkdir -p /opt/opendaylight/', as_root=True)
     LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
              % (odl_tarball, node.name))
     node.execute('tar -zxf %s --strip-components=1 -C '
                  '/opt/opendaylight/'
                  % (tar_tmp_path + odl_tarball), as_root=True)
     node.execute('chown -R odl:odl /opt/opendaylight', as_root=True)
     node.execute('rm -rf ' + tar_tmp_path, as_root=True)
     LOG.info('Installing and Starting Opendaylight on node %s' % node.name)
     node.execute('puppet apply -e "include opendaylight" '
                  '--modulepath=/etc/puppet/modules/ '
                  '--verbose --debug --trace --detailed-exitcodes',
                  check_exit_code=[2], as_root=True)
Code example #9
 def disconnect_ovs(node):
     LOG.info('Disconnecting OpenVSwitch from controller on node %s'
              % node.name)
     node.execute('ovs-vsctl del-controller br-int', as_root=True)
     node.execute('ovs-vsctl del-manager', as_root=True)
     LOG.info('Deleting Tunnel and Patch interfaces')
     # Note this is required because ODL fails to reconcile pre-created
     # ports
     for br in 'br-int', 'br-ex':
         LOG.info("Checking for ports on {}".format(br))
         try:
             out, _ = node.execute('ovs-vsctl list-ports {} | grep -E '
                                   '"tun|patch"'.format(br),
                                   as_root=True, shell=True)
             ports = out.rstrip().split("\n")
             for port in ports:
                 LOG.info('Deleting port: {}'.format(port))
                 node.execute('ovs-vsctl del-port {} {}'.format(br, port),
                              as_root=True)
         except ProcessExecutionError:
             LOG.info("No tunnel or patch ports configured")
Code example #10
    def reinstall_odl(self, node, odl_artifact):
        # Check for Quagga
        self._start_service_if_enabled(node, 'zrpcd')
        self._start_service_if_enabled(node, 'bgpd')

        # Install odl
        tar_tmp_path = '/tmp/odl-artifact/'
        node.copy('to', odl_artifact, tar_tmp_path + odl_artifact)
        node.execute('rm -rf /opt/opendaylight/', as_root=True)
        node.execute('mkdir -p /opt/opendaylight/', as_root=True)
        if 'tar.gz' in odl_artifact:
            LOG.info('Extracting %s to /opt/opendaylight/ on node %s' %
                     (odl_artifact, node.name))
            node.execute('tar -zxf %s --strip-components=1 -C '
                         '/opt/opendaylight/' % (tar_tmp_path + odl_artifact),
                         as_root=True)
            node.execute('chown -R odl:odl /opt/opendaylight', as_root=True)
        if '.rpm' in odl_artifact:
            LOG.info('Installing %s on node %s' % (odl_artifact, node.name))
            node.execute('yum remove -y opendaylight', as_root=True)
            node.execute('yum install -y %s' % (tar_tmp_path + odl_artifact),
                         as_root=True)
        node.execute('rm -rf ' + tar_tmp_path, as_root=True)
        LOG.info('Starting Opendaylight on node %s' % node.name)
        node.execute(
            'puppet apply -e "include opendaylight" '
            '--modulepath=/etc/puppet/modules/ '
            '--verbose --debug --trace --detailed-exitcodes',
            check_exit_code=[2],
            as_root=True)
Code example #11
    def gen_node_info(self):
        overcloud_ip_list = TripleoHelper.find_overcloud_ips()

        for node_ip in overcloud_ip_list:
            LOG.info('Introspecting node %s' % node_ip)
            node = Node('intro-%s' % node_ip,
                        address=node_ip,
                        user=self.overcloud_user)
            node_mac = None
            virsh_domain = None
            server_name, _ = node.execute('hostname')
            server_name = server_name.rstrip()
            if 'overcloud-controller' in server_name:
                node_type = 'controller'
            elif 'overcloud-novacompute' in server_name:
                node_type = 'compute'
            else:
                raise TripleOInspectorException('Unknown type '
                                                '(controller/compute) %s ' %
                                                server_name)
            try:
                processutils.execute('ping -c 1 %s' % node_ip)
                res, _ = processutils.execute('/usr/sbin/arp -a '
                                              '%s' % node_ip)
                node_mac = \
                    re.search('([0-9a-z]+:){5}[0-9a-z]+', res).group(0)
                virsh_domain = \
                    TripleoHelper.get_virtual_node_name_from_mac(node_mac)
            except AttributeError:
                LOG.warning("Unable to find MAC address for node {"
                            "}".format(node_ip))

            # find ovs controller and manager
            ovs_controller = self.get_ovs_controller(node)
            out, _ = node.execute('ovs-vsctl get-manager', as_root=True)
            ovs_managers = out.rstrip().split("\n")
            if all(ovs_manager == '' for ovs_manager in ovs_managers):
                LOG.warning(
                    "OVS managers for node {} is empty!".format(node_ip))
            self.node_info['servers'][server_name] = {
                'address': node_ip,
                'user': self.overcloud_user,
                'type': node_type,
                'orig-ctl-mac': node_mac,
                'vNode-name': virsh_domain,
                'ovs-controller': ovs_controller,
                'ovs-managers': ovs_managers
            }
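
The MAC regex above accepts any run of hex-like characters (including letters up to 'z' and runs of any length); a stricter pattern for comparison, shown as a standalone illustration with an invented arp line:

 import re

 # exactly six colon-separated two-digit hex octets
 MAC_RE = re.compile(r'(?:[0-9a-f]{2}:){5}[0-9a-f]{2}', re.IGNORECASE)

 sample = '? (192.0.2.10) at 52:54:00:ab:cd:ef [ether] on br-ctlplane'
 print(MAC_RE.search(sample).group(0))  # 52:54:00:ab:cd:ef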
Code example #12
    def validate_ovs(node):
        LOG.info("Validating OVS configuration for node: {}".format(node.name))
        # Validate ovs manager is connected
        out, _ = node.execute('ovs-vsctl show ', as_root=True)
        mgr_search = \
            re.search(r'Manager\s+"tcp:[0-9.]+:6640"\n\s*'
                      r'is_connected:\s*true', out)
        if mgr_search is None:
            raise ODLReinstallerException("OVS Manager is not connected")
        else:
            LOG.info("OVS is connected to OVSDB manager")

        # Validate ovs controller is configured
        cfg_controller = node.config['ovs-controller']
        ovs_controller = TripleOIntrospector().get_ovs_controller(node)
        if cfg_controller == '' or cfg_controller is None:
            if ovs_controller is None or ovs_controller == '':
                raise ODLReinstallerException("OVS controller is not set "
                                              "for node: {}"
                                              "".format(node.address))
        elif ovs_controller != cfg_controller:
            raise ODLReinstallerException("OVS controller is not set to the "
                                          "correct pod config value on {}. "
                                          "Config controller: {}, current "
                                          "controller: {}"
                                          "".format(node.address,
                                                    cfg_controller,
                                                    ovs_controller))
        LOG.info("OVS Controller set correctly")
        # Validate ovs controller is connected
        ctrl_search = \
            re.search(r'Controller\s+"tcp:[0-9.]+:6653"\n\s*'
                      r'is_connected:\s*true', out)
        if ctrl_search is None:
            raise ODLReinstallerException("OVS Controller is not connected")
        else:
            LOG.info("OVS is connected to OpenFlow controller")
Code example #13
    def run(self, sys_args, config):
        pod_config = sys_args.pod_config
        odl_artifact = sys_args.odl_artifact
        node_config = utils_yaml.read_dict_from_yaml(pod_config)
        # TODO Add validation of incoming node config
        # self.check_node_config()

        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            node.execute('ovs-vsctl del-controller br-int', as_root=True)
        first_controller = None
        for node in self.nodes:
            if not first_controller:
                if 'controller' in node.execute('echo $HOSTNAME')[0]:
                    first_controller = node
            # Check if ODL runs on this node
            jrv, _ = node.execute('ps aux |grep -v grep |grep karaf',
                                  as_root=True, check_exit_code=[0, 1])
            rv, (_, rc) = node.execute('docker ps | grep opendaylight_api',
                                       as_root=True, check_exit_code=[0, 1])
            if rc == 0:
                LOG.info("ODL is running as docker container")
                node.execute('docker stop opendaylight_api', as_root=True)
                self.odl_node = node
            elif 'java' in jrv:
                LOG.info("ODL is running as systemd service")
                self.odl_node = node
                node.execute('systemctl stop opendaylight', as_root=True)

            if self.odl_node is not None:
                LOG.info("ODL node found: {}".format(self.odl_node.name))
                # rc 5 means the service is not there.
                # rc 4 means the service cannot be found
                node.execute('systemctl stop bgpd', as_root=True,
                             check_exit_code=[0, 4, 5])
                node.execute('systemctl stop zrpcd', as_root=True,
                             check_exit_code=[0, 4, 5])

            self.disconnect_ovs(node)

        # Upgrade ODL
        if not self.odl_node:
            self.odl_node = first_controller
        self.reinstall_odl(self.odl_node, odl_artifact)

        # Wait for ODL to come back up
        full_netvirt_url = "http://{}:8081/diagstatus".format(
            self.odl_node.config['address'])
        counter = 1
        while counter <= 10:
            try:
                self.odl_node.execute("curl --fail {}".format(
                    full_netvirt_url))
                LOG.info("New OpenDaylight NetVirt is Up")
                break
            except processutils.ProcessExecutionError:
                LOG.warning("NetVirt not up. Attempt: {}".format(counter))
                if counter >= 10:
                    LOG.warning("NetVirt not detected as up after 10 "
                                "attempts...deployment may be unstable!")
            counter += 1
            time.sleep(15)

        # Reconnect OVS instances
        LOG.info("Reconnecting OVS instances")
        for node in self.nodes:
            self.connect_ovs(node)
        # Sleep for a few seconds to allow TCP connections to come up
        time.sleep(5)
        # Validate OVS instances
        LOG.info("Validating OVS configuration")
        for node in self.nodes:
            self.validate_ovs(node)
        LOG.info("OpenDaylight Upgrade Successful!")
Code example #14
    def run(self, sys_args, config):
        pod_config = sys_args.pod_config
        odl_artifact = sys_args.odl_artifact
        node_config = utils_yaml.read_dict_from_yaml(pod_config)
        # TODO Add validation of incoming node config
        # self.check_node_config()

        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            node.execute('ovs-vsctl del-controller br-int', as_root=True)
        for node in self.nodes:
            # Check if ODL runs on this node
            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
                                 as_root=True, check_exit_code=[0, 1])
            if 'java' in rv:
                self.odl_node = node
                LOG.info("ODL node found: {}".format(self.odl_node.name))
                node.execute('systemctl stop opendaylight', as_root=True)

            self.disconnect_ovs(node)

        # Upgrade ODL
        self.reinstall_odl(self.odl_node, odl_artifact)

        # Wait for ODL to come back up
        full_netvirt_url = "http://{}:8081/{}".format(
            self.odl_node.config['address'], self.netvirt_url)
        counter = 1
        while counter <= 10:
            try:
                self.odl_node.execute("curl --fail -u admin:admin {}".format(
                    full_netvirt_url))
                LOG.info("New OpenDaylight NetVirt is Up")
                break
            except processutils.ProcessExecutionError:
                LOG.warning("NetVirt not up. Attempt: {}".format(counter))
                if counter >= 10:
                    LOG.warning("NetVirt not detected as up after 10 "
                                "attempts...deployment may be unstable!")
            counter += 1
            time.sleep(10)

        # Reconnect OVS instances
        LOG.info("Reconnecting OVS instances")
        for node in self.nodes:
            self.connect_ovs(node)
        # Sleep for a few seconds to allow TCP connections to come up
        time.sleep(5)
        # Validate OVS instances
        LOG.info("Validating OVS configuration")
        for node in self.nodes:
            self.validate_ovs(node)
        LOG.info("OpenDaylight Upgrade Successful!")
Code example #15
 def create_snapshot(self, orig, path):
     LOG.info('Creating snapshot of %s in %s' % (orig, path))
     execute('qemu-img create -f qcow2 -b %s %s' % (orig, path),
             as_root=True)
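
Newer qemu-img releases warn about, or outright refuse, overlays whose backing format is not stated explicitly; a variant that declares it, assuming the base image is itself qcow2:

 def create_snapshot(self, orig, path):
     LOG.info('Creating snapshot of %s in %s' % (orig, path))
     # '-F qcow2' names the backing file format, which recent
     # qemu-img versions require alongside '-b'
     execute('qemu-img create -f qcow2 -F qcow2 -b %s %s' % (orig, path),
             as_root=True)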
Code example #16
    def run(self, sys_args, config):
        self.env = str(sys_args.env_number).replace('"', '')
        self.BUILD_DIR = '../build/apex-%s' % self.env
        self.cleanup()
        if sys_args.cleanup:
            return
        if not sys_args.cloner_info or not sys_args.snapshot_disks\
                or not sys_args.vjump_hosts:
            LOG.error('--cloner-info, --snapshot-disks and --vjump-hosts '
                      'have to be given unless only --cleanup is used.')
            exit(1)
        node_info = utils_yaml.read_dict_from_yaml(sys_args.cloner_info +
                                                   '/node.yaml')
        nodes = node_info['servers']
        number_of_nodes = len(nodes)
        disk_home = self.BUILD_DIR + '/disks/'
        shutil.mkdir_if_not_exsist(disk_home)
        # Create Snapshots
        for i in range(number_of_nodes):
            disk_name = '%s%s.qcow2' % (self.NODE_NAME, i)
            self.create_snapshot(
                '%s/%s' % (sys_args.snapshot_disks, disk_name),
                '%s/%s' % (disk_home, disk_name))

        # Create bridges if they do not already exist
        for net in self.BRIGES:
            bridge_name = '%s-%s' % (net, self.env)
            if not self.check_if_br_exists(bridge_name):
                LOG.info('Creating bridge %s' % bridge_name)
                execute('ovs-vsctl add-br %s' % bridge_name, as_root=True)

        # Create virtual Nodes
        dom_template = self.TEMPLATES + '/nodes/baremetalX.xml'
        dom_config = self.BUILD_DIR + '/nodes/baremetalX.xml'
        shutil.mkdir_if_not_exsist(self.BUILD_DIR + '/nodes/')
        LOG.info('Creating virtual Nodes')
        for name, node in nodes.items():
            orig_node_name = node['vNode-name']
            node_name = orig_node_name + '-' + self.env
            LOG.info('Create node %s' % node_name)
            node_type = node['type']
            if node_type == 'compute':
                cpu = 4
                mem = 4
            elif node_type == 'controller':
                cpu = 8
                mem = 10
            else:
                raise Exception('Unknown node type! %s' % node_type)
            shutil.copy('to', dom_template, dom_config)
            shutil.replace_string_in_file(dom_config, 'NaMe', node_name)
            disk_full_path = os.path.abspath('%s/%s.qcow2' %
                                             (disk_home, orig_node_name))
            shutil.replace_string_in_file(dom_config, 'DiSk', disk_full_path)
            shutil.replace_string_in_file(dom_config, 'vCpU', str(cpu))
            shutil.replace_string_in_file(dom_config, 'MeMoRy', str(mem))
            shutil.replace_string_in_file(dom_config, 'InDeX', self.env)

            execute('virsh define ' + dom_config)
            execute('virsh start ' + node_name)

            cores_per_environment = 8
            cores = '%s-%s' % (int(self.env) * cores_per_environment,
                               int(self.env) * cores_per_environment +
                               cores_per_environment - 1)
            LOG.info('Pinning vCPU of node %s to cores %s' % (node_name, cores))
            for i in range(cpu):
                execute('virsh vcpupin %(node)s %(nodes_cpu)s %(host_cpu)s' % {
                    'node': node_name,
                    'nodes_cpu': i,
                    'host_cpu': cores
                })

        # Upload cloner_info to jenkins slave
        node_name = 'jenkins%s' % self.env
        jenkins_node_config = utils_yaml.read_dict_from_yaml(
            sys_args.vjump_hosts)['servers']
        if node_name not in jenkins_node_config:
            raise Exception('Jenkins host %s not provided in %s' %
                            (node_name, sys_args.vjump_hosts))
        jenkins_slave = NodeManager(jenkins_node_config).get_node(node_name)
        if 'CLONER_INFO' in os.environ:
            cloner_info_path = os.environ['CLONER_INFO']
        else:
            cloner_info_path = '/home/jenkins/cloner-info/'
        jenkins_slave.copy('to', sys_args.cloner_info, cloner_info_path)
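
As a worked example of the core-pinning arithmetic above: with env_number 2, each environment gets its own 8-core slice, so the computed range is '16-23':

 env = 2  # hypothetical environment number
 cores_per_environment = 8
 cores = '%s-%s' % (env * cores_per_environment,
                    env * cores_per_environment + cores_per_environment - 1)
 print(cores)  # -> 16-23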
Code example #17
 def connect_ovs(node):
     LOG.info('Connecting OpenVSwitch to controller on node %s' % node.name)
     ovs_manager_str = ' '.join(node.config['ovs-managers'])
     node.execute('ovs-vsctl set-manager %s' % ovs_manager_str,
                  as_root=True)
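
ovs-vsctl set-manager accepts several space-separated targets, which is why the configured managers are joined into a single string above; for example (addresses invented):

 managers = ['tcp:172.16.0.10:6640', 'tcp:172.16.0.11:6640']
 print('ovs-vsctl set-manager %s' % ' '.join(managers))
 # -> ovs-vsctl set-manager tcp:172.16.0.10:6640 tcp:172.16.0.11:6640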