Example no. 1
    def prepare_for_ci_pipeline(self):
        node_manager = NodeManager(config=self.node_info['servers'])
        for node in node_manager.get_nodes():

            # Check if ODL runs on this node
            self.node_info['servers'][node.name]['ODL'] = {}
            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
                                 as_root=True, check_exit_code=[0, 1])
            if 'java' in rv:
                self.node_info['servers'][node.name]['ODL']['active'] = True

            if (node.is_dir('/opt/opendaylight') or
                    node.is_file('/opt/opendaylight-was-there')):
                self.node_info['servers'][node.name]['ODL']['dir_exsist'] = \
                    True
                # Remove existing ODL version
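                # leave a marker file so later runs can detect that ODL was installed here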
                node.execute('touch /opt/opendaylight-was-there', as_root=True)
                node.execute('rm -rf /opt/opendaylight', as_root=True)

            # Store ovs controller info
            rv, _ = node.execute('ovs-vsctl get-controller br-int',
                                 as_root=True)
            self.node_info['servers'][node.name]['ovs-controller'] = \
                rv.replace('\n', '')

            # Disconnect ovs
            node.execute('ovs-vsctl del-controller br-int', as_root=True)
Example no. 2
    def run(self, sys_args, config):
        cloner_info_path = sys_args.cloner_info
        SSH_CONFIG['ID_RSA_PATH'] = (cloner_info_path + CONFIG.ID_RSA_PATH +
                                     'id_rsa')
        node_config = utils_yaml.read_dict_from_yaml(
            cloner_info_path + CONFIG.NODE_YAML_PATH)
        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            LOG.info('Disconnecting OpenVSwitch from controller on node %s'
                     % node.name)
            node.execute('ovs-vsctl del-controller br-int', as_root=True)

        for node in self.nodes:
            if 'ODL' in node.config:
                tar_tmp_path = '/tmp/odl-artifact/'
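                # 'active' is set during CI preparation when a running karaf process was found on the node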
                if node.config['ODL'].get('active'):
                    tarball_name = os.path.basename(sys_args.odl_artifact)
                    node.copy('to', sys_args.odl_artifact,
                              '/tmp/odl-artifact/' + tarball_name)
                    node.execute('rm -rf /opt/opendaylight/*', as_root=True)
                    node.execute('mkdir -p /opt/opendaylight', as_root=True)
                    LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
                             % (tarball_name, node.name))
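                    # --strip-components=1 drops the tarball's top-level directory during extraction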
                    node.execute('tar -zxf %s --strip-components=1 -C '
                                 '/opt/opendaylight/'
                                 % (tar_tmp_path + tarball_name), as_root=True)
                    node.execute('chown -R odl:odl /opt/opendaylight',
                                 as_root=True)
                    node.execute('rm -rf ' + tar_tmp_path, as_root=True)
                    LOG.info('Installing and starting OpenDaylight on node %s'
                             % node.name)
                    node.copy('to', 'odl_reinstaller/install_odl.pp',
                              tar_tmp_path)
                    node.execute('puppet apply --modulepath='
                                 '/etc/puppet/modules/ %sinstall_odl.pp '
                                 '--verbose --debug --trace '
                                 '--detailed-exitcodes'
                                 % tar_tmp_path, check_exit_code=[2],
                                 as_root=True)
        # --detailed-exitcodes: Provide extra information about the run via
        # exit codes. If enabled, 'puppet apply' will use the following exit
        # codes:
        # 0: The run succeeded with no changes or failures; the system was
        #    already in the desired state.
        # 1: The run failed.
        # 2: The run succeeded, and some resources were changed.
        # 4: The run succeeded, and some resources failed.
        # 6: The run succeeded, and included both changes and failures.

        for node in self.nodes:
            LOG.info('Connecting OpenVSwitch to controller on node %s'
                     % node.name)
            ovs_controller = node.config.get('ovs-controller')
            if ovs_controller:
                node.execute('ovs-vsctl set-controller br-int %s'
                             % ovs_controller, as_root=True)
Example no. 3
    def run(self, sys_args, config):
        pod_config = sys_args.pod_config
        odl_artifact = sys_args.odl_artifact
        node_config = utils_yaml.read_dict_from_yaml(pod_config)
        # TODO Add validation of incoming node config
        # self.check_node_config()

        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            node.execute('ovs-vsctl del-controller br-int', as_root=True)
        for node in self.nodes:
            # Check if ODL runs on this node
            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
                                 as_root=True, check_exit_code=[0, 1])
            if 'java' in rv:
                self.odl_node = node
                LOG.info("ODL node found: {}".format(self.odl_node.name))
                node.execute('systemctl stop opendaylight', as_root=True)

            self.disconnect_ovs(node)

        # Upgrade ODL
        self.reinstall_odl(self.odl_node, odl_artifact)

        # Wait for ODL to come back up
        full_netvirt_url = "http://{}:8081/{}".format(
            self.odl_node.config['address'], self.netvirt_url)
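        # poll the NetVirt status URL up to 10 times, 10 seconds apart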
        counter = 1
        while counter <= 10:
            try:
                self.odl_node.execute("curl --fail -u admin:admin {}".format(
                    full_netvirt_url))
                LOG.info("New OpenDaylight NetVirt is Up")
                break
            except processutils.ProcessExecutionError:
                LOG.warning("NetVirt not up. Attempt: {}".format(counter))
                if counter >= 10:
                    LOG.warning("NetVirt not detected as up after 10 "
                                "attempts...deployment may be unstable!")
            counter += 1
            time.sleep(10)

        # Reconnect OVS instances
        LOG.info("Reconnecting OVS instances")
        for node in self.nodes:
            self.connect_ovs(node)
        # Sleep for a few seconds to allow TCP connections to come up
        time.sleep(5)
        # Validate OVS instances
        LOG.info("Validating OVS configuration")
        for node in self.nodes:
            self.validate_ovs(node)
        LOG.info("OpenDaylight Upgrade Successful!")
Example no. 4
    def run(self, sys_args, config):
        dest_dir = sys_args.dest_dir if sys_args.dest_dir[-1] == '/'\
            else (sys_args.dest_dir + '/')
        self.node_manager = NodeManager()
        underlcloud = self.node_manager.add_node(
            'undercloud', self.undercloud_dict(sys_args.undercloud_ip))
        # copy all files to undercloud
        underlcloud.copy('to', '.', self.undercloud_root_dir)
        # generate the undercloud yaml
        underlcloud.execute(
            'cd %s; ./tripleo_manager.sh --out ./cloner-info/' %
            self.undercloud_root_dir,
            log_true=True)
        underlcloud.copy('from', dest_dir,
                         self.undercloud_root_dir + '/cloner-info/')
        node_yaml_path = dest_dir + '/cloner-info/' + CONFIG.NODE_YAML_PATH
        node_yaml = utils_yaml.read_dict_from_yaml(node_yaml_path)
        for name, node in node_yaml['servers'].iteritems():
            node['vNode-name'] = self.get_virtual_node_name_from_mac(
                node['orig-ctl-mac'])
        utils_yaml.write_dict_to_yaml(node_yaml, node_yaml_path)
Example no. 5
class DeploymentCloner(Service):

    undercloud_root_dir = '~/DeploymentCloner/'

    def create_cli_parser(self, parser):
        parser.add_argument('--undercloud-ip',
                            help="ip of undercloud",
                            required=True)
        parser.add_argument('--dest-dir',
                            help="where everything should go to",
                            required=True)
        return parser

    def undercloud_dict(self, undercloud_ip):
        return {'address': undercloud_ip, 'user': '******'}

    def run(self, sys_args, config):
        dest_dir = sys_args.dest_dir if sys_args.dest_dir[-1] == '/'\
            else (sys_args.dest_dir + '/')
        self.node_manager = NodeManager()
        underlcloud = self.node_manager.add_node(
            'undercloud', self.undercloud_dict(sys_args.undercloud_ip))
        # copy all files to undercloud
        underlcloud.copy('to', '.', self.undercloud_root_dir)
        # generate the undercloud yaml
        underlcloud.execute(
            'cd %s; ./tripleo_manager.sh --out ./cloner-info/' %
            self.undercloud_root_dir,
            log_true=True)
        underlcloud.copy('from', dest_dir,
                         self.undercloud_root_dir + '/cloner-info/')
        node_yaml_path = dest_dir + '/cloner-info/' + CONFIG.NODE_YAML_PATH
        node_yaml = utils_yaml.read_dict_from_yaml(node_yaml_path)
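        # map each server to its backing libvirt domain via its 'orig-ctl-mac' address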
        for name, node in node_yaml['servers'].iteritems():
            node['vNode-name'] = self.get_virtual_node_name_from_mac(
                node['orig-ctl-mac'])
        utils_yaml.write_dict_to_yaml(node_yaml, node_yaml_path)
        # TODO copy qcow and tar it

    def get_virtual_node_name_from_mac(self, mac):
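        # compare the given MAC against the admin-network interface MAC of every 'baremetal' libvirt domain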
        vNode_names, _ = execute('virsh list|awk \'{print $2}\'', shell=True)
        for node in vNode_names.split('\n'):
            if 'baremetal' in node:
                admin_net_mac, _ = execute(
                    'virsh domiflist %s |grep admin |awk \'{print $5}\'' %
                    node,
                    shell=True)
                if admin_net_mac.replace('\n', '') == mac:
                    return node
        raise Exception(
            'Could not find corresponding virtual node for MAC: %s' % mac)
Example no. 6
    def run(self, sys_args, config):
        self.env = str(sys_args.env_number).replace('"', '')
        self.BUILD_DIR = '../build/apex-%s' % self.env
        self.cleanup()
        if sys_args.cleanup:
            return
        if not sys_args.cloner_info or not sys_args.snapshot_disks\
                or not sys_args.vjump_hosts:
            LOG.error('--cloner-info, --snapshot-disks and --vjump-hosts '
                      'must be given unless only --cleanup is used.')
            exit(1)
        node_info = utils_yaml.read_dict_from_yaml(sys_args.cloner_info +
                                                   '/node.yaml')
        nodes = node_info['servers']
        number_of_nodes = len(nodes)
        disk_home = self.BUILD_DIR + '/disks/'
        shutil.mkdir_if_not_exsist(disk_home)
        # Create Snapshots
        for i in range(number_of_nodes):
            disk_name = '%s%s.qcow2' % (self.NODE_NAME, i)
            self.create_snapshot(
                '%s/%s' % (sys_args.snapshot_disks, disk_name),
                '%s/%s' % (disk_home, disk_name))

        # Create Bridges if not existing
        for net in self.BRIGES:
            bridge_name = '%s-%s' % (net, self.env)
            if not self.check_if_br_exists(bridge_name):
                LOG.info('Creating bridge %s' % bridge_name)
                execute('ovs-vsctl add-br %s' % bridge_name, as_root=True)

        # Create virtual Nodes
        dom_template = self.TEMPLATES + '/nodes/baremetalX.xml'
        dom_config = self.BUILD_DIR + '/nodes/baremetalX.xml'
        shutil.mkdir_if_not_exsist(self.BUILD_DIR + '/nodes/')
        LOG.info('Creating virtual Nodes')
        for name, node in nodes.iteritems():
            orig_node_name = node['vNode-name']
            node_name = orig_node_name + '-' + self.env
            LOG.info('Create node %s' % node_name)
            type = node['type']
            if type == 'compute':
                cpu = 4
                mem = 4
            elif type == 'controller':
                cpu = 8
                mem = 10
            else:
                raise Exception('Unknown node type! %s' % type)
            shutil.copy('to', dom_template, dom_config)
            shutil.replace_string_in_file(dom_config, 'NaMe', node_name)
            disk_full_path = os.path.abspath('%s/%s.qcow2' %
                                             (disk_home, orig_node_name))
            shutil.replace_string_in_file(dom_config, 'DiSk', disk_full_path)
            shutil.replace_string_in_file(dom_config, 'vCpU', str(cpu))
            shutil.replace_string_in_file(dom_config, 'MeMoRy', str(mem))
            shutil.replace_string_in_file(dom_config, 'InDeX', self.env)

            execute('virsh define ' + dom_config)
            execute('virsh start ' + node_name)

            cores_per_environment = 8
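            # each environment gets a dedicated block of 8 host cores: env N uses cores 8*N .. 8*N+7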
            cores = '%s-%s' % (int(self.env) * 8,
                               int(self.env) * 8 + cores_per_environment - 1)
            LOG.info('Pinning vCPUs of node %s to cores %s' % (node_name, cores))
            for i in range(cpu):
                execute('virsh vcpupin %(node)s %(nodes_cpu)s %(host_cpu)s' % {
                    'node': node_name,
                    'nodes_cpu': i,
                    'host_cpu': cores
                })

        # Upload cloner_info to jenkins slave
        node_name = 'jenkins%s' % self.env
        jenkins_node_config = utils_yaml.read_dict_from_yaml(
            sys_args.vjump_hosts)['servers']
        if node_name not in jenkins_node_config:
            raise Exception('Jenkins host %s not provided in %s' %
                            (node_name, sys_args.vjump_hosts))
        jenkins_slave = NodeManager(jenkins_node_config).get_node(node_name)
        if 'CLONER_INFO' in os.environ:
            cloner_info_path = os.environ['CLONER_INFO']
        else:
            cloner_info_path = '/home/jenkins/cloner-info/'
        jenkins_slave.copy('to', sys_args.cloner_info, cloner_info_path)
Example no. 7
    def run(self, sys_args, config):
        pod_config = sys_args.pod_config
        odl_artifact = sys_args.odl_artifact
        node_config = utils_yaml.read_dict_from_yaml(pod_config)
        # TODO Add validation of incoming node config
        # self.check_node_config()

        # copy ODL to all nodes where it needs to be copied
        self.nodes = NodeManager(node_config['servers']).get_nodes()
        for node in self.nodes:
            node.execute('ovs-vsctl del-controller br-int', as_root=True)
        first_controller = None
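        # remember the first controller node as a fallback in case no running ODL instance is found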
        for node in self.nodes:
            if not first_controller:
                if 'controller' in node.execute('echo $HOSTNAME')[0]:
                    first_controller = node
            # Check if ODL runs on this node
            jrv, _ = node.execute('ps aux |grep -v grep |grep karaf',
                                  as_root=True, check_exit_code=[0, 1])
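            # ODL may run either as a docker container (opendaylight_api) or as a systemd service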
            rv, (_, rc) = node.execute('docker ps | grep opendaylight_api',
                                       as_root=True, check_exit_code=[0, 1])
            if rc == 0:
                LOG.info("ODL is running as docker container")
                node.execute('docker stop opendaylight_api', as_root=True)
                self.odl_node = node
            elif 'java' in jrv:
                LOG.info("ODL is running as systemd service")
                self.odl_node = node
                node.execute('systemctl stop opendaylight', as_root=True)

            if self.odl_node is not None:
                LOG.info("ODL node found: {}".format(self.odl_node.name))
                # rc 5 means the service is not there.
                # rc 4 means the service cannot be found
                node.execute('systemctl stop bgpd', as_root=True,
                             check_exit_code=[0, 4, 5])
                node.execute('systemctl stop zrpcd', as_root=True,
                             check_exit_code=[0, 4, 5])

            self.disconnect_ovs(node)

        # Upgrade ODL
        if not self.odl_node:
            self.odl_node = first_controller
        self.reinstall_odl(self.odl_node, odl_artifact)

        # Wait for ODL to come back up
        full_netvirt_url = "http://{}:8081/diagstatus".format(
            self.odl_node.config['address'])
        counter = 1
        while counter <= 10:
            try:
                self.odl_node.execute("curl --fail {}".format(
                    full_netvirt_url))
                LOG.info("New OpenDaylight NetVirt is Up")
                break
            except processutils.ProcessExecutionError:
                LOG.warning("NetVirt not up. Attempt: {}".format(counter))
                if counter >= 10:
                    LOG.warning("NetVirt not detected as up after 10 "
                                "attempts...deployment may be unstable!")
            counter += 1
            time.sleep(15)

        # Reconnect OVS instances
        LOG.info("Reconnecting OVS instances")
        for node in self.nodes:
            self.connect_ovs(node)
        # Sleep for a few seconds to allow TCP connections to come up
        time.sleep(5)
        # Validate OVS instances
        LOG.info("Validating OVS configuration")
        for node in self.nodes:
            self.validate_ovs(node)
        LOG.info("OpenDaylight Upgrade Successful!")