Example #1
def enable_nat_network():
    """Enable NAT network for VMs

    This function creates a NAT network to allow VM external connection
    needed for download docker images.

    """

    nat_xml = os.path.join(THIS_PATH, 'nat-network.xml')
    nat_net_name = 'stx-nat'
    bash.run_command('sudo virsh net-define {}'.format(nat_xml))
    bash.run_command('sudo virsh net-start {}'.format(nat_net_name))
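
The contents of nat-network.xml are not shown here; a minimal sketch of the
libvirt network definition that virsh net-define expects might look like
this (the network name must match nat_net_name above; the bridge name and
subnet are hypothetical):

# Sketch of a minimal nat-network.xml; only the <name> element is known
# from the code above, the bridge and addresses are illustrative.
NAT_NETWORK_XML = """\
<network>
  <name>stx-nat</name>
  <forward mode='nat'/>
  <bridge name='virbr-stx' stp='on' delay='0'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
    </dhcp>
  </ip>
</network>
"""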
Example #2
def isfile(path, sudo=True):
    """Validates if a file exist in a host.

    :param path: the absolute path of the file to be validated
    :param sudo: this needs to be set to True for files that require
    root permission
    :return: True if the file exists, False otherwise
    """
    status, _ = bash.run_command('{prefix}test -f {path}'.format(
        path=path, prefix='sudo ' if sudo else ''))
    # a zero exit status from test(1) means the file exists
    return not status
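
A quick usage sketch, assuming bash.run_command returns an
(exit_status, output) tuple as in the other examples (the path is
hypothetical):

# hypothetical caller: only act on the file when it is actually there
if isfile('/etc/libvirt/qemu.conf'):
    print('qemu.conf is present')
else:
    print('qemu.conf is missing')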
Example #3
def setup_controller_computes(iso_file, configurations):
    # define the module's variables
    libvirt_images_path = '/var/lib/libvirt/images'
    default_xml = '/etc/libvirt/qemu/networks/autostart/default.xml'
    conf_file = '/etc/libvirt/qemu.conf'

    if os.path.isfile(default_xml):
        # deleting default libvirt networks configuration
        bash.run_command('sudo rm -rf {}'.format(default_xml),
                         raise_exception=True)

    parameters = ['user = "******"', 'group = "root"']

    for param in parameters:
        status, output = bash.run_command(
            "sudo cat {0} | grep -w '^{1}'".format(conf_file, param))
        if status:
            # a non-zero status means the param is not yet in conf_file
            bash.run_command(
                "echo '{0}' | sudo tee -a {1}".format(param, conf_file),
                raise_exception=True)

    # ===================================
    # configuring the network interfaces
    # ===================================
    network.delete_network_interfaces()
    network.configure_network_interfaces()

    if os.path.exists(os.path.join(THIS_PATH, 'vms')):
        rmtree(os.path.join(THIS_PATH, 'vms'))

    os.mkdir(os.path.join(THIS_PATH, 'vms'))

    # checking how many configurations the yaml file has
    configurations_keys = configurations.keys()
    regex = re.compile('configuration_.')
    total_configurations = list(filter(regex.match, configurations_keys))
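    # e.g. with keys ['configuration_0', 'configuration_1', 'other_key'],
    # total_configurations becomes ['configuration_0', 'configuration_1']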

    # ----------------------------------------------------------
    # iterating over the total configurations setup in yaml file
    # ----------------------------------------------------------

    for configuration in range(0, len(total_configurations)):
        # iterating over the configurations
        current_controller_name = 'controller-{}'.format(configuration)
        # controller falls back to an empty dict if either key is missing
        controller = configurations.get(
            'configuration_{}'.format(configuration), {}).get(
                'controller-{}'.format(configuration), {})
        controller_partition_a = int(controller.get(
            'controller_{}_partition_a'.format(configuration)))
        controller_partition_b = int(controller.get(
            'controller_{}_partition_b'.format(configuration)))
        controller_memory = int(controller.get(
            'controller_{}_memory_size'.format(configuration)))
        controller_system_cores = int(controller.get(
            'controller_{}_system_cores'.format(configuration)))

        # checking if the current controller exists in order to delete it
        status, _ = bash.run_command('sudo virsh domstate {}'.format(
            current_controller_name))

        if not status:
            LOG.info('{}: is defined, destroying and undefining it'.format(
                current_controller_name))
            bash.run_command('sudo virsh destroy {} > /dev/null 2>&1'.format(
                current_controller_name))
            bash.run_command('sudo virsh undefine {} > /dev/null 2>&1'.format(
                current_controller_name), raise_exception=True)

        # deleting both controller's partitions from the system (if any)
        LOG.info('deleting: {0}/{1}-0.img'.format(
            libvirt_images_path, current_controller_name))
        bash.run_command('sudo rm -rf {0}/{1}-0.img'.format(
            libvirt_images_path, current_controller_name),
                         raise_exception=True)
        LOG.info('deleting: {0}/{1}-1.img'.format(
            libvirt_images_path, current_controller_name))
        bash.run_command('sudo rm -rf {0}/{1}-1.img'.format(
            libvirt_images_path, current_controller_name),
                         raise_exception=True)

        # creating both controller's partitions in the system
        bash.run_command(
            'sudo qemu-img create -f qcow2 {0}/{1}-0.img {2}G'.format(
                libvirt_images_path, current_controller_name,
                controller_partition_a), raise_exception=True)
        bash.run_command(
            'sudo qemu-img create -f qcow2 {0}/{1}-1.img {2}G'.format(
                libvirt_images_path, current_controller_name,
                controller_partition_b), raise_exception=True)

        # Only controller-0 needs to have the ISO file in order to boot the
        # subsequent controllers
        check_controller = configuration == 0

        if check_controller:
            # this means it is controller-0
            bash.run_command(
                'sed -e "s,NAME,{0}," '
                '-e "s,ISO,{1}," '
                '-e "s,UNIT,MiB," '
                '-e "s,MEMORY,{2}," '
                '-e "s,CORES,{3}," '
                '-e "s,DISK0,{4}/{0}-0.img," '
                '-e "s,DISK1,{4}/{0}-1.img," '
                '-e "s,destroy,restart," {5}/master_controller.xml > '
                '{5}/vms/{0}.xml'.format(
                    current_controller_name, iso_file, controller_memory,
                    controller_system_cores, libvirt_images_path, THIS_PATH),
                raise_exception=True)
        else:
            # this means it is controller-N
            # modifying xml parameters for the current controller
            bash.run_command(
                'sed -e "s,NAME,{0}," '
                '-e "s,UNIT,MiB," '
                '-e "s,MEMORY,{1}," '
                '-e "s,CORES,{2}," '
                '-e "s,DISK0,{3}/{0}-0.img," '
                '-e "s,DISK1,{3}/{0}-1.img," '
                '-e "s,destroy,restart," {4}/slave_controller.xml > '
                '{4}/vms/{0}.xml'.format(
                    current_controller_name, controller_memory,
                    controller_system_cores, libvirt_images_path, THIS_PATH),
                raise_exception=True)
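
        # the sed calls above fill a controller XML template: placeholders
        # NAME, ISO, UNIT, MEMORY, CORES, DISK0 and DISK1 are substituted
        # and the result is written to a per-domain file under vms/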

        # checking how many computes the current controller has
        compute_keys = configurations.get('configuration_{}'.format(
            configuration), {}).keys()
        regex = re.compile('controller-{0}-compute-.'.format(configuration))
        total_computes = list(filter(regex.match, compute_keys))

        for compute_number in range(0, len(total_computes)):
            current_compute_number = 'controller-{0}-compute-{1}'.format(
                configuration, compute_number)
            # compute falls back to an empty dict if either key is missing
            compute = configurations.get('configuration_{}'.format(
                configuration), {}).get(
                    'controller-{0}-compute-{1}'.format(
                        configuration, compute_number), {})
            compute_partition_a = int(compute.get(
                'controller_{0}_compute_{1}_partition_a'.format(
                    configuration, compute_number)))
            compute_partition_b = int(compute.get(
                'controller_{0}_compute_{1}_partition_b'.format(
                    configuration, compute_number)))
            compute_memory = int(compute.get(
                'controller_{0}_compute_{1}_memory_size'.format(
                    configuration, compute_number)))
            compute_system_cores = int(compute.get(
                'controller_{0}_compute_{1}_system_cores'.format(
                    configuration, compute_number)))

            # checking if the compute exists
            code, output = bash.run_command('sudo virsh domstate {}'.format(
                current_compute_number))

            if not code and output != 'shut off':
                LOG.info('{}: is running, shutting it down and destroying it'
                         .format(current_compute_number))
                bash.run_command('sudo virsh destroy {}'.format(
                    current_compute_number))
                bash.run_command('sudo virsh undefine {}'.format(
                    current_compute_number), raise_exception=True)

            # removing the compute's partitions (if any); the partitions
            # created below are named {name}-0.img and {name}-1.img
            bash.run_command('sudo rm -rf {0}/{1}-0.img {0}/{1}-1.img'.format(
                libvirt_images_path, current_compute_number),
                             raise_exception=True)
            bash.run_command(
                'cp {0}/compute.xml {0}/vms/{1}.xml'.format(
                    THIS_PATH, current_compute_number),
                raise_exception=True)

            # creating both compute's partitions in the system
            # Notes:
            # 1. The partitions to be created are hardcoded in the following
            #    lines
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-0.img {2}G'.format(
                    libvirt_images_path, current_compute_number,
                    compute_partition_a), raise_exception=True)
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-1.img {2}G'.format(
                    libvirt_images_path, current_compute_number,
                    compute_partition_b), raise_exception=True)

            # modifying xml compute parameters
            bash.run_command(
                'sed -i -e "s,NAME,{0}," '
                '-e "s,UNIT,MiB," '
                '-e "s,MEMORY,{1}," '
                '-e "s,CORES,{2}," '
                '-e "s,destroy,restart," '
                '-e "s,DISK0,{3}/{0}-0.img," '
                '-e "s,DISK1,{3}/{0}-1.img," '
                '{4}/vms/{0}.xml'.format(
                    current_compute_number, compute_memory,
                    compute_system_cores, libvirt_images_path, THIS_PATH),
                raise_exception=True)
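
            # note: unlike the controller XML above, which sed writes into
            # vms/ via redirection, the compute template was first copied
            # into vms/ and is patched in place with 'sed -i'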

            # the commented command below would instead create a transient
            # domain: virsh create defines AND starts it, and the domain
            # disappears after shutdown
            # bash.run_command('sudo virsh create vms/{}.xml'.format(
            #     current_compute_number))

            # creating the computes according to the XML
            # the following command defines the domain without starting it
            # and makes it persistent even after shutdown
            bash.run_command('sudo virsh define {0}/vms/{1}.xml'.format(
                THIS_PATH, current_compute_number))

        # the commented command below would instead create a transient
        # domain: virsh create defines AND starts it, and the domain
        # disappears after shutdown
        # bash.run_command('sudo virsh create vms/{}.xml'.format(
        #   current_controller_name))

        # the following command defines the domain without starting it
        # and makes it persistent even after shutdown
        bash.run_command('sudo virsh define {0}/vms/{1}.xml'.format(
            THIS_PATH, current_controller_name))

        # starting only the controller-0 which is the one with ISO in the xml
        start_controller = configuration == 0
        if start_controller:
            # the following command start a domain
            bash.run_command('sudo virsh start {}'.format(
                current_controller_name), raise_exception=True)

    # opening the graphical interface
    if bash.is_process_running('virt-manager'):
        # for virt-manager to pick up the new configuration from the
        # yaml file, it has to be killed and started again.
        LOG.info('Virtual Machine Manager is active, killing it ...')
        bash.run_command('sudo kill -9 $(pgrep -x virt-manager)',
                         raise_exception=True)

    # opening Virtual Machine Manager
    bash.run_command('sudo virt-manager', raise_exception=True)
    # opening the controller console
    bash.run_command('virt-manager -c qemu:///system --show-domain-console '
                     'controller-0', raise_exception=True)
    exit_dict_status(0)
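
For reference, this version of the function walks a nested yaml structure
roughly shaped like the sketch below (hypothetical, inferred from the key
names used above; the sizes are illustrative):

configurations = {
    'configuration_0': {
        'controller-0': {
            'controller_0_partition_a': 200,
            'controller_0_partition_b': 200,
            'controller_0_memory_size': 16384,
            'controller_0_system_cores': 4,
        },
        'controller-0-compute-0': {
            'controller_0_compute_0_partition_a': 100,
            'controller_0_compute_0_partition_b': 100,
            'controller_0_compute_0_memory_size': 8192,
            'controller_0_compute_0_system_cores': 2,
        },
    },
}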
Example #4
def setup_controller_computes(iso_file, configurations):
    """Setup the configurations in the host

    This function setup the network and the configurations from yaml in the
    current host.

    :param iso_file: which is the absolute/relative path to the iso file which
    will be setup in the controller(s) node(s).
    :param configurations: which is the object that has all the configurations
        to be setup in the system.
    """
    # define the module's variables
    libvirt_images_path = '/var/lib/libvirt/images'

    # ----------------------------------
    # customize Qemu configuration files
    # ----------------------------------
    utils.qemu_configuration_files()

    # ----------------------------------
    # configuring the network interfaces
    # ----------------------------------
    network.delete_network_interfaces()
    enable_nat_network()
    network.configure_network_interfaces()

    # ------------------------------
    # clean qemu/libvirt environment
    # ------------------------------
    utils.clean_qemu_environment()

    if os.path.exists(os.path.join(THIS_PATH, 'vms')):
        rmtree(os.path.join(THIS_PATH, 'vms'))

    os.mkdir(os.path.join(THIS_PATH, 'vms'))

    # ----------------------------------------------------------
    # iterating over the total configurations setup in yaml file
    # ----------------------------------------------------------

    for controller, values in configurations.get('Controllers').items():
        # iterating over the controllers
        controller_partition_a = int(values.get('partition_a'))
        controller_partition_b = int(values.get('partition_b'))
        controller_partition_d = int(values.get('partition_d'))
        controller_memory = int(values.get('memory_size'))
        controller_system_cores = int(values.get('system_cores'))

        # creating controller's partitions in the system
        bash.run_command(
            'sudo qemu-img create -f qcow2 {0}/{1}-0.img {2}G'.format(
                libvirt_images_path, controller, controller_partition_a),
            raise_exception=True)
        bash.run_command(
            'sudo qemu-img create -f qcow2 {0}/{1}-1.img {2}G'.format(
                libvirt_images_path, controller, controller_partition_b),
            raise_exception=True)
        bash.run_command(
            'sudo qemu-img create -f qcow2 {0}/{1}-2.img {2}G'.format(
                libvirt_images_path, controller, controller_partition_d),
            raise_exception=True)

        # Only controller-0 needs to have the ISO file in order to boot the
        # subsequent controllers

        if controller == 'controller-0':
            bash.run_command(
                'sed -e "s,NAME,{0}," '
                '-e "s,ISO,{1}," '
                '-e "s,UNIT,MiB," '
                '-e "s,MEMORY,{2}," '
                '-e "s,CORES,{3}," '
                '-e "s,DISK0,{4}/{0}-0.img," '
                '-e "s,DISK1,{4}/{0}-1.img," '
                '-e "s,DISK2,{4}/{0}-2.img," '
                '-e "s,destroy,restart," {5}/master_controller.xml > '
                '{5}/vms/{0}.xml'.format(controller, iso_file,
                                         controller_memory,
                                         controller_system_cores,
                                         libvirt_images_path, THIS_PATH),
                raise_exception=True)
        else:
            # this means it is controller-N
            # modifying xml parameters for the current controller
            bash.run_command(
                'sed -e "s,NAME,{0}," '
                '-e "s,UNIT,MiB," '
                '-e "s,MEMORY,{1}," '
                '-e "s,CORES,{2}," '
                '-e "s,DISK0,{3}/{0}-0.img," '
                '-e "s,DISK1,{3}/{0}-1.img," '
                '-e "s,DISK2,{3}/{0}-2.img," '
                '-e "s,destroy,restart," {4}/slave_controller.xml > '
                '{4}/vms/{0}.xml'.format(controller, controller_memory,
                                         controller_system_cores,
                                         libvirt_images_path, THIS_PATH),
                raise_exception=True)

        # the following command defines the domain without starting it and
        # makes it persistent even after shutdown
        bash.run_command('sudo virsh define {0}/vms/{1}.xml'.format(
            THIS_PATH, controller))

        # starting only the controller-0 which is the one with ISO in the xml
        if controller == 'controller-0':
            # the following command start a domain
            bash.run_command('sudo virsh start {}'.format(controller),
                             raise_exception=True)

    if 'Computes' in configurations:
        for compute, values in configurations.get('Computes').items():
            # iterating over the computes
            compute_partition_a = int(values.get('partition_a'))
            compute_partition_b = int(values.get('partition_b'))
            compute_memory = int(values.get('memory_size'))
            compute_system_cores = int(values.get('system_cores'))

            # copy the compute.xml to vms folder
            origin = os.path.join(THIS_PATH, 'compute.xml')
            destination = os.path.join(THIS_PATH, 'vms',
                                       '{}.xml'.format(compute))

            copy(origin, destination)

            # creating both compute's partitions in the system
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-0.img {2}G'.format(
                    libvirt_images_path, compute, compute_partition_a),
                raise_exception=True)
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-1.img {2}G'.format(
                    libvirt_images_path, compute, compute_partition_b),
                raise_exception=True)

            # modifying xml compute parameters
            bash.run_command('sed -i -e "s,NAME,{0}," '
                             '-e "s,UNIT,MiB," '
                             '-e "s,MEMORY,{1}," '
                             '-e "s,CORES,{2}," '
                             '-e "s,destroy,restart," '
                             '-e "s,DISK0,{3}/{0}-0.img," '
                             '-e "s,DISK1,{3}/{0}-1.img," '
                             '{4}/vms/{0}.xml'.format(compute, compute_memory,
                                                      compute_system_cores,
                                                      libvirt_images_path,
                                                      THIS_PATH),
                             raise_exception=True)

            # creating the computes according to the XML; the following
            # command defines the domain without starting it and makes it
            # persistent even after shutdown
            bash.run_command('sudo virsh define {0}/vms/{1}.xml'.format(
                THIS_PATH, compute))

    if 'Storages' in configurations:
        for storage, values in configurations.get('Storages').items():
            # iterating over the storage
            storage_partition_a = int(values.get('partition_a'))
            storage_partition_b = int(values.get('partition_b'))
            storage_memory = int(values.get('memory_size'))
            storage_system_cores = int(values.get('system_cores'))

            # copy the storage.xml to vms folder
            origin = os.path.join(THIS_PATH, 'storage.xml')
            destination = os.path.join(THIS_PATH, 'vms',
                                       '{}.xml'.format(storage))

            copy(origin, destination)

            # creating both storage's partitions in the system
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-0.img {2}G'.format(
                    libvirt_images_path, storage, storage_partition_a),
                raise_exception=True)
            bash.run_command(
                'sudo qemu-img create -f qcow2 {0}/{1}-1.img {2}G'.format(
                    libvirt_images_path, storage, storage_partition_b),
                raise_exception=True)

            # modifying xml storage parameters
            bash.run_command('sed -i -e "s,NAME,{0}," '
                             '-e "s,UNIT,MiB," '
                             '-e "s,MEMORY,{1}," '
                             '-e "s,CORES,{2}," '
                             '-e "s,destroy,restart," '
                             '-e "s,DISK0,{3}/{0}-0.img," '
                             '-e "s,DISK1,{3}/{0}-1.img," '
                             '{4}/vms/{0}.xml'.format(storage, storage_memory,
                                                      storage_system_cores,
                                                      libvirt_images_path,
                                                      THIS_PATH),
                             raise_exception=True)

            # creating the storage according to the XML; the following
            # command defines the domain without starting it and makes it
            # persistent even after shutdown
            bash.run_command('sudo virsh define {0}/vms/{1}.xml'.format(
                THIS_PATH, storage))

    # opening the graphical interface
    if bash.is_process_running('virt-manager'):
        # for virt-manager to pick up the new configuration from the yaml
        # file, it has to be killed and started again.
        LOG.info('Virtual Machine Manager is active, killing it ...')
        bash.run_command('sudo kill -9 $(pgrep -x virt-manager)',
                         raise_exception=True)

    # opening Virtual Machine Manager
    bash.run_command('sudo virt-manager', raise_exception=True)
    # opening the controller console
    bash.run_command(
        'virt-manager -c qemu:///system --show-domain-console '
        'controller-0',
        raise_exception=True)
    exit_dict_status(0)
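
This version flattens the yaml schema into top-level sections; a
hypothetical configurations object matching the keys read above would look
like this (sizes illustrative):

configurations = {
    'Controllers': {
        'controller-0': {'partition_a': 200, 'partition_b': 200,
                         'partition_d': 50, 'memory_size': 16384,
                         'system_cores': 4},
    },
    'Computes': {
        'compute-0': {'partition_a': 100, 'partition_b': 100,
                      'memory_size': 8192, 'system_cores': 2},
    },
    'Storages': {
        'storage-0': {'partition_a': 100, 'partition_b': 100,
                      'memory_size': 8192, 'system_cores': 2},
    },
}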
Example #5
def grub_checker(iso, mode, grub_option, grub_cmd):
    """Check a grub cmd boot line against the ones in the StarlingX ISO file

    This function compares the grub cmd boot line built by the
    get_cmd_boot_line function against a StarlingX ISO file in order to
    check whether it is still valid.
    Basically, it checks that all the boot arguments found in the ISO are
    contained in the line built by get_cmd_boot_line.

    :param iso: the iso to mount.
    :param mode: the mode to check the grub cmd line, this can be vbios/uefi.
    :param grub_option: the boot line to compare which could have the
        following values:
        - 0: Standard Controller Configuration > Serial Console >
             Standard Security Boot Profile.
        - S0: Standard Controller Configuration > Serial Console > Extended
              Security Boot Profile
        - 1: Standard Controller Configuration > Graphical Console >
             Standard Security Boot Profile
        - S1: Standard Controller Configuration > Graphical Console >
              Extended Security Boot Profile
        - 2: All-in-one Controller Configuration > Serial Console >
             Standard Security Boot Profile
        - S2: All-in-one Controller Configuration > Serial Console >
              Extended Security Boot Profile
        - 3: All-in-one Controller Configuration > Graphical Console >
             Standard Security Boot Profile
        - S3: All-in-one Controller Configuration > Graphical Console >
              Extended Security Boot Profile
        - 4: All-in-one (lowlatency) Controller Configuration >
             Serial Console > Standard Security Boot Profile
        - S4: All-in-one (lowlatency) Controller Configuration >
              Serial Console > Extended Security Boot Profile
        - 5: All-in-one (lowlatency) Controller Configuration >
             Graphical Console > Standard Security Boot Profile
        - S5: All-in-one (lowlatency) Controller Configuration >
              Graphical Console > Extended Security Boot Profile
    :param grub_cmd: the cmd line built from get_cmd_boot_line function
    :return:
        - match: if the grub_cmd has all the elements from the iso
        - mismatch: if the grub_cmd does not have all the elements from the iso
    """
    allowed_grub_options = [
        '0', 'S0', '1', 'S1', '2', 'S2', '3', 'S3', '4', 'S4', '5', 'S5']

    if grub_option not in allowed_grub_options:
        raise KeyError('grub boot number does not exist')

    mount_point = '/tmp/cdrom'

    if os.path.exists(mount_point) and os.path.ismount(mount_point):
        bash.run_command('sudo umount -l {}'.format(mount_point),
                         raise_exception=True)
    elif not os.path.exists(mount_point):
        os.makedirs(mount_point)

    # mounting the iso file
    bash.run_command('sudo mount -o loop {} {}'.format(iso, mount_point),
                     raise_exception=True)

    if mode == 'vbios':
        grub = '{}/syslinux.cfg'.format(mount_point)
        regex = '-e "label [0-9]" -e "label [A-Z][0-9]" -e append'
        _, grub_output = bash.run_command('grep {} {}'.format(regex, grub))
        grub_option_list = grub_output.decode('utf-8').split('\n')

        labels = []
        boot_lines = []

        # filling the lists with the label names and their append lines
        for line in grub_option_list:
            current_line = line.strip()
            if current_line.startswith('label'):
                labels.append(current_line.replace('label ', ''))
            elif current_line.startswith('append'):
                boot_lines.append(current_line)

        # building a dict that maps each grub label to its append line
        grub_dict = dict(zip(labels, boot_lines))
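
        # e.g. a syslinux.cfg fragment like
        #     label 0
        #       append rootwait console=ttyS0,115200 inst.text
        # would yield grub_dict['0'] == 'append rootwait
        # console=ttyS0,115200 inst.text' (illustrative values only)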

        # comparing the grub boot line from the ISO with the one obtained from
        # get_cmd_boot_line function
        iso_boot_line = grub_dict[grub_option].split()

        # removing console arguments from iso_boot_line; deleting by
        # ascending index would shift the remaining indexes, so the list is
        # rebuilt instead
        iso_boot_line = [
            word for word in iso_boot_line
            if not word.startswith('console')
        ]

        if set(grub_cmd.split()).issuperset(set(iso_boot_line)):
            status = 'match'
        else:
            status = 'mismatch'
            diff = [
                element for element in iso_boot_line
                if element not in grub_cmd.split()]
            LOG.warning('params missing from the built grub cmd line')
            for element in diff:
                LOG.warning(element)

    elif mode == 'uefi':
        raise NotImplementedError
    else:
        raise IndexError('{}: not allowed'.format(mode))

    # unmounting the mount_point
    bash.run_command('sudo umount -l {}'.format(mount_point),
                     raise_exception=True)

    return status
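
A usage sketch (hypothetical iso path; grub_cmd is assumed to come from the
get_cmd_boot_line function mentioned in the docstring):

grub_cmd = get_cmd_boot_line()
status = grub_checker('stx.iso', 'vbios', '2', grub_cmd)
if status == 'mismatch':
    LOG.warning('get_cmd_boot_line no longer matches the ISO boot entries')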