Example no. 1
import time

from ovs_extensions.log.logger import Logger  # Assumed import path, taken from the type hints used in the examples below


class timer(object):
    """
    Timer is a context-manager that prints the time it took to execute a piece of code.
    """
    def __init__(self, identification, force_ms=False):
        """
        Initializes the context
        """
        self.start = None
        self._logger = Logger('extensions')
        self.identification = identification
        self.force_ms = force_ms

    def __enter__(self):
        self.start = time.time()

    def __exit__(self, *args):
        _ = args
        duration = time.time() - self.start
        if duration > 2 and self.force_ms is not True:
            self._logger.debug('{0} took {1:.5f}s'.format(
                self.identification, duration))
        else:
            self._logger.debug('{0} took {1:.5f}ms'.format(
                self.identification, duration * 1000))
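
A minimal usage sketch for the context manager above (assuming the imports shown with the class):

# Hypothetical usage: the identification string ends up in the log line
with timer('cache-refresh'):
    time.sleep(0.05)  # placeholder for the code being timed

with timer('tiny-operation', force_ms=True):  # force the duration to be logged in milliseconds
    pass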
Example no. 2
def __init__(self, identification, force_ms=False):
    """
    Initializes the context
    """
    self.start = None
    self._logger = Logger('extensions')
    self.identification = identification
    self.force_ms = force_ms

def __init__(self, name, wait=None):
    """
    Creates a volatile mutex object
    """
    self.name = name
    self._wait = wait
    self._start = 0
    self._logger = Logger('extensions')  # Instantiated by classes inheriting this class
    self._has_lock = False
    self._volatile = self._get_volatile_client()

def __init__(self, name, wait=None):
    """
    Creates a file mutex object
    """
    self.name = name
    self._has_lock = False
    self._start = 0
    self._logger = Logger('extensions')
    self._handle = open(self.key(), 'w')
    self._wait = wait
    try:
        os.chmod(
            self.key(), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
            | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
    except OSError:
        pass
Example no. 5
class PackageManagerBase(object):
    """
    Contains all logic related to Debian packages (used in e.g. Debian, Ubuntu)
    """
    __metaclass__ = ABCMeta

    _logger = Logger('extensions')

    def __init__(self, package_info):
        self.package_info = package_info

    @staticmethod
    def get_release_name(client=None):
        # type: (SSHClient) -> str
        """
        Get the release name based on the name of the repository
        :param client: Client on which to check the release name
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :return: Release name
        :rtype: str
        """
        raise NotImplementedError()

    @abstractmethod
    def get_installed_versions(self, client=None, package_names=None):
        # type: (SSHClient, List[str]) -> Dict[str, str]
        """
        Retrieve currently installed versions of the packages provided (or all if none provided)
        :param client: Client on which to check the installed versions
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param package_names: Name of the packages to check
        :type package_names: list
        :return: Package installed versions
        :rtype: dict
        """

    @classmethod
    def get_candidate_versions(cls, client, package_names):
        # type: (SSHClient, List[str]) -> Dict[str, str]
        """
        Retrieve the versions candidate for installation of the packages provided
        :param client: Root client on which to check the candidate versions
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param package_names: Name of the packages to check
        :type package_names: list
        :return: Package candidate versions
        :rtype: dict
        """
        raise NotImplementedError()

    def get_binary_versions(self, client, package_names=None):
        """
        Retrieve the versions for the binaries related to the package_names
        :param client: Root client on which to retrieve the binary versions
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param package_names: Names of the packages
        :type package_names: list
        :return: Binary versions
        :rtype: dict
        """
        if package_names is None:
            package_names = set()
            for names in self.package_info['binaries'].itervalues():
                package_names = package_names.union(names)

        versions = collections.OrderedDict()
        version_commands = self.package_info['version_commands']
        for package_name in sorted(package_names):
            if package_name not in version_commands:
                raise ValueError(
                    'Only the following packages in the OpenvStorage repository have a binary file: "{0}"'
                    .format('", "'.join(sorted(version_commands.keys()))))
            versions[package_name] = LooseVersion(
                client.run(version_commands[package_name],
                           allow_insecure=True))
        return versions

    @abstractmethod
    def install(self, package_name, client):
        # type: (str, SSHClient) -> None
        """
        Install the specified package
        :param package_name: Name of the package to install
        :type package_name: str
        :param client: Root client on which to execute the installation of the package
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :return: None
        """

    @staticmethod
    def update(client):
        # type: (SSHClient) -> None
        """
        Update the package information
        :param client: Root client on which to update the package information
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :return: None
        """
        raise NotImplementedError()

    @staticmethod
    def validate_client(client,
                        msg='Only the "root" user can manage packages'):
        # type: (SSHClient, str) -> None
        """
        Validate if the client can manage packages
        :return: None
        :raises RuntimeError if the client cannot manage any packages
        """
        if client.username != 'root':
            raise RuntimeError(msg)
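
A sketch of what a concrete manager could look like when subclassing this base. It is illustrative only, assuming an apt-based system; the actual implementations ship with the platform-specific modules.

# Hypothetical subclass; the exact commands are assumptions, not the project's real implementation
class AptPackageManager(PackageManagerBase):
    def get_installed_versions(self, client=None, package_names=None):
        versions = {}
        for name in package_names or []:
            versions[name] = client.run(['dpkg-query', '--showformat=${Version}', '--show', name])
        return versions

    def install(self, package_name, client):
        self.validate_client(client)  # package management requires the root user
        client.run(['apt-get', 'install', '--assume-yes', package_name])
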
class ServiceFactory(object):
    """
    Factory class returning specialized classes
    """
    # Singleton holder
    manager = None

    RUN_FILE_DIR = None
    MONITOR_PREFIXES = None
    SERVICE_CONFIG_KEY = None
    CONFIG_TEMPLATE_DIR = None
    DEFAULT_UPDATE_ENTRY = {
        'packages': {},
        'downtime': [],
        'prerequisites': [],
        'services_stop_start': {
            10: [],
            20: []
        },  # Lowest get stopped first and started last
        'services_post_update': {
            10: [],
            20: []
        }
    }  # Lowest get restarted first

    _logger = Logger('extensions-service_factory')

    TYPE_IMPLEMENTATION_MAP = {
        UPSTART: Upstart,
        SYSTEMD: Systemd,
        MOCK: SystemdMock
    }

    @classmethod
    def get_service_type(cls):
        # type: () -> str
        """
        Gets the service manager type
        """
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            return MOCK

        init_info = check_output(['cat', '/proc/1/comm'])
        if INIT in init_info:
            version_info = check_output(['init', '--version'])
            if UPSTART in version_info:
                return UPSTART
        elif SYSTEMD in init_info:
            return SYSTEMD
        raise EnvironmentError('Unable to determine service management type')

    @classmethod
    def get_manager(cls):
        # type: () -> ServiceAbstract
        """
        Returns a service manager
        """
        if cls.manager is None:
            service_type = cls.get_service_type()
            implementation_class = cls.TYPE_IMPLEMENTATION_MAP.get(
                service_type)
            if implementation_class:
                cls.manager = implementation_class(
                    system=cls._get_system(),
                    logger=cls._get_logger_instance(),
                    configuration=cls._get_configuration(),
                    run_file_dir=cls.RUN_FILE_DIR,
                    monitor_prefixes=cls.MONITOR_PREFIXES,
                    service_config_key=cls.SERVICE_CONFIG_KEY,
                    config_template_dir=cls.CONFIG_TEMPLATE_DIR)

        if cls.manager is None:
            raise RuntimeError('Unknown ServiceManager')
        return cls.manager

    @classmethod
    def _get_system(cls):
        raise NotImplementedError()

    @classmethod
    def _get_configuration(cls):
        raise NotImplementedError()

    @classmethod
    def _get_logger_instance(cls):
        return Logger('extensions-services')

    @classmethod
    def change_service_state(cls, client, name, state, logger=None):
        """
        Starts/stops/restarts a service
        :param client: SSHClient on which to connect and change service state
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param name: Name of the service
        :type name: str
        :param state: State to put the service in
        :type state: str
        :param logger: Logger Object
        :type logger: ovs_extensions.log.logger.Logger
        :return: None
        :rtype: NoneType
        """
        service_manager = cls.get_manager()
        action = None
        status = service_manager.get_service_status(name, client=client)
        if status != 'active' and state in ['start', 'restart']:
            if logger is not None:
                logger.info('{0}: Starting service {1}'.format(
                    client.ip, name))
            service_manager.start_service(name, client=client)
            action = 'Started'
        elif status == 'active' and state == 'stop':
            if logger is not None:
                logger.info('{0}: Stopping service {1}'.format(
                    client.ip, name))
            service_manager.stop_service(name, client=client)
            action = 'Stopped'
        elif status == 'active' and state == 'restart':
            if logger is not None:
                logger.info('{0}: Restarting service {1}'.format(
                    client.ip, name))
            service_manager.restart_service(name, client=client)
            action = 'Restarted'

        if action is None:
            print '  [{0}] {1} already {2}'.format(
                client.ip, name, 'running' if status == 'active' else 'halted')
        else:
            if logger is not None:
                logger.info('{0}: {1} service {2}'.format(
                    client.ip, action, name))
            print '  [{0}] {1} {2}'.format(client.ip, name, action.lower())

    @classmethod
    def wait_for_service(cls, client, name, status, logger, wait=10):
        """
        Wait for service to enter status
        :param client: SSHClient to run commands
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param name: Name of service
        :type name: str
        :param status: 'active' if running, 'inactive' if halted
        :type status: str
        :param logger: Logger object
        :type logger: ovs_extensions.log.logger.Logger
        :param wait: Time to wait for the service to enter the specified state
        :type wait: int
        :return: None
        :rtype: NoneType
        """
        max_wait = 10 if wait <= 10 else wait
        service_manager = cls.get_manager()
        service_status = service_manager.get_service_status(name, client)
        while wait > 0:
            if service_status == status:
                return
            logger.debug('... waiting for service {0}'.format(name))
            wait -= 1
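            # The sleep grows on every retry: max_wait is fixed, so (max_wait - wait) increases as wait decreases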
            time.sleep(max_wait - wait)
            service_status = service_manager.get_service_status(name, client)
        raise RuntimeError(
            'Service {0} does not have expected status: Expected: {1} - Actual: {2}'
            .format(name, status, service_status))

    @classmethod
    def get_service_update_versions(cls,
                                    client,
                                    service_name,
                                    binary_versions,
                                    package_name=None):
        """
        Validate whether the service requires a restart, based upon the currently installed binary version
        :param client: Client on which to execute the validation
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param service_name: Name of the service to check
        :type service_name: str
        :param binary_versions: Mapping between the package_names and their available binary version. E.g.: {'arakoon': 1.9.22}
        :type binary_versions: dict
        :param package_name: Name of the package to match for in the service run file (Only applicable if the service depends on multiple packages)
        :type package_name: str
        :return: The installed and candidate versions if the service requires a restart, None otherwise
        :rtype: dict
        """
        version_file = '{0}/{1}.version'.format(cls.RUN_FILE_DIR, service_name)
        if not client.file_exists(version_file):
            ServiceFactory._logger.error(
                'No service file found for service {0} in {1} on node with IP {2}'
                .format(service_name, cls.RUN_FILE_DIR, client.ip))
            return

        # Verify whether a restart is required based on the content of the file and binary_versions passed
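        # The version file contains entries like 'package_a=1.0.0;package_b=2.0.0' (inferred from the parsing below)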
        for version in client.file_read(version_file).strip().split(';'):
            if not version:
                continue
            pkg_name = version.strip().split('=')[0]
            running_version = version.strip().split('=')[1]
            if (package_name is None
                    or pkg_name == package_name) and running_version:
                if LooseVersion(running_version) < binary_versions[pkg_name]:
                    return {
                        'installed': running_version,
                        'candidate': str(binary_versions[pkg_name])
                    }
                if '-reboot' in running_version:
                    return {
                        'installed': 'service_restart',
                        'candidate': str(binary_versions[pkg_name])
                    }

    @classmethod
    def remove_services_marked_for_removal(cls, client, package_names):
        """
        During an update we potentially mark services for removal because they have been updated, replaced or became obsolete
        These services, linked to the packages which have been updated, need to be removed
        :param client: SSHClient on which to remove the services
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param package_names: The packages which have been updated and thus need to be checked
        :type package_names: set[str]
        :return: None
        :rtype: NoneType
        """
        for version_file in client.file_list(directory=cls.RUN_FILE_DIR):
            if not version_file.endswith('.remove'):
                continue

            service_manager = cls.get_manager()
            file_name = '{0}/{1}'.format(cls.RUN_FILE_DIR, version_file)
            contents = client.file_read(filename=file_name)
            for part in contents.split(';'):
                if part.split('=')[0] in package_names:
                    service_name = version_file.replace('.remove', '').replace(
                        '.version', '')
                    cls._logger.warning('{0}: Removing service {1}'.format(
                        client.ip, service_name))
                    service_manager.stop_service(name=service_name,
                                                 client=client)
                    service_manager.remove_service(name=service_name,
                                                   client=client)
                    client.file_delete(filenames=[file_name])
                    break
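
A short usage sketch (a concrete factory must implement _get_system and _get_configuration; the SSHClient constructor signature and the service name below are assumptions):

# Hypothetical usage: restart a service and wait for it to become active
client = SSHClient('10.0.0.1', username='root')  # assumed constructor signature
ServiceFactory.change_service_state(client, 'ovs-workers', 'restart')
ServiceFactory.wait_for_service(client, 'ovs-workers', 'active', logger=Logger('extensions'))
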
Example no. 7
class PyrakoonPool(object):
    """
    Pyrakoon pool.
    Keeps a number of Pyrakoon clients queued up to avoid waiting too long on a socket lock of a single instance
    Uses PyrakoonClient as it has retries on master loss
    """

    _logger = Logger('extensions')

    # Frequency with which the pool is populated at startup
    SPAWN_FREQUENCY = 0.1

    def __init__(self,
                 cluster,
                 nodes,
                 pool_size=10,
                 retries=10,
                 retry_back_off_multiplier=2,
                 retry_interval_sec=2):
        # type: (str, Dict[str, Tuple[str, int]], int, int, int, int) -> None
        """
        Initializes the client
        :param cluster: Identifier of the cluster
        :type cluster: str
        :param nodes: Dict with all node sockets. {name of the node: (ip of node, port of node)}
        :type nodes: dict
        :param pool_size: Number of clients to keep in the pool
        :type pool_size: int
        :param retries: Number of retries to do
        :type retries: int
        :param retry_back_off_multiplier: Back off multiplier. Multiplies the retry_interval_sec with this number ** retry
        :type retry_back_off_multiplier: int
        :param retry_interval_sec: Seconds to wait before retrying. Exponentially increases with every retry.
        :type retry_interval_sec: int
        """
        self.pool_size = pool_size
        self._pyrakoon_args = (cluster, nodes, retries,
                               retry_back_off_multiplier, retry_interval_sec)
        self._sequences = {}

        self._lock = BoundedSemaphore(pool_size)

        self._clients = deque()
        for i in xrange(pool_size):
            # No clients as of yet. Decrease the count
            self._lock.acquire()
        for i in xrange(pool_size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._add_client)

    def _create_new_client(self):
        # type: () -> PyrakoonClient
        """
        Create a new Arakoon client
        Using PyrakoonClient as it has retries on master loss
        :return: The created PyrakoonClient client
        :rtype: PyrakoonClient
        """
        return PyrakoonClient(*self._pyrakoon_args)

    def _add_client(self):
        # type: () -> None
        """
        Add a new client to the pool
        :return: None
        """
        sleep_time = 0.1
        while True:
            client = self._create_new_client()
            if client:
                break
            gevent.sleep(sleep_time)

        self._clients.append(client)
        self._lock.release()

    @contextmanager
    def get_client(self):
        # type: () -> Iterable[PyrakoonClient]
        """
        Get a client from the pool. Used as context manager
        """
        self._lock.acquire()
        # A client should always be present, as acquiring the semaphore blocks until there are clients in the
        # queue, but checking for None keeps the IDE from complaining
        client = None
        try:
            client = self._clients.popleft()
            yield client
        # Possibly catch exceptions that require a new client to be spawned.
        # When creating a new client, the semaphore will have to be released when spawning a new one.
        # The finally statement can't be used then; rely on try/except/else instead
        finally:
            if client:
                self._clients.append(client)
                self._lock.release()
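
A usage sketch for the pool. The node layout and the key/value call are assumptions; PyrakoonClient's API is not shown in this example.

# Hypothetical usage: a gevent application sharing a pool of Arakoon clients
nodes = {'arakoon_1': ('10.0.0.1', 26402), 'arakoon_2': ('10.0.0.2', 26402)}
pool = PyrakoonPool(cluster='ovsdb', nodes=nodes, pool_size=5)
with pool.get_client() as client:
    client.set('some_key', 'some_value')  # assuming PyrakoonClient exposes a set()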
Example no. 8
class MockedSSHClient(object):
    """
    Class
    """
    _logger = Logger('extensions')
    _file_system = {}
    _run_returns = {}
    _run_recordings = {}

    @staticmethod
    def _clean():
        """
        Clean everything up related to the unittests
        """
        MockedSSHClient._file_system = {}
        MockedSSHClient._run_returns = {}
        MockedSSHClient._run_recordings = {}

    @staticmethod
    def _split_last_part_from(path):
        """
        Split the path in parts divided by '/' and return the last part and the combined parts of the rest
        """
        if not path.startswith('/'):
            raise ValueError('In unittest mode, the paths must be absolute')

        parts = [part for part in path.strip('/').split('/') if part]
        first_part = '/{0}'.format('/'.join(parts[:-1]))
        last_part = parts[-1] if len(parts) > 0 else None
        return first_part, last_part

    @staticmethod
    def traverse_file_system(client, path):
        """
        Traverse the filesystem until 'path' has been reached
        """
        if client.ip not in MockedSSHClient._file_system:
            return None

        parts = [part for part in path.strip('/').split('/') if part]
        pointer = MockedSSHClient._file_system[client.ip]['dirs']
        for index, part in enumerate(parts):
            if part not in pointer:
                return None
            if index == len(parts) - 1:
                return pointer[part]
            pointer = pointer[part]['dirs']
        return MockedSSHClient._file_system[client.ip]

    @staticmethod
    def run(client, command, *args, **kwargs):
        """
        Mocked run method
        """
        if isinstance(command, list):
            original_command = command[:]
            command = ' '.join(command)
        else:
            original_command = command
        MockedSSHClient._logger.debug('Executing: {0}'.format(command))
        if client.ip not in MockedSSHClient._run_recordings:
            MockedSSHClient._run_recordings[client.ip] = []
        MockedSSHClient._run_recordings[client.ip].append(command)
        if command in MockedSSHClient._run_returns.get(client.ip, {}):
            MockedSSHClient._logger.debug('Emulating return value')
            return MockedSSHClient._run_returns[client.ip][command]
        return client.original_function(client, original_command, *args, **kwargs)

    @staticmethod
    def dir_create(client, directories):
        """
        Mocked dir_create method
        """
        if isinstance(directories, basestring):
            directories = [directories]

        for directory in directories:
            if not directory.startswith('/'):
                raise ValueError('In unittest mode, the paths must be absolute')
        if client.ip not in MockedSSHClient._file_system:
            MockedSSHClient._file_system[client.ip] = {'info': {}, 'dirs': {}, 'files': {}}
        for directory in directories:
            parts = [part for part in directory.strip('/').split('/') if part]
            pointer = MockedSSHClient._file_system[client.ip]['dirs']
            for index, part in enumerate(parts):
                if part in pointer:
                    pointer = pointer[part]['dirs']
                else:
                    pointer[part] = {'info': {}, 'dirs': {}, 'files': {}}
                    pointer = pointer[part]['dirs']

    @staticmethod
    def dir_delete(client, directories, follow_symlinks=False):
        """
        Mocked dir_delete method
        """
        _ = follow_symlinks
        if isinstance(directories, basestring):
            directories = [directories]

        for directory in directories:
            first_part, last_part = MockedSSHClient._split_last_part_from(directory)
            pointer = MockedSSHClient.traverse_file_system(client=client,
                                                           path=first_part)
            if pointer is not None:
                if last_part is None:  # Root filesystem
                    MockedSSHClient._file_system[client.ip]['dirs'] = {}
                    MockedSSHClient._file_system[client.ip]['files'] = {}
                else:
                    pointer['dirs'].pop(last_part, None)

    @staticmethod
    def dir_exists(client, directory):
        """
        Mocked dir_exists method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(directory)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or (last_part is not None and last_part not in pointer['dirs']):
            return False
        return True

    @staticmethod
    def dir_chmod(client, directories, mode, recursive=False):
        """
        Mocked dir_chmod method
        """
        if isinstance(directories, basestring):
            directories = [directories]

        for directory in directories:
            first_part, last_part = MockedSSHClient._split_last_part_from(directory)
            pointer = MockedSSHClient.traverse_file_system(client=client,
                                                           path=first_part)
            if pointer is None or (last_part is not None and last_part not in pointer['dirs']):
                raise OSError("No such file or directory: '{0}'".format(directory))

            if last_part is not None:
                pointer = pointer['dirs'][last_part]

            pointer['info']['mode'] = str(mode)
            if recursive is True:
                for sub_dir in pointer['dirs']:
                    MockedSSHClient.dir_chmod(client=client,
                                              directories='/{0}/{1}'.format(directory, sub_dir),
                                              mode=mode,
                                              recursive=True)

    @staticmethod
    def dir_chown(client, directories, user, group, recursive=False):
        """
        Mocked dir_chown method
        """
        if isinstance(directories, basestring):
            directories = [directories]

        for directory in directories:
            first_part, last_part = MockedSSHClient._split_last_part_from(directory)
            pointer = MockedSSHClient.traverse_file_system(client=client,
                                                           path=first_part)
            if pointer is None or (last_part is not None and last_part not in pointer['dirs']):
                raise OSError("No such file or directory: '{0}'".format(directory))

            if last_part is not None:
                pointer = pointer['dirs'][last_part]

            pointer['info']['user'] = str(user)
            pointer['info']['group'] = str(group)
            if recursive is True:
                for sub_dir in pointer['dirs']:
                    MockedSSHClient.dir_chown(client=client,
                                              directories='/{0}/{1}'.format(directory, sub_dir),
                                              user=user,
                                              group=group,
                                              recursive=True)

    @staticmethod
    def dir_list(client, directory):
        """
        Mocked dir_list method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(directory)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or (last_part is not None and last_part not in pointer['dirs']):
            raise OSError("No such file or directory: '{0}'".format(directory))

        if last_part is not None:
            pointer = pointer['dirs'][last_part]
        return pointer['dirs'].keys() + pointer['files'].keys()

    @staticmethod
    def symlink(client, links):
        """
        Mocked symlink method
        """
        _ = client, links
        raise NotImplementedError('Symlink method has not been mocked yet')

    @staticmethod
    def file_create(client, filenames):
        """
        Mocked file_create method
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]

        if client.ip not in MockedSSHClient._file_system:
            MockedSSHClient._file_system[client.ip] = {'info': {}, 'dirs': {}, 'files': {}}
        for file_location in filenames:
            parts = [part for part in file_location.strip('/').split('/') if part]
            pointer = MockedSSHClient._file_system[client.ip]
            for index, part in enumerate(parts):
                if index == len(parts) - 1:
                    pointer['files'][part] = {'contents': ''}
                    break

                pointer = pointer['dirs']
                if part in pointer:
                    pointer = pointer[part]
                else:
                    pointer[part] = {'info': {}, 'dirs': {}, 'files': {}}
                    pointer = pointer[part]

    @staticmethod
    def file_delete(client, filenames):
        """
        Mocked file_delete method
        """
        if client.ip not in MockedSSHClient._file_system:
            return

        if isinstance(filenames, basestring):
            filenames = [filenames]

        for file_location in filenames:
            if not file_location.startswith('/'):
                raise ValueError('In unittest mode, the paths must be absolute')

        for file_location in filenames:
            parts = [part for part in file_location.strip('/').split('/') if part]
            pointer = MockedSSHClient._file_system[client.ip]
            for index, part in enumerate(parts):
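                # Translate a shell-style '*' wildcard in this path part into an anchored regex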
                regex = None if '*' not in part else re.compile('^{0}$'.format(part.replace('.', '\\.').replace('*', '.*')))
                if index == len(parts) - 1:
                    pointer = pointer['files']
                    if regex is not None:
                        for sub_file in copy.deepcopy(pointer):
                            if regex.match(sub_file):
                                pointer.pop(sub_file)
                    elif part in pointer:
                        pointer.pop(part)
                    break

                pointer = pointer['dirs']
                if regex is not None:
                    for sub_dir in pointer:
                        if regex.match(sub_dir):
                            MockedSSHClient.file_delete(client=client, filenames='/{0}/{1}/{2}'.format('/'.join(parts[:index]), sub_dir, '/'.join(parts[-(len(parts) - index - 1):])))
                if part not in pointer:
                    break
                pointer = pointer[part]

    @staticmethod
    def file_unlink(client, path):
        """
        Mocked file_unlink method
        """
        _ = client, path
        raise NotImplementedError('file_unlink method has not been mocked yet')

    @staticmethod
    def file_read_link(client, path):
        """
        Mocked file_read_link method
        """
        _ = client, path
        raise NotImplementedError('file_read_link method has not been mocked yet')

    @staticmethod
    def file_read(client, filename):
        """
        Mocked file_read method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(filename)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or last_part not in pointer['files']:
            raise OSError("No such file or directory: '{0}'".format(filename))

        return pointer['files'][last_part]['contents']

    @staticmethod
    def file_write(client, filename, contents):
        """
        Mocked file_write method
        """
        if client.ip not in MockedSSHClient._file_system:
            MockedSSHClient._file_system[client.ip] = {'info': {}, 'dirs': {}, 'files': {}}

        if not filename.startswith('/'):
            raise ValueError('In unittest mode, the paths must be absolute')

        parts = [part for part in filename.strip('/').split('/') if part]
        pointer = MockedSSHClient._file_system[client.ip]
        if type(contents) not in [str, unicode, basestring]:
            raise TypeError('expected a string or other character buffer object')

        for index, part in enumerate(parts):
            if index == len(parts) - 1:
                pointer['files'][part] = {'contents': contents}
                return

            pointer = pointer['dirs']
            if part in pointer:
                pointer = pointer[part]
            else:
                pointer[part] = {'info': {}, 'dirs': {}, 'files': {}}
                pointer = pointer[part]

    @staticmethod
    def file_upload(client, remote_filename, local_filename):
        """
        Mocked file_upload method
        """
        _ = client, remote_filename, local_filename
        raise NotImplementedError('file_upload method has not been mocked yet')

    @staticmethod
    def file_exists(client, filename):
        """
        Mocked file_exists method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(filename)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or last_part not in pointer['files']:
            return False
        return True

    @staticmethod
    def file_chmod(client, filename, mode):
        """
        Mocked file_chmod method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(filename)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or last_part not in pointer['files']:
            raise CalledProcessError(1, 'chmod {0} {1}'.format(str(mode), filename))

        pointer['files'][last_part]['mode'] = str(mode)

    @staticmethod
    def file_chown(client, filenames, user, group):
        """
        Mocked file_chown method
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]

        for filename in filenames:
            first_part, last_part = MockedSSHClient._split_last_part_from(filename)
            pointer = MockedSSHClient.traverse_file_system(client=client,
                                                           path=first_part)
            if pointer is None or last_part not in pointer['files']:
                continue

            pointer['files'][last_part]['user'] = str(user)
            pointer['files'][last_part]['group'] = str(group)

    @staticmethod
    def file_list(client, directory, abs_path=False, recursive=False):
        """
        Mocked file_list method
        """
        first_part, last_part = MockedSSHClient._split_last_part_from(directory)
        pointer = MockedSSHClient.traverse_file_system(client=client,
                                                       path=first_part)
        if pointer is None or (last_part is not None and last_part not in pointer['dirs']):
            return []

        all_files = []
        if last_part is not None:
            pointer = pointer['dirs'][last_part]
        if abs_path is True:
            directory = directory.rstrip('/')
            all_files.extend(['{0}/{1}'.format(directory, file_name) for file_name in pointer['files']])
        else:
            all_files.extend(pointer['files'].keys())

        if recursive is True:
            for sub_dir in pointer['dirs']:
                all_files.extend(MockedSSHClient.file_list(client=client, directory='{0}/{1}'.format(directory, sub_dir), abs_path=abs_path, recursive=True))
        return all_files

    @staticmethod
    def file_move(client, source_file_name, destination_file_name):
        """
        Mocked file_move method
        """
        _ = client, source_file_name, destination_file_name
        raise NotImplementedError('file_move method has not been mocked yet')

    @staticmethod
    def path_exists(client, file_path):
        """
        Mocked path_exists method
        """
        _ = client, file_path
        raise NotImplementedError('path_exists method has not been mocked yet')
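
A sketch of how the mock is typically driven from a unit test. The client object below is an assumption: any stand-in with an ip attribute (and, for the run() passthrough, an original_function) will do.

# Hypothetical test usage
client = FakeClient(ip='127.0.0.1')  # assumed stand-in exposing .ip
MockedSSHClient._run_returns[client.ip] = {'hostname': 'node-1'}
assert MockedSSHClient.run(client, ['hostname']) == 'node-1'
MockedSSHClient.file_write(client, '/tmp/test.txt', 'some contents')
assert MockedSSHClient.file_read(client, '/tmp/test.txt') == 'some contents'
MockedSSHClient._clean()  # reset the in-memory filesystem between tests
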
class file_mutex(object):
    """
    This is mutex backed on the filesystem. It's cross thread and cross process. However
    its limited to the boundaries of a filesystem
    """
    def __init__(self, name, wait=None):
        """
        Creates a file mutex object
        """
        self.name = name
        self._has_lock = False
        self._start = 0
        self._logger = Logger('extensions')
        self._handle = open(self.key(), 'w')
        self._wait = wait
        try:
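            # Make the lock file readable and writable for everyone (equivalent to mode 0666)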
            os.chmod(
                self.key(), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
                | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
        except OSError:
            pass

    def __call__(self, wait):
        self._wait = wait
        return self

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, *args, **kwargs):
        _ = args, kwargs
        self.release()

    def acquire(self, wait=None):
        """
        Acquire a lock on the mutex, optionally given a maximum wait timeout
        :param wait: Time to wait for lock
        """
        if self._has_lock:
            return True
        self._start = time.time()
        if wait is None:
            wait = self._wait
        passed = 0
        if wait is None:
            fcntl.flock(self._handle, fcntl.LOCK_EX)
            passed = time.time() - self._start
        else:
            while True:
                passed = time.time() - self._start
                if passed > wait:
                    self._logger.error(
                        'Lock for {0} could not be acquired. {1} sec > {2} sec'
                        .format(self.key(), passed, wait))
                    raise NoLockAvailableException(
                        'Could not acquire lock %s' % self.key())
                try:
                    fcntl.flock(self._handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    break
                except IOError:
                    time.sleep(0.005)
        if passed > 1:  # More than 1 s is a long time to wait!
            self._logger.warning('Waited {0} sec for lock {1}'.format(
                passed, self.key()))
        self._start = time.time()
        self._has_lock = True
        return True

    def release(self):
        """
        Releases the lock
        """
        if self._has_lock:
            fcntl.flock(self._handle, fcntl.LOCK_UN)
            passed = time.time() - self._start
            if passed > 2.5:  # More than 2.5 s is a long time to hold a lock
                self._logger.warning(
                    'A lock on {0} was kept for {1} sec'.format(
                        self.key(), passed))
            self._has_lock = False

    def key(self):
        """
        Lock key
        """
        if '/' in self.name:
            return self.name  # Assuming a path
        return '/var/lock/ovs_flock_{0}'.format(self.name)

    def __del__(self):
        """
        __del__ hook, releasing the lock
        """
        self.release()
        self._handle.close()
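
A usage sketch: the lock serializes access across processes through a file under /var/lock (or an absolute path passed as name).

# Hypothetical usage as a context manager; waits at most 5 seconds for the lock
with file_mutex('my_critical_section', wait=5):
    pass  # ... work that must not run concurrently ...

# Or explicitly, raising NoLockAvailableException when the wait expires
mutex = file_mutex('my_critical_section')
mutex.acquire(wait=5)
try:
    pass  # ... work ...
finally:
    mutex.release()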
Example no. 10
class VDiskRebalancer(object):

    _volume_potentials = {}
    logger = Logger('vdisk_rebalance')

    @classmethod
    def print_balances(cls, balances):
        # type: (List[VDiskBalance]) -> None
        """
        Prints out balances
        :return: None
        :rtype: NoneType
        """
        balances_by_vpool = {}
        for balance in balances:  # type: VDiskBalance
            vpool = balance.storagedriver.vpool
            if vpool not in balances_by_vpool:
                balances_by_vpool[vpool] = []
            balances_by_vpool[vpool].append(balance)
        for vpool, vpool_balances in balances_by_vpool.viewitems():
            print('Balance for VPool {0}'.format(vpool.name))
            for balance in vpool_balances:  # type: VDiskBalance
                storagerouter = balance.storagedriver.storagerouter
                print(
                    ' Storagerouter {0}, vdisks now: {1}, vdisks afterwards {2}, added {3}'
                    .format(storagerouter.name, len(balance.hosted_guids),
                            len(balance.balance), len(balance.added)))
                if balance.added:
                    added_source_overview = {}
                    for vdisk_guid in balance.added:
                        current_storagerouter = StorageRouter(
                            VDisk(vdisk_guid).storagerouter_guid)
                        if current_storagerouter not in added_source_overview:
                            added_source_overview[current_storagerouter] = []
                        added_source_overview[current_storagerouter].append(
                            vdisk_guid)
                    print('  Vdisks added from:')
                    for current_storagerouter, moved_vdisk_guids in added_source_overview.iteritems():
                        print('    StorageRouter {0}: {1}'.format(
                            current_storagerouter.name,
                            len(moved_vdisk_guids)))

    @classmethod
    def get_rebalanced_layout(cls,
                              vpool_guid,
                              excluded_storagerouters=None,
                              ignore_domains=False,
                              evacuate_storagerouters=None,
                              base_on_volume_potential=True):
        # type: (str, Optional[List[str]], Optional[bool], Optional[List[str]], Optional[bool]) -> List[VDiskBalance]
        """
        Retrieve the layout of how the optimal spread would look
        :param evacuate_storagerouters: Migrate all vdisks from these hosts
        :type evacuate_storagerouters: List[str]
        :param vpool_guid: Guid of the VPool to rebalance
        :type vpool_guid: str
        :param excluded_storagerouters: Guids of StorageRouters to avoid
        :type excluded_storagerouters: List[str]
        :param ignore_domains: Ignore the domains (rebalance across everything)
        :type ignore_domains: bool
        :param base_on_volume_potential: Base the movement on the volume potential instead of a linear distribution
        :type base_on_volume_potential: bool
        :return: List of balances
        :rtype: List[VDiskBalance]
        """
        if evacuate_storagerouters is None:
            evacuate_storagerouters = []
        if excluded_storagerouters is None:
            excluded_storagerouters = []

        vpool = VPool(vpool_guid)
        if ignore_domains:
            return cls._get_rebalances_layout(vpool, excluded_storagerouters,
                                              evacuate_storagerouters,
                                              base_on_volume_potential)
        return cls._get_rebalanced_layout_by_domain(vpool,
                                                    excluded_storagerouters,
                                                    evacuate_storagerouters,
                                                    base_on_volume_potential)

    @classmethod
    def get_volume_potentials(cls, storagedrivers, cache=True):
        """
        Retrieve the volume potential of every storagedriver, optionally using previously cached values
        :param storagedrivers: StorageDrivers to retrieve the volume potential for
        :param cache: Use the cached potential when available (a potential is cached once fetched)
        :return: Mapping of storagedriver to its volume potential
        :rtype: dict
        """
        potentials = {}
        for storagedriver in storagedrivers:
            if cache:
                potential = cls._volume_potentials.get(storagedriver, -1)
                if potential == -1:
                    potential = storagedriver.vpool.storagedriver_client.volume_potential(
                        str(storagedriver.storagedriver_id))
                    cls._volume_potentials[storagedriver] = potential
            else:
                potential = storagedriver.vpool.storagedriver_client.volume_potential(
                    str(storagedriver.storagedriver_id))
            potentials[storagedriver] = potential
        return potentials

    @classmethod
    def _get_rebalances_layout(cls, vpool, excluded_storagerouters,
                               evacuate_storagerouters,
                               base_on_volume_potential):
        # type: (VPool, List[str], List[str], bool) -> List[VDiskBalance]
        """
        Rebalance volumes without taking domains into account
        :param vpool: VPool to rebalance
        :type vpool: VPool
        :param excluded_storagerouters: Guids of StorageRouters to avoid
        :type excluded_storagerouters: List[str]
        :param evacuate_storagerouters: Migrate all vdisks from these hosts
        :type evacuate_storagerouters: List[str]
        :param base_on_volume_potential: Base the limit calculation on the volume potential ratio
        :type base_on_volume_potential: bool
        :return: List of balances
        :rtype: List[VDiskBalance]
        """
        storagerouters_to_avoid = set(
            itertools.chain(excluded_storagerouters, evacuate_storagerouters))
        destination_storagedrivers = [
            std for std in vpool.storagedrivers
            if std.storagerouter_guid not in storagerouters_to_avoid
        ]
        destination_storagedrivers_by_ip = dict(
            (storagedriver.storagerouter.ip, storagedriver)
            for storagedriver in destination_storagedrivers)

        volume_potentials = {}
        if base_on_volume_potential:
            volume_potentials = cls.get_volume_potentials(
                destination_storagedrivers)
            total_potential = sum(p for p in volume_potentials.itervalues())
            vdisks_within_destination_storagedrivers = list(
                itertools.chain(*(sd.vdisks_guids
                                  for sd in destination_storagedrivers)))
            volume_total_capacity = total_potential + len(
                vdisks_within_destination_storagedrivers)

        # Default limit. Simple distribution
        storagedriver_vdisk_limit = int(
            ceil(len(vpool.vdisks_guids) / float(len(destination_storagedrivers))))
        balances = {}
        overflow = []
        for storagedriver in vpool.storagedrivers:
            if base_on_volume_potential:
                # Use the ratio between volume potential max and current to distribute
                volume_potential = volume_potentials[storagedriver]
                storagedriver_vdisk_limit = int(
                    ceil(
                        len(vpool.vdisks_guids) *
                        float(volume_potential + len(storagedriver.vdisks_guids)) /
                        volume_total_capacity))

            limit = 0 if storagedriver.storagerouter_guid in evacuate_storagerouters else storagedriver_vdisk_limit
            balance = VDiskBalance(storagedriver, limit)
            overflow.extend(balance.overflow)
            balances[storagedriver] = balance
        # Attempt to move to current mds hosts
        for vdisk_guid in overflow:
            vdisk = VDisk(vdisk_guid)
            # If only set was ordered :D
            preferred_destinations = [
                destination_storagedrivers_by_ip[mds_entry['ip']]
                for mds_entry in vdisk.info['metadata_backend_config']
                if mds_entry['ip'] in destination_storagedrivers_by_ip
            ]
            # Try to fill these storagedrivers first
            destinations = preferred_destinations + [
                storagedriver for storagedriver in destination_storagedrivers
                if storagedriver not in preferred_destinations
            ]
            added = False
            for storagedriver in destinations:
                balance = balances[storagedriver]
                added = cls.add_to_balance(vdisk_guid, balance)
                if added:
                    try:
                        index = preferred_destinations.index(storagedriver)
                        mds_type = 'master' if index == 0 else 'slave'
                        cls.logger.info(
                            'Appointing {0} to {1} (index {2})'.format(
                                vdisk_guid, mds_type, index))
                    except ValueError:
                        # Index query didn't find the storagedriver
                        cls.logger.info('Appointing to non-mds host')
                    break
            if not added:
                raise NotImplementedError(
                    "VDisk couldn't be added to any destination. Might be a faulty implementation here"
                )
        return balances.values()

    @classmethod
    def _get_rebalanced_layout_by_domain(cls, vpool, excluded_storagerouters,
                                         evacuate_storagerouters,
                                         base_on_volume_potential):
        # type: (VPool, List[str], List[str], bool) -> List[VDiskBalance]
        """
        Rebalance volumes and stay within the primary domain
        :param vpool: VPool to rebalance
        :type vpool: VPool
        :param excluded_storagerouters: Guids of StorageRouters to avoid
        :type excluded_storagerouters: List[str]
        :param evacuate_storagerouters: Migrate all vdisks from these hosts
        :type evacuate_storagerouters: List[str]
        :param base_on_volume_potential: Base the limit calculation on the volume potential ratio
        :type base_on_volume_potential: bool
        :return: List of balances
        :rtype: List[VDiskBalance]
        """
        # Calculate balance cap for every storagedriver
        # Every storagedriver can share disks between other storagedriver within the same primary domain
        # Certain storagedrivers add their disks to the pool but can't take disks themselves
        balances = {}
        storagedriver_limits = {}
        storagedriver_domain_relation = {}
        for storagedriver in vpool.storagedrivers:
            cls.logger.info('Calculating the limit for {} in VPool {}'.format(
                storagedriver.storagerouter.name, vpool.name))
            # Create the disk pool for the current storagedriver in the domain
            storagedrivers_in_domain = cls.get_storagedrivers_in_same_primary_domain_as_storagedriver(
                storagedriver, excluded_storagerouters)
            cls.logger.info('{} shares primary domains with {}'.format(
                storagedriver.storagerouter.name,
                ', '.join(d.storagerouter.name
                          for d in storagedrivers_in_domain)))
            storagedriver_domain_relation[
                storagedriver] = storagedrivers_in_domain
            vdisks_within_domain = []
            for storagedriver_in_domain in storagedrivers_in_domain:
                vdisks_within_domain.extend(
                    storagedriver_in_domain.vdisks_guids)
            cls.logger.info(
                'VDisks within the primary domain of {}: {}'.format(
                    storagedriver.storagerouter.name,
                    len(vdisks_within_domain)))
            # Think about the disk distribution
            if storagedriver.storagerouter_guid in evacuate_storagerouters:
                limit = 0
            else:
                # Remove the evacuations from the limit
                usable_storagedrivers_in_domain = [
                    std for std in storagedrivers_in_domain
                    if std.storagerouter_guid not in evacuate_storagerouters
                ]
                cls.logger.info(
                    'Can move volumes to the following storagedrivers within the primary domain: {}'
                    .format(', '.join(
                        d.storagerouter.name
                        for d in usable_storagedrivers_in_domain)))
                if base_on_volume_potential:
                    volume_potentials = cls.get_volume_potentials(
                        usable_storagedrivers_in_domain)
                    total_potential = sum(
                        p for p in volume_potentials.itervalues())
                    volume_potentials_sr = dict(
                        (storagedriver.storagerouter.name, potential)
                        for storagedriver, potential in
                        volume_potentials.iteritems())
                    cls.logger.info(
                        'Volume potential overview: {}. Total potential: {}'.
                        format(pprint.pformat(volume_potentials_sr),
                               total_potential))
                    # The vdisk count is based on the usable (non-evacuated) storagedrivers only
                    vdisks_within_domain_usable = list(
                        itertools.chain(
                            *(sd.vdisks_guids
                              for sd in usable_storagedrivers_in_domain)))
                    volume_total_capacity = total_potential + len(
                        vdisks_within_domain_usable)
                    if len(vdisks_within_domain) > volume_total_capacity:
                        cls.logger.error(
                            'The total capacity with the usable storagedrivers in the domain is not large enough. vdisks_within_domain {0} > volume_total_capacity {1}'
                            .format(len(vdisks_within_domain),
                                    volume_total_capacity))
                        raise RuntimeError(
                            'Migration with the given params is not possible. Too many vdisks for the usable storagedrivers within the domain.'
                        )
                    cls.logger.info(
                        'Total capacity within this domain subset is {}'.
                        format(volume_total_capacity))
                    # Use the ratio between volume potential max and current to distribute
                    volume_potential = volume_potentials[storagedriver]
                    volume_ratio = float(volume_potential + len(
                        storagedriver.vdisks_guids)) / volume_total_capacity
                    cls.logger.info('{} can take {}% of the volumes'.format(
                        storagedriver.storagerouter.name, volume_ratio * 100))
                    limit = int(ceil(len(vdisks_within_domain) * volume_ratio))
                else:
                    limit = int(
                        ceil(
                            len(vdisks_within_domain) /
                            float(len(usable_storagedrivers_in_domain))))
            cls.logger.info('Limit imposed for {}: {}'.format(
                storagedriver.storagerouter.name, limit))
            storagedriver_limits[storagedriver] = limit

        for storagedriver in vpool.storagedrivers:
            balance = VDiskBalance(storagedriver,
                                   storagedriver_limits[storagedriver])
            balances[storagedriver] = balance
            cls.logger.info('Balance overview {}'.format(balance))

        for storagedriver in vpool.storagedrivers:
            storagedrivers_in_domain = [
                std for std in storagedriver_domain_relation[storagedriver]
                if std != storagedriver
            ]
            storagedrivers_in_domain_by_ip = dict(
                (storagedriver.storagerouter.ip, storagedriver)
                for storagedriver in storagedrivers_in_domain)
            balance = balances[storagedriver]
            cls.logger.info(
                'Migrating {} vdisks from {} of VPool {}. Limit: {}, hosting {}'
                .format(len(balance.overflow),
                        storagedriver.storagerouter.name, vpool.name,
                        balance.limit, len(balance.hosted_guids)))
            for vdisk_guid in balance.overflow:
                vdisk = VDisk(vdisk_guid)
                preferred_destinations = [
                    storagedrivers_in_domain_by_ip[mds_entry['ip']]
                    for mds_entry in vdisk.info['metadata_backend_config']
                    if mds_entry['ip'] in storagedrivers_in_domain_by_ip
                ]
                # Try to fill these storagedrivers first
                destinations = preferred_destinations + [
                    storagedriver for storagedriver in storagedrivers_in_domain
                    if storagedriver not in preferred_destinations
                ]
                cls.logger.info(
                    'Destination overview for migrations: {}'.format(', '.join(
                        d.storagerouter.name for d in destinations)))
                added = False
                while not added and destinations:
                    destination = destinations.pop()
                    balance = balances[destination]
                    added = cls.add_to_balance(vdisk_guid, balance)
                    if added:
                        cls.logger.info('Added vdisk {} to {}'.format(
                            vdisk_guid, destination.storagerouter.name))
                        if destination.storagedriver_id == vdisk.storagedriver_id:
                            raise RuntimeError('Moving to current host ERROR')
                        try:
                            index = preferred_destinations.index(destination)
                            mds_type = 'master' if index == 0 else 'slave'
                            cls.logger.info(
                                'Appointing {0} to {1} (index {2})'.format(
                                    vdisk_guid, mds_type, index))
                        except ValueError:
                            # Index query didn't find the storagedriver
                            cls.logger.info('Appointing to non-mds host')
                    else:
                        cls.logger.info(
                            'Did not add vDisk to {}. Its limit: {}, currently hosting {}'
                            .format(destination.storagerouter.name,
                                    balance.limit, len(balance.balance)))
                if not added:
                    raise NotImplementedError(
                        'VDisk could not be added to any destination; the balancing implementation might be faulty'
                    )
        return balances.values()

    @classmethod
    def get_storagedrivers_in_same_primary_domain_as_storagedriver(
            cls, storagedriver, excluded_storagerouters=None):
        # type: (StorageDriver, Optional[List[str]]) -> List[StorageDriver]
        """
        Retrieve all storagedrivers within the same primary domain as the given storagedriver
        :param storagedriver: StorageDriver to check other domain relations for
        :param excluded_storagerouters: Storagerouters that are excluded for the search
        :type excluded_storagerouters: Optional[List[str]]
        :return: List of storagedrivers
        :rtype: List[StorageDriver]
        """
        if excluded_storagerouters is None:
            excluded_storagerouters = []
        primary_domains = cls.get_primary_domain_guids_storagedriver(
            storagedriver)
        if not primary_domains:
            return list(storagedriver.vpool.storagedrivers)
        return [
            std for std in storagedriver.vpool.storagedrivers
            if std.storagerouter_guid not in excluded_storagerouters and any(
                domain_guid in primary_domains
                for domain_guid in cls.get_primary_domain_guids_storagedriver(
                    std))
        ]

    @staticmethod
    def get_primary_domain_guids_storagedriver(storagedriver):
        # type: (StorageDriver) -> List[str]
        """
        Retrieve all primary domains of the StorageDriver
        :param storagedriver: Storagedriver to get domains from
        :type storagedriver: StorageDriver
        :return: List of primary domain guids
        :rtype: List[str]
        """
        primary_domains = []
        storagerouter = storagedriver.storagerouter
        for junction in storagerouter.domains:
            if not junction.backup:
                primary_domains.append(junction.domain_guid)
        return primary_domains

    @classmethod
    def add_to_balance(cls, vdisk_guid, balance):
        # type: (str, VDiskBalance) -> bool
        """
        Try to add a vdisk to a balance
        :param vdisk_guid: Guid to add
        :param balance: Balance to add guid to
        :return: True if vdisk was added, else False
        :rtype: bool
        """
        added, overflowed = balance.fill([vdisk_guid])
        return vdisk_guid in added
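
Whether a vDisk actually fits is decided by VDiskBalance.fill (shown further down); add_to_balance only inspects its return value. A minimal sketch of that contract, assuming a balance object with one free slot:

# Illustrative only: 'balance' is a VDiskBalance where limit - len(balance.balance) == 1
added, overflowed = balance.fill(['vdisk-guid-1', 'vdisk-guid-2'])
print(added)       # ['vdisk-guid-1'] - the guid that fitted
print(overflowed)  # ['vdisk-guid-2'] - the guid the caller must place elsewhere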
Esempio n. 11
0
class ConfigurationClientBase(object):
    """
    Base for all ConfigurationClients.
    These are built and used by the Configuration abstraction.
    Configuration is an abstraction over filesystem-like configuration management systems such as ETCD.
    All inheriting classes must override: lock, get_configuration_path, extract_key_from_path, get, set, dir_exists, list, delete and rename
    """

    _logger = Logger('extensions')

    def __init__(self, *args, **kwargs):
        _ = args, kwargs

    @property
    def assertion_exception(self):
        # type: () -> Any
        """
        Returns the used Exception class to indicate that an assertion failed
        :return: The underlying exception class
        """
        raise NotImplementedError()

    @property
    def key_not_found_exception(self):
        # type: () -> Any
        """
        Returns the used Exception class to indicate that a key was not found
        :return: The underlying exception class
        """
        raise NotImplementedError()

    def lock(self, name, wait=None, expiration=60):
        # type: (str, float, float) -> Any
        """
        Returns the lock implementation
        :param name: Name to give to the lock
        :type name: str
        :param wait: Wait time for the lock (in seconds)
        :type wait: float
        :param expiration: Expiration time for the lock (in seconds)
        :type expiration: float
        :return: The lock implementation
        :rtype: any
        """
        raise NotImplementedError()

    def dir_exists(self, key):
        # type: (str) -> bool
        """
        Returns whether the directory of the given key exists
        :param key: Key to check for
        :type key: str
        :return: True when key exists else False
        :rtype: bool
        """
        raise NotImplementedError()

    def list(self, key, recursive):
        # type: (str, bool) -> Generator[str]
        """
        Lists all contents under the key.
        :param key: Key to list under
        :type key: str
        :param recursive: Indicate to list recursively
        :type recursive: bool
        :return: All contents under the key
        :rtype: Iterable
        """
        raise NotImplementedError()

    def delete(self, key, recursive, transaction=None):
        # type: (str, bool, str) -> None
        """
        Delete the specified key
        :param key: Key to delete
        :type key: str
        :param recursive: Delete the specified key recursively
        :type recursive: bool
        :param transaction: Transaction to apply the delete to
        :type transaction: str
        :return: None
        """
        raise NotImplementedError()

    def get(self, key, **kwargs):
        # type: (str, **Any) -> Any
        """
        Retrieve the value for specified key
        :param key: Key to retrieve
        :type key: str
        :return: Value of the key
        :rtype: any
        """
        raise NotImplementedError()

    def get_client(self):
        # type: () -> Any
        """
        Returns the underlying client
        :return: A client to maintain configurations
        :rtype: any
        """
        raise NotImplementedError()

    def set(self, key, value, transaction=None):
        # type: (str, Any, Optional[str]) -> None
        """
        Set a value for specified key
        :param key: Key to set
        :type key: str
        :param value: Value to set for key
        :type value: str
        :param transaction: Transaction to apply the set to
        :type transaction: str
        :return: None
        """
        raise NotImplementedError()

    def rename(self, key, new_key, max_retries):
        # type: (str, str, int) -> None
        """
        Rename a path
        :param key: Start of the path to rename
        :type key: str
        :param new_key: New key value
        :type new_key: str
        :param max_retries: Number of retries to attempt
        :type max_retries: int
        :return: None
        :rtype: NoneType
        :raises AssertException: when the assertion still fails after 'max_retries' attempts
        """
        raise NotImplementedError()

    def begin_transaction(self):
        # type: () -> str
        """
        Starts a new transaction. All actions which support transactions can be used with this identifier
        :return: The ID of the started transaction
        :rtype: str
        """
        raise NotImplementedError()

    def apply_transaction(self, transaction):
        # type: (str) -> None
        """
        Applies a transaction. All registered actions are executed
        :param transaction: Transaction to apply
        :type transaction: str
        :return: None
        :rtype: NoneType
        :raises assertion_exception: when an assert failure was reached
        :raises key_not_found_exception: when a key could not be found
        """
        raise NotImplementedError()

    def assert_value(self, key, value, transaction=None):
        # type: (str, Any, str) -> None
        """
        Asserts a key-value pair
        :param key: Key to assert for
        :type key: str
        :param value: Value that the key should have
        :type value: any
        :param transaction: Transaction to apply this action to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        raise NotImplementedError()

    def assert_exists(self, key, transaction=None):
        # type: (str, str) -> None
        """
        Asserts that a key exists
        :param key: Key to assert for
        :type key: str
        :param transaction: Transaction to apply this action to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        raise NotImplementedError()

    @classmethod
    def extract_key_from_path(cls, path):
        # type: (str) -> str
        """
        Extract a key from a path.
        Only used during testing as of now
        :param path: Path to extract the key from
        :type path: str
        :return: The extracted key
        :rtype: str
        """
        raise NotImplementedError()

    @classmethod
    def get_configuration_path(cls, key):
        # type: (str) -> str
        """
        Retrieve the full configuration path for specified key
        :param key: Key to retrieve full configuration path for
        :type key: str
        :return: Configuration path
        :rtype: str
        """
        raise NotImplementedError()
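
For orientation, a minimal in-memory subclass is sketched below. It is illustrative only (the real implementations wrap stores such as Arakoon or ETCD) and fills in just enough of the abstract API for get/set/delete to work:

class InMemoryConfigurationClient(ConfigurationClientBase):
    """
    Illustrative, non-production client that keeps all keys in a plain dict
    """
    def __init__(self, *args, **kwargs):
        super(InMemoryConfigurationClient, self).__init__(*args, **kwargs)
        self._store = {}

    def get(self, key, **kwargs):
        return self._store[key]

    def set(self, key, value, transaction=None):
        _ = transaction  # Transactions are not supported in this sketch
        self._store[key] = value

    def delete(self, key, recursive, transaction=None):
        _ = transaction
        if recursive:
            for existing in [k for k in self._store if k.startswith(key)]:
                del self._store[existing]
        else:
            self._store.pop(key, None)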
class volatile_mutex(object):
    """
    This is a volatile, distributed mutex to provide cross thread, cross process and cross node
    locking. However, this mutex is volatile and thus can fail. You want to make sure you don't
    lock for longer than a few hundred milliseconds to prevent this.
    """

    def __init__(self, name, wait=None):
        """
        Creates a volatile mutex object
        """
        self.name = name
        self._wait = wait
        self._start = 0
        self._logger = Logger('extensions')  # Instantiated by classes inheriting this class
        self._has_lock = False
        self._volatile = self._get_volatile_client()

    def __call__(self, wait):
        self._wait = wait
        return self

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, *args, **kwargs):
        _ = args, kwargs
        self.release()

    def acquire(self, wait=None):
        """
        Acquire a lock on the mutex, optionally given a maximum wait timeout
        :param wait: Time to wait for lock
        """
        if self._has_lock:
            return True
        self._start = time.time()
        if wait is None:
            wait = self._wait
        while not self._volatile.add(self.key(), 1, 60):
            time.sleep(0.005)
            passed = time.time() - self._start
            if wait is not None and passed > wait:
                if self._logger is not None:
                    self._logger.error('Lock for {0} could not be acquired. {1} sec > {2} sec'.format(self.key(), passed, wait))
                raise NoLockAvailableException('Could not acquire lock {0}'.format(self.key()))
        passed = time.time() - self._start
        if passed > 0.2:  # More than 200 ms is a long time to wait
            if self._logger is not None:
                self._logger.warning('Waited {0} sec for lock {1}'.format(passed, self.key()))
        self._start = time.time()
        self._has_lock = True
        return True

    def release(self):
        """
        Releases the lock
        """
        if self._has_lock:
            self._volatile.delete(self.key())
            passed = time.time() - self._start
            if passed > 0.5:  # More than 500 ms is a long time to hold a lock
                if self._logger is not None:
                    self._logger.warning('A lock on {0} was kept for {1} sec'.format(self.key(), passed))
            self._has_lock = False

    def key(self):
        """
        Lock key
        """
        return 'ovs_lock_%s' % self.name

    def __del__(self):
        """
        __del__ hook, releasing the lock
        """
        self.release()

    @classmethod
    def _get_volatile_client(cls):
        raise NotImplementedError()
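
Usage follows the context-manager protocol. A short sketch, assuming a hypothetical concrete subclass MemcacheMutex that implements _get_volatile_client:

mutex = MemcacheMutex('vpool_config', wait=0.5)  # Hypothetical subclass
try:
    with mutex:  # Acquires on __enter__, releases on __exit__
        pass     # Critical section; keep it well under a second
except NoLockAvailableException:
    pass  # Another process held 'ovs_lock_vpool_config' for longer than 0.5s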
Esempio n. 13
0
 def __init__(self):
     """
     Init
     """
     self._logger = Logger('extensions')
     self.fstab_file = '/etc/fstab'
Esempio n. 14
0
class Fstab(object):
    """
    /etc/fstab manager
    """
    def __init__(self):
        """
        Init
        """
        self._logger = Logger('extensions')
        self.fstab_file = '/etc/fstab'

    def _slurp(self):
        """
        Read and parse the entries from /etc/fstab
        """
        with open(self.fstab_file, 'r') as f:
            dlist = [line for line in f if not re.match(r'^\s*$', line)]
        dlist = [i.strip() for i in dlist if not i.startswith('#')]
        dlist = [re.split(r' +|\t+', i) for i in dlist]
        keys = ['device', 'directory', 'fstype', 'options', 'dump', 'fsck']
        ldict = [dict(zip(keys, line)) for line in dlist]

        return ldict

    def show_config(self):
        """
        Print the content of /etc/fstab
        """
        entries = self._slurp()
        for i in entries:
            self._logger.debug("{0} {1} {2} {3} {4} {5}".format(
                i['device'], i['directory'], i['fstype'], i['options'],
                i['dump'], i['fsck']))

    def add_config(self,
                   fs_spec,
                   fs_file,
                   fs_vfstype,
                   fs_mntops='defaults',
                   fs_freq='0',
                   fs_passno='0'):
        """
        Add an entry to /etc/fstab

        :param fs_spec: device
        :param fs_file: directory or mount point
        :param fs_vfstype: Type of filesystem
        :param fs_mntops: options
        :param fs_freq: dump value
        :param fs_passno: fsck value
        """
        self._logger.debug(
            '{0}: appending entry {1} {2} {3} {4} {5} {6}'.format(
                self.fstab_file, fs_spec, fs_file, fs_vfstype, fs_mntops,
                fs_freq, fs_passno))
        with open(self.fstab_file, 'a') as f:
            f.write('{0} {1} {2} {3} {4} {5}\n'.format(fs_spec, fs_file,
                                                       fs_vfstype, fs_mntops,
                                                       fs_freq, fs_passno))

    def modify_config_by_device(self,
                                device,
                                fs_file='',
                                fs_vfstype='',
                                fs_mntops='',
                                fs_freq='',
                                fs_passno=''):
        """
        Modify an entry in /etc/fstab

        :param device: device
        :param fs_file: directory or mount point
        :param fs_vfstype: Type of filesystem
        :param fs_mntops: options
        :param fs_freq: dump value
        :param fs_passno: fsck value
        """
        self._logger.debug(
            '{0}: modifying entry {1} to {2} {3} {4} {5} {6}'.format(
                self.fstab_file, device, fs_file, fs_vfstype, fs_mntops,
                fs_freq, fs_passno))

        def x_if_x_else_key(x, dictionary, key):
            """ Small helper function """
            return x if x else dictionary[key]

        entries = self._slurp()
        with open(self.fstab_file, 'w') as f:
            for i in entries:
                if i['device'] == device:
                    new_fs_file = x_if_x_else_key(fs_file, i, 'directory')
                    new_fs_vfstype = x_if_x_else_key(fs_vfstype, i, 'fstype')
                    new_fs_mntops = x_if_x_else_key(fs_mntops, i, 'options')
                    new_fs_freq = x_if_x_else_key(fs_freq, i, 'dump')
                    new_fs_passno = x_if_x_else_key(fs_passno, i, 'fsck')

                    f.write('{0} {1} {2} {3} {4} {5}\n'.format(
                        device, new_fs_file, new_fs_vfstype, new_fs_mntops,
                        new_fs_freq, new_fs_passno))
                else:
                    f.write('{0} {1} {2} {3} {4} {5}\n'.format(
                        i['device'], i['directory'], i['fstype'], i['options'],
                        i['dump'], i['fsck']))

    def remove_config_by_device(self, device):
        """
        Remove an entry from /etc/fstab based on the device
        """
        return self._remove_config_by_('device', device)

    def remove_config_by_directory(self, directory):
        """
        Removes an entry from /etc/fstab based on directory
        """
        return self._remove_config_by_('directory', directory)

    def _remove_config_by_(self, match_type, match_value):
        """
        Remove a line from /etc/fstab
        """
        lines = self._slurp()
        # Build a new list instead of removing entries from the list being iterated over
        remaining_lines = [line for line in lines
                           if line[match_type] != match_value]
        line_removed = len(remaining_lines) != len(lines)
        if line_removed:
            with open(self.fstab_file, 'w') as fstab_file:
                for line in remaining_lines:
                    fstab_file.write('{0} {1} {2} {3} {4} {5}\n'.format(
                        line['device'], line['directory'], line['fstype'],
                        line['options'], line['dump'], line['fsck']))
        else:
            self._logger.debug('{0}: no such entry {1} found'.format(
                self.fstab_file, match_value))
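
A short usage sketch of the manager above (device and mount point are illustrative):

fstab = Fstab()
fstab.add_config('/dev/sdb1', '/mnt/data', 'ext4')               # Append a new entry
fstab.modify_config_by_device('/dev/sdb1', fs_mntops='noatime')  # Update only the options
fstab.remove_config_by_directory('/mnt/data')                    # Drop the entry again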
Esempio n. 15
0
class DiskTools(object):
    """
    This class contains various helper methods wrt Disk maintenance
    """
    logger = Logger('ovs_extensions')  # Instantiated by classes inheriting from this class

    def __init__(self):
        raise Exception('Cannot instantiate, completely static class')

    @classmethod
    def create_partition(cls, disk_alias, disk_size, partition_start,
                         partition_size):
        """
        Creates a partition
        :param disk_alias: Path of the disk device
        :type disk_alias: str
        :param disk_size: Total size of disk
        :type disk_size: int
        :param partition_start: Start of partition in bytes
        :type partition_start: int
        :param partition_size: Size of partition in bytes
        :type partition_size: int
        :return: None
        """
        # Verify current label type and add GPT label if none present
        disk_alias = disk_alias.replace(r"'", r"'\''")
        try:
            command = "parted '{0}' print | grep 'Partition Table'".format(
                disk_alias)
            cls.logger.info(
                'Checking partition label-type with command: {0}'.format(
                    command))
            label_type = check_output(command,
                                      shell=True).strip().split(': ')[1]
        except CalledProcessError:
            label_type = 'error'
        if label_type in ('error', 'unknown'):
            try:
                cls.logger.info(
                    'Adding GPT label and trying to create partition again')
                check_output("parted '{0}' -s mklabel gpt".format(disk_alias),
                             shell=True)
                label_type = 'gpt'
            except Exception:
                cls.logger.error('Error during label creation')
                raise

        # Determine command to use based upon label type
        start = int(round(float(partition_start) / disk_size * 100))
        end = int(round(float(partition_size) / disk_size * 100)) + start
        if end > 100:
            end = 100
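
        # Worked example (assumed values): on a 100 GiB disk with partition_start = 50 GiB
        # and partition_size = 30 GiB, start = round(50 / 100 * 100) = 50 and
        # end = 50 + round(30 / 100 * 100) = 80, so parted is asked to create a
        # partition spanning the 50%-80% range of the disk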

        if label_type == 'gpt':
            command = "parted '{0}' -a optimal -s mkpart '{1}' '{2}%' '{3}%'".format(
                disk_alias, uuid.uuid4(), start, end)
        elif label_type == 'msdos':
            command = "parted '{0}' -a optimal -s mkpart primary ext4 '{1}%' '{2}%'".format(
                disk_alias, start, end)
        elif label_type == 'bsd':
            command = "parted '{0}' -a optimal -s mkpart ext4 '{1}%' '{2}%'".format(
                disk_alias, start, end)
        else:
            raise ValueError(
                'Unsupported label-type detected: {0}'.format(label_type))

        # Create partition
        cls.logger.info('Label type detected: {0}'.format(label_type))
        cls.logger.info('Command to create partition: {0}'.format(command))
        check_output(command, shell=True)

    @classmethod
    def make_fs(cls, partition_alias):
        """
        Creates a filesystem
        :param partition_alias: Path of the partition
        :type partition_alias: str
        :return: None
        """
        try:
            check_output("mkfs.ext4 -q '{0}'".format(
                partition_alias.replace(r"'", r"'\''")),
                         shell=True)
        except Exception:
            cls.logger.error('Error during filesystem creation')
            raise

    @classmethod
    def add_fstab(cls, partition_aliases, mountpoint, filesystem):
        """
        Add entry to /etc/fstab for mountpoint
        :param partition_aliases: Possible aliases of the partition to add
        :type partition_aliases: list
        :param mountpoint: Mountpoint on which device is mounted
        :type mountpoint: str
        :param filesystem: Filesystem used
        :type filesystem: str
        :return: None
        """
        if len(partition_aliases) == 0:
            raise ValueError('No partition aliases provided')

        with open('/etc/fstab', 'r') as fstab_file:
            lines = [line.strip() for line in fstab_file.readlines()]

        osmanager = cls._get_os_manager()
        used_path = None
        used_index = None
        mount_line = None
        for device_alias in partition_aliases:
            for index, line in enumerate(lines):
                if line.startswith('#'):
                    continue
                if line.startswith(device_alias) and re.match(
                        r'^{0}\s+'.format(re.escape(device_alias)), line):
                    used_path = device_alias
                    used_index = index
                # Example line: 'UUID=40d99523-a1e7-4374-84f2-85b5d14b516e  /  swap  sw  0  0'
                if len(line.split()) == 6 and line.split()[1] == mountpoint:
                    mount_line = line
            if used_path is not None:
                break

        if used_path is None:  # Partition not yet present with any of its possible aliases
            lines.append(
                osmanager.get_fstab_entry(partition_aliases[0], mountpoint,
                                          filesystem))
        else:  # Partition present, update information
            lines.pop(used_index)
            lines.insert(
                used_index,
                osmanager.get_fstab_entry(used_path, mountpoint, filesystem))

        if mount_line is not None:  # Mount point already in use by another device (potentially same device, but other device_path)
            lines.remove(mount_line)

        with file_mutex('ovs-fstab-lock'):
            with open('/etc/fstab', 'w') as fstab_file:
                fstab_file.write('{0}\n'.format('\n'.join(lines)))

    @staticmethod
    def mountpoint_exists(mountpoint):
        """
        Verify whether a mount point exists by browsing /etc/fstab
        :param mountpoint: Mount point to check
        :type mountpoint: str
        :return: True if mount point exists, False otherwise
        :rtype: bool
        """
        with open('/etc/fstab', 'r') as fstab_file:
            for line in fstab_file.readlines():
                if re.search(r'\s+{0}\s+'.format(re.escape(mountpoint)), line):
                    return True
        return False

    @classmethod
    def mount(cls, mountpoint):
        """
        Mount a partition
        :param mountpoint: Mount point on which to mount the partition
        :type mountpoint: str
        :return: None
        """
        try:
            mountpoint = mountpoint.replace(r"'", r"'\''")
            check_output("mkdir -p '{0}'".format(mountpoint), shell=True)
            check_output("mount '{0}'".format(mountpoint), shell=True)
        except Exception:
            cls.logger.exception('Error during mount')
            raise

    @classmethod
    def umount(cls, mountpoint):
        """
        Unmount a partition
        :param mountpoint: Mount point to un-mount
        :type mountpoint: str
        :return: None
        """
        try:
            check_output("umount '{0}'".format(
                mountpoint.replace(r"'", r"'\''")),
                         shell=True)
        except Exception:
            cls.logger.exception(
                'Unable to umount mount point {0}'.format(mountpoint))

    @classmethod
    def _get_os_manager(cls):
        raise NotImplementedError()

    @classmethod
    def retrieve_alias_mapping(cls, ssh_client=None):
        # type: (SSHClient) -> AliasMapping
        """
        Retrieve the alias mapping. Both ways
        :return: The AliasMapping
        :rtype: AliasMapping
        """
        ssh_client = ssh_client or SSHClient('127.0.0.1', username='******')
        name_alias_mapping = AliasMapping()
        for path_type in ssh_client.dir_list(directory='/dev/disk'):
            if path_type in [
                    'by-uuid', 'by-partuuid'
            ]:  # UUIDs can change after creating a filesystem on a partition
                continue
            directory = '/dev/disk/{0}'.format(path_type)
            for symlink in ssh_client.dir_list(directory=directory):
                symlink_path = '{0}/{1}'.format(directory, symlink)
                link = ssh_client.file_read_link(path=symlink_path)
                if link not in name_alias_mapping:
                    name_alias_mapping[link] = []
                name_alias_mapping[link].append(symlink_path)
        return name_alias_mapping
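
    # The resulting mapping ties each device node to all of its symlinks, e.g. (illustrative):
    # {'/dev/sda': ['/dev/disk/by-id/ata-SAMSUNG_SSD_...', '/dev/disk/by-path/pci-0000:00:1f...']}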

    @classmethod
    def model_devices(cls, ssh_client=None, name_alias_mapping=None, s3=False):
        # type: (Optional[SSHClient], Optional[AliasMapping], Optional[bool]) -> Tuple[List[Disk], AliasMapping]
        """
        Model all disks that are currently on this machine
        :param ssh_client: SSHClient instance
        :type ssh_client: SSHClient
        :param name_alias_mapping: The name to alias mapping (Optional)
        :type name_alias_mapping: dict
        :param s3: Whether or not to account for AWS ec2 instances
        :type s3: bool
        :return: The modeled disks and the name to alias mapping that was used
        :rtype: Tuple[List[Disk], AliasMapping]
        """
        ssh_client = ssh_client or SSHClient('127.0.0.1', username='******')
        if not name_alias_mapping:
            name_alias_mapping = cls.retrieve_alias_mapping(ssh_client)

            if s3:
                name_alias_mapping.update(cls.map_s3_volumes())

        block_devices = cls._model_block_devices(ssh_client)
        cls.logger.info('Starting to iterate over disks')
        disks = cls._model_devices(ssh_client, name_alias_mapping,
                                   block_devices)
        return disks, name_alias_mapping

    @classmethod
    def rename_to_aws(cls, name):
        # type: (str) -> str
        """
        Rename a regular disk device name to its AWS equivalent, e.g. sda -> xvda
        :param name: name of the disk to be renamed
        :type name: str
        :return: new diskname
        :rtype: str
        """
        name = os.path.basename(name)  # Last part of the path is the name of the device
        if name.startswith('sd'):
            name = name.replace('sd', 'xvd')
        return os.path.join('/dev', name)

    @classmethod
    def convert_to_virtual_id(cls, volume_id):
        # type: (str) -> str
        """
        Add the path mapping to the ID
        :param volume_id: Volume id to be formatted to a path
        :type volume_id: str
        :return: /dev/disk/by-virtual-id/<vol-id>
        """
        return os.path.join('/dev/disk/by-virtual-id', volume_id)

    @classmethod
    def map_s3_volumes(cls):
        # type: () -> Dict[str,str]
        """
        Fetch all S3 volumes accessible on the environment
        :return: All S3 disk names with their mapped volume-IDs
        """
        try:
            from ec2_metadata import ec2_metadata
            import boto3
        except ImportError as ex:
            raise RuntimeError('Failed to load python package: {0}'.format(ex))

        filters = [{  # Renamed from 'filter' to avoid shadowing the builtin
            'Name': 'attachment.instance-id',
            'Values': [ec2_metadata.instance_id]
        }]
        ec2 = boto3.resource('ec2', region_name=ec2_metadata.region)
        volumes = ec2.volumes.filter(Filters=filters)
        name_map = {}
        for volume in volumes:
            for device in volume.attachments:
                name = cls.rename_to_aws(device['Device'])
                volume_id = cls.convert_to_virtual_id(device['VolumeId'])
                name_map[name] = [volume_id]
        return name_map

    @classmethod
    def _model_devices(cls, ssh_client, name_alias_mapping, entries):
        # type: (SSHClient, AliasMapping, List[LSBLKEntry]) -> List[Disk]
        """
        Model the devices
        :param ssh_client: The SSHClient instance
        :type ssh_client: SSHClient
        :param name_alias_mapping: The name to alias mapping
        :type name_alias_mapping: AliasMapping
        :param entries: List of LSBLKEntries
        :type entries: List[LSBLKEntry]
        :return: List of Disks
        :rtype: List[Disk]
        """
        def get_friendly_path(device_name):
            return '/dev/{0}'.format(device_name)

        parted_entries_by_device = {}
        disk_mapping = {}
        parsed_devices = []
        for device_entry in entries:  # type: LSBLKEntry
            if device_entry.type == LSBLKEntry.EntryTypes.ROM:
                continue

            is_device = cls.is_device(device_entry.kname, ssh_client)
            friendly_path = get_friendly_path(device_entry.kname)
            system_aliases = sorted(
                name_alias_mapping.get(friendly_path, [friendly_path]))
            device_is_also_partition = False
            device_state = 'OK'
            if is_device:
                disk = Disk.from_lsblk_entry(device_entry, system_aliases)
                disk_mapping[device_entry.kname] = disk
                device_state = disk.state
                # LVM, RAID1, ... have the tendency to be a device with a partition on it, but the partition is not reported by 'lsblk'
                device_is_also_partition = bool(device_entry.mountpoint)
                parsed_devices.append(disk)
            if not is_device or device_is_also_partition:
                current_device_name = None
                current_device_state = None
                if device_is_also_partition:
                    offset = 0
                    current_device_name = device_entry.kname
                    current_device_state = device_state
                else:
                    offset = 0
                    # Check from which block device the partition is from
                    for device in reversed(parsed_devices):  # type: Disk
                        try:
                            current_device_name = device.name
                            current_device_state = device.state
                            # Will throw exception if the partition is not part of that device
                            starting_block = cls.get_starting_block(
                                current_device_name, device_entry.kname,
                                ssh_client)
                            # The device was found. Let's try the parted output
                            if device not in parted_entries_by_device:
                                parted_entries_by_device[device] = PartedEntry.parse_partitions(
                                    get_friendly_path(device.name), ssh_client)
                            parted_entries = parted_entries_by_device[device]
                            if parted_entries:
                                for parted_entry in parted_entries:  # type: PartedEntry
                                    if device_entry.kname.endswith(
                                            str(parted_entry.number)):
                                        offset = parted_entry.start
                                    if device_entry.kname.endswith(
                                            str(parted_entry.number)):
                                        offset = parted_entry.start
                            else:
                                offset = starting_block * device_entry.log_sec
                            break
                        except Exception:
                            pass
                if current_device_name is None:
                    raise RuntimeError(
                        'Failed to retrieve the device information for current partition'
                    )
                partition = Partition(
                    size=device_entry.size,
                    # Device state is either None or converted to OK or FAILURE at this point
                    state=current_device_state or 'FAILURE',
                    offset=offset,
                    aliases=system_aliases,
                    filesystem=device_entry.fstype,
                    mountpoint=device_entry.mountpoint)
                if device_entry.mountpoint and device_entry.fstype != LSBLKEntry.FSTypes.SWAP:
                    if not cls.mountpoint_usable(device_entry.mountpoint,
                                                 ssh_client):
                        partition.state = 'FAILURE'
                associated_disk = disk_mapping[current_device_name]  # type: Disk
                associated_disk.add_partition_model(partition)
        return disk_mapping.values()

    @classmethod
    def is_device(cls, device_name, ssh_client):
        # type: (str, SSHClient) -> bool
        """
        Determine if the LSBLKEntry maps to a device or a partition
        :param device_name: Name of the device
        :type device_name: str
        :param ssh_client: SSHClient instance
        :type ssh_client: SSHClient
        :return: True if device
        :rtype: bool
        """
        # If this returns a different path, it means it's a device and not a partition
        return bool(
            ssh_client.file_read_link('/sys/block/{0}'.format(device_name)))

    @classmethod
    def get_starting_block(cls, device_name, partition_name, ssh_client):
        # type: (str, str, SSHClient) -> int
        """
        Get the starting block number of the partition
        :param device_name: Name of the device the partition is on
        :param partition_name: Name of the partition
        :param ssh_client: SSHClient instance
        :type ssh_client: SSHClient
        :return: The starting block
        :rtype: int
        """
        starting_block_file = '/sys/block/{0}/{1}/start'.format(
            device_name, partition_name)
        return int(ssh_client.file_read(starting_block_file))

    @classmethod
    def _model_block_devices(cls, ssh_client):
        # type: (SSHClient) -> List[LSBLKEntry]
        """
        Models the block devices found on the system
        :param ssh_client: SSHClient instance
        :type ssh_client: SSHClient
        :return: List of block devices
        :rtype: List[LSBLKEntry]
        """
        # Parse 'lsblk' output
        # --exclude 1 for RAM devices, 2 for floppy devices, 11 for CD-ROM devices, 43 for nbd devices (See https://www.kernel.org/doc/html/v4.15/admin-guide/devices.html)
        command = [
            'lsblk', '--json', '--bytes', '--noheadings', '--exclude',
            '1,2,11,43'
        ]
        output = '--output=KNAME,SIZE,MODEL,STATE,MAJ:MIN,FSTYPE,TYPE,ROTA,MOUNTPOINT,LOG-SEC{0}'
        cls.logger.info('Running command: {0}'.format(' '.join(command + [output.format(',SERIAL')])))
        try:
            devices = json.loads(
                ssh_client.run(command +
                               [output.format(',SERIAL')]))  # type: dict
        except Exception:
            devices = json.loads(
                ssh_client.run(command + [output.format('')]))  # type: dict
        block_devices = devices.get('blockdevices', [])  # type: list
        return [
            LSBLKEntry.from_lsblk_output(device) for device in block_devices
        ]
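
    # For reference, 'lsblk --json --bytes' emits a structure like (abbreviated, illustrative):
    # {"blockdevices": [{"kname": "sda", "type": "disk", "size": 500107862016, ...},
    #                   {"kname": "sda1", "type": "part", "mountpoint": "/", ...}]}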

    @classmethod
    def mountpoint_usable(cls, mountpoint, ssh_client=None):
        # type: (str, SSHClient) -> bool
        """
        See if the mountpoint is usable
        :param mountpoint: Mountpoint to test
        :type mountpoint: str
        :param ssh_client: Client to use
        :type ssh_client: SSHClient
        :return: True if the mountpoint is usable
        :rtype: bool
        """
        ssh_client = ssh_client or SSHClient('127.0.0.1', username='******')
        try:
            filename = '{0}/{1}'.format(mountpoint, str(time.time()))
            ssh_client.run(['touch', filename])
            ssh_client.run(['rm', filename])
            return True
        except Exception:
            return False
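
Tying the helpers together, a hedged sketch of modeling the local disks. LinuxDiskTools is a hypothetical concrete subclass that implements _get_os_manager; the credentials are placeholders:

client = SSHClient('127.0.0.1', username='root')  # Placeholder credentials
disks, alias_mapping = LinuxDiskTools.model_devices(ssh_client=client)
for disk in disks:
    print('{0}: {1}'.format(disk.name, disk.state))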
Esempio n. 16
0
class PyrakoonClientPooled(PyrakoonBase):
    """
    Pooled arakoon client wrapper
    Exposes the same API as the base PyrakoonClient while using a pool underneath

    NOTE: when gevent is not monkey patched, this client only works in the main thread.
    Monkey patching can be done using
    from gevent import monkey
    monkey.patch_all()

    This has to be called at the start of the application.
    """

    _logger = Logger('extensions')

    def __init__(self, cluster, nodes, pool_size=10, retries=10, retry_back_off_multiplier=2, retry_interval_sec=2):
        # type: (str, Dict[str, Tuple[str, int]], int, int, int, int) -> None
        """
        Initializes the client
        :param cluster: Identifier of the cluster
        :type cluster: str
        :param nodes: Dict with all node sockets. {name of the node: (ip of node, port of node)}
        :type nodes: dict
        :param pool_size: Number of clients to keep in the pool
        :type pool_size: int
        :param retries: Number of retries to do
        :type retries: int
        :param retry_back_off_multiplier: Back off multiplier. Multiplies the retry_interval_sec with this number ** retry
        :type retry_back_off_multiplier: int
        :param retry_interval_sec: Seconds to wait before retrying. Exponentially increases with every retry.
        :type retry_interval_sec: int
        """
        self._pool = PyrakoonPool(cluster, nodes, pool_size, retries, retry_back_off_multiplier, retry_interval_sec)
        self._sequences = {}

    def get(self, key, consistency=None):
        # type: (str, Consistency) -> Any
        """
        Retrieves a certain value for a given key
        :param key: The key whose value you are interested in
        :type key: str
        :param consistency: Consistency of the get
        :type consistency: Consistency
        :return: The value associated with the given key
        :rtype: any
        """
        with self._pool.get_client() as client:
            return client.get(key, consistency)

    def get_multi(self, keys, must_exist=True):
        # type: (List[str], bool) -> Generator[Tuple[str, any]]
        """
        Get multiple keys at once
        :param keys: All keys to fetch
        :type keys" List[str]
        :param must_exist: Should all listed keys exist
        :type must_exist: bool
        :return: Generator that yields key value pairs
        :rtype: Iterable[Tuple[str, any]]
        """
        with self._pool.get_client() as client:
            return client.get_multi(keys, must_exist=must_exist)

    def set(self, key, value, transaction=None):
        # type: (str, any, str) -> None
        """
        Sets the value for a key to a given value
        If the key does not yet have a value associated with it, a new key value pair will be created.
        If the key does have a value associated with it, it is overwritten.
        :param key: The key to set/update
        :type key: str
        :param value: The value to store
        :type value: any
        :param transaction: ID of the transaction to add the update to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addSet(key, value)
        with self._pool.get_client() as client:
            return client.set(key, value)

    def prefix(self, prefix):
        # type: (str) -> Generator[str]
        """
        Lists all keys starting with the given prefix
        :param prefix: Prefix of the key
        :type prefix: str
        :return: Generator that yields keys
        :rtype: iterable[str]
        """
        with self._pool.get_client() as client:
            return client.prefix(prefix)

    def prefix_entries(self, prefix):
        # type: (str) -> Generator[Tuple[str, any]]
        """
        Lists all key, value pairs starting with the given prefix
        :param prefix: Prefix of the key
        :type prefix: str
        :return: Generator that yields key, value pairs
        :rtype: Iterable[Tuple[str, any]]
        """
        with self._pool.get_client() as client:
            return client.prefix_entries(prefix)

    def delete(self, key, must_exist=True, transaction=None):
        # type: (str, bool, str) -> any
        """
        Deletes a given key from the store
        :param key: Key to remove
        :type key: str
        :param must_exist: Should the key exist
        :type must_exist: bool
        :param transaction: Transaction to apply the update to
        :type transaction: str
        :return: The previous value in case must_exist=False, None in case must_exist=True
        :rtype: any
        """
        if transaction is not None:
            if must_exist is True:
                return self._sequences[transaction].addDelete(key)
            else:
                return self._sequences[transaction].addReplace(key, None)
        with self._pool.get_client() as client:
            return client.delete(key, must_exist)

    def delete_prefix(self, prefix, transaction=None):
        # type: (str, Optional[str]) -> None
        """
        Removes a given prefix from the store
        :param prefix: Prefix of the key
        :type prefix: str
        :param transaction: Transaction to apply the update to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addDeletePrefix(prefix)
        with self._pool.get_client() as client:
            return client.delete_prefix(prefix)

    def nop(self):
        # type: () -> None
        """
        Executes a nop command
        """
        with self._pool.get_client() as client:
            return client.nop()

    def exists(self, key):
        # type: (str) -> bool
        """
        Check if key exists
        :param key: Key to check
        :type key: str
        :return: True if the key exists else False
        :rtype: bool
        """
        with self._pool.get_client() as client:
            return client.exists(key)

    def assert_value(self, key, value, transaction=None):
        # type: (str, any, str) -> None
        """
        Asserts a key-value pair
        :param key: Key of the value to assert
        :type key: str
        :param value: Value to assert
        :type value: any
        :param transaction: Transaction to apply the assert to
        :type transaction: str
        :raises: ArakoonAssertionFailed if the value could not be asserted
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addAssert(key, value)
        with self._pool.get_client() as client:
            return client.assert_value(key, value)

    def assert_exists(self, key, transaction=None):
        # type: (str, str) -> None
        """
        Asserts that a given key exists
        :param key: Key to assert
        :type key: str
        :param transaction: Transaction to apply the assert to
        :type transaction: str
        :raises: ArakoonAssertionFailed if the value could not be asserted
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addAssertExists(key)
        with self._pool.get_client() as client:
            return client.assert_exists(key)

    def begin_transaction(self):
        # type: () -> str
        """
        Creates a transaction (wrapper around Arakoon sequences)
        :return: Identifier of the transaction
        :rtype: str
        """
        key = str(uuid.uuid4())
        self._sequences[key] = Sequence()
        return key

    def apply_transaction(self, transaction, delete=True):
        # type: (str, Optional[bool]) -> None
        """
        Applies a transaction
        :param transaction: Identifier of the transaction
        :type transaction: str
        :param delete: Delete transaction after attempting to apply the transaction
        Disabling this option requires a delete_transaction to be called at some point to avoid memory leaking
        :type delete: bool
        :return: None
        :rtype: NoneType
        """
        with self._pool.get_client() as client:
            try:
                sequence = self._sequences[transaction]
                return client._apply_transaction(sequence)
            finally:
                if delete:
                    self.delete_transaction(transaction)

    def delete_transaction(self, transaction):
        """
        Deletes a transaction
        :param transaction: Identifier of the transaction
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        self._sequences.pop(transaction, None)

    def lock(self, name, wait=None, expiration=60):
        # type: (str, float, float) -> PyrakoonLock
        """
        Returns the Arakoon lock implementation
        :param name: Name to give to the lock
        :type name: str
        :param wait: Wait time for the lock (in seconds)
        :type wait: float
        :param expiration: Expiration time for the lock (in seconds)
        :type expiration: float
        :return: The lock implementation
        :rtype: PyrakoonLock
        """
        raise NotImplementedError('')

    def apply_callback_transaction(self, transaction_callback, max_retries=0, retry_wait_function=None):
        # type: (callable, int, callable) -> None
        """
        Apply a transaction which is the result of the callback.
        The callback should rebuild the complete transaction on every attempt so its asserts are re-evaluated in case a previous run was interrupted.
        Handles all Arakoon errors by re-executing the callback until it finishes or until no more retries can be made
        :param transaction_callback: Callback function which returns the transaction ID to apply
        :type transaction_callback: callable
        :param max_retries: Number of retries to attempt. Retries are attempted when an assertion fails.
        Defaults to 0
        :type max_retries: int
        :param retry_wait_function: Function called before retrying the transaction. The current try number is passed as an argument
        Defaults to lambda retry: time.sleep(randint(0, 25) / 100.0)
        :type retry_wait_function: callable
        :return: None
        :rtype: NoneType
        """
        def default_retry_wait(retry):
            _ = retry
            time.sleep(random.randint(0, 25) / 100.0)

        retry_wait_func = retry_wait_function or default_retry_wait
        tries = 0
        while True:
            tries += 1
            try:
                transaction = transaction_callback()  # type: str
                return self.apply_transaction(transaction)
            except ArakoonAssertionFailed:
                if tries > max_retries:
                    raise
                self._logger.warning('Assertion failed. Retrying {0} more time(s)'.format(max_retries - tries + 1))
                retry_wait_func(tries)
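
The transaction API composes as follows; a sketch assuming an already constructed client for an Arakoon cluster (cluster name and socket are placeholders):

client = PyrakoonClientPooled('ovsdb', {'node_1': ('10.100.1.1', 26400)})  # Placeholder values
tx = client.begin_transaction()
client.assert_exists('some/key', transaction=tx)     # Queued on the sequence, not executed yet
client.set('some/key', 'new-value', transaction=tx)  # Queued as well
client.apply_transaction(tx)  # Executes the sequence atomically and cleans it up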
Esempio n. 17
0
class VDiskBalance(object):

    logger = Logger('vdisk_balance')

    def __init__(self,
                 storagedriver,
                 limit,
                 balance=None,
                 overflow=None,
                 added=None,
                 hosted_guids=None):
        # type: (StorageDriver, int, Optional[List[str]], Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> None
        """
        Represents the vdisk balance of a storagedriver
        :param storagedriver: StorageDriver to balance for
        :type storagedriver: StorageDriver
        :param limit: Maximum amount of vdisks to host. -1 means no limit
        :type limit: int
        :param balance: Balance of vdisk guids to use. Used in serializing/deserializing
        :type balance: Optional[List[str]]
        :param overflow: Overflow of vdisk guids to use. Used in serializing/deserializing
        :type overflow: Optional[List[str]]
        :param added: List of vdisk guids added to the balance. Used in serializing/deserializing
        :type added: Optional[List[str]]
        :param hosted_guids: Guids of the vDisks hosted on the given storagedriver. Used in serializing/deserializing
        :type hosted_guids: Optional[List[str]]
        """
        self.storagedriver = storagedriver
        self.hosted_guids = hosted_guids if hosted_guids is not None else storagedriver.vdisks_guids
        self.limit = limit

        combination_vars = [balance, overflow, added, hosted_guids]
        combination_vars_names = [
            'balance', 'overflow', 'added', 'hosted_guids'
        ]
        combination_vars_given = all(v is not None for v in combination_vars)
        if any(v is not None
               for v in combination_vars) and not combination_vars_given:
            raise ValueError(
                'When providing any of the variables {}, all should be provided'
                .format(', '.join(combination_vars_names)))
        if combination_vars_given:
            self.balance = balance
            self.overflow = overflow
            self.added = added
        else:
            self.balance, self.overflow = self.impose_limit()
            self.added = []

    def __add__(self, other):
        if not isinstance(
                other,
                VDiskBalance) or self.storagedriver != other.storagedriver:
            raise ValueError('Different objects cannot be added')
        limit = self.limit + other.limit
        self.set_limit(limit)
        self.added += other.added
        return self  # Merges in place; returning self keeps 'a + b' usable

    def set_limit(self, limit):
        """
        Set a new limit
        :param limit: Limit to set
        :type limit: int
        :return: The guids of vdisks that can fit and the guids that cannot fit on the current host
        :rtype: Tuple[List[str], List[str]]
        """
        self.limit = limit
        self.balance, self.overflow = self.impose_limit()
        return self.balance, self.overflow

    def impose_limit(self):
        # type: () -> Tuple[List[str], List[str]]
        """
        Impose the set limit. Returns the max amount of vdisks that can be hosted and the vdisks that need to go
        :return: The guids of vdisks that can fit and the guids that cannot fit on the current host
        :rtype: Tuple[List[str], List[str]]
        """
        if self.limit == -1:
            return self.hosted_guids, []
        overflow = self.hosted_guids[self.limit:]
        balance = self.hosted_guids[:self.limit]
        return balance, overflow

    def fill(self, vdisk_guids):
        # type: (List[str]) -> Tuple[List[str], List[str]]
        """
        Fill this balance until the limit is reached
        :param vdisk_guids: Guids to add
        :type vdisk_guids: List[str]
        :return: The guids that could be added to this balance and the guids that could not be added
        :rtype: Tuple[List[str], List[str]]
        """
        if self.limit == -1:
            amount_to_add = len(vdisk_guids)  # No limit set: everything fits
        else:
            # Guard against a balance that is already at or over its limit
            amount_to_add = max(0, self.limit - len(self.balance))
        added = vdisk_guids[:amount_to_add]
        overflow = vdisk_guids[amount_to_add:]
        self.balance.extend(added)
        self.added.extend(added)
        return added, overflow
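
    # Example (assumed state): with limit=3 and two guids already in the balance,
    # fill(['c', 'd', 'e']) returns (['c'], ['d', 'e']): one slot was free, so 'c'
    # is accepted while 'd' and 'e' are reported back as overflow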

    def generate_overview(self):
        # type: () -> dict
        """
        Generate the move overview depending on the current state
        :return: The overview from where the disks are coming from
        :rtype: dict
        """
        added_source_overview = {}
        for vdisk_guid in self.added:
            storagedriver_id = VDisk(vdisk_guid).storagedriver_id
            if storagedriver_id not in added_source_overview:
                added_source_overview[storagedriver_id] = []
            added_source_overview[storagedriver_id].append(vdisk_guid)
        overview = {
            'added': self.added,
            'balance': self.balance,
            'overflow': self.overflow,
            'add_source_overview': added_source_overview
        }
        return overview

    def execute_balance_change(self,
                               force=False,
                               user_input=False,
                               abort_on_error=False):
        # type: (Optional[bool], Optional[bool], Optional[bool]) -> Tuple[List[str], List[str]]
        """
        Execute the necessary steps to balance out
        :param force: Indicates whether to force the migration or not (forcing can lead to data loss)
        :type force: bool
        :param user_input: require user input to proceed to next vDisk
        :type user_input: bool
        :param abort_on_error: Abort script when error occurs during migration
        :type abort_on_error: bool
        :return: List with all successful moves, list with all failed moves
        :rtype: Tuple[List[str], List[str]]
        """
        failed_moves = []
        successful_moves = []
        vdisk_guid = None
        try:
            for vdisk_guid in self.added:
                try:
                    self._execute_move(vdisk_guid, self.storagedriver, force,
                                       user_input)
                    successful_moves.append(vdisk_guid)
                except KeyboardInterrupt:
                    raise
                except:
                    self.logger.exception(
                        'Unable to move VDisk {0} to {1}'.format(
                            vdisk_guid, self.storagedriver.storagerouter_guid))
                    if abort_on_error:
                        raise RuntimeError(
                            "Something went wrong during moving VDisk {0} to {1}"
                            .format(vdisk_guid,
                                    self.storagedriver.storagerouter_guid))
                    failed_moves.append(vdisk_guid)
        except KeyboardInterrupt:
            interrupt_msg = 'Interrupted while moving vdisks. The last move (vDisk {0}) might be in an inconsistent state.'.format(
                vdisk_guid)
            self.logger.warning(interrupt_msg)
            if user_input:
                if successful_moves:
                    print('Successfully moved vDisks: \n {0}'.format(
                        ', '.join(successful_moves)))
                if failed_moves:
                    print('\nFailed to move vDisks:\n {0}'.format(
                        ', '.join(failed_moves)))
            raise

        return successful_moves, failed_moves

    def execute_balance_change_through_overflow(self,
                                                balances,
                                                force=False,
                                                user_input=False,
                                                abort_on_error=False):
        # type: (List[VDiskBalance], bool, bool, bool) -> Tuple[List[str], List[str]]
        """
        Execute the necessary steps to balance out. Starts from the overflow so all vdisks are first moved away from this host
        The other balances must be passed in to determine where each vdisk has to move to
        :param balances: Other balances to work with. Used to find the owner of this balance its overflow
        :type balances: List[VDiskBalance]
        :param force: Indicates whether to force the migration or not (forcing can lead to data loss)
        :type force: bool
        :param user_input: require user input to proceed to next vDisk
        :type user_input: bool
        :param abort_on_error: Abort script when error occurs during migration
        :type abort_on_error: bool
        :return: List with all successful moves, list with all failed moves
        :rtype: tuple
        """
        failed_moves = []
        successful_moves = []
        vdisk_guid = None
        try:
            vdisk_balance_map = self.map_vdisk_to_destination(balances)
            for vdisk_guid in self.overflow:
                add_balance = vdisk_balance_map[vdisk_guid]
                destination_std = add_balance.storagedriver
                try:
                    self._execute_move(vdisk_guid, destination_std, force,
                                       user_input)
                    successful_moves.append(vdisk_guid)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    self.logger.exception(
                        'Unable to move VDisk {0} to {1}'.format(
                            vdisk_guid, destination_std.storagerouter_guid))
                    if abort_on_error:
                        raise RuntimeError(
                            "Something went wrong during moving VDisk {0} to {1}"
                            .format(vdisk_guid,
                                    destination_std.storagerouter_guid))
                    failed_moves.append(vdisk_guid)
        except KeyboardInterrupt:
            interrupt_msg = 'Interrupted while moving vDisks. The last move (vDisk {0}) might be in an inconsistent state.'.format(
                vdisk_guid)
            self.logger.warning(interrupt_msg)
            if user_input:
                if successful_moves:
                    print('Successfully moved vDisks: \n {0}'.format(
                        ', '.join(successful_moves)))
                if failed_moves:
                    print('\nFailed to move vDisks:\n {0}'.format(
                        ', '.join(failed_moves)))
            raise

        return successful_moves, failed_moves

    def _execute_move(self,
                      vdisk_guid,
                      destination_std,
                      force,
                      interactive,
                      minimum_potential=1):
        """
        Perform a move
        :param vdisk_guid: VDisk to move
        :param destination_std: Destination to move to
        :param force: Use force when moving
        :param interactive: Prompt for user input before moving
        :return: None
        """
        _ = force
        try:
            vd = VDisk(vdisk_guid)
            current_sr = StorageRouter(vd.storagerouter_guid).name
            next_sr = destination_std.storagerouter.name
            if vd.storagerouter_guid == destination_std.storagerouter_guid:
                # Ownership changed in the meantime
                self.logger.info(
                    'No longer need to move VDisk {0} to {1}'.format(
                        vdisk_guid, destination_std.storagerouter.name))
                return
            rebalance_message = 'Rebalancing vPool by moving vDisk {0} from {1} to {2}'.format(
                vdisk_guid, current_sr, next_sr)
            if interactive:
                retry = True
                while retry:
                    proceed = raw_input('{0}. Continue? (press Enter)'.format(
                        rebalance_message))
                    if proceed == '':  # 'Enter' was pressed
                        retry = False
            try:
                volume_potential = destination_std.vpool.storagedriver_client.volume_potential(
                    str(destination_std.storagedriver_id))
            except Exception:
                self.logger.exception(
                    'Unable to retrieve volume potential. Aborting')
                raise
            if volume_potential > minimum_potential:
                self.logger.info(rebalance_message)
                try:
                    vd.storagedriver_client.migrate(str(vd.volume_id),
                                                    str(destination_std.name),
                                                    False)
                except RuntimeError:
                    # When a RuntimeError occurs, try restarting the volume locally as a safety measure.
                    self.logger.warning(
                        'Encountered RuntimeError. Checking if vdisk({0}) is not running and restarting it.'
                        .format(vd.guid))
                    vd.invalidate_dynamics('info')
                    if vd.info['live_status'] != vd.STATUSES.RUNNING:
                        vd.storagedriver_client.restart_object(
                            str(vd.volume_id), False)
                        # Now check if the migration succeeded and if the volume is running on the correct storagedriver.
                        if vd.storagedriver_id == destination_std.name:
                            self.logger.info(
                                'Vdisk({0}) got restarted and runs on destination storagedriver. Previous error can be ignored.'
                                .format(vd.guid))
                        else:
                            self.logger.warning(
                                'Vdisk({0}) got restarted but doesn\'t run on destination storagedriver.'
                                .format(vd.guid))

            else:
                raise ValueError(
                    'Volume potential {0} is not higher than the required minimum {1}. Not moving any more vDisks!'.
                    format(volume_potential, minimum_potential))
        except ObjectNotFoundException as ex:
            self.logger.warning(
                'Could not retrieve an object. Assuming it\'s a vDisk: {}'.
                format(ex))

    @staticmethod
    def map_vdisk_to_destination(balances):
        # type: (List[VDiskBalance]) -> Dict[str, VDiskBalance]
        """
        Map all vdisks to destinations of balances
        :param balances: Balances to map for
        :return: guid - balance map
        """
        vdisk_balance_map = {}
        for balance in balances:  # type: VDiskBalance
            for vdisk_guid in balance.added:
                if vdisk_guid in vdisk_balance_map:
                    raise RuntimeError(
                        'Vdisk {} has multiple destinations'.format(
                            vdisk_guid))
                vdisk_balance_map[vdisk_guid] = balance
        return vdisk_balance_map
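
    # Illustrative sketch: the returned map gives O(1) lookup of each vDisk's
    # destination balance (`balances` is assumed to be a list of VDiskBalance
    # objects computed elsewhere):
    #
    #     destination_map = VDiskBalance.map_vdisk_to_destination(balances)
    #     for vdisk_guid, dst in destination_map.iteritems():
    #         print('{0} -> {1}'.format(vdisk_guid, dst.storagedriver.name))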

    def __str__(self):
        return 'StorageRouter {} of VPool {}: hosting prior to changes: {}, imposed limit {}, hosting after changes: {}'\
            .format(self.storagedriver.storagerouter.name,
                    self.storagedriver.vpool.name,
                    len(self.hosted_guids),
                    self.limit,
                    len(self.balance))

    def to_dict(self):
        """
        Export the VDiskBalance object. Workaround to being unable to pickle/serialize a DataObject
        Use the associated import function to cast it back to an object
        :return:
        """
        return {
            'storagedriver_guid': self.storagedriver.guid,
            'hosted_guids': self.hosted_guids,
            'limit': self.limit,
            'balance': self.balance,
            'overflow': self.overflow,
            'added': self.added
        }

    @staticmethod
    def from_dict(data):
        # type: (Dict[str, Union[str, int, List[str]]]) -> VDiskBalance
        """
        Instantiate a VDiskBalance through a dict. See the to_dict method for its form
        :param data: Data dict
        :return: The instantiated VDiskBalance
        """
        kwargs = data.copy()
        kwargs['storagedriver'] = StorageDriver(
            kwargs.pop('storagedriver_guid'))
        return VDiskBalance(**kwargs)
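
A round trip through to_dict/from_dict looks as follows; a minimal sketch, assuming `balance` is a valid VDiskBalance instance and using json only to emulate transport:

import json

payload = json.dumps(balance.to_dict())  # serialize for transport/storage
restored = VDiskBalance.from_dict(json.loads(payload))  # rebuild the object
assert restored.storagedriver.guid == balance.storagedriver.guid
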
class NBDInstaller(object):
    """
    Command line NBD installer
    """
    NBDS_MAX_DEFAULT = 255
    MAX_PART_DEFAULT = 15
    NBD_MODPROBE_LOCATION = '/etc/modprobe.d/nbd.conf'
    MODULES_PATH = '/etc/modules'

    _logger = Logger('extensions-nbd')

    def __init__(self):
        pass

    @staticmethod
    def setup(nbds_max=NBDS_MAX_DEFAULT, max_part=MAX_PART_DEFAULT):
        # type: (int, int) -> None
        """
        Setup of the nbd manager. This visible function should be used for installing via python shell and only accepts correct arguments.
        Only allowed parameters are currently
        :param nbds_max:
        :type nbds_max: int
        :param max_part: maximum number of partitions to be made
        :type max_part: int
        :return: None
        """
        NBDInstaller._setup(nbds_max=nbds_max, max_part=max_part)

    @staticmethod
    def _setup(**kwargs):
        # type: (**int) -> None
        """
        Set up the NBD manager. Currently allowed parameters:
        :param nbds_max: maximum number of NBD devices to expose
        :type nbds_max: int
        :param max_part: maximum number of partitions per NBD device
        :type max_part: int
        :return: None
        """
        NBDInstaller._logger.info('Started setup of NBD-manager.')
        with open(NBDInstaller.NBD_MODPROBE_LOCATION, 'w') as fh:
            for k, v in kwargs.iteritems():
                fh.write('options nbd {0}={1}\n'.format(k, v))
        with open(NBDInstaller.MODULES_PATH, 'a') as fh2:
            fh2.write('volumedriver-nbd\n')

        check_output(['modprobe', 'nbd'])
        check_output(['apt-get', 'install', 'volumedriver-nbd'])

        NBDInstaller._logger.info('Successfully loaded NBD')

    @staticmethod
    def remove():
        # type: () -> None
        """
        Removes the NBD manager
        :return: None
        """
        NBDInstaller._logger.info('Started removal of NBD-manager.')
        check_output(['rm', NBDInstaller.NBD_MODPROBE_LOCATION])
        with open(NBDInstaller.MODULES_PATH, 'r') as module_file:
            lines = module_file.readlines()
        with open(NBDInstaller.MODULES_PATH, 'w') as fh:
            for line in lines:
                if 'volumedriver-nbd' not in line:
                    fh.write(line)
        check_output(['apt-get', 'remove', 'volumedriver-nbd'])
        NBDInstaller._logger.info('Successfully removed NBD')
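
Under the assumptions above, a default installation boils down to the following sketch (the file contents shown are what _setup would write, not verified output):

NBDInstaller.setup()  # uses nbds_max=255 and max_part=15

# /etc/modprobe.d/nbd.conf afterwards:
#   options nbd nbds_max=255
#   options nbd max_part=15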
Example n. 19
0
class BaseClient(object):
    """
    Basic API client
    - Supports Authorization with tokens
    - Caches tokens
    """
    disable_warnings(InsecurePlatformWarning)
    disable_warnings(InsecureRequestWarning)
    disable_warnings(SNIMissingWarning)

    _logger = Logger('api')

    def __init__(self,
                 ip,
                 port,
                 credentials=None,
                 verify=False,
                 version='*',
                 raw_response=False,
                 cache_store=None):
        """
        Initializes the object with credentials and connection information
        :param ip: IP to which to connect
        :type ip: str
        :param port: Port on which to connect
        :type port: int
        :param credentials: Credentials to connect
        :type credentials: tuple
        :param verify: Verify the SSL certificates of the server
        :type verify: bool
        :param version: API version
        :type version: str or int
        :param raw_response: Retrieve the raw response value
        :type raw_response: bool
        :param cache_store: Store in which to keep the generated token for the client instance
        :type cache_store: any
        :return: None
        :rtype: NoneType
        """
        if credentials is not None and len(credentials) != 2:
            raise RuntimeError(
                'Credentials should be None (no authentication) or a tuple containing client_id and client_secret (authenticated)'
            )
        self.ip = ip
        self.port = port
        self.client_id = credentials[0] if credentials is not None else None
        self.client_secret = credentials[1] if credentials is not None else None
        self._url = 'https://{0}:{1}/api'.format(ip, port)
        self._key = hashlib.sha256('{0}{1}{2}{3}'.format(
            self.ip, self.port, self.client_id,
            self.client_secret)).hexdigest()
        self._token = None
        self._verify = verify
        self._version = version
        self._raw_response = raw_response
        self._volatile_client = cache_store

    def _connect(self):
        """
        Authenticates to the api
        """
        headers = {
            'Accept':
            'application/json',
            'Authorization':
            'Basic {0}'.format(
                base64.b64encode('{0}:{1}'.format(self.client_id,
                                                  self.client_secret)).strip())
        }
        raw_response = requests.post(url='{0}/oauth2/token/'.format(self._url),
                                     data={'grant_type': 'client_credentials'},
                                     headers=headers,
                                     verify=self._verify)

        try:
            response = self._process(response=raw_response, overrule_raw=True)
        except RuntimeError:
            if self._raw_response is True:
                return raw_response
            raise
        if len(response.keys()) in [1, 2] and 'error' in response:
            error = RuntimeError(response['error'])
            error.status_code = raw_response.status_code
            raise error
        self._token = response['access_token']

    def _build_headers(self):
        """
        Builds the request headers
        :return: The request headers
        :rtype: dict
        """
        headers = {
            'Accept': 'application/json; version={0}'.format(self._version),
            'Content-Type': 'application/json'
        }
        if self._token is not None:
            headers['Authorization'] = 'Bearer {0}'.format(self._token)
        return headers

    @classmethod
    def _build_url_params(cls, params=None):
        """
        Build the URL params
        :param params: URL parameters
        :type params: dict
        :return: The URL params as an encoded query string
        :rtype: str
        """
        url_params = ''
        if params:
            url_params = '?{0}'.format(urllib.urlencode(params))
        return url_params

    def _cache_token(self):
        """
        Caches the JWT
        :return: None
        :rtype: NoneType
        """
        if self._volatile_client is not None:
            self._volatile_client.set(self._key, self._token, 300)

    def _prepare(self, **kwargs):
        """
        Prepares the call:
        * Authentication, if required
        * Preparing headers, returning them
        """
        if self.client_id is not None and self._token is None:
            self._connect()

        headers = self._build_headers()
        params = self._build_url_params(kwargs.get('params'))
        url = '{0}{{0}}{1}'.format(self._url, params)
        self._cache_token()  # Volatile cache might have expired or the key is gone

        return headers, url

    def _process(self, response, overrule_raw=False):
        """
        Processes a call result
        """
        if self._raw_response is True and overrule_raw is False:
            return response

        status_code = response.status_code
        parsed_output = None
        try:
            parsed_output = response.json()
        except Exception:
            pass

        if 200 <= status_code < 300:
            return parsed_output
        else:
            message = None
            if parsed_output is not None:
                if 'error_description' in parsed_output:
                    message = parsed_output['error_description']
                if 'error' in parsed_output:
                    if message is None:
                        message = parsed_output['error']
                    else:
                        message += ' ({0})'.format(parsed_output['error'])
            else:
                messages = {
                    401: 'No access to the requested API',
                    403: 'No access to the requested API',
                    404: 'The requested API could not be found',
                    405: 'Requested method not allowed',
                    406: 'The request was unacceptable',
                    426: 'Upgrade is needed',
                    429: 'Rate limit was hit',
                    500: 'Internal server error'
                }
                if status_code in messages:
                    message = messages[status_code]
            if message is None:
                message = 'Unknown error'
            if status_code in [401, 403]:
                raise HttpForbiddenException(message, '')
            elif status_code == 404:
                raise HttpNotFoundException(message, '')
            else:
                raise HttpException(status_code, message)

    def _call(self, api, params, fct, timeout=None, **kwargs):
        if not api.endswith('/'):
            api = '{0}/'.format(api)
        if not api.startswith('/'):
            api = '/{0}'.format(api)
        if self._volatile_client is not None:
            self._token = self._volatile_client.get(self._key)
        first_connect = self._token is None
        headers, url = self._prepare(params=params)
        try:
            return self._process(
                fct(url=url.format(api),
                    headers=headers,
                    verify=self._verify,
                    timeout=timeout,
                    **kwargs))
        except HttpForbiddenException:
            if self._volatile_client is not None:
                self._volatile_client.delete(self._key)
            if first_connect is True:  # First connect, so no token was present yet, so no need to try twice without token
                raise
            self._token = None
            headers, url = self._prepare(params=params)
            return self._process(
                fct(url=url.format(api),
                    headers=headers,
                    verify=self._verify,
                    **kwargs))
        except Exception:
            if self._volatile_client is not None:
                self._volatile_client.delete(self._key)
            raise

    @classmethod
    def get_instance(cls, connection_info, cache_store=None, version=6):
        """
        Retrieve an OVSClient instance to the connection information passed
        :param connection_info: Connection information, includes: 'host', 'port', 'client_id', 'client_secret'
        :type connection_info: dict
        :param cache_store: Store in which to keep the generated token for the client
        :type cache_store: object
        :param version: Version for the API
        :type version: int
        :return: An instance of the OVSClient class
        :rtype: ovs_extensions.api.client.OVSClient
        """
        ExtensionsToolbox.verify_required_params(
            actual_params=connection_info,
            required_params={
                'host': (str, ExtensionsToolbox.regex_ip),
                'port': (int, {
                    'min': 1,
                    'max': 65535
                }),
                'client_id': (str, None),
                'client_secret': (str, None),
                'local': (bool, None, False)
            })
        return cls(ip=connection_info['host'],
                   port=connection_info['port'],
                   credentials=(connection_info['client_id'],
                                connection_info['client_secret']),
                   version=version,
                   cache_store=cache_store)

    def get(self, api, params=None):
        """
        Executes a GET call
        :param api: Specification to fill out in the URL, eg: /vpools/<vpool_guid>/shrink_vpool
        :param params: Additional query parameters as comma separated list, eg: {'contents':'dynamic1,dynamic2,-dynamic3,_relations,-relation1'}
        :type params: dict
        """
        return self._call(api=api, params=params, fct=requests.get)

    def post(self, api, data=None, params=None):
        """
        Executes a POST call
        :param api: Specification to fill out in the URL, eg: /vpools/<vpool_guid>/shrink_vpool
        :param data: Data to post
        :param params: Additional query parameters, eg: _dynamics
        """
        return self._call(api=api, params=params, fct=requests.post, data=data)

    def put(self, api, data=None, params=None):
        """
        Executes a PUT call
        :param api: Specification to fill out in the URL, eg: /vpools/<vpool_guid>/shrink_vpool
        :param data: Data to put
        :param params: Additional query parameters, eg: _dynamics
        """
        return self._call(api=api, params=params, fct=requests.put, data=data)

    def patch(self, api, data=None, params=None):
        """
        Executes a PATCH call
        :param api: Specification to fill out in the URL, eg: /vpools/<vpool_guid>/shrink_vpool
        :param data: Data to patch
        :param params: Additional query parameters, eg: _dynamics
        """
        return self._call(api=api,
                          params=params,
                          fct=requests.patch,
                          data=data)

    def delete(self, api, params=None):
        """
        Executes a DELETE call
        :param api: Specification to fill out in the URL, eg: /vpools/<vpool_guid>/
        :param params: Additional query parameters, eg: _dynamics
        """
        return self._call(api=api, params=params, fct=requests.delete)
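
A hedged usage sketch of the client; the IP, port and credentials below are illustrative only:

client = BaseClient(ip='10.100.1.1',
                    port=443,
                    credentials=('my_client_id', 'my_client_secret'))
# GET with query parameters, as documented on get()
vpools = client.get('/vpools', params={'contents': 'name'})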
Example n. 20
0
class ArakoonConfigurationLock(object):
    """
    Lock implementation around Arakoon
    To be used as a context manager
    """
    LOCK_LOCATION = '/ovs/locks/{0}'
    EXPIRATION_KEY = 'expires'

    _logger = Logger('arakoon_configuration_lock')

    def __init__(self, cacc_location, name, wait=None, expiration=60):
        # type: (str, str, float, float) -> None
        """
        Initialize a ConfigurationLock
        :param cacc_location: Path to the configuration file
        :type cacc_location: str
        :param name: Name of the lock to acquire.
        :type name: str
        :param expiration: Expiration time of the lock (in seconds)
        :type expiration: float
        :param wait: Amount of time to wait to acquire the lock (in seconds)
        :type wait: float
        """
        self.id = str(uuid.uuid4())
        self.name = name
        self._cacc_location = cacc_location
        config = ArakoonConfiguration(self._cacc_location)
        self._client = config.get_client()
        self._expiration = expiration
        self._data_set = None
        self._key = self.LOCK_LOCATION.format(self.name)
        self._wait = wait
        self._start = 0
        self._has_lock = False

    def __enter__(self):
        # type: () -> ArakoonConfigurationLock
        self.acquire()
        return self

    def __exit__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        _ = args, kwargs
        self.release()

    def acquire(self, wait=None):
        # type: (float) -> bool
        """
        Acquire a lock on the mutex, optionally given a maximum wait timeout
        :param wait: Time to wait for lock
        :type wait: float
        """
        if self._has_lock:
            return True
        self._start = time.time()
        if wait is None:
            wait = self._wait
        while self._client.exists(self._key):
            time.sleep(0.005)
            # Check if it has expired
            try:
                original_lock_data = self._client.get(self._key)
                lock_data = ujson.loads(original_lock_data)
            except ArakoonNotFound:
                self._logger.debug(
                    'Unable to retrieve data: Key {0} was removed in the meantime'
                    .format(self._key))
                continue  # Key was removed in the meantime
            except Exception:
                self._logger.exception(
                    'Unable to retrieve the data of key {0}'.format(self._key))
                continue
            expiration = lock_data.get(self.EXPIRATION_KEY, None)
            if expiration is None or time.time() > expiration:
                self._logger.info(
                    'Expiration for key {0} (lock id: {1}) was reached. Looking to remove it.'
                    .format(self._key, lock_data['id']))
                # Remove the expired lock
                transaction = self._client.begin_transaction()
                self._client.assert_value(self._key,
                                          original_lock_data,
                                          transaction=transaction)
                self._client.delete(self._key, transaction=transaction)
                try:
                    self._client.apply_transaction(transaction)
                except ArakoonAssertionFailed:
                    self._logger.warning(
                        'Lost the race to cleanup the expired key {0}.'.format(
                            self._key))
                except Exception:
                    self._logger.exception(
                        'Unable to remove the expired entry')
                continue  # Always check the key again even when errors occurred
            passed = time.time() - self._start
            if wait is not None and passed > wait:
                self._logger.error(
                    'Lock for {0} could not be acquired. {1} sec > {2} sec'.
                    format(self._key, passed, wait))
                raise NoLockAvailableException(
                    'Could not acquire lock {0}'.format(self._key))
        # Create the lock entry
        now = time.time()
        transaction = self._client.begin_transaction()
        self._client.assert_value(
            self._key, None, transaction=transaction)  # Key shouldn't exist
        data_to_set = ujson.dumps({
            'time_set': now,
            self.EXPIRATION_KEY: now + self._expiration,
            'id': self.id
        })
        self._client.set(self._key, data_to_set, transaction=transaction)
        try:
            self._client.apply_transaction(transaction)
            self._data_set = data_to_set
            self._logger.debug('Acquired lock {0}'.format(self._key))
        except ArakoonAssertionFailed:
            self._logger.info(
                'Lost the race with another lock, back to acquiring')
            return self.acquire(wait)
        except Exception:
            self._logger.exception('Exception occurred while setting the lock')
            raise
        passed = time.time() - self._start
        if passed > 0.2:  # More than 200 ms is a long time to wait
            if self._logger is not None:
                self._logger.warning('Waited {0} sec for lock {1}'.format(
                    passed, self._key))
        self._start = time.time()
        self._has_lock = True
        return True

    def release(self):
        # type: () -> None
        """
        Releases the lock
        """
        if self._has_lock and self._data_set is not None:
            transaction = self._client.begin_transaction()
            self._client.assert_value(self._key,
                                      self._data_set,
                                      transaction=transaction)
            self._client.delete(self._key, transaction=transaction)
            try:
                self._client.apply_transaction(transaction)
                self._logger.debug('Removed lock {0}'.format(self._key))
            except ArakoonAssertionFailed:
                self._logger.warning(
                    'The lock was already removed and is possibly in use. Another client must have cleaned up the expired entry'
                )
            except:
                self._logger.exception('Unable to remove the lock')
                raise
            passed = time.time() - self._start
            if passed > 0.5:  # More than 500 ms is a long time to hold a lock
                if self._logger is not None:
                    self._logger.warning(
                        'A lock on {0} was kept for {1} sec'.format(
                            self._key, passed))
            self._has_lock = False
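
Typical use as a context manager; a minimal sketch in which the cacc location and lock name are assumptions:

with ArakoonConfigurationLock(cacc_location='/opt/OVS/config/arakoon_cacc.ini',
                              name='my_critical_section',
                              wait=5):
    pass  # work done here is serialized across all clients taking this lock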
Example n. 21
0
class Generic(unittest.TestCase):
    """
    This test class will validate the various scenarios of the Generic logic
    """
    _logger = Logger('unittest')

    def setUp(self):
        """
        (Re)Sets the stores on every test
        """
        self.volatile, self.persistent = DalHelper.setup()

    def tearDown(self):
        """
        Clean up test suite
        """
        DalHelper.teardown()

    def test_arakoon_collapse(self):
        """
        Test the Arakoon collapse functionality
        """
        # Set up the test
        structure = DalHelper.build_dal_structure(
            structure={'storagerouters': [1, 2]})
        storagerouter_1 = structure['storagerouters'][1]
        storagerouter_2 = structure['storagerouters'][2]
        MockedSSHClient._run_returns[storagerouter_1.ip] = {}
        MockedSSHClient._run_returns[storagerouter_2.ip] = {}

        # Make sure we cover all Arakoon cluster types
        clusters_to_create = {
            ServiceType.ARAKOON_CLUSTER_TYPES.SD: [{
                'name': 'unittest-voldrv',
                'internal': True,
                'success': True
            }],
            ServiceType.ARAKOON_CLUSTER_TYPES.CFG: [{
                'name': 'unittest-cacc',
                'internal': True,
                'success': True
            }],
            ServiceType.ARAKOON_CLUSTER_TYPES.FWK: [{
                'name': 'unittest-ovsdb',
                'internal': True,
                'success': False
            }],
            ServiceType.ARAKOON_CLUSTER_TYPES.ABM: [{
                'name': 'unittest-cluster-1-abm',
                'internal': True,
                'success': False
            }, {
                'name': 'unittest-random-abm-name',
                'internal': False,
                'success': True
            }],
            ServiceType.ARAKOON_CLUSTER_TYPES.NSM: [{
                'name': 'unittest-cluster-1-nsm_0',
                'internal': True,
                'success': True
            }]
        }
        self.assertEqual(
            first=sorted(clusters_to_create.keys()),
            second=sorted(ServiceType.ARAKOON_CLUSTER_TYPES.keys()),
            msg=
            'An Arakoon cluster type has been removed or added, please update this test accordingly'
        )

        # Create all Arakoon clusters and related services
        failed_clusters = []
        external_clusters = []
        successful_clusters = []
        for cluster_type, cluster_infos in clusters_to_create.iteritems():
            filesystem = cluster_type == ServiceType.ARAKOON_CLUSTER_TYPES.CFG
            for cluster_info in cluster_infos:
                internal = cluster_info['internal']
                cluster_name = cluster_info['name']

                base_dir = DalHelper.CLUSTER_DIR.format(cluster_name)
                arakoon_installer = ArakoonInstaller(cluster_name=cluster_name)
                arakoon_installer.create_cluster(cluster_type=cluster_type,
                                                 ip=storagerouter_1.ip,
                                                 base_dir=base_dir,
                                                 internal=internal)
                arakoon_installer.start_cluster()
                arakoon_installer.extend_cluster(new_ip=storagerouter_2.ip,
                                                 base_dir=base_dir)

                service_name = ArakoonInstaller.get_service_name_for_cluster(
                    cluster_name=cluster_name)
                if cluster_type == ServiceType.ARAKOON_CLUSTER_TYPES.ABM:
                    service_type = ServiceTypeList.get_by_name(
                        ServiceType.SERVICE_TYPES.ALBA_MGR)
                elif cluster_type == ServiceType.ARAKOON_CLUSTER_TYPES.NSM:
                    service_type = ServiceTypeList.get_by_name(
                        ServiceType.SERVICE_TYPES.NS_MGR)
                else:
                    service_type = ServiceTypeList.get_by_name(
                        ServiceType.SERVICE_TYPES.ARAKOON)

                if internal is True:
                    DalHelper.create_service(
                        service_name=service_name,
                        service_type=service_type,
                        storagerouter=storagerouter_1,
                        ports=arakoon_installer.ports[storagerouter_1.ip])
                    DalHelper.create_service(
                        service_name=service_name,
                        service_type=service_type,
                        storagerouter=storagerouter_2,
                        ports=arakoon_installer.ports[storagerouter_2.ip])
                else:
                    DalHelper.create_service(service_name=service_name,
                                             service_type=service_type)

                    external_clusters.append(cluster_name)
                    continue

                if cluster_info['success'] is True:
                    if filesystem is True:
                        config_path = ArakoonClusterConfig.CONFIG_FILE.format(
                            cluster_name)
                    else:
                        config_path = Configuration.get_configuration_path(
                            ArakoonClusterConfig.CONFIG_KEY.format(
                                cluster_name))
                    MockedSSHClient._run_returns[storagerouter_1.ip][
                        'arakoon --collapse-local 1 2 -config {0}'.format(
                            config_path)] = None
                    MockedSSHClient._run_returns[storagerouter_2.ip][
                        'arakoon --collapse-local 2 2 -config {0}'.format(
                            config_path)] = None
                    successful_clusters.append(cluster_name)
                else:  # For successful False clusters we don't emulate the collapse, thus making it fail
                    failed_clusters.append(cluster_name)

        # Start collapse and make it fail for all clusters on StorageRouter 2
        SSHClient._raise_exceptions[storagerouter_2.ip] = {
            'users': ['ovs'],
            'exception': UnableToConnectException('No route to host')
        }
        GenericController.collapse_arakoon()

        # Verify all log messages for each type of cluster
        generic_logs = Logger._logs.get('lib', {})
        for cluster_name in successful_clusters + failed_clusters + external_clusters:
            collect_msg = (
                'DEBUG',
                'Collecting info for cluster {0}'.format(cluster_name))
            unreachable_msg = (
                'ERROR',
                'Could not collapse any cluster on {0} (not reachable)'.format(
                    storagerouter_2.name))
            end_collapse_msg = (
                'DEBUG', 'Collapsing cluster {0} on {1} completed'.format(
                    cluster_name, storagerouter_1.ip))
            start_collapse_msg = ('DEBUG',
                                  'Collapsing cluster {0} on {1}'.format(
                                      cluster_name, storagerouter_1.ip))
            failed_collapse_msg = (
                'ERROR', 'Collapsing cluster {0} on {1} failed'.format(
                    cluster_name, storagerouter_1.ip))
            messages_to_validate = []
            if cluster_name in successful_clusters:
                assert_function = self.assertIn
                messages_to_validate.append(collect_msg)
                messages_to_validate.append(unreachable_msg)
                messages_to_validate.append(start_collapse_msg)
                messages_to_validate.append(end_collapse_msg)
            elif cluster_name in failed_clusters:
                assert_function = self.assertIn
                messages_to_validate.append(collect_msg)
                messages_to_validate.append(unreachable_msg)
                messages_to_validate.append(start_collapse_msg)
                messages_to_validate.append(failed_collapse_msg)
            else:
                assert_function = self.assertNotIn
                messages_to_validate.append(collect_msg)
                messages_to_validate.append(start_collapse_msg)
                messages_to_validate.append(end_collapse_msg)

            for severity, message in messages_to_validate:
                if assert_function == self.assertIn:
                    assert_message = 'Expected to find log message: {0}'.format(
                        message)
                else:
                    assert_message = 'Did not expect to find log message: {0}'.format(
                        message)
                assert_function(member=message,
                                container=generic_logs,
                                msg=assert_message)
                if assert_function == self.assertIn:
                    self.assertEqual(
                        first=severity,
                        second=generic_logs[message],
                        msg='Log message {0} is of severity {1} expected {2}'.
                        format(message, generic_logs[message], severity))

        # Collapse should always have a 'finished' message since each cluster should be attempted to be collapsed
        for general_message in [
                'Arakoon collapse started', 'Arakoon collapse finished'
        ]:
            self.assertIn(member=general_message,
                          container=generic_logs,
                          msg='Expected to find log message: {0}'.format(
                              general_message))

    def test_refresh_package_information(self):
        """
        Test the refresh package information functionality
        """
        def _update_info_cluster_1(client, update_info, package_info):
            _ = package_info
            update_info[client.ip]['framework'] = {
                'packages': {
                    'package1': {
                        'candidate': 'version2',
                        'installed': 'version1'
                    }
                },
                'prerequisites': []
            }

        def _update_info_cluster_2(client, update_info, package_info):
            _ = package_info
            update_info[client.ip]['component2'] = {
                'packages': {
                    'package2': {
                        'candidate': 'version2',
                        'installed': 'version1'
                    }
                },
                'prerequisites': []
            }
            if client.ip == storagerouter_3.ip:
                update_info[client.ip]['errors'] = [
                    'Unexpected error occurred for StorageRouter {0}'.format(
                        storagerouter_3.name)
                ]

        def _update_info_plugin_1(error_information):
            _ = error_information  # get_update_info_plugin is used for Alba nodes, so not testing here

        expected_package_info = {
            'framework': {
                'packages': {
                    'package1': {
                        'candidate': 'version2',
                        'installed': 'version1'
                    }
                },
                'prerequisites': [['node_down', '2']]
            },
            'component2': {
                'packages': {
                    'package2': {
                        'candidate': 'version2',
                        'installed': 'version1'
                    }
                },
                'prerequisites': []
            }
        }

        # StorageRouter 1 successfully updates its package info
        # StorageRouter 2 is inaccessible
        # StorageRouter 3 gets error in 2nd hook --> package_information is reset to {}
        structure = DalHelper.build_dal_structure(
            structure={'storagerouters': [1, 2, 3]})
        storagerouter_1 = structure['storagerouters'][1]
        storagerouter_2 = structure['storagerouters'][2]
        storagerouter_3 = structure['storagerouters'][3]
        Toolbox._function_pointers['update-get_update_info_cluster'] = [
            _update_info_cluster_1, _update_info_cluster_2
        ]
        Toolbox._function_pointers['update-get_update_info_plugin'] = [
            _update_info_plugin_1
        ]

        SSHClient._raise_exceptions[storagerouter_2.ip] = {
            'users': ['root'],
            'exception': UnableToConnectException('No route to host')
        }

        with self.assertRaises(excClass=Exception) as raise_info:
            GenericController.refresh_package_information()

        storagerouter_1.discard()
        storagerouter_2.discard()
        storagerouter_3.discard()
        self.assertDictEqual(
            d1=expected_package_info,
            d2=storagerouter_1.package_information,
            msg='Incorrect package information found for StorageRouter {0}'.
            format(storagerouter_1.name))
        self.assertDictEqual(
            d1={},
            d2=storagerouter_2.package_information,
            msg='Incorrect package information found for StorageRouter {0}'.
            format(storagerouter_2.name))
        self.assertDictEqual(
            d1={},
            d2=storagerouter_3.package_information,
            msg='Incorrect package information found for StorageRouter {0}'.
            format(storagerouter_3.name))
        self.assertIn(
            member='Unexpected error occurred for StorageRouter {0}'.format(
                storagerouter_3.name),
            container=raise_info.exception.message,
            msg=
            'Expected to find log message about unexpected error for StorageRouter {0}'
            .format(storagerouter_3.name))

    ##################
    # HELPER METHODS #
    ##################
    def _print_message(self, message):
        if self.debug is True:
            print message

    def _validate(self, vdisk, current_day, base_date, sticky_hours,
                  consistent_hours, inconsistent_hours):
        """
        This validation assumes the same policy as currently implemented in the policy code
        itself. In case the policy strategy ever changes, this unittest should be adapted as well
        or rewritten to load the implemented policy
        """

        # Implemented policy:
        # < 1d | 1d bucket | 1 | best of bucket   | 1d
        # < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w
        # < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m
        # > 1m | delete

        minute = 60
        hour = minute * 60

        self._print_message('  - {0}'.format(vdisk.name))

        # Visualisation
        if self.debug is True:
            snapshots = {}
            for snapshot in vdisk.snapshots:
                snapshots[int(snapshot['timestamp'])] = snapshot
            for day in xrange(0, current_day + 1):
                timestamp = self._make_timestamp(base_date,
                                                 datetime.timedelta(1) * day)
                visual = '    - {0} '.format(
                    datetime.datetime.fromtimestamp(timestamp).strftime(
                        '%Y-%m-%d'))
                for t in xrange(timestamp, timestamp + hour * 24, minute * 30):
                    if t in snapshots:
                        visual += 'S' if snapshots[t][
                            'is_sticky'] else 'C' if snapshots[t][
                                'is_consistent'] else 'R'
                    else:
                        visual += '-'
                self._print_message(visual)

        sticky = [
            int(s['timestamp']) for s in vdisk.snapshots
            if s['is_sticky'] is True
        ]
        consistent = [
            int(s['timestamp']) for s in vdisk.snapshots
            if s['is_consistent'] is True
        ]
        inconsistent = [
            int(s['timestamp']) for s in vdisk.snapshots
            if s['is_consistent'] is False
        ]
        self._print_message(
            '    - {0} consistent, {1} inconsistent, {2} sticky'.format(
                len(consistent), len(inconsistent), len(sticky)))

        # Check for correct amount of snapshots
        amount_sticky = len(sticky_hours) * current_day
        amount_consistent = 0
        amount_inconsistent = 0
        pointer = 0
        if pointer < current_day:
            amount_consistent += len(consistent_hours)
            amount_inconsistent += len(inconsistent_hours)
            pointer += 1
        while pointer < current_day and pointer <= 7:
            if len(consistent_hours) > 0:
                amount_consistent += 1  # One consistent snapshot per day
            else:
                amount_inconsistent += 1
            pointer += 1
        while pointer < current_day and pointer <= 28:
            if len(consistent_hours) > 0:
                amount_consistent += 1  # One consistent snapshot per week
            else:
                amount_inconsistent += 1
            pointer += 7
        self.assertEqual(
            first=len(sticky),
            second=amount_sticky,
            msg='Wrong amount of sticky snapshots: {0} vs expected {1}'.format(
                len(sticky), amount_sticky))
        if len(sticky) == 0:
            self.assertEqual(
                first=len(consistent),
                second=amount_consistent,
                msg='Wrong amount of consistent snapshots: {0} vs expected {1}'
                .format(len(consistent), amount_consistent))
            self.assertEqual(
                first=len(inconsistent),
                second=amount_inconsistent,
                msg='Wrong amount of inconsistent snapshots: {0} vs expected {1}'
                .format(len(inconsistent), amount_inconsistent))

        # Check the correctness of the snapshot timestamps
        if len(consistent_hours) > 0:
            sn_type = 'consistent'
            container = consistent
            time_diff = (hour * consistent_hours[-1]) + (minute * 30)
        else:
            sn_type = 'inconsistent'
            container = inconsistent
            time_diff = (hour * inconsistent_hours[-1])

        for day in xrange(0, current_day):
            for h in sticky_hours:
                timestamp = self._make_timestamp(
                    base_date,
                    datetime.timedelta(1) * day) + (hour * h) + (minute * 30)
                self.assertIn(
                    member=timestamp,
                    container=sticky,
                    msg='Expected sticky snapshot for {0} at {1}'.format(
                        vdisk.name, self._from_timestamp(timestamp)))
            if day == (current_day - 1):
                for h in inconsistent_hours:
                    timestamp = self._make_timestamp(
                        base_date,
                        datetime.timedelta(1) * day) + (hour * h)
                    self.assertIn(
                        member=timestamp,
                        container=inconsistent,
                        msg=
                        'Expected hourly inconsistent snapshot for {0} at {1}'.
                        format(vdisk.name, self._from_timestamp(timestamp)))
                for h in consistent_hours:
                    timestamp = self._make_timestamp(
                        base_date,
                        datetime.timedelta(1) * day) + (hour * h) + (minute *
                                                                     30)
                    self.assertIn(
                        member=timestamp,
                        container=consistent,
                        msg='Expected random consistent snapshot for {0} at {1}'
                        .format(vdisk.name, self._from_timestamp(timestamp)))
            elif day > (current_day - 7):
                timestamp = self._make_timestamp(
                    base_date,
                    datetime.timedelta(1) * day) + time_diff
                self.assertIn(
                    member=timestamp,
                    container=container,
                    msg='Expected daily {0} snapshot for {1} at {2}'.format(
                        sn_type, vdisk.name, self._from_timestamp(timestamp)))
            elif day % 7 == 0 and day > 28:
                timestamp = self._make_timestamp(
                    base_date,
                    datetime.timedelta(1) * day) + time_diff
                self.assertIn(
                    member=timestamp,
                    container=container,
                    msg='Expected weekly {0} snapshot for {1} at {2}'.format(
                        sn_type, vdisk.name, self._from_timestamp(timestamp)))

    @staticmethod
    def _make_timestamp(base, offset):
        return int(time.mktime((base + offset).timetuple()))

    @staticmethod
    def _from_timestamp(timestamp):
        return datetime.datetime.fromtimestamp(timestamp).strftime(
            '%Y-%m-%d %H:%M')
    @classmethod
    def _get_logger_instance(cls):
        return Logger('extensions-services')
Example n. 23
0
class PyrakoonClient(PyrakoonBase):
    """
    Arakoon client wrapper:
    - Easier sequence management
    """
    _logger = Logger('extensions')

    def __init__(self,
                 cluster,
                 nodes,
                 retries=10,
                 retry_back_off_multiplier=2,
                 retry_interval_sec=2):
        # type: (str, Dict[str, Tuple[str, int]], int, int, int) -> None
        """
        Initializes the client
        :param cluster: Identifier of the cluster
        :type cluster: str
        :param nodes: Dict with all node sockets. {name of the node: (list of ips of the node, port of the node)}
        :type nodes: dict
        :param retries: Number of retries to do
        :type retries: int
        :param retry_back_off_multiplier: Back off multiplier. Multiplies the retry_interval_sec with this number ** retry
        :type retry_back_off_multiplier: int
        :param retry_interval_sec: Seconds to wait before retrying. Exponentially increases with every retry.
        :type retry_interval_sec: int
        """
        cleaned_nodes = {}
        for node, info in nodes.iteritems():
            cleaned_nodes[str(node)] = ([str(entry)
                                         for entry in info[0]], int(info[1]))
        # Synchronization
        self._lock = RLock()
        # Wrapping
        self._config = ArakoonClientConfig(str(cluster), cleaned_nodes)
        self._client = ArakoonClient(self._config,
                                     timeout=5,
                                     noMasterTimeout=5)

        self._identifier = int(round(random.random() * 10000000))
        self._batch_size = 500
        self._sequences = {}
        # Retrying
        self._retries = retries
        self._retry_back_off_multiplier = retry_back_off_multiplier
        self._retry_interval_sec = retry_interval_sec

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def get(self, key, consistency=None):
        # type: (str, Consistency) -> Any
        """
        Retrieves a certain value for a given key
        :param key: The key whose value you are interested in
        :type key: str
        :param consistency: Consistency of the get
        :type consistency: Consistency
        :return: The value associated with the given key
        :rtype: any
        """
        return self._client.get(key, consistency)

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def _get_multi(self, keys, must_exist):
        # type: (List[str], bool) -> List[Tuple[str, any]]
        """
        Get multiple keys at once
        Serves as the arakoon error handling
        :param keys: All keys to fetch
        :type keys: List[str]
        :param must_exist: Should all listed keys exist
        :type must_exist: bool
        :return: List with key value pairs
        :rtype: List[Tuple[str, any]]
        """
        func = self._client.multiGet if must_exist is True else self._client.multiGetOption
        return func(keys)

    def get_multi(self, keys, must_exist=True):
        # type: (List[str], bool) -> Generator[Tuple[str, any]]
        """
        Get multiple keys at once
        :param keys: All keys to fetch
        :type keys: List[str]
        :param must_exist: Should all listed keys exist
        :type must_exist: bool
        :return: Generator that yields key value pairs
        :rtype: Iterable[Tuple[str, any]]
        """
        for item in self._get_multi(keys, must_exist):
            yield item

    @locked()
    @handle_arakoon_errors(is_read_only=False)
    def set(self, key, value, transaction=None):
        # type: (str, any, str) -> None
        """
        Sets the value for a key to a given value
        If the key does not yet have a value associated with it, a new key value pair will be created.
        If the key does have a value associated with it, it is overwritten.
        :param key: The key to set/update
        :type key: str
        :param value: The value to store
        :type value: any
        :param transaction: ID of the transaction to add the update to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addSet(key, value)
        return self._client.set(key, value)

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def _range(self,
               begin_key,
               begin_key_included,
               end_key,
               end_key_included,
               max_elements=None):
        # type: (str, bool, str, bool, Optional[int]) -> List[str]
        """
        Get a range of keys from Arakoon
        :param begin_key: Key to start range with
        :type begin_key: str
        :param begin_key_included: Should the key be included
        :type begin_key_included: bool
        :param end_key: Key to end the range with
        :type end_key: str
        :param end_key_included: Is the end key included
        :type end_key_included: bool
        :param max_elements: Maximum amount of elements to return. Defaults to the batch size of the class
        :type max_elements: Optional[int]
        :return: List of keys
        :rtype: List[str]
        """
        max_elements = self._batch_size if max_elements is None else max_elements
        return self._client.range(beginKey=begin_key,
                                  beginKeyIncluded=begin_key_included,
                                  endKey=end_key,
                                  endKeyIncluded=end_key_included,
                                  maxElements=max_elements)

    def prefix(self, prefix):
        # type: (str) -> Generator[str]
        """
        Lists all keys starting with the given prefix
        :param prefix: Prefix of the key
        :type prefix: str
        :return: Generator that yields keys
        :rtype: Generator[str]
        """
        next_prefix = self._next_prefix(prefix)
        batch = None
        while batch is None or len(batch) > 0:
            batch = self._range(
                begin_key=prefix if batch is None else batch[-1],
                begin_key_included=batch is None,
                end_key=next_prefix,
                end_key_included=False)
            for item in batch:
                yield item

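    # `_next_prefix` is inherited from PyrakoonBase (not shown here). It is
    # assumed to return the smallest key sorting after every key that starts
    # with the given prefix, so it can act as an exclusive range end. A
    # minimal sketch under that assumption (the real helper may treat edge
    # cases such as trailing 0xFF bytes differently):
    #
    #     @staticmethod
    #     def _next_prefix(prefix):
    #         if not prefix:
    #             raise ValueError('A non-empty prefix is required')
    #         return prefix[:-1] + chr(ord(prefix[-1]) + 1)
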
    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def _range_entries(self,
                       begin_key,
                       begin_key_included,
                       end_key,
                       end_key_included,
                       max_elements=None):
        # type: (str, bool, str, bool, Optional[int]) -> List[Tuple[str, Any]]
        """
        Get a range of keys value pairs from Arakoon
        :param begin_key: Key to start range with
        :type begin_key: str
        :param begin_key_included: Should the key be included
        :type begin_key_included: bool
        :param end_key: Key to end the range with
        :type end_key: str
        :param end_key_included: Is the end key included
        :type end_key_included: bool
        :param max_elements: Maximum amount of elements to return. Defaults to the batch size of the class
        :type max_elements: Optional[int]
        :return: List of keys
        :rtype: List[Tuple[str, Any]]
        """
        max_elements = self._batch_size if max_elements is None else max_elements
        return self._client.range_entries(beginKey=begin_key,
                                          beginKeyIncluded=begin_key_included,
                                          endKey=end_key,
                                          endKeyIncluded=end_key_included,
                                          maxElements=max_elements)

    def prefix_entries(self, prefix):
        # type: (str) -> Generator[Tuple[str, any]]
        """
        Lists all key, value pairs starting with the given prefix
        :param prefix: Prefix of the key
        :type prefix: str
        :return: Generator that yields key, value pairs
        :rtype: Generator[Tuple[str, Any]]
        """
        next_prefix = self._next_prefix(prefix)
        batch = None
        while batch is None or len(batch) > 0:
            batch = self._range_entries(
                begin_key=prefix if batch is None else batch[-1][0],
                begin_key_included=batch is None,
                end_key=next_prefix,
                end_key_included=False)
            for item in batch:
                yield item

    @locked()
    @handle_arakoon_errors(is_read_only=False)
    def delete(self, key, must_exist=True, transaction=None):
        # type: (str, bool, Optional[str]) -> any
        """
        Deletes a given key from the store
        :param key: Key to remove
        :type key: str
        :param must_exist: Should the key exist
        :type must_exist: bool
        :param transaction: Transaction to apply the update to
        :type transaction: str
        :return: The previous value when must_exist=False, None when must_exist=True
        :rtype: any
        """
        if transaction is not None:
            if must_exist is True:
                return self._sequences[transaction].addDelete(key)
            else:
                return self._sequences[transaction].addReplace(key, None)
        if must_exist is True:
            return self._client.delete(key)
        else:
            return self._client.replace(key, None)

    @locked()
    @handle_arakoon_errors(is_read_only=False)
    def delete_prefix(self, prefix, transaction=None):
        # type: (str, Optional[str]) -> None
        """
        Removes a given prefix from the store
        :param prefix: Prefix of the key
        :type prefix: str
        :param transaction: Transaction to apply the update to
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addDeletePrefix(prefix)
        return self._client.deletePrefix(prefix)

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def nop(self):
        # type: () -> None
        """
        Executes a nop command
        """
        return self._client.nop()

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def exists(self, key):
        # type: (str) -> bool
        """
        Check if key exists
        :param key: Key to check
        :type key: str
        :return: True if key exists else False
        :rtype: bool
        """
        return self._client.exists(key)

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def assert_value(self, key, value, transaction=None):
        # type: (str, any, str) -> None
        """
        Asserts a key-value pair
        :param key: Key of the value to assert
        :type key: str
        :param value: Value to assert
        :type value: any
        :param transaction: Transaction to apply the assert to
        :type transaction: str
        :raises: ArakoonAssertionFailed if the value could not be asserted
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addAssert(key, value)
        return self._client.aSSert(key, value)

    @locked()
    @handle_arakoon_errors(is_read_only=True)
    def assert_exists(self, key, transaction=None):
        # type: (str, str) -> None
        """
        Asserts that a given key exists
        :param key: Key to assert
        :type key: str
        :param transaction: Transaction to apply the assert to
        :type transaction: str
        :raises: ArakoonAssertionFailed if the value could not be asserted
        :return: None
        :rtype: NoneType
        """
        if transaction is not None:
            return self._sequences[transaction].addAssertExists(key)
        return self._client.aSSert_exists(key)

    def begin_transaction(self):
        # type: () -> str
        """
        Creates a transaction (wrapper around Arakoon sequences)
        :return: Identifier of the transaction
        :rtype: str
        """
        key = str(uuid.uuid4())
        self._sequences[key] = self._client.makeSequence()
        return key

    @handle_arakoon_errors(is_read_only=False, max_duration=1)
    def _apply_transaction(self, sequence):
        # type: (Sequence) -> None
        """
        Applies the sequence with retry handling. Kept separate so that apply_transaction can delete the transaction in a finally clause. Paired with apply_transaction
        :param sequence: Sequence to execute
        :type sequence: Sequence
        :return: None
        :rtype: NoneType
        """
        self._client.sequence(sequence)

    @locked()
    def apply_transaction(self, transaction, delete=True):
        # type: (str, Optional[bool]) -> None
        """
        Applies a transaction
        :param transaction: Identifier of the transaction
        :type transaction: str
        :param delete: Delete transaction after attempting to apply the transaction
        Disabling this option requires a delete_transaction to be called at some point to avoid memory leaking
        :type delete: bool
        :return: None
        :rtype: NoneType
        """
        try:
            sequence = self._sequences[transaction]
            return self._apply_transaction(sequence)
        finally:
            if delete:
                self.delete_transaction(transaction)

    def delete_transaction(self, transaction):
        """
        Deletes a transaction
        :param transaction: Identifier of the transaction
        :type transaction: str
        :return: None
        :rtype: NoneType
        """
        self._sequences.pop(transaction, None)

    def lock(self, name, wait=None, expiration=60):
        # type: (str, float, float) -> PyrakoonLock
        """
        Returns the Arakoon lock implementation
        :param name: Name to give to the lock
        :type name: str
        :param wait: Wait time for the lock (in seconds)
        :type wait: float
        :param expiration: Expiration time for the lock (in seconds)
        :type expiration: float
        :return: The lock implementation
        :rtype: PyrakoonLock
        """
        return PyrakoonLock(self, name, wait, expiration)
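    # Usage sketch (illustration only; it assumes PyrakoonLock is usable as
    # a context manager):
    #   with client.lock('my-lock', wait=5, expiration=60):
    #       pass  # critical section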

    @locked()
    def apply_callback_transaction(self,
                                   transaction_callback,
                                   max_retries=0,
                                   retry_wait_function=None):
        # type: (callable, int, callable) -> None
        """
        Apply a transaction which is the result of the callback.
        The callback should rebuild the complete transaction on every invocation so the asserts are re-evaluated in case a previous run was interrupted
        Handles all Arakoon errors by re-executing the callback until it finishes or until no more retries can be made
        :param transaction_callback: Callback function which returns the transaction ID to apply
        :type transaction_callback: callable
        :param max_retries: Number of retries to attempt. Retries are attempted when an ArakoonAssertionFailed is raised.
        Defaults to 0
        :type max_retries: int
        :param retry_wait_function: Function called before retrying the transaction. The current try number is passed as an argument.
        Defaults to lambda retry: time.sleep(randint(0, 25) / 100.0)
        :type retry_wait_function: callable
        :return: None
        :rtype: NoneType
        """
        def default_retry_wait(retry):
            _ = retry
            time.sleep(random.randint(0, 25) / 100.0)

        retry_wait_func = retry_wait_function or default_retry_wait
        tries = 0
        while True:
            tries += 1
            try:
                transaction = transaction_callback()  # type: str
                return self.apply_transaction(transaction)
            except ArakoonAssertionFailed as ex:
                last_exception = ex
                if tries > max_retries:
                    raise last_exception
                self._logger.warning(
                    'Asserting failed. Retrying {0} more times'.format(
                        max_retries - tries))
                retry_wait_func(tries)
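
# Usage sketch for the transaction API above (an assumption for
# illustration): `client` stands for an instance of this wrapper, and the
# `get`/`set` calls assume key-value methods of the class that are not
# shown in this snippet.
def _increment_counter():
    transaction = client.begin_transaction()
    current = client.get('counter')
    # Assert the value we read, so a concurrent writer triggers a retry
    client.assert_value('counter', current, transaction=transaction)
    client.set('counter', current + 1, transaction=transaction)
    return transaction

# Re-executes the callback on ArakoonAssertionFailed, up to 3 extra attempts
client.apply_callback_transaction(_increment_counter, max_retries=3)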
Esempio n. 24
0
class Configuration(object):
    """
    Configuration wrapper.
    Uses a special key format to specify the path within the configuration store, and optionally a path inside the JSON data
    object that might be stored under the key.
    key  = <main path>[|<json path>]
    main path = slash-delimited path
    json path = dot-delimited path
    Examples:
        > Configuration.set('/foo', 1)
        > print Configuration.get('/foo')
        < 1
        > Configuration.set('/foo', {'bar': 1})
        > print Configuration.get('/foo')
        < {u'bar': 1}
        > print Configuration.get('/foo|bar')
        < 1
        > Configuration.set('/bar|a.b', 'test')
        > print Configuration.get('/bar')
        < {u'a': {u'b': u'test'}}
    """

    BASE_KEY = '/ovs/framework'
    CACC_LOCATION = CACC_LOCATION
    EDITION_KEY = '{0}/edition'.format(BASE_KEY)

    _clients = {}
    _logger = Logger('extensions')

    def __init__(self):
        # type: () -> None
        """
        Dummy init method
        """
        _ = self

    #####################
    # To be implemented #
    #####################

    @classmethod
    def lock(cls, name, wait=None, expiration=60):
        """
        Places a mutex on the Configuration management
        To be used as a context manager
        :param name: Name of the lock to acquire.
        :type name: str
        :param expiration: Expiration time of the lock (in seconds)
        :type expiration: float
        :param wait: Amount of time to wait to acquire the lock (in seconds)
        :type wait: float
        """
        return cls._passthrough(method='lock',
                                name=name,
                                wait=wait,
                                expiration=expiration)

    @classmethod
    def get_configuration_path(cls, key):
        # type: (str) -> str
        """
        Retrieve the configuration path
        For arakoon: 'arakoon://cluster_id/{0}?ini=/path/to/arakoon_cacc.ini:{0}'.format(key)
        :param key: Key to retrieve the full configuration path for
        :type key: str
        :return: Configuration path
        :rtype: str
        """
        return cls._passthrough(method='get_configuration_path', key=key)

    @classmethod
    def extract_key_from_path(cls, path):
        # type: (str) -> str
        """
        Used in unittests to retrieve last key from a path
        :param path: Path to extract key from
        :type path: str
        :return: The last part of the path
        :rtype: str
        """
        return cls._passthrough(method='extract_key_from_path', path=path)

    ###################
    # Implementations #
    ###################

    @classmethod
    def get(cls, key, raw=False, **kwargs):
        # type: (str, bool, **any) -> any
        """
        Get value from the configuration store
        :param key: Key to get
        :param raw: Raw data if True else json format
        :return: Value for key
        """
        # Using this bool here, because the default value itself could be None or False-ish and we want to be able to return the default value specified
        default_specified = 'default' in kwargs
        default_value = kwargs.pop('default', None)
        try:
            key_entries = key.split('|')
            data = cls._get(key_entries[0], raw=raw, **kwargs)
            if len(key_entries) == 1:
                return data
            try:
                temp_data = data
                for entry in key_entries[1].split('.'):
                    temp_data = temp_data[entry]
                return temp_data
            except KeyError as ex:
                raise NotFoundException(ex.message)
        except NotFoundException:
            if default_specified is True:
                return default_value
            raise

    @classmethod
    def _get(cls, key, raw=False, **kwargs):
        # type: (str, bool, **any) -> Union[dict, None]
        data = cls._passthrough(method='get', key=key, **kwargs)
        if raw is True:
            return data
        return json.loads(data)

    @classmethod
    def set(cls, key, value, raw=False, transaction=None):
        # type: (str, any, bool, str) -> None
        """
        Set value in the configuration store
        :param key: Key to store
        :param value: Value to store
        :param raw: Raw data if True else apply json format
        :param transaction: Transaction to apply the set to
        :return: None
        """
        key_entries = key.split('|')
        set_data = value
        if len(key_entries) == 1:
            cls._set(key_entries[0],
                     set_data,
                     raw=raw,
                     transaction=transaction)
            return
        try:
            data = cls._get(key_entries[0], raw=raw)
        except NotFoundException:
            data = {}
        temp_config = data
        entries = key_entries[1].split('.')
        for entry in entries[:-1]:
            if entry in temp_config:
                temp_config = temp_config[entry]
            else:
                temp_config[entry] = {}
                temp_config = temp_config[entry]
        temp_config[entries[-1]] = set_data
        cls._set(key_entries[0], data, raw=raw, transaction=transaction)

    @classmethod
    def _set(cls, key, value, raw=False, transaction=None):
        # type: (str, any, bool, str) -> None
        data = value
        if raw is False:
            data = cls._dump_data(data)
        return cls._passthrough(method='set',
                                key=key,
                                value=data,
                                transaction=transaction)

    @classmethod
    def _dump_data(cls, value):
        # type: (Union[str, Dict[Any, Any]]) -> str
        """
        Dumps data to JSON format if possible
        :param value: The value to dump
        :type value: str or dict
        :return: The converted data
        :rtype: str
        """
        try:
            data = json.loads(value)
            data = json.dumps(data, indent=4)
        except Exception:
            data = json.dumps(value, indent=4)
        return data
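    # For example (illustration only): _dump_data('{"a": 1}') re-indents the
    # already-serialized JSON string, while _dump_data({'a': 1}) serializes
    # the dict itself; both return a JSON-formatted str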

    @classmethod
    def delete(cls, key, remove_root=False, raw=False, transaction=None):
        # type: (str, bool, bool, str) -> None
        """
        Delete key - value from the configuration store
        :param key: Key to delete
        :param remove_root: Remove root
        :param raw: Raw data if True else apply json format
        :param transaction: Transaction to apply the delete to
        :return: None
        """
        key_entries = key.split('|')
        if len(key_entries) == 1:
            cls._delete(key_entries[0],
                        recursive=True,
                        transaction=transaction)
            return
        data = cls._get(key_entries[0], raw)
        temp_config = data
        entries = key_entries[1].split('.')
        if len(entries) > 1:
            for entry in entries[:-1]:
                if entry in temp_config:
                    temp_config = temp_config[entry]
                else:
                    temp_config[entry] = {}
                    temp_config = temp_config[entry]
            del temp_config[entries[-1]]
        if len(entries) == 1 and remove_root is True:
            del data[entries[0]]
        cls._set(key_entries[0], data, raw, transaction=transaction)

    @classmethod
    def _delete(cls, key, recursive, transaction=None):
        # type: (str, bool, str) -> None
        return cls._passthrough(method='delete',
                                key=key,
                                recursive=recursive,
                                transaction=transaction)

    @classmethod
    def rename(cls, key, new_key, max_retries=20):
        # type: (str, str, int) -> None
        """
        Rename path in the configuration store
        :param key: Key to store
        :type key: str
        :param new_key: New key to store
        :type new_key: str
        :param max_retries: Maximum number of attempts that can be made to store the new path
        :type max_retries: int
        :return: None
        """
        return cls._passthrough(method='rename',
                                key=key,
                                new_key=new_key,
                                max_retries=max_retries)

    @classmethod
    def exists(cls, key, raw=False):
        # type: (str, bool) -> bool
        """
        Check if key exists in the configuration store
        :param key: Key to check
        :param raw: Process raw data
        :return: True if exists
        """
        try:
            cls.get(key, raw)
            return True
        except NotFoundException:
            return False

    @classmethod
    def dir_exists(cls, key):
        # type: (str) -> bool
        """
        Check if directory exists in the configuration store
        :param key: Directory to check
        :return: True if exists
        """
        return cls._passthrough(method='dir_exists', key=key)

    @classmethod
    def list(cls, key, recursive=False):
        # type: (str, bool) -> Iterable[str]
        """
        List all keys in tree in the configuration store
        :param key: Key to list
        :type key: str
        :param recursive: Recursively list all keys
        :type recursive: bool
        :return: Generator object
        """
        return cls._passthrough(method='list', key=key, recursive=recursive)

    @classmethod
    def begin_transaction(cls):
        # type: () -> str
        """
        Starts a new transaction. Get/set/delete calls can be chained into one
        :return: New transaction ID
        """
        return cls._passthrough(method='begin_transaction')

    @classmethod
    def apply_transaction(cls, transaction):
        # type: (str) -> None
        """
        Applies the given transaction
        :param transaction: ID of the transaction to apply
        :type transaction: str
        :return: None
        """
        return cls._passthrough(method='apply_transaction',
                                transaction=transaction)

    @classmethod
    def assert_value(cls, key, value, transaction=None, raw=False):
        # type: (str, Any, str, bool) -> None
        """
        Asserts a key-value pair
        :param key: Key to assert for
        :type key: str
        :param value: Value that the key should have
        :type value: any
        :param transaction: Transaction to apply this action to
        :type transaction: str
        :param raw: Raw data if True else apply json format
        :type raw: bool
        :return: None
        :rtype: NoneType
        """
        data = value
        # When data is None, we are asserting that the key does not exist; skip JSON dumping to avoid comparing None to 'null'
        if raw is False and data is not None:
            data = cls._dump_data(data)
        return cls._passthrough(method='assert_value',
                                key=key,
                                value=data,
                                transaction=transaction)

    @classmethod
    def assert_exists(cls, key, transaction=None):
        """
        Asserts whether a given key exists
        Raises when the assertion failed
        """
        return cls._passthrough(method='assert_exists',
                                key=key,
                                transaction=transaction)

    @classmethod
    def get_client(cls):
        """
        Retrieve a configuration store client
        """
        return cls._passthrough(method='get_client')

    @classmethod
    def _passthrough(cls, method, *args, **kwargs):
        # type: (str, *any, **any) -> any
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            store = 'unittest'
        else:
            store = cls.get_store_info()
        instance = cls._clients.get(store)
        if instance is None:
            instance = cls._build_instance()
        # Map towards generic exceptions
        not_found_exception = instance.key_not_found_exception
        assertion_exception = instance.assertion_exception
        try:
            return getattr(instance, method)(*args, **kwargs)
        except not_found_exception as ex:
            # Preserve traceback
            exception_type, exception_instance, traceback = sys.exc_info()
            raise NotFoundException, NotFoundException(ex.message), traceback
        except assertion_exception as ex:
            # Preserve traceback
            exception_type, exception_instance, traceback = sys.exc_info()
            raise ConfigurationAssertionException, ConfigurationAssertionException(
                ex.message), traceback

    @classmethod
    def _build_instance(cls, cache=True):
        """
        Build an instance of the underlying Configuration to use
        :param cache: Cache the instance
        :type cache: bool
        :return: An instance of an underlying Configuration
        :rtype: ovs_extensions.generic.configuration.clients.base.ConfigurationBase
        """
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            store = 'unittest'
        else:
            store = cls.get_store_info()
        if store == 'arakoon':
            from ovs_extensions.generic.configuration.clients.arakoon import ArakoonConfiguration
            instance = ArakoonConfiguration(cacc_location=cls.CACC_LOCATION)
        elif store == 'unittest':
            from ovs_extensions.generic.configuration.clients.mock_keyvalue import ConfigurationMockKeyValue
            instance = ConfigurationMockKeyValue()
        else:
            raise NotImplementedError(
                'Store {0} is not implemented'.format(store))
        if cache is True:
            cls._clients[store] = instance
        return instance

    @classmethod
    def get_store_info(cls):
        """
        Retrieve the configuration store type. This can currently only be 'arakoon'
        :return: The store type
        :rtype: str
        """
        raise NotImplementedError()

    @classmethod
    def get_edition(cls):
        # type: () -> str
        """
        Retrieve the installed edition (community or enterprise)
            * Community: Free edition downloaded from apt.openvstorage.org
            * Enterprise: Paid edition which is indicated by the packages with 'ee' in their name and downloaded from apt-ee.openvstorage.com
        WARNING: This method assumes every node in the cluster has the same edition installed
        :return: The edition which has been installed
        :rtype: str
        """
        # Verify edition via configuration management
        try:
            edition = cls.get(key=cls.EDITION_KEY)
            if edition in [
                    PackageFactory.EDITION_ENTERPRISE,
                    PackageFactory.EDITION_COMMUNITY
            ]:
                return edition
        except Exception:
            pass

        # Verify edition via StorageDriver package
        try:
            return PackageFactory.EDITION_ENTERPRISE if 'ee-' in check_output(
                [PackageFactory.VERSION_CMD_SD],
                shell=True) else PackageFactory.EDITION_COMMUNITY
        except Exception:
            pass

        # Verify edition via ALBA package
        try:
            return PackageFactory.EDITION_ENTERPRISE if 'ee-' in check_output(
                [PackageFactory.VERSION_CMD_ALBA],
                shell=True) else PackageFactory.EDITION_COMMUNITY
        except Exception:
            pass

        return PackageFactory.EDITION_COMMUNITY

    @classmethod
    def safely_store(cls, callback, max_retries=20):
        # type: (callable, int) -> List[Tuple[str, Any]]
        """
        Safely store a key/value pair within the persistent storage
        :param callback: Callable which returns an iterable of (key, value to save, expected current value) tuples
        All returned entries are asserted and set within a single transaction
        :type callback: callable
        :param max_retries: Number of retries to attempt
        :type max_retries: int
        :return: List of key-value pairs of the stored items
        :rtype: list(tuple(str, any))
        :raises: ConfigurationAssertionException:
        - When the save could not happen
        """
        tries = 0
        success = False
        last_exception = None
        return_value = []
        while success is False:
            transaction = cls.begin_transaction()
            return_value = []  # Reset value
            tries += 1
            if tries > max_retries:
                raise last_exception
            callback_result = callback()
            if not isinstance(callback_result, collections.Iterable):
                raise ValueError(
                    'Callback does not produce an iterable result')
            # Multiple key/values to set
            for key, value, expected_value in callback_result:
                return_value.append((key, value))
                cls.assert_value(key, expected_value, transaction=transaction)
                cls.set(key, value, transaction=transaction)
            try:
                cls.apply_transaction(transaction)
                success = True
            except ConfigurationAssertionException as ex:
                cls._logger.warning(
                    'Asserting failed. Retrying {0} more times'.format(
                        max_retries - tries))
                last_exception = ex
                time.sleep(randint(0, 25) / 100.0)
                cls._logger.info('Executing the passed function again')
        return return_value

    @classmethod
    def register_usage(cls, component_identifier, registration_key=None):
        # type: (str, str) -> List[str]
        """
        Registers that the component is using configuration management
        When sharing the same configuration management for multiple processes, these registrations can be used to determine
        if the configuration access can be wiped on the node
        :param component_identifier: Identifier of the component
        :type component_identifier: str
        :param registration_key: Key to register the component under
        :type registration_key: str
        :return: The currently registered users
        :rtype: List[str]
        """
        registration_key = registration_key or cls.get_registration_key()

        def _register_user_callback():
            registered_applications = cls.get(registration_key, default=None)
            new_registered_applications = (registered_applications
                                           or []) + [component_identifier]
            return [(registration_key, new_registered_applications,
                     registered_applications)]

        return cls.safely_store(_register_user_callback, 20)[0][1]

    @classmethod
    def get_registration_key(cls):
        # type: () -> str
        """
        Generate the key to register the component under
        :return: The registration key
        :rtype: str
        """
        return cls.generate_registration_key(System.get_my_machine_id())

    @classmethod
    def generate_registration_key(cls, identifier):
        # type: (str) -> str
        """
        Generate a registration key with a given identifier
        :param identifier: Identifier for the config key
        :type identifier: str
        :return: The registration key for the given identifier
        :rtype: str
        """
        return COMPONENTS_KEY.format(identifier)

    @classmethod
    def unregister_usage(cls, component_identifier):
        # type: (str) -> List[str]
        """
        Unregisters the component from configuration management
        When sharing the same configuration management for multiple processes, these registrations can be used to determine
        if the configuration access can be wiped on the node
        :param component_identifier: Identifier of the component
        :type component_identifier: str
        :return: The currently registered users
        :rtype: List[str]
        """
        registration_key = cls.get_registration_key()

        def _unregister_user_callback():
            registered_applications = cls.get(registration_key,
                                              default=None)  # type: List[str]
            if not registered_applications:
                # No more entries. Save an empty list
                new_registered_applications = []
            else:
                new_registered_applications = registered_applications[:]
                if component_identifier in registered_applications:
                    new_registered_applications.remove(component_identifier)
            return [(registration_key, new_registered_applications,
                     registered_applications)]

        return cls.safely_store(_unregister_user_callback, 20)[0][1]
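
# Usage sketch (an assumption for illustration; the key and identifier are
# hypothetical): safely_store retries the callback until the asserted values
# still match at apply time, which gives compare-and-swap semantics.
Configuration.register_usage('my-component')

def _toggle_flag():
    current = Configuration.get('/ovs/framework/flag', default=None)
    # One (key, new value, expected current value) tuple per key to update
    return [('/ovs/framework/flag', not current, current)]

Configuration.safely_store(_toggle_flag, max_retries=5)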
Esempio n. 25
0
class SSHClient(object):
    """
    Remote/local client
    """
    IP_REGEX = re.compile(
        '^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))$'
    )
    REFERENCE_ATTR = 'ovs_ref_counter'

    _logger = Logger('extensions')
    _raise_exceptions = {}  # Used by unit tests
    client_cache = {}

    def __init__(self,
                 endpoint,
                 username='******',
                 password=None,
                 cached=True,
                 timeout=None):
        """
        Initializes an SSHClient
        Please note that the underlying (cached) Paramiko instance is not thread safe!
        When using the client in a multithreaded use case, pass cached=False to avoid any racing between threads
        Possible issues that can happen when you don't:
        - The underlying Paramiko session never gets activated again (a deactivation by another thread leads to a deadlock)
        - The underlying Paramiko connection gets closed by garbage collection (a patch has been implemented to avoid this, but it is still worth mentioning)
        The downside of using a non-cached instance is that the connection needs to be established again: this can take between 0.1s and 1s
        :param endpoint: IP address to connect to, or a StorageRouter object
        :type endpoint: basestring | ovs.dal.hybrids.storagerouter.StorageRouter
        :param username: Name of the user to connect as
        :type username: str
        :param password: Password to authenticate the user as. Can be None when ssh keys are in place.
        :type password: str
        :param cached: Cache this SSHClient instance
        :type cached: bool
        :param timeout: An optional timeout (in seconds) for the TCP connect
        :type timeout: float
        """
        from subprocess import check_output
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError('Incorrect IP {0} specified'.format(ip))
        else:
            raise ValueError('The endpoint parameter should be an IP address')

        self.ip = ip
        self._client = None
        self.local_ips = [
            lip.strip() for lip in check_output(
                "ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1",
                shell=True).strip().splitlines()
        ]
        self.is_local = self.ip in self.local_ips
        self.password = password
        self.timeout = timeout
        self._unittest_mode = os.environ.get('RUNNING_UNITTESTS') == 'True'
        self._client_lock = RLock()

        current_user = check_output('whoami', shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = False  # If specified user differs from current executing user, we always use the paramiko SSHClient

        if self._unittest_mode is True:
            self.is_local = True
            if self.ip in self._raise_exceptions:
                raise_info = self._raise_exceptions[self.ip]
                if self.username in raise_info['users']:
                    raise raise_info['exception']

        if not self.is_local:
            logging.getLogger('paramiko').setLevel(logging.WARNING)
            key = None
            create_new = True
            if cached is True:
                key = '{0}@{1}'.format(self.ip, self.username)
                if key in SSHClient.client_cache:
                    create_new = False
                    self._client = SSHClient.client_cache[key]

            if create_new is True:
                import paramiko
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                if cached is True:
                    SSHClient.client_cache[key] = client
                self._client = client

        if self._client is not None:
            # Increment the ref counter to avoid closing the connection
            if not hasattr(self._client, self.REFERENCE_ATTR):
                setattr(self._client, self.REFERENCE_ATTR, 0)
            self._client.ovs_ref_counter += 1  # GIL will be locking this

        self._connect()

    def __del__(self):
        """
        Class destructor
        """
        try:
            if not self.is_local:
                self._disconnect()
        except Exception:
            pass  # Absorb destructor exceptions

    def is_connected(self):
        """
        Check whether the client is still connected
        :return: True when the connection is still active else False
        :rtype: bool
        """
        if self._client is None:
            return False
        try:
            transport = self._client.get_transport()
            if transport is None:
                return False
            transport.send_ignore()
            return True
        except EOFError:
            # Connection is closed
            return False

    def _connect(self):
        """
        Connects to the remote end
        :raises: TimeOutException: When the initially set timeout has been reached
        :raises: UnableToConnectException: When unable to connect because of 'No route to host' or 'Unable to connect'
        :raises: socket.error: When unable to connect but for different reasons than UnableToConnectException
        :raises: NotAuthenticatedException: When authentication has failed
        """
        if self.is_local is True:
            return

        from paramiko import AuthenticationException
        try:
            try:
                warnings.filterwarnings(
                    action='ignore',
                    message='.*CTR mode needs counter parameter.*',
                    category=FutureWarning)
                self._client.connect(self.ip,
                                     username=self.username,
                                     password=self.password,
                                     timeout=self.timeout)
            except:
                try:
                    self._client.close()
                except:
                    pass
                raise
        except socket.timeout as ex:
            message = str(ex)
            self._logger.error(message)
            raise TimeOutException(message)
        except socket.error as ex:
            message = str(ex)
            self._logger.error(message)
            if 'No route to host' in message or 'Unable to connect' in message:
                raise UnableToConnectException(message)
            raise
        except AuthenticationException:
            raise NotAuthenticatedException('Authentication failed')

    def _disconnect(self):
        """
        Disconnects from the remote end
        :return: None
        :rtype: NoneType
        """
        if self.is_local is True:
            return
        with self._client_lock:
            # Check if it is safe to disconnect
            self._client.ovs_ref_counter -= 1
            if self._client.ovs_ref_counter == 0:  # When this is not 0 that means that other SSHClients are using this reference
                self._client.close()

    @classmethod
    def _clean(cls):
        """
        Clean everything up related to the unittests
        """
        cls._raise_exceptions = {}

    @staticmethod
    def shell_safe(argument):
        """
        Makes sure that the given path/string is escaped and safe for shell
        :param argument: Argument to make safe for shell
        """
        return "'{0}'".format(argument.replace(r"'", r"'\''"))

    @staticmethod
    def _clean_text(text):
        if type(text) is list:
            text = '\n'.join(line.rstrip() for line in text)
        try:
            # This strip is absolutely necessary. Without it, channel.communicate() is never executed (odd but true)
            cleaned = text.strip()
            # I love unicode
            if not isinstance(text, unicode):
                cleaned = unicode(cleaned.decode('utf-8', 'replace'))
            for old, new in {
                    u'\u2018': "'",
                    u'\u2019': "'",
                    u'\u201a': "'",
                    u'\u201e': '"',
                    u'\u201c': '"',
                    u'\u25cf': '*'
            }.iteritems():
                cleaned = cleaned.replace(old, new)
            cleaned = unicodedata.normalize('NFKD', cleaned)
            cleaned = cleaned.encode('ascii', 'ignore')
            return cleaned
        except UnicodeDecodeError:
            SSHClient._logger.error(
                'UnicodeDecodeError with output: {0}'.format(text))
            raise

    @connected()
    @mocked(MockedSSHClient.run)
    def run(self,
            command,
            debug=False,
            suppress_logging=False,
            allow_nonzero=False,
            allow_insecure=False,
            return_stderr=False,
            return_exit_code=False,
            timeout=None):
        """
        Executes a shell command
        :param suppress_logging: Do not log anything
        :type suppress_logging: bool
        :param command: Command to execute
        :type command: list or str
        :param debug: Extended logging
        :type debug: bool
        :param allow_nonzero: Allow non-zero exit code
        :type allow_nonzero: bool
        :param allow_insecure: Allow string commands (which might be improperly escaped)
        :type allow_insecure: bool
        :param return_stderr: Return stderr
        :type return_stderr: bool
        :param return_exit_code: Return exit code of the command
        :type return_exit_code: bool
        :param timeout: Timeout after which the command should be aborted (in seconds)
        :type timeout: int
        :return: The command's stdout or tuple for stdout and stderr
        :rtype: str or tuple(str, str)
        """
        if not isinstance(command, list) and not allow_insecure:
            raise RuntimeError(
                'The given command must be a list, or the allow_insecure flag must be set'
            )
        if isinstance(command, list):
            command = ' '.join(
                [self.shell_safe(str(entry)) for entry in command])
        original_command = command
        if self.is_local is True:
            stderr = None
            try:
                try:
                    if not hasattr(select, 'poll'):
                        import subprocess
                        subprocess._has_poll = False  # Damn 'monkey patching'
                    if timeout is not None:
                        command = "'timeout' '{0}' {1}".format(
                            timeout, command)
                    channel = Popen(command,
                                    stdout=PIPE,
                                    stderr=PIPE,
                                    shell=True)
                except OSError as ose:
                    raise CalledProcessError(1, original_command, str(ose))
                stdout, stderr = channel.communicate()
                stdout = self._clean_text(stdout)
                stderr = self._clean_text(stderr)
                exit_code = channel.returncode
                if exit_code == 124:
                    raise CalledProcessTimeout(exit_code, original_command,
                                               'Timeout during command')
                if exit_code != 0 and allow_nonzero is False:  # Raise same error as check_output
                    raise CalledProcessError(exit_code, original_command,
                                             stdout)
                if debug is True:
                    self._logger.debug('stdout: {0}'.format(stdout))
                    self._logger.debug('stderr: {0}'.format(stderr))
                return_value = [stdout]
                # Order matters for backwards compatibility
                if return_stderr is True:
                    return_value.append(stderr)
                if return_exit_code is True:
                    return_value.append(exit_code)
                # Backwards compatibility
                if len(return_value) == 1:
                    return return_value[0]
                return tuple(return_value)
            except CalledProcessError as cpe:
                if suppress_logging is False:
                    self._logger.error(
                        'Command "{0}" failed with output "{1}"{2}'.format(
                            original_command, cpe.output, '' if stderr is None
                            else ' and error "{0}"'.format(stderr)))
                raise
        else:
            _, stdout, stderr = self._client.exec_command(
                command, timeout=timeout)  # stdin, stdout, stderr
            try:
                output = self._clean_text(stdout.readlines())
                error = self._clean_text(stderr.readlines())
                exit_code = stdout.channel.recv_exit_status()
            except socket.timeout:
                raise CalledProcessTimeout(124, original_command,
                                           'Timeout during command')
            if exit_code != 0 and allow_nonzero is False:  # Raise same error as check_output
                if suppress_logging is False:
                    self._logger.error(
                        'Command "{0}" failed with output "{1}" and error "{2}"'
                        .format(command, output, error))
                raise CalledProcessError(exit_code, command, output)
            return_value = [output]
            # Order matters for backwards compatibility
            if return_stderr is True:
                return_value.append(error)
            if return_exit_code is True:
                return_value.append(exit_code)
            # Backwards compatibility
            if len(return_value) == 1:
                return return_value[0]
            return tuple(return_value)
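
    # Usage sketch (illustration only; the IP and command are hypothetical):
    # passing the command as a list gets every argument escaped through
    # shell_safe individually.
    #   client = SSHClient('127.0.0.1', username='root')
    #   stdout = client.run(['ls', '-l', '/tmp'])
    #   stdout, stderr, exit_code = client.run(['ls', '/nonexistent'],
    #                                          allow_nonzero=True,
    #                                          return_stderr=True,
    #                                          return_exit_code=True)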

    @mocked(MockedSSHClient.dir_create)
    def dir_create(self, directories):
        """
        Ensures a directory exists on the remote end
        :param directories: Directories to create
        """
        if isinstance(directories, basestring):
            directories = [directories]
        for directory in directories:
            if self.is_local is True:
                if not os.path.exists(directory):
                    os.makedirs(directory)
            else:
                self.run(['mkdir', '-p', directory])

    @mocked(MockedSSHClient.dir_delete)
    def dir_delete(self, directories, follow_symlinks=False):
        """
        Remove a directory (or multiple directories) from the remote filesystem recursively
        :param directories: Single directory or list of directories to delete
        :param follow_symlinks: Boolean to indicate if symlinks should be followed and thus be deleted too
        """
        if isinstance(directories, basestring):
            directories = [directories]
        for directory in directories:
            real_path = self.file_read_link(directory)
            if real_path and follow_symlinks is True:
                self.file_unlink(directory.rstrip('/'))
                self.dir_delete(real_path)
            else:
                if self.is_local is True:
                    if os.path.exists(directory):
                        for dirpath, dirnames, filenames in os.walk(
                                directory,
                                topdown=False,
                                followlinks=follow_symlinks):
                            for filename in filenames:
                                os.remove('/'.join([dirpath, filename]))
                            for sub_directory in dirnames:
                                os.rmdir('/'.join([dirpath, sub_directory]))
                        os.rmdir(directory)
                else:
                    if self.dir_exists(directory):
                        self.run(['rm', '-rf', directory])

    @mocked(MockedSSHClient.dir_exists)
    def dir_exists(self, directory):
        """
        Checks if a directory exists on a remote host
        :param directory: Directory to check for existence
        """
        if self.is_local is True:
            return os.path.isdir(directory)
        else:
            command = """import os, json
print json.dumps(os.path.isdir('{0}'))""".format(directory)
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))
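    # Note on the pattern above (also used by dir_list, file_exists and
    # others below): a tiny Python program is executed on the remote host
    # and its result is sent back as JSON on stdout, so both ends agree on
    # the data type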

    @mocked(MockedSSHClient.dir_chmod)
    def dir_chmod(self, directories, mode, recursive=False):
        """
        Chmod a or multiple directories
        :param directories: Directories to chmod
        :param mode: Mode to chmod
        :param recursive: Chmod the directories recursively or not
        :return: None
        """
        if not isinstance(mode, int):
            raise ValueError('Mode should be an integer')

        if isinstance(directories, basestring):
            directories = [directories]
        for directory in directories:
            if self.is_local is True:
                os.chmod(directory, mode)
                if recursive is True:
                    for root, dirs, _ in os.walk(directory):
                        for sub_dir in dirs:
                            os.chmod('/'.join([root, sub_dir]), mode)
            else:
                command = ['chmod', oct(mode), directory]
                if recursive is True:
                    command.insert(1, '-R')
                self.run(command)

    @mocked(MockedSSHClient.dir_chown)
    def dir_chown(self, directories, user, group, recursive=False):
        """
        Chown a or multiple directories
        :param directories: Directories to chown
        :param user: User to assign to directories
        :param group: Group to assign to directories
        :param recursive: Chown the directories recursively or not
        :return: None
        """
        if self._unittest_mode is True:
            return

        all_users = [user_info[0] for user_info in pwd.getpwall()]
        all_groups = [group_info[0] for group_info in grp.getgrall()]

        if user not in all_users:
            raise ValueError(
                'User "{0}" is unknown on the system'.format(user))
        if group not in all_groups:
            raise ValueError(
                'Group "{0}" is unknown on the system'.format(group))

        uid = pwd.getpwnam(user)[2]
        gid = grp.getgrnam(group)[2]
        if isinstance(directories, basestring):
            directories = [directories]
        for directory in directories:
            if self.is_local is True:
                os.chown(directory, uid, gid)
                if recursive is True:
                    for root, dirs, _ in os.walk(directory):
                        for sub_dir in dirs:
                            os.chown('/'.join([root, sub_dir]), uid, gid)
            else:
                command = ['chown', '{0}:{1}'.format(user, group), directory]
                if recursive is True:
                    command.insert(1, '-R')
                self.run(command)

    @mocked(MockedSSHClient.dir_list)
    def dir_list(self, directory):
        """
        List contents of a directory on a remote host
        :param directory: Directory to list
        """
        if self.is_local is True:
            return os.listdir(directory)
        else:
            command = """import os, json
print json.dumps(os.listdir('{0}'))""".format(directory)
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))

    @mocked(MockedSSHClient.symlink)
    def symlink(self, links):
        """
        Create symlink
        :param links: Dictionary containing the absolute path of the files and their link which needs to be created
        :return: None
        """
        if self.is_local is True:
            for link_name, source in links.iteritems():
                os.symlink(source, link_name)
        else:
            for link_name, source in links.iteritems():
                self.run(['ln', '-s', source, link_name])

    @mocked(MockedSSHClient.file_create)
    def file_create(self, filenames):
        """
        Create a or multiple files
        :param filenames: Files to create
        :return: None
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        for filename in filenames:
            if not filename.startswith('/'):
                raise ValueError(
                    'Absolute path required for filename {0}'.format(filename))

            if self.is_local is True:
                if not self.dir_exists(directory=os.path.dirname(filename)):
                    self.dir_create(os.path.dirname(filename))
                if not os.path.exists(filename):
                    open(filename, 'a').close()
            else:
                directory = os.path.dirname(filename)
                self.dir_create(directory)
                self.run(['touch', filename])

    @mocked(MockedSSHClient.file_delete)
    def file_delete(self, filenames):
        """
        Remove a file (or multiple files) from the remote filesystem
        :param filenames: File names to delete
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        for filename in filenames:
            if self.is_local is True:
                if '*' in filename:
                    for fn in glob.glob(filename):
                        os.remove(fn)
                else:
                    if os.path.isfile(filename):
                        os.remove(filename)
            else:
                if '*' in filename:
                    command = """import glob, json
print json.dumps(glob.glob('{0}'))""".format(filename)
                    for fn in json.loads(
                            self.run(
                                ['python', '-c', """{0}""".format(command)])):
                        self.run(['rm', '-f', fn])
                else:
                    if self.file_exists(filename):
                        self.run(['rm', '-f', filename])

    @mocked(MockedSSHClient.file_unlink)
    def file_unlink(self, path):
        """
        Unlink a file
        :param path: Path of the file to unlink
        :return: None
        """
        if self.is_local is True:
            if os.path.islink(path):
                os.unlink(path)
        else:
            self.run(['unlink', path])

    @mocked(MockedSSHClient.file_read_link)
    def file_read_link(self, path):
        """
        Read the symlink of the specified path
        :param path: Path of the symlink
        :return: The real path of the symlink, None when the path is not a symlink
        """
        path = path.rstrip('/')
        if self.is_local is True:
            if os.path.islink(path):
                return os.path.realpath(path)
        else:
            command = """import os, json
if os.path.islink('{0}'):
    print json.dumps(os.path.realpath('{0}'))""".format(path)
            try:
                return json.loads(
                    self.run(['python', '-c', """{0}""".format(command)]))
            except ValueError:
                pass

    @mocked(MockedSSHClient.file_read)
    def file_read(self, filename):
        """
        Load a file from the remote end
        :param filename: File to read
        """
        if self.is_local is True:
            with open(filename, 'r') as the_file:
                return the_file.read()
        else:
            return self.run(['cat', filename])

    @connected()
    @mocked(MockedSSHClient.file_write)
    def file_write(self, filename, contents):
        """
        Writes into a file to the remote end
        :param filename: File to write
        :param contents: Contents to write to the file
        """
        temp_filename = '{0}~'.format(filename)
        if self.is_local is True:
            if os.path.isfile(filename):
                # Use .run([cp -pf ...]) here, to make sure owner and other rights are preserved
                self.run(['cp', '-pf', filename, temp_filename])
            with open(temp_filename, 'w') as the_file:
                the_file.write(contents)
                the_file.flush()
                os.fsync(the_file)
            os.rename(temp_filename, filename)
        else:
            handle, local_temp_filename = tempfile.mkstemp()
            with open(local_temp_filename, 'w') as the_file:
                the_file.write(contents)
                the_file.flush()
                os.fsync(the_file)
            os.close(handle)
            try:
                if self.file_exists(filename):
                    self.run(['cp', '-pf', filename, temp_filename])
                sftp = self._client.open_sftp()
                sftp.put(local_temp_filename, temp_filename)
                sftp.close()
                self.run(['mv', '-f', temp_filename, filename])
            finally:
                os.remove(local_temp_filename)
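    # Note on the pattern above: the contents are written to '<filename>~'
    # first (with the original's owner/permissions preserved via 'cp -pf'),
    # fsync'ed, and only then renamed/moved onto the target, so readers see
    # either the old or the new content, never a partial file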

    @connected()
    @mocked(MockedSSHClient.file_upload)
    def file_upload(self, remote_filename, local_filename):
        """
        Uploads a file to a remote end
        :param remote_filename: Name of the file on the remote location
        :param local_filename: Name of the file locally
        """
        temp_remote_filename = '{0}~'.format(remote_filename)
        if self.is_local is True:
            self.run(['cp', '-f', local_filename, temp_remote_filename])
            self.run(['mv', '-f', temp_remote_filename, remote_filename])
        else:
            sftp = self._client.open_sftp()
            sftp.put(local_filename, temp_remote_filename)
            sftp.close()
            self.run(['mv', '-f', temp_remote_filename, remote_filename])

    @mocked(MockedSSHClient.file_exists)
    def file_exists(self, filename):
        """
        Checks if a file exists on a remote host
        :param filename: File to check for existence
        """
        if self.is_local is True:
            return os.path.isfile(filename)
        else:
            command = """import os, json
print json.dumps(os.path.isfile('{0}'))""".format(filename)
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))

    @mocked(MockedSSHClient.file_chmod)
    def file_chmod(self, filename, mode):
        """
        Sets the mode of a remote file
        :param filename: File to chmod
        :param mode: Mode to give to file, eg: 0744
        """
        self.run(['chmod', oct(mode), filename])

    @mocked(MockedSSHClient.file_chown)
    def file_chown(self, filenames, user, group):
        """
        Sets the ownership of a remote file
        :param filenames: Files to chown
        :param user: User to set
        :param group: Group to set
        :return: None
        """
        if self._unittest_mode is True:
            return

        all_users = [user_info[0] for user_info in pwd.getpwall()]
        all_groups = [group_info[0] for group_info in grp.getgrall()]

        if user not in all_users:
            raise ValueError(
                'User "{0}" is unknown on the system'.format(user))
        if group not in all_groups:
            raise ValueError(
                'Group "{0}" is unknown on the system'.format(group))

        uid = pwd.getpwnam(user)[2]
        gid = grp.getgrnam(group)[2]
        if isinstance(filenames, basestring):
            filenames = [filenames]
        for filename in filenames:
            if self.file_exists(filename=filename) is False:
                continue
            if self.is_local is True:
                os.chown(filename, uid, gid)
            else:
                self.run(['chown', '{0}:{1}'.format(user, group), filename])
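
# Minimal sketch of the uid/gid lookup performed above; pwd and grp are
# POSIX-only modules, and 'root' is just an example principal.
import grp
import pwd

uid = pwd.getpwnam('root').pw_uid  # Named attribute, equivalent to [2] above
gid = grp.getgrnam('root').gr_gid
print('{0}:{1}'.format(uid, gid))  # 0:0 on most systems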

    @mocked(MockedSSHClient.file_list)
    def file_list(self, directory, abs_path=False, recursive=False):
        """
        List all files in directory
        WARNING: When executed recursively against a remote host, this can take quite some time

        :param directory: Directory to list the files in
        :param abs_path: Return the absolute path of the files or only the file names
        :param recursive: Loop through the directories recursively
        :return: List of files in directory
        """
        all_files = []
        if self.is_local is True:
            for root, dirs, files in os.walk(directory):
                for file_name in files:
                    if abs_path is True:
                        all_files.append('/'.join([root, file_name]))
                    else:
                        all_files.append(file_name)
                if recursive is False:
                    break
        else:
            with remote(self.ip, [os], 'root') as rem:
                for root, dirs, files in rem.os.walk(directory):
                    for file_name in files:
                        if abs_path is True:
                            all_files.append('/'.join([root, file_name]))
                        else:
                            all_files.append(file_name)
                    if recursive is False:
                        break
        return all_files
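
# Sketch of the non-recursive listing trick used above: os.walk() yields the
# top directory first, so breaking after one iteration lists only its direct
# files. '/tmp' is a hypothetical example directory.
import os

for root, dirs, files in os.walk('/tmp'):
    print(files)  # Only the files directly under /tmp
    break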

    @mocked(MockedSSHClient.file_move)
    def file_move(self, source_file_name, destination_file_name):
        """
        Move a file
        :param source_file_name: Absolute path of the file to move
        :type source_file_name: str
        :param destination_file_name: Location to move to (Can be (new) filename or directory)
        :type destination_file_name: str
        :raises: ValueError - When the source file does not exist
        :return: None
        :rtype: NoneType
        """
        if not source_file_name.startswith('/'):
            raise ValueError('Source should start with a "/"')
        if not destination_file_name.startswith('/'):
            raise ValueError('Destination should start with a "/"')
        if not self.file_exists(filename=source_file_name):
            raise ValueError(
                'Source file {0} does not exist'.format(source_file_name))

        while '//' in source_file_name:
            source_file_name = source_file_name.replace('//', '/')  # str.replace returns a new string; re-assign or the loop never terminates
        while '//' in destination_file_name:
            destination_file_name = destination_file_name.replace('//', '/')

        if self.dir_exists(directory=destination_file_name):
            target_dir = destination_file_name
            # If the destination is a directory, re-use the source's file name
            destination_file_name = os.path.join(
                destination_file_name, os.path.basename(source_file_name))
        else:
            target_dir = os.path.dirname(destination_file_name)

        if not self.dir_exists(directory=target_dir):
            self.dir_create(directories=target_dir)

        if self.is_local is True:
            return os.rename(source_file_name, destination_file_name)
        else:
            command = """import os, json
print json.dumps(os.rename('{0}', '{1}'))""".format(source_file_name,
                                                    destination_file_name)
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))
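
# Why the slash normalisation above loops: a single replace() pass leaves
# '//' behind for runs of three or more slashes.
path = '/var///log//app.log'
while '//' in path:
    path = path.replace('//', '/')  # replace() returns a new string each pass
print(path)  # /var/log/app.log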

    @connected()
    @mocked(MockedSSHClient.path_exists)
    def path_exists(self, file_path):
        """
        Checks if a path exists, either locally or on the remote host
        :param file_path: File path to check for existence
        :type file_path: str
        """
        if self.is_local is True:
            return os.path.exists(file_path)
        else:
            command = """import os, json
print json.dumps(os.path.exists('{0}'))""".format(file_path)
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))

    def is_mounted(self, path):
        """
        Verify whether a mount point is mounted
        :param path: Path to check
        :type path: str
        :return: True if mount point is mounted
        :rtype: bool
        """
        path = path.rstrip('/')
        if self.is_local is True:
            return os.path.ismount(path)

        command = """import os, json
print json.dumps(os.path.ismount('{0}'))""".format(path)
        try:
            return json.loads(
                self.run(['python', '-c', """{0}""".format(command)]))
        except ValueError:
            return False
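
# Local usage of the mount check; '/' is always a mount point on Linux,
# while a plain directory such as '/tmp/nonexistent' normally is not.
import os

print(os.path.ismount('/'))                 # True
print(os.path.ismount('/tmp/nonexistent'))  # False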

    def get_hostname(self):
        """
        Gets the short hostname and the fully qualified domain name
        """
        short = self.run(['hostname', '-s'])
        try:
            fqdn = self.run(['hostname', '-f'])
        except Exception:  # Fall back to the short name when no FQDN is configured
            fqdn = short
        return short, fqdn
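
# Pure-stdlib alternative for the local case; socket.getfqdn() falls back to
# the plain name when no domain is configured, much like the except branch
# above. Output may differ slightly from `hostname -s`/`hostname -f`.
import socket

short = socket.gethostname().split('.')[0]
fqdn = socket.getfqdn()
print('{0} {1}'.format(short, fqdn))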