def __init__(self, config_type, vpool_guid, storagedriver_id):
    """
    Initializes the class
    """

    def make_configure(sct):
        """
        section closure
        :param sct: Section to create configure function for
        """
        return lambda **kwargs: self._add(sct, **kwargs)

    if config_type != 'storagedriver':
        raise RuntimeError('Invalid configuration type. Allowed: storagedriver')

    storagerouterclient.Logger.setupLogging(LogHandler.load_path('storagerouterclient'))
    # noinspection PyArgumentList
    storagerouterclient.Logger.enableLogging()

    self._logger = LogHandler.get('extensions', name='storagedriver')
    self.config_type = config_type
    self.configuration = {}
    self.path = '/ovs/vpools/{0}/hosts/{1}/config/{{0}}'.format(vpool_guid, storagedriver_id)
    self.remote_path = 'etcd://127.0.0.1:2379{0}'.format(self.path.format('')).strip('/')
    self.is_new = True
    self.dirty_entries = []
    self.params = copy.deepcopy(StorageDriverConfiguration.parameters)  # Never use parameters directly

    # Fix some manual "I know what I'm doing" overrides
    backend_connection_manager = 'backend_connection_manager'
    self.params[self.config_type][backend_connection_manager]['optional'].append('s3_connection_strict_consistency')

    # Generate configure_* methods
    for section in self.params[self.config_type]:
        setattr(self, 'configure_{0}'.format(section), make_configure(section))
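# Hedged usage sketch for the constructor above: every section found in
# StorageDriverConfiguration.parameters gets a generated configure_<section>() helper that
# forwards its keyword arguments to self._add(section, **kwargs). The guid/id values and the
# keyword argument shown below are illustrative assumptions, not values taken from the source.
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration

sd_config = StorageDriverConfiguration('storagedriver', 'example-vpool-guid', 'example-storagedriver-id')
sd_config.configure_backend_connection_manager(s3_connection_strict_consistency=False)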
def new_function(*args, **kwargs):
    """
    Wrapped function
    """
    request = _find_request(args)
    method_args = list(args)[:]
    method_args = method_args[method_args.index(request) + 1:]

    # Log the call
    metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
    _logger = LogHandler.get('log', name='api')
    _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
        f.__module__,
        f.__name__,
        getattr(request, 'client').user_guid if hasattr(request, 'client') else None,
        json.dumps(method_args),
        json.dumps(kwargs),
        json.dumps(metadata)
    ))

    # Call the function
    start = time.time()
    return_value = f(*args, **kwargs)
    duration = time.time() - start
    if duration > 5 and log_slow is True:
        logger.warning('API call {0}.{1} took {2}s'.format(f.__module__, f.__name__, round(duration, 2)))
    return return_value
def process_exception(self, request, exception):
    """
    Logs information about the given error
    """
    _ = self, request
    logger = LogHandler.get('api', 'middleware')
    logger.exception('An unhandled exception occurred: {0}'.format(exception))
def pulse():
    """
    Update the heartbeats for all Storage Routers
    :return: None
    """
    logger = LogHandler.get('extensions', name='heartbeat')
    current_time = int(time.time())
    machine_id = System.get_my_machine_id()
    amqp = '{0}://{1}:{2}@{3}//'.format(EtcdConfiguration.get('/ovs/framework/messagequeue|protocol'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|user'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|password'),
                                        EtcdConfiguration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id)))
    celery_path = OSManager.get_path('celery')
    worker_states = check_output("{0} inspect ping -b {1} --timeout=5 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
    routers = StorageRouterList.get_storagerouters()
    for node in routers:
        if node.heartbeats is None:
            node.heartbeats = {}
        if 'celery@{0}: OK'.format(node.name) in worker_states:
            node.heartbeats['celery'] = current_time
        if node.machine_id == machine_id:
            node.heartbeats['process'] = current_time
        else:
            try:
                # Check timeout of other nodes and clear ARP cache
                if node.heartbeats and 'process' in node.heartbeats:
                    if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                        check_output("/usr/sbin/arp -d {0}".format(node.name), shell=True)
            except CalledProcessError:
                logger.exception('Error clearing ARP cache')
        node.save()
def log_slow_calls(f):
    """
    Wrapper to log the duration when a call takes more than 1s
    :param f: Function to wrap
    :return: Wrapped function
    """
    logger = LogHandler.get('extensions', name='etcdconfiguration')

    def new_function(*args, **kwargs):
        """
        Execute function
        :return: Function output
        """
        start = time.time()
        try:
            return f(*args, **kwargs)
        finally:
            key_info = ''
            if 'key' in kwargs:
                key_info = ' (key: {0})'.format(kwargs['key'])
            elif len(args) > 0:
                key_info = ' (key: {0})'.format(args[0])
            duration = time.time() - start
            if duration > 1:
                logger.warning('Call to {0}{1} took {2}s'.format(f.__name__, key_info, duration))

    new_function.__name__ = f.__name__
    new_function.__module__ = f.__module__
    return new_function
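# Hedged usage sketch for log_slow_calls: wrapping a hypothetical etcd read helper so that any
# invocation slower than one second ends up as a warning in the 'etcdconfiguration' log.
# The function name and body below are illustrative only, not taken from the source.
@log_slow_calls
def read_key(key):
    # A real implementation would perform the actual etcd lookup here
    return None

read_key(key='/ovs/framework/example')  # Calls slower than 1s are logged with the key in the message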
def __init__(self, ip, username, password):
    """
    Initializes the object with credentials and connection information
    Requires the novaclient library to be installed on the node this code is executed on
    Uses the v2 api in Kilo/Liberty (v1 is deprecated in Kilo/Liberty)
    Uses the v1_1 api in Juno
    """
    try:
        from novaclient.v2 import client as nova_client
    except ImportError:
        from novaclient.v1_1 import client as nova_client
    try:
        from cinderclient.v2 import client as cinder_client
    except ImportError:
        from cinderclient.v1 import client as cinder_client
    from novaclient import exceptions

    self._logger = LogHandler.get('extensions', name='openstack_mgmt')
    self._novaclientexceptions = exceptions
    self.nova_client = nova_client.Client(username=username,
                                          api_key=password,
                                          project_id='admin',
                                          auth_url='http://{0}:35357/v2.0'.format(ip),
                                          service_type='compute')
    self.cinder_client = cinder_client.Client(username=username,
                                              api_key=password,
                                              project_id='admin',
                                              auth_url='http://{0}:35357/v2.0'.format(ip),
                                              service_type='volumev2')
    self.management = OpenStackManagement(cinder_client=self.cinder_client)
    self.STATE_MAPPING = {'up': 'RUNNING'}
    self._logger.debug('Init complete')
def manage_running_tasks(tasklist, timesleep=10):
    """
    Manage a list of running celery tasks
    - discard PENDING tasks after a certain timeout
    - validate RUNNING tasks are actually running
    :param tasklist: Dictionary of tasks to wait for {IP address: AsyncResult}
    :type tasklist: dict
    :param timesleep: Sleep between checks - for long-running tasks it's better to sleep for a longer
                      period of time to reduce the number of ssh calls
    :type timesleep: int
    :return: Results and failed nodes
    :rtype: tuple
    """
    logger = LogHandler.get('lib', name='celery toolbox')
    ssh_clients = {}
    tasks_pending = {}
    tasks_pending_timeout = 1800  # 30 minutes
    results = {}
    failed_nodes = []
    while len(tasklist.keys()) > 0:
        for ip, task in tasklist.items():
            if task.state in ('SUCCESS', 'FAILURE'):
                logger.info('Task {0} finished: {1}'.format(task.id, task.state))
                results[ip] = task.get(propagate=False)
                del tasklist[ip]
            elif task.state == 'PENDING':
                if task.id not in tasks_pending:
                    tasks_pending[task.id] = time.time()
                else:
                    task_pending_since = tasks_pending[task.id]
                    if time.time() - task_pending_since > tasks_pending_timeout:
                        logger.warning('Task {0} is pending since {1} on node {2}. Task will be revoked'.format(task.id, datetime.datetime.fromtimestamp(task_pending_since), ip))
                        revoke(task.id)
                        del tasklist[ip]
                        del tasks_pending[task.id]
                        failed_nodes.append(ip)
            elif task.state == 'STARTED':
                if ip not in ssh_clients:
                    ssh_clients[ip] = SSHClient(ip, username='******')
                client = ssh_clients[ip]
                if ServiceManager.get_service_status('workers', client) is False:
                    logger.error('Service ovs-workers on node {0} appears halted while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                    revoke(task.id)
                    del tasklist[ip]
                    failed_nodes.append(ip)
                else:
                    ping_result = task.app.control.inspect().ping()
                    storage_router = StorageRouterList.get_by_ip(ip)
                    if "celery@{0}".format(storage_router.name) not in ping_result:
                        logger.error('Service ovs-workers on node {0} is not reachable via rabbitmq while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                        revoke(task.id)
                        del tasklist[ip]
                        failed_nodes.append(ip)
        if len(tasklist.keys()) > 0:
            time.sleep(timesleep)
    return results, failed_nodes
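# Hedged usage sketch: the helper above expects a dictionary mapping a node's IP address to the
# AsyncResult of the task dispatched to that node; 'example_task' stands in for any celery task
# defined elsewhere and is purely an assumption.
# tasklist = {'10.100.1.11': example_task.delay(), '10.100.1.12': example_task.delay()}
# results, failed_nodes = manage_running_tasks(tasklist, timesleep=30)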
def task_postrun_handler(sender=None, task_id=None, task=None, args=None, kwargs=None, **kwds):
    """
    Hook for celery postrun event
    """
    _ = sender, task, args, kwargs, kwds
    try:
        MessageController.fire(MessageController.Type.TASK_COMPLETE, task_id)
    except Exception as ex:
        loghandler = LogHandler.get('celery', name='celery')
        loghandler.error('Caught error during postrun handler: {0}'.format(ex))
def __init__(self, name, wait=None):
    """
    Creates a volatile mutex object
    """
    self._logger = LogHandler.get('extensions', 'volatile mutex')
    self._volatile = VolatileFactory.get_client()
    self.name = name
    self._has_lock = False
    self._start = 0
    self._wait = wait
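# Hedged usage sketch for the mutex above, following the acquire()/release() pattern the
# rate-limit decorator further on in this collection uses. The lock name and wait value are
# illustrative assumptions.
mutex = volatile_mutex('example_critical_section', wait=5)
try:
    mutex.acquire()
    pass  # Code that must not run concurrently across threads, processes or nodes goes here
finally:
    mutex.release()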
def __init__(self, *args, **kwargs):
    """
    Initializes the distributed scheduler
    """
    self._logger = LogHandler.get('celery', name='celery beat')
    self._persistent = PersistentFactory.get_client()
    self._namespace = 'ovs_celery_beat'
    self._mutex = volatile_mutex('celery_beat', 10)
    self._has_lock = False
    super(DistributedScheduler, self).__init__(*args, **kwargs)
    self._logger.debug('DS init')
def _log(task, kwargs, storagedriver_id):
    """
    Log an event
    """
    metadata = {'storagedriver': StorageDriverList.get_by_storagedriver_id(storagedriver_id).guid}
    _logger = LogHandler.get('log', name='volumedriver_event')
    _logger.info('[{0}.{1}] - {2} - {3}'.format(
        task.__class__.__module__,
        task.__class__.__name__,
        json.dumps(kwargs),
        json.dumps(metadata)
    ))
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = LogHandler.get('api', 'oauth2')

    def wrap(f):
        """
        Wrapper function
        """
        def new_function(self, request, *args, **kwargs):
            """
            Wrapped function
            """
            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            mutex = volatile_mutex(key)
            try:
                mutex.acquire()
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        return HttpResponse, {'error_code': 'rate_limit_timeout',
                                              'error': 'Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2))}, 429
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    return HttpResponse, {'error_code': 'rate_limit_reached',
                                          'error': 'Rate limit reached ({0} in last {1}s)'.format(calls, per)}, 429
                client.set(key, rate_info)
            finally:
                mutex.release()
            return f(self, request, *args, **kwargs)

        new_function.__name__ = f.__name__
        new_function.__module__ = f.__module__
        return new_function
    return wrap
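# Hedged usage sketch for the decorator above, mirroring how MetadataView.get applies it later in
# this collection: at most 60 calls per rolling 60 seconds per client IP, with a 60-second penalty
# window once the limit is exceeded (throttled calls get a 429 response tuple).
# @auto_response()
# @limit(amount=60, per=60, timeout=60)
# def get(self, request, *args, **kwargs):
#     ...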
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = LogHandler.get('api')

    def wrap(f):
        """
        Wrapper function
        """
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)
            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        raise Throttled(wait=active_timeout - now)
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    raise Throttled(wait=timeout)
                client.set(key, rate_info)
            return f(*args, **kwargs)

        new_function.__name__ = f.__name__
        new_function.__module__ = f.__module__
        return new_function
    return wrap
def __init__(self, host='127.0.0.1', login='******'):
    self._logger = LogHandler.get('extensions', name='kvm sdk')
    self._logger.debug('Init libvirt')
    self.states = {libvirt.VIR_DOMAIN_NOSTATE: 'NO STATE',
                   libvirt.VIR_DOMAIN_RUNNING: 'RUNNING',
                   libvirt.VIR_DOMAIN_BLOCKED: 'BLOCKED',
                   libvirt.VIR_DOMAIN_PAUSED: 'PAUSED',
                   libvirt.VIR_DOMAIN_SHUTDOWN: 'SHUTDOWN',
                   libvirt.VIR_DOMAIN_SHUTOFF: 'TURNEDOFF',
                   libvirt.VIR_DOMAIN_CRASHED: 'CRASHED'}
    self.libvirt = libvirt
    self.host = host
    self.login = login
    self._conn = None
    self.ssh_client = SSHClient(self.host, username='******')
    self._logger.debug('Init complete')
def __init__(self, name, wait=None):
    """
    Creates a file mutex object
    """
    self._logger = LogHandler.get('extensions', 'file mutex')
    self.name = name
    self._has_lock = False
    self._start = 0
    self._handle = open(self.key(), 'w')
    self._wait = wait
    try:
        os.chmod(
            self.key(),
            stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH
        )
    except OSError:
        pass
def new_function(self, request, *args, **kwargs):
    """
    Wrapped function
    """
    # Log the call
    metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
    _logger = LogHandler.get('log', name='api')
    _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
        f.__module__,
        f.__name__,
        getattr(request, 'client').user_guid if hasattr(request, 'client') else None,
        json.dumps(list(args)),
        json.dumps(kwargs),
        json.dumps(metadata)
    ))

    # Call the function
    return f(self, request, *args, **kwargs)
def new_function(*args, **kwargs):
    """
    Wrapped function
    """
    # Log the call
    if event_type == 'VOLUMEDRIVER_TASK':
        metadata = {'storagedriver': StorageDriverList.get_by_storagedriver_id(kwargs['storagedriver_id']).guid}
    else:
        metadata = {}
    _logger = LogHandler.get('log', name=event_type.lower())
    _logger.info('[{0}.{1}] - {2} - {3} - {4}'.format(
        f.__module__,
        f.__name__,
        json.dumps(list(args)),
        json.dumps(kwargs),
        json.dumps(metadata)
    ))

    # Call the function
    return f(*args, **kwargs)
def __init__(self, cinder_client):
    self._logger = LogHandler.get('extensions', name='openstack_mgmt')
    self.client = SSHClient('127.0.0.1', username='******')
    self.cinder_client = cinder_client

    self._NOVA_CONF = '/etc/nova/nova.conf'
    self._CINDER_CONF = '/etc/cinder/cinder.conf'
    self._is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), self.client)
    self._nova_installed = self.client.file_exists(self._NOVA_CONF)
    self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
    self._driver_location = OSManager.get_openstack_package_base_path()
    self._openstack_users = OSManager.get_openstack_users()
    self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'

    try:
        self._is_devstack = 'stack' in str(self.client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true'))
    except SystemExit:  # ssh client raises system exit 1
        self._is_devstack = False
    except Exception:
        self._is_devstack = False

    try:
        from cinder import version
        version_string = version.version_string()
        if version_string.startswith('9.0'):
            self._stack_version = 'newton'
        elif version_string.startswith('8.0'):
            self._stack_version = 'mitaka'
        elif version_string.startswith('2015.2') or version_string.startswith('7.0'):
            self._stack_version = 'liberty'
        elif version_string.startswith('2015.1'):
            self._stack_version = 'kilo'
        elif version_string.startswith('2014.2'):
            self._stack_version = 'juno'
        else:
            raise ValueError('Unsupported cinder version: {0}'.format(version_string))
    except Exception as ex:
        raise ValueError('Cannot determine cinder version: {0}'.format(ex))
def auto_response():
    """
    Json response wrapper
    """
    logger = LogHandler.get('api', 'oauth2')

    def wrap(f):
        """
        Wrapper function
        """
        def new_function(*args, **kw):
            """
            Wrapped function
            """
            results = f(*args, **kw)
            if isinstance(results, tuple) or isinstance(results, list):
                return_type, data = results[0], results[1]
                if len(results) == 2:
                    if isinstance(data, dict):
                        return return_type(json.dumps(data), content_type='application/json')
                    return return_type(data)
                else:
                    status_code = results[2]
                    if isinstance(data, dict):
                        return return_type(json.dumps(data), content_type='application/json', status=status_code)
                    return return_type(data, status=status_code)
            elif isinstance(results, HttpResponse):
                return results
            else:
                logger.error('Got invalid function return data in auto_response')
                return HttpResponseServerError()

        new_function.__name__ = f.__name__
        new_function.__module__ = f.__module__
        return new_function
    return wrap
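# Hedged usage sketch for the wrapper above: a decorated view may return a plain HttpResponse, a
# (response_class, data) pair, or a (response_class, data, status_code) triple, as the rate-limit
# decorator earlier in this collection does for its 429 responses. The view name and payload below
# are illustrative assumptions.
@auto_response()
def example_view(*args, **kwargs):
    return HttpResponse, {'status': 'ok'}, 200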
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Disk module """ import re from subprocess import check_output, CalledProcessError from ovs.extensions.os.os import OSManager from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='disktools') class DiskTools(object): """ This class contains various helper methods wrt Disk maintenance """ @staticmethod def create_partition(disk_path, disk_size, partition_start, partition_size): """ Creates a partition :param disk_path: Path of disk device :type disk_path: str :param disk_size: Total size of disk
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Memcache store module
"""

import memcache
import re
from threading import Lock
from ovs.log.logHandler import LogHandler

logger = LogHandler.get('extensions', 'memcache store')


def locked():
    """
    Locking decorator.
    """
    def wrap(f):
        """
        Returns a wrapped function
        """
        def new_function(self, *args, **kwargs):
            """
            Executes the decorated function in a locked context
            """
            lock = kwargs.get('lock', True)
import json import inspect from ovs.dal.exceptions import (ObjectNotFoundException, ConcurrencyException, LinkedObjectException, MissingMandatoryFieldsException, SaveRaceConditionException, InvalidRelationException, VolatileObjectException) from ovs.dal.helpers import Descriptor, Toolbox, HybridRunner from ovs.dal.relations import RelationMapper from ovs.dal.dataobjectlist import DataObjectList from ovs.dal.datalist import DataList from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.extensions.storage.exceptions import KeyNotFoundException from ovs.extensions.storage.persistentfactory import PersistentFactory from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.log.logHandler import LogHandler logger = LogHandler.get('dal', name='dataobject') class MetaClass(type): """ This metaclass provides dynamic __doc__ generation feeding doc generators """ def __new__(mcs, name, bases, dct): """ Overrides instance creation of all DataObject instances """ if name != 'DataObject': for internal in ['_properties', '_relations', '_dynamics']: data = set() for base in bases:
import os import time import cPickle import inspect import imp from celery.beat import Scheduler from celery import current_app from celery.schedules import crontab from ovs.extensions.storage.persistentfactory import PersistentFactory from ovs.extensions.storage.exceptions import KeyNotFoundException from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.extensions.generic.system import System from ovs.log.logHandler import LogHandler logger = LogHandler.get('celery', name='celery beat') class DistributedScheduler(Scheduler): """ Distributed scheduler that can run on multiple nodes at the same time. """ TIMEOUT = 60 * 30 def __init__(self, *args, **kwargs): """ Initializes the distributed scheduler """ self._persistent = PersistentFactory.get_client() self._namespace = 'ovs_celery_beat'
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python """ Migration module """ import ConfigParser from model import Model from brander import Brander from ovs.log.logHandler import LogHandler logger = LogHandler('extensions', name='migrations') logger.logger.propagate = False # No need to propagate this class Migration(object): """ Handles all migrations between versions """ @staticmethod def migrate(): """ Executes all migrations. It keeps track of an internal "migration version" which is a always increasing one digit version for now. """ def execute(function, start, end): """
""" This module contains all code for using the VMware SOAP API/SDK """ from time import sleep import re import os import shutil from suds.client import Client, WebFault from suds.cache import ObjectCache from suds.sudsobject import Property from suds.plugin import MessagePlugin from ovs.log.logHandler import LogHandler logger = LogHandler('extensions', name='vmware sdk') class NotAuthenticatedException(BaseException): pass def authenticated(force=False): """ Decorator to make that a login is executed in case the current session isn't valid anymore @param force: Force a (re)login, as some methods also work when not logged in """ def wrapper(function): def new_function(self, *args, **kwargs): self.__doc__ = function.__doc__ try:
import time from ovs.log.logHandler import LogHandler from ovs.extensions.generic.system import System from ovs.extensions.generic.configuration import Configuration from ovs.extensions.api.client import OVSClient from django.views.generic import View from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse, HttpResponseBadRequest from django.conf import settings from oauth2.decorators import auto_response, limit, authenticated from backend.decorators import required_roles, load from ovs.dal.lists.bearertokenlist import BearerTokenList from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.dal.lists.backendtypelist import BackendTypeList logger = LogHandler.get('api', name='metadata') class MetadataView(View): """ Implements retrieval of generic metadata about the services """ @auto_response() @limit(amount=60, per=60, timeout=60) def get(self, request, *args, **kwargs): """ Fetches metadata """ _ = args, kwargs data = { 'authenticated':
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import time
import uuid
import os
import logging
from ovs.log.logHandler import LogHandler
from ovs.extensions.storage.persistentfactory import PersistentFactory

logger = LogHandler.get('extensions', name='watcher')


def _log(log_target, entry, level):
    """
    Logs an entry
    """
    if level > 0:  # 0 = debug, 1 = info, 2 = error
        logger.debug('[{0}] {1}'.format(log_target, entry))


def services_running(target):
    try:
        key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))
        value = str(time.time())
# http://www.openvstorage.org/OVS_NON_COMMERCIAL
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Systemd module
"""

from subprocess import CalledProcessError
from ovs.log.logHandler import LogHandler

logger = LogHandler.get('extensions', name='servicemanager')


class Systemd(object):
    """
    Contains all logic related to Systemd services
    """

    @staticmethod
    def _service_exists(name, client, path):
        if path is None:
            path = '/lib/systemd/system/'
        file_to_check = '{0}{1}.service'.format(path, name)
        return client.file_exists(file_to_check)

    @staticmethod
import zlib import json import base64 from ovs.celery_run import celery from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException from ovs.extensions.api.client import OVSClient from ovs.extensions.support.agent import SupportAgent from ovs.extensions.generic.configuration import Configuration from ovs.dal.hybrids.license import License from ovs.dal.lists.licenselist import LicenseList from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.lib.helpers.toolbox import Toolbox from ovs.lib.helpers.decorators import add_hooks from ovs.log.logHandler import LogHandler logger = LogHandler.get('lib', name='license') class LicenseController(object): """ Validates licenses """ @staticmethod @celery.task(name='ovs.license.validate') def validate(license_string): """ Validates a license with the various components """ try: result = {} data = LicenseController._decode(license_string)
from ovs.dal.lists.storagedriverlist import StorageDriverList from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.dal.lists.backendtypelist import BackendTypeList from ovs.dal.lists.vmachinelist import VMachineList from ovs.extensions.generic.system import System from ovs.extensions.generic.osdist import Osdist from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.storage.persistentfactory import PersistentFactory from ovs.plugin.provider.configuration import Configuration from ovs.plugin.provider.package import Package from volumedriver.storagerouter.storagerouterclient import ClusterRegistry, ArakoonNodeConfig, ClusterNodeConfig, LocalStorageRouterClient from ovs.log.logHandler import LogHandler from ovs.extensions.openstack.cinder import OpenStackCinder logger = LogHandler('lib', name='storagerouter') class StorageRouterController(object): """ Contains all BLL related to StorageRouter """ @staticmethod @celery.task(name='ovs.storagerouter.get_physical_metadata') def get_physical_metadata(files, storagerouter_guid): """ Gets physical information about the machine this task is running on """ from ovs.lib.vpool import VPoolController
import re import os import time from subprocess import CalledProcessError from pyudev import Context from ovs.celery_run import celery from ovs.log.logHandler import LogHandler from ovs.dal.hybrids.diskpartition import DiskPartition from ovs.dal.hybrids.disk import Disk from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException from ovs.extensions.generic.remote import Remote from ovs.lib.helpers.decorators import ensure_single logger = LogHandler.get('lib', name='disk') class DiskController(object): """ Contains all BLL wrt physical Disks """ @staticmethod @celery.task(name='ovs.disk.sync_with_reality') @ensure_single(task_name='ovs.disk.sync_with_reality', mode='CHAINED') def sync_with_reality(storagerouter_guid=None): """ Syncs the Disks from all StorageRouters with the reality. :param storagerouter_guid: Guid of the Storage Router to synchronize """
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import subprocess from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='exportfs') class Nfsexports(object): """ Basic management for /etc/exports """ def __init__(self): self._exports_file = '/etc/exports' self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs'] self._restart = [ '/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra' ] self._rpcmountd_stop = [ '/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd' ]
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script can be used for testing purposes, publishing the data passed in as the first
argument as the body of a new entry on the queue given as the (optional) second argument.
"""

import sys
import pika
from ovs.log.logHandler import LogHandler

logger = LogHandler.get('extensions', name='sender')

if __name__ == '__main__':
    data = sys.argv[1] if len(sys.argv) >= 2 else '{}'
    queue = sys.argv[2] if len(sys.argv) >= 3 else 'default'
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.queue_declare(queue=queue, durable=True)
    logger.debug('Sending to {0}: {1}'.format(queue, data))
    channel.basic_publish(exchange='',
                          routing_key=queue,
                          body=data,
# limitations under the License. import os import time import logging import tempfile from ConfigParser import RawConfigParser from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx from ovs.extensions.generic.remote import Remote from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.system import System from ovs.extensions.services.service import ServiceManager from StringIO import StringIO from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='arakoon_installer') logger.logger.propagate = False class ArakoonNodeConfig(object): """ cluster node config parameters """ def __init__(self, name, ip, client_port, messaging_port, log_dir, home, tlog_dir): """ Initializes a new Config entry for a single Node """ self.name = name self.ip = ip self.client_port = client_port
from ovs.dal.lists.servicelist import ServiceList from ovs.dal.lists.servicetypelist import ServiceTypeList from ovs.dal.lists.vpoollist import VPoolList from ovs.extensions.db.etcd.configuration import EtcdConfiguration from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.sshclient import UnableToConnectException from ovs.extensions.generic.system import System from ovs.extensions.storageserver.storagedriver import MetadataServerClient from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration from ovs.lib.helpers.decorators import ensure_single from ovs.log.logHandler import LogHandler from volumedriver.storagerouter import storagerouterclient from volumedriver.storagerouter.storagerouterclient import MDSMetaDataBackendConfig from volumedriver.storagerouter.storagerouterclient import MDSNodeConfig logger = LogHandler.get('lib', name='mds') storagerouterclient.Logger.setupLogging( LogHandler.load_path('storagerouterclient')) storagerouterclient.Logger.enableLogging() class MDSServiceController(object): """ Contains all BLL related to MDSServices """ @staticmethod def prepare_mds_service(storagerouter, vpool, fresh_only, reload_config): """ Prepares an MDS service: * Creates the required configuration * Sets up the service files
import time from ovs.dal.lists.userlist import UserList from rest_framework.response import Response from toolbox import Toolbox from rest_framework.exceptions import PermissionDenied, NotAuthenticated, NotAcceptable, Throttled from rest_framework import status from django.http import Http404 from django.conf import settings from ovs.dal.exceptions import ObjectNotFoundException from backend.serializers.serializers import FullSerializer from ovs.log.logHandler import LogHandler from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.dal.hybrids.log import Log logger = LogHandler('api') regex = re.compile('^(.*; )?version=(?P<version>([0-9]+|\*)?)(;.*)?$') def required_roles(roles): """ Role validation decorator """ def wrap(f): """ Wrapper function """ def new_function(*args, **kw): """ Wrapped function """
from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.vmachine import VMachine from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.lists.vdisklist import VDiskList from ovs.dal.lists.storagedriverlist import StorageDriverList from ovs.dal.lists.vpoollist import VPoolList from ovs.dal.lists.pmachinelist import PMachineList from ovs.dal.hybrids.vpool import VPool from ovs.extensions.hypervisor.factory import Factory from ovs.extensions.storageserver.storagedriver import StorageDriverClient from ovs.log.logHandler import LogHandler from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.volatilemutex import VolatileMutex logger = LogHandler('lib', name='vdisk') class VDiskController(object): """ Contains all BLL regarding VDisks """ @staticmethod @celery.task(name='ovs.disk.list_volumes') def list_volumes(vpool_guid=None): """ List all known volumes on a specific vpool or on all """ if vpool_guid is not None: vpool = VPool(vpool_guid) storagedriver_client = StorageDriverClient().load(vpool)
from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.lists.vdisklist import VDiskList from ovs.dal.lists.storagedriverlist import StorageDriverList from ovs.dal.lists.vpoollist import VPoolList from ovs.dal.lists.pmachinelist import PMachineList from ovs.dal.lists.mgmtcenterlist import MgmtCenterList from ovs.dal.hybrids.vpool import VPool from ovs.extensions.hypervisor.factory import Factory from ovs.extensions.storageserver.storagedriver import StorageDriverClient from ovs.log.logHandler import LogHandler from ovs.lib.mdsservice import MDSServiceController from ovs.extensions.generic.volatilemutex import VolatileMutex from volumedriver.storagerouter import storagerouterclient from volumedriver.storagerouter.storagerouterclient import MDSMetaDataBackendConfig, MDSNodeConfig logger = LogHandler.get('lib', name='vdisk') storagerouterclient.Logger.setupLogging(LogHandler.load_path('storagerouterclient')) storagerouterclient.Logger.enableLogging() class VDiskController(object): """ Contains all BLL regarding VDisks """ @staticmethod @celery.task(name='ovs.vdisk.list_volumes') def list_volumes(vpool_guid=None): """ List all known volumes on a specific vpool or on all """
# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Service Factory module """ import time from subprocess import check_output, CalledProcessError from ovs.extensions.services.upstart import Upstart from ovs.extensions.services.systemd import Systemd from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='servicemanager') class ServiceManager(object): """ Factory class returning specialized classes """ ImplementationClass = None class MetaClass(type): """ Metaclass """ def __getattr__(cls, item): """ Returns the appropriate class
# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for MgmtCenterController """ from ovs.celery_run import celery from ovs.dal.hybrids.mgmtcenter import MgmtCenter from ovs.dal.hybrids.pmachine import PMachine from ovs.extensions.hypervisor.factory import Factory from ovs.log.logHandler import LogHandler logger = LogHandler.get('lib', name='mgmtcenter') class MgmtCenterController(object): """ Contains all BLL regarding MgmtCenters """ @staticmethod @celery.task(name='ovs.mgmtcenter.test_connection') def test_connection(mgmt_center_guid): """ Test management center connection """ mgmt_center = MgmtCenter(mgmt_center_guid) try:
# limitations under the License. """ Contains various decorator """ import json import time from django.http import HttpResponse, HttpResponseServerError from django.contrib.auth import authenticate, login from rest_framework.request import Request from rest_framework.exceptions import PermissionDenied from django.core.handlers.wsgi import WSGIRequest from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.log.logHandler import LogHandler logger = LogHandler.get('api', 'oauth2') def _find_request(args): """ Finds the "request" object in args """ for item in args: if isinstance(item, Request) or isinstance(item, WSGIRequest): return item def auto_response(): """ Json response wrapper """
# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ File mutex module """ import time import fcntl import os import stat from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', 'file mutex') class NoLockAvailableException(Exception): """ Custom exception thrown when lock could not be acquired in due time """ pass class FileMutex(object): """ This is mutex backed on the filesystem. It's cross thread and cross process. However its limited to the boundaries of a filesystem """
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.helpers import Descriptor
from ovs.log.logHandler import LogHandler
import os
import re
import grp
import pwd
import glob
import json
import time
import logging
import tempfile
import paramiko
import socket

logger = LogHandler.get('extensions', name='sshclient')


class UnableToConnectException(Exception):
    pass


class SSHClient(object):
    """
    Remote/local client
    """
    IP_REGEX = re.compile(
        '^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))$'
    )
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Volatile mutex module """ import time from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', 'volatile mutex') class VolatileMutex(object): """ This is a volatile, distributed mutex to provide cross thread, cross process and cross node locking. However, this mutex is volatile and thus can fail. You want to make sure you don't lock for longer than a few hundred milliseconds to prevent this. """ def __init__(self, name, wait=None): """ Creates a volatile mutex object """ self._volatile = VolatileFactory.get_client() self.name = name
# limitations under the License. import sys import os import time import subprocess import shutil from ovs.extensions.db.arakoon.arakoon.EnsureArakoonWorks import EnsureArakoonWorks from ovs.extensions.generic.system import System from ArakoonManagement import ArakoonManagementEx from ovs.log.logHandler import LogHandler manager = ArakoonManagementEx() ensurearakoonworks = EnsureArakoonWorks() logger = LogHandler.get('arakoon', name='tlogchcker') class CheckArakoonError(Exception): def __init__(self, message): self.message = message def __speak__(self): print '{0}'.format(self.message) class CheckArakoonTlogMark(object): """ check if tlogs need marking mark tlogs that are unmarked
""" import os import sys import json import time import base64 import requests from subprocess import check_output from ConfigParser import RawConfigParser from ovs.extensions.db.etcd.configuration import EtcdConfiguration from ovs.extensions.generic.system import System from ovs.extensions.packages.package import PackageManager from ovs.log.logHandler import LogHandler logger = LogHandler.get('support', name='agent') class SupportAgent(object): """ Represents the Support client """ def __init__(self): """ Initializes the client """ self._enable_support = EtcdConfiguration.get( '/ovs/framework/support|enablesupport') self.interval = EtcdConfiguration.get( '/ovs/framework/support|interval') self._url = 'https://monitoring.openvstorage.com/api/support/heartbeat/'
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import time from subprocess import CalledProcessError from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.system import System from ovs.extensions.services.service import ServiceManager from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='etcd_installer') class EtcdInstaller(object): """ class to dynamically install/(re)configure etcd cluster """ DB_DIR = '/opt/OpenvStorage/db' DATA_DIR = '{0}/etcd/{1}/data' WAL_DIR = '{0}/etcd/{1}/wal' SERVER_URL = 'http://{0}:2380' CLIENT_URL = 'http://{0}:2379' MEMBER_REGEX = re.compile(ur'^(?P<id>[^:]+): name=(?P<name>[^ ]+) peerURLs=(?P<peer>[^ ]+) clientURLs=(?P<client>[^ ]+)$') def __init__(self): """
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Debian Package module """ import time from ovs.log.logHandler import LogHandler from subprocess import check_output from subprocess import CalledProcessError logger = LogHandler.get('lib', name='packager') class DebianPackage(object): """ Contains all logic related to Debian packages (used in e.g. Debian, Ubuntu) """ OVS_PACKAGE_NAMES = [ 'openvstorage', 'openvstorage-core', 'openvstorage-webapps', 'openvstorage-sdm', 'openvstorage-backend', 'openvstorage-backend-core', 'openvstorage-backend-webapps', 'openvstorage-cinder-plugin', 'volumedriver-server', 'volumedriver-base', 'alba', 'arakoon' ] APT_CONFIG_STRING = '-o Dir::Etc::sourcelist="sources.list.d/ovsaptrepo.list" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"'
from rest_framework.response import Response from toolbox import Toolbox from rest_framework.exceptions import PermissionDenied, NotAuthenticated, NotAcceptable, Throttled from rest_framework import status from rest_framework.request import Request from django.core.handlers.wsgi import WSGIRequest from django.http import Http404 from django.conf import settings from ovs.dal.exceptions import ObjectNotFoundException from backend.serializers.serializers import FullSerializer from ovs.log.logHandler import LogHandler from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.extensions.generic.volatilemutex import VolatileMutex logger = LogHandler.get('api') regex = re.compile('^(.*; )?version=(?P<version>([0-9]+|\*)?)(;.*)?$') def _find_request(args): """ Finds the "request" object in args """ for item in args: if isinstance(item, Request) or isinstance(item, WSGIRequest): return item def required_roles(roles): """ Role validation decorator
# See the License for the specific language governing permissions and # limitations under the License. """ Generic system module, executing statements on local node """ import os import uuid import time from subprocess import check_output from ConfigParser import RawConfigParser from StringIO import StringIO from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='system') class System(object): """ Generic helper class """ OVS_ID_FILE = '/etc/openvstorage_id' my_storagerouter_guid = '' my_storagedriver_id = '' def __init__(self): """ Dummy init method
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Contains various decorators """ import json from celery.task.control import inspect from ovs.dal.lists.storagedriverlist import StorageDriverList from ovs.log.logHandler import LogHandler logger = LogHandler.get('lib', name='scheduled tasks') def log(event_type): """ Task logger """ def wrap(f): """ Wrapper function """ def new_function(*args, **kwargs): """ Wrapped function
from rest_framework import viewsets from rest_framework.permissions import IsAuthenticated from ovs.dal.lists.mgmtcenterlist import MgmtCenterList from ovs.dal.hybrids.mgmtcenter import MgmtCenter from backend.serializers.serializers import FullSerializer from rest_framework.exceptions import NotAcceptable from rest_framework.response import Response from rest_framework import status from backend.decorators import required_roles, load, return_object, return_list, log from ovs.log.logHandler import LogHandler from ovs.lib.mgmtcenter import MgmtCenterController from celery.exceptions import TimeoutError logger = LogHandler.get('api', 'mgmtcenters') class MgmtCenterViewSet(viewsets.ViewSet): """ Information about mgmtCenters """ permission_classes = (IsAuthenticated, ) prefix = r'mgmtcenters' base_name = 'mgmtcenters' @log() @required_roles(['read']) @return_list(MgmtCenter) @load() def list(self):
import zlib import json import base64 from ovs.celery_run import celery from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException from ovs.extensions.api.client import OVSClient from ovs.extensions.support.agent import SupportAgent from ovs.extensions.generic.configuration import Configuration from ovs.dal.hybrids.license import License from ovs.dal.lists.licenselist import LicenseList from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.lib.helpers.toolbox import Toolbox from ovs.lib.helpers.decorators import add_hooks from ovs.log.logHandler import LogHandler logger = LogHandler.get('lib', name='license') class LicenseController(object): """ Validates licenses """ @staticmethod @celery.task(name='ovs.license.validate') def validate(license_string): """ Validates a license with the various components """ try: result = {}
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import time import uuid import os import shutil from ovs.extensions.storage.persistentfactory import PersistentFactory from ovs.log.logHandler import LogHandler logger = LogHandler('arakoon', name='validator') class EnsureArakoonError(Exception): def __init__(self, message): self.message = message def __speak__(self): print '{0}'.format(self.message) class EnsureArakoonWorks(): """ Wait for the following operation to be possible: 1) Set a value
""" import re import os import time from subprocess import CalledProcessError from pyudev import Context from ovs.celery_run import celery from ovs.log.logHandler import LogHandler from ovs.dal.hybrids.diskpartition import DiskPartition from ovs.dal.hybrids.disk import Disk from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException from ovs.extensions.generic.remote import Remote logger = LogHandler.get('lib', name='disk') class DiskController(object): """ Contains all BLL wrt physical Disks """ @staticmethod @celery.task(name='ovs.disk.sync_with_reality') def sync_with_reality(storagerouter_guid=None): """ Syncs the Disks from all StorageRouters with the reality. """ storagerouters = [] if storagerouter_guid is not None:
])
celery.conf.CELERY_DEFAULT_QUEUE = 'ovs_generic'
celery.conf.CELERY_QUEUES = tuple([
    Queue('ovs_generic', routing_key='generic.#'),
    Queue('ovs_masters', routing_key='masters.#'),
    Queue('ovs_{0}'.format(unique_id), routing_key='sr.{0}.#'.format(unique_id))
])
celery.conf.CELERY_DEFAULT_EXCHANGE = 'generic'
celery.conf.CELERY_DEFAULT_EXCHANGE_TYPE = 'topic'
celery.conf.CELERY_DEFAULT_ROUTING_KEY = 'generic.default'

celery.conf.CELERY_ACKS_LATE = True          # This, together with the below PREFETCH_MULTIPLIER, makes sure that the
celery.conf.CELERYD_PREFETCH_MULTIPLIER = 1  # workers basically won't be prefetching tasks, to prevent deadlocks
celery.conf.CELERYBEAT_SCHEDULE = {}

loghandler = LogHandler.get('celery', name='celery')


@task_postrun.connect
def task_postrun_handler(sender=None, task_id=None, task=None, args=None, kwargs=None, **kwds):
    """
    Hook for celery postrun event
    """
    _ = sender, task, args, kwargs, kwds
    MessageController.fire(MessageController.Type.TASK_COMPLETE, task_id)
'''Arakoon Nursery support'''

#pylint: disable=R0903
# R0903: Too few public methods

import operator

try:
    import cStringIO as StringIO
except ImportError:
    import StringIO

from ovs.extensions.db.arakoon.pyrakoon.pyrakoon import protocol, utils
from ovs.log.logHandler import LogHandler

LOGGER = LogHandler.get('arakoon', 'pyrakoon', propagate=False)


class NurseryConfigType(protocol.Type):
    '''NurseryConfig type'''

    def check(self, value):
        raise NotImplementedError('NurseryConfig can\'t be checked')

    def serialize(self, value):
        raise NotImplementedError('NurseryConfig can\'t be serialized')

    def receive(self):
        buffer_receiver = protocol.STRING.receive()
        request = buffer_receiver.next()  #pylint: disable=E1101

        while isinstance(request, protocol.Request):
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Rpm Package module """ import time from ovs.log.logHandler import LogHandler from subprocess import check_output from subprocess import CalledProcessError logger = LogHandler.get('lib', name='packager') class RpmPackage(object): """ Contains all logic related to Rpm packages (used in e.g. Centos) """ OVS_PACKAGE_NAMES = ['openvstorage', 'openvstorage-backend', 'volumedriver-server', 'volumedriver-base', 'alba', 'openvstorage-sdm'] @staticmethod def _get_version(package_name): return check_output("yum info {0} | grep Version | cut -d ':' -f 2 || true".format(package_name), shell=True).strip() @staticmethod def get_versions():
""" This module contains all code for using the KVM libvirt api """ from xml.etree import ElementTree import subprocess import os import glob import re import time import libvirt from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.system import System from ovs.log.logHandler import LogHandler logger = LogHandler.get('extensions', name='kvm sdk') ROOT_PATH = '/etc/libvirt/qemu/' # Get static info from here, or use dom.XMLDesc(0) RUN_PATH = '/var/run/libvirt/qemu/' # Get live info from here # Helpers def _recurse(treeitem): result = {} for key, item in treeitem.items(): result[key] = item for child in treeitem.getchildren(): result[child.tag] = _recurse(child) for key, item in child.items(): result[child.tag][key] = item result[child.tag]['<text>'] = child.text return result