def run(self): update_diag(sync_start=time.time(), backend_status="0 - Synchronizer Start") # start the openstack observer observer = XOSObserver() observer_thread = threading.Thread(target=observer.run, name='synchronizer') observer_thread.start() # start model policies thread observer_name = getattr(Config(), "observer_name", "") if (not observer_name): model_policy_thread = threading.Thread(target=run_policy) model_policy_thread.start() else: model_policy_thread = None print "Skipping model policies thread for service observer." # start event listene #event_manager = EventListener(wake_up=observer.wake_up) #event_manager_thread = threading.Thread(target=event_manager.run) #event_manager_thread.start() while True: try: time.sleep(1000) except KeyboardInterrupt: print "exiting due to keyboard interrupt" # TODO: See about setting the threads as daemons observer_thread._Thread__stop() if model_policy_thread: model_policy_thread._Thread__stop() sys.exit(1)
def load_sync_step_modules(self, step_dir=None):
    """Discover SyncStep subclasses in *step_dir*.

    Every ``*.py`` file (except ``__init__.py``) in the directory is
    imported, and each concrete SyncStep descendant found there is
    collected.  Abstract bases are excluded because they lack a
    ``provides`` attribute.

    Args:
        step_dir: directory to scan; defaults to the configured
            observer_steps_dir, falling back to the stock OpenStack path.

    Returns:
        List of SyncStep subclasses (no duplicates).
    """
    sync_steps = []

    if step_dir is None:
        try:
            step_dir = Config().observer_steps_dir
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed while falling back to the default path.
            step_dir = '/opt/xos/synchronizers/openstack/steps'

    for fn in os.listdir(step_dir):
        pathname = os.path.join(step_dir, fn)
        if os.path.isfile(pathname) and fn.endswith(".py") and (fn != "__init__.py"):
            module = imp.load_source(fn[:-3], pathname)
            for classname in dir(module):
                c = getattr(module, classname, None)
                # make sure 'c' is a descendent of SyncStep and has a
                # provides field (this eliminates the abstract base classes
                # since they don't have a provides)
                if inspect.isclass(c) and issubclass(c, SyncStep) and \
                        hasattr(c, "provides") and (c not in sync_steps):
                    sync_steps.append(c)

    return sync_steps
def get_prop(prop):
    """Read a synchronizer property value from its per-service config file.

    Bug fixed: the original built the path with ``'/'.join(a, b, c)``,
    but str.join takes a single iterable, so every call raised TypeError.
    os.path.join is used instead.

    Returns:
        The file contents with trailing whitespace stripped.
    """
    try:
        sync_config_dir = Config().sync_config_dir
    except Exception:
        sync_config_dir = '/etc/xos/sync'
    # NOTE(review): `self` is referenced below but is not a parameter --
    # this looks like a method that lost its `self` argument during a
    # refactor; confirm against the enclosing class.
    prop_config_path = os.path.join(sync_config_dir, self.name, prop)
    return open(prop_config_path).read().rstrip()
def update_diag(diag_class, loop_end=None, loop_start=None, syncrecord_start=None, sync_start=None, backend_status=None):
    """Update (creating if needed) the observer's Diag record.

    Timing values are merged into the JSON ``backend_register`` blob;
    ``backend_status`` is stored directly on the record.  All failures are
    logged and swallowed -- diagnostics must never break the synchronizer.

    Args:
        diag_class: model class providing ``objects`` and ``save()``.
        loop_end/loop_start/syncrecord_start/sync_start: timestamps.
        backend_status: human-readable status string.
    """
    observer_name = Config().observer_name
    try:
        diag = diag_class.objects.filter(name=observer_name).first()
        if (not diag):
            diag = diag_class(name=observer_name)
        br_str = diag.backend_register
        # Bug fixed: a freshly created Diag has no backend_register yet, so
        # json.loads(None) raised and the old bare except silently dropped
        # the whole update -- the record was never saved.  Start from an
        # empty dict instead.
        br = json.loads(br_str) if br_str else {}
        if loop_end:
            br['last_run'] = loop_end
        if loop_end and loop_start:
            br['last_duration'] = loop_end - loop_start
        if syncrecord_start:
            br['last_syncrecord_start'] = syncrecord_start
        if sync_start:
            br['last_synchronizer_start'] = sync_start
        if backend_status:
            diag.backend_status = backend_status
        diag.backend_register = json.dumps(br)
        diag.save()
    except Exception:
        # Narrowed from a bare except; still best-effort by design.
        logger.log_exc("Exception in update_diag")
        traceback.print_exc()
def fetch_pending(self, deleted):
    """Register on-disk images in the data model and return pending ones.

    Images originate on the back end, so deletion is never synced.
    """
    if deleted:
        return []

    # Scan the configured directory for regular files.
    images_path = Config().observer_images_directory
    available_images = {}
    if os.path.exists(images_path):
        for entry in os.listdir(images_path):
            full_path = os.path.join(images_path, entry)
            if os.path.isfile(full_path):
                available_images[entry] = full_path

    known_names = set(img.name for img in Image.objects.all())

    for file_name, file_path in available_images.items():
        # Drop the file extension to obtain the image name.
        base_name = ".".join(file_name.split('.')[:-1])
        if base_name not in known_names:
            new_image = Image(name=base_name,
                              disk_format='raw',
                              container_format='bare',
                              path=file_path)
            new_image.save()

    return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
def get_extra_attributes(self, o):
    """Collect extra template attributes for the Monitoring Channel.

    We need to know:
      1) allowed tenant ids
      2) Ceilometer API service endpoint URL if running externally
      3) credentials to access the Ceilometer API service

    Raises:
        Exception: if no Ceilometer service is associated with *o*.
    """
    ceilometer_services = CeilometerService.get_service_objects().filter(
        id=o.provider_service.id)
    if not ceilometer_services:
        # Bug fixed: raising a bare string is a TypeError in Python 2.6+;
        # raise a real exception object instead.
        raise Exception("No associated Ceilometer service")
    ceilometer_service = ceilometer_services[0]
    ceilometer_pub_sub_url = ceilometer_service.ceilometer_pub_sub_url
    if not ceilometer_pub_sub_url:
        ceilometer_pub_sub_url = ''
    instance = self.get_instance(o)
    try:
        full_setup = Config().observer_full_setup
    except Exception:
        full_setup = True
    fields = {
        "unique_id": o.id,
        "allowed_tenant_ids": o.tenant_list,
        "auth_url": instance.controller.auth_url,
        "admin_user": instance.controller.admin_user,
        "admin_password": instance.controller.admin_password,
        "admin_tenant": instance.controller.admin_tenant,
        "ceilometer_pub_sub_url": ceilometer_pub_sub_url,
        "full_setup": full_setup
    }
    return fields
def extract_context(self, cur):
    """Annotate the logging context *cur* with the synchronizer name.

    If the config is unavailable or has no observer_name, *cur* is
    returned unchanged.
    """
    try:
        cur['synchronizer_name'] = Config().observer_name
    except Exception:
        # Narrowed from a bare except: only genuine errors (missing config,
        # missing attribute) are swallowed, not KeyboardInterrupt/SystemExit.
        pass
    return cur
def __init__(self, sync_steps):
    """Set up event signalling, the backend driver, and step bookkeeping."""
    # The Condition object that gets signalled by Feefie events
    self.event_cond = threading.Condition()
    self.driver = DRIVER
    self.observer_name = getattr(Config(), "observer_name", "")
    # Step registry: load_sync_steps() populates the lookup table from the
    # raw list of step classes.
    self.step_lookup = {}
    self.sync_steps = sync_steps
    self.load_sync_steps()
def extract_context(self, cur):
    """Annotate *cur* with the synchronizer name, then sanitize extra args.

    Returns the (mutated) context dict.
    """
    try:
        cur['synchronizer_name'] = Config().observer_name
    except Exception:
        # Narrowed from a bare except: missing config/attribute just means
        # we skip the annotation.
        pass
    self.sanitize_extra_args(cur)
    return cur
def run(self):
    """Start listening for Feefie events on a background thread.

    The client id identifies this listener when firing and receiving
    events; it should be generated once and placed in the config file,
    otherwise a random id is used (and logged) for this run only.
    """
    # This is our unique client id, to be used when firing and receiving events
    # It needs to be generated once and placed in the config file
    try:
        user = Config().feefie_client_user
    except:
        user = '******'

    try:
        clid = Config().feefie_client_id
    except:
        clid = get_random_client_id()
        print "EventListener: no feefie_client_id configured. Using random id %s" % clid

    f = Fofum(user=user)

    # Incoming events are delivered to self.handle_event on this thread.
    listener_thread = threading.Thread(target=f.listen_for_event, args=(clid, self.handle_event))
    listener_thread.start()
def run(self): # start the openstack observer observer = XOSObserver() observer_thread = threading.Thread(target=observer.run) observer_thread.start() # start model policies thread observer_name = getattr(Config(), "observer_name", "") if (not observer_name): model_policy_thread = threading.Thread(target=run_policy) model_policy_thread.start() else: print "Skipping model policies thread for service observer."
def __init__(self, logfile=None, loggername=None, level=logging.INFO):
    """Create a logger writing to *logfile* (or the console).

    The logfile defaults to the configured observer_log_file, then to
    /var/log/xos.log.  "console" selects a StreamHandler.  If the log
    file is unwritable we fall back to a file in TMPDIR, and finally to
    the console.  A duplicate handler is not added twice.

    Args:
        logfile: path, "console", or None for the configured default.
        loggername: logger name; derived from the logfile if omitted.
        level: logging level for the logger.
    """
    # default is to locate loggername from the logfile if avail.
    if not logfile:
        try:
            from xos.config import Config
            logfile = Config().observer_log_file
        except Exception:
            logfile = "/var/log/xos.log"

    if (logfile == "console"):
        loggername = "console"
        handler = logging.StreamHandler()
    else:
        if not loggername:
            loggername = os.path.basename(logfile)
        try:
            handler = logging.handlers.RotatingFileHandler(
                logfile, maxBytes=1000000, backupCount=5)
        except IOError:
            # This is usually a permissions error because the file is
            # owned by root, but httpd is trying to access it.
            tmplogfile = os.getenv(
                "TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
            # In strange uses, 2 users on same machine might use same code,
            # meaning they would clobber each others files.
            # We could (a) rename the tmplogfile, or (b)
            # just log to the console in that case.
            # Here we default to the console.
            if os.path.exists(tmplogfile) and not os.access(
                    tmplogfile, os.W_OK):
                loggername = loggername + "-console"
                handler = logging.StreamHandler()
            else:
                handler = logging.handlers.RotatingFileHandler(
                    tmplogfile, maxBytes=1000000, backupCount=5)

    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    self.logger = logging.getLogger(loggername)
    self.logger.setLevel(level)

    # Check if the logger already has an equivalent handler.
    # Bug fixed: the original read handler.baseFilename unconditionally,
    # which raised AttributeError whenever the new handler was a
    # StreamHandler (no baseFilename) and an old file handler existed.
    new_basefile = getattr(handler, "baseFilename", None)
    handler_exists = False
    for l_handler in self.logger.handlers:
        if ((not hasattr(l_handler, "baseFilename")) or
                (l_handler.baseFilename == new_basefile)) and \
                l_handler.level == handler.level:
            handler_exists = True

    if not handler_exists:
        self.logger.addHandler(handler)

    self.loggername = loggername
def get_cmi_hostname(self, hpc_service=None):
    """Return the hostname of the CMI (CDN management) node.

    Resolution order:
      1. the observer_cmi_hostname config override,
      2. the cmi_hostname field on the HPC service,
      3. the node hosting a sliver of any slice whose name ends in "cmi".

    Raises:
        Exception: if no CMI sliver can be located.
    """
    # A config override wins over anything stored in the data model.
    if getattr(Config(), "observer_cmi_hostname", None):
        return getattr(Config(), "observer_cmi_hostname")

    if (hpc_service is None):
        hpc_service = self.get_hpc_service()

    if hpc_service.cmi_hostname:
        return hpc_service.cmi_hostname

    try:
        slices = hpc_service.slices.all()
    except:
        # deal with buggy data model (older schema exposes the slices
        # through hpc_service.service instead)
        slices = hpc_service.service.all()

    for slice in slices:
        if slice.name.endswith("cmi"):
            for sliver in slice.slivers.all():
                if sliver.node:
                    return sliver.node.name

    raise Exception("Failed to find a CMI sliver")
def config_accessor():
    """Instantiate the global model_accessor according to configuration.

    observer_accessor_kind selects between direct Django access and a
    gRPC API client.  NOTE: in the gRPC case this function does not
    return -- reactor.run() blocks for the life of the process, and the
    accessor is installed by the reconnect callback.
    """
    global model_accessor
    accessor_kind = getattr(Config(), "observer_accessor_kind", "django")

    if (accessor_kind == "django"):
        from djangoaccessor import DjangoModelAccessor
        model_accessor = DjangoModelAccessor()
        import_models_to_globals()
    else:
        grpcapi_endpoint = getattr(Config(), "observer_accessor_endpoint", "xos-core.cord.lab:50051")
        grpcapi_username = getattr(Config(), "observer_accessor_username", "*****@*****.**")
        grpcapi_password = getattr(Config(), "observer_accessor_password")

        # if password starts with "@", then retrieve the password from a file
        if grpcapi_password.startswith("@"):
            fn = grpcapi_password[1:]
            if not os.path.exists(fn):
                raise Exception("%s does not exist" % fn)
            grpcapi_password = open(fn).readline().strip()

        from xosapi.xos_grpc_client import SecureClient
        from twisted.internet import reactor

        grpcapi_client = SecureClient(endpoint=grpcapi_endpoint, username=grpcapi_username, password=grpcapi_password)
        # grpcapi_reconnect runs on every (re)connect and sets up the accessor.
        grpcapi_client.set_reconnect_callback(
            functools.partial(grpcapi_reconnect, grpcapi_client, reactor))
        grpcapi_client.start()

        # Start reactor. This will cause the client to connect and then execute
        # grpcapi_callback().
        reactor.run()
def get_hpc_service(self):
    """Return the HpcService this observer manages.

    A configured observer_hpc_service name narrows the query; otherwise
    the first HPC service found is used.

    Raises:
        Exception: if no matching HPC service exists.
    """
    hpc_service_name = getattr(Config(), "observer_hpc_service", None)

    if hpc_service_name:
        candidates = HpcService.objects.filter(name=hpc_service_name)
    else:
        candidates = HpcService.objects.all()

    if not candidates:
        if hpc_service_name:
            raise Exception("No HPC Service with name %s" % hpc_service_name)
        raise Exception("No HPC Services")

    return candidates[0]
def main(): c = Config() if len(sys.argv) <= 1: help() return if sys.argv[1] == "get": if len(sys.argv) == 4: print getattr(c, sys.argv[2], sys.argv[3]) elif len(sys.argv) == 3: print getattr(c, sys.argv[2]) else: help() else: help()
def save_controller_network(self, controller_network): network_name = controller_network.network.name subnet_name = '%s-%d'%(network_name,controller_network.pk) if controller_network.subnet and controller_network.subnet.strip(): # If a subnet is already specified (pass in by the creator), then # use that rather than auto-generating one. cidr = controller_network.subnet.strip() print "CIDR_MS", cidr else: cidr = self.alloc_subnet(controller_network.pk) print "CIDR_AMS", cidr if controller_network.network.start_ip and controller_network.network.start_ip.strip(): start_ip = controller_network.network.start_ip.strip() print "DEF_START_IP", start_ip else: start_ip = self.alloc_start_ip(cidr) print "DEF_START_AIP", start_ip if controller_network.network.end_ip and controller_network.network.end_ip.strip(): end_ip = controller_network.network.end_ip.strip() print "DEF_START_IP", end_ip else: end_ip = self.alloc_end_ip(cidr) print "DEF_END_AIP", end_ip self.cidr=cidr self.start_ip=start_ip slice = controller_network.network.owner network_fields = {'endpoint':controller_network.controller.auth_url, 'endpoint_v3': controller_network.controller.auth_url_v3, 'admin_user':slice.creator.email, 'tenant_name':slice.name, 'admin_password':slice.creator.remote_password, 'domain': controller_network.controller.domain, 'name':network_name, 'subnet_name':subnet_name, 'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name), 'cidr':cidr, 'gateway':self.alloc_gateway(cidr), 'start_ip':start_ip, 'end_ip':end_ip, 'use_vtn':getattr(Config(), "networking_use_vtn", False), 'delete':False } return network_fields
def journal_object(o, operation, msg=None, timestamp=None):
    """Record *operation* on object *o* in the journal, if enabled."""
    # Journaling is opt-in via the debug_enable_journal config flag.
    if not getattr(Config(), "debug_enable_journal", None):
        return

    # Diag objects churn constantly; journaling them is pure noise.
    cls_name = o.__class__.__name__
    if cls_name in ["Diag"]:
        return

    if not timestamp:
        timestamp = timezone.now()

    entry = JournalEntry(objClassName=cls_name,
                         objId=o.id,
                         objUnicode=str(o),
                         operation=operation,
                         msg=msg)
    entry.save()
def run(self):
    """Run the model-policy thread (when configured) and idle forever.

    Exits via sys.exit(1) on Ctrl-C after attempting to stop the policy
    thread.
    """
    # start model policies thread
    policies_dir = getattr(Config(), "observer_model_policies_dir", None)
    if policies_dir:
        from synchronizers.model_policy import run_policy
        model_policy_thread = threading.Thread(target=run_policy)
        model_policy_thread.start()
    else:
        model_policy_thread = None
        logger.info(
            "Skipping model policies thread due to no model_policies dir.")

    # Idle loop: the policy thread does the work; we only wait for Ctrl-C.
    while True:
        try:
            time.sleep(1000)
        except KeyboardInterrupt:
            print "exiting due to keyboard interrupt"
            if model_policy_thread:
                # NOTE(review): _Thread__stop is private CPython 2 API and
                # does not reliably stop a running thread.
                model_policy_thread._Thread__stop()
            sys.exit(1)
def __init__(self, username=None, password=None, tenant=None, url=None, token=None, endpoint=None, controller=None, cacert=None, admin=True, *args, **kwds):
    """Capture OpenStack connection settings.

    Credentials default to the controller's admin account (or blanks when
    admin=False); any explicitly supplied keyword argument overrides the
    controller-derived value.
    """
    self.has_openstack = has_openstack
    self.url = controller.auth_url

    # Seed credentials from the controller's admin account, or blanks.
    if admin:
        self.username = controller.admin_user
        self.password = controller.admin_password
        self.tenant = controller.admin_tenant
    else:
        self.username = None
        self.password = None
        self.tenant = None

    # Per-argument overrides win over the controller defaults.
    if username:
        self.username = username
    if password:
        self.password = password
    if tenant:
        self.tenant = tenant
    if url:
        self.url = url
    if token:
        self.token = token
    if endpoint:
        self.endpoint = endpoint

    # The string "None" (not the None object) is the historical default
    # for the CA cert setting.
    self.cacert = cacert if cacert else getattr(Config(), "nova_ca_ssl_cert", "None")
def run(self):
    """Main synchronizer entry point.

    Records liveness in the Diag record, loads the sync steps, starts the
    observer thread, the optional watcher thread, and the optional
    model-policy thread, then idles until interrupted.
    """
    update_diag(sync_start=time.time(), backend_status="0 - Synchronizer Start")

    sync_steps = self.load_sync_step_modules()

    # start the observer
    observer = XOSObserver(sync_steps)
    observer_thread = threading.Thread(target=observer.run, name='synchronizer')
    observer_thread.start()

    # start the watcher thread (only when watchers are enabled globally)
    if (watchers_enabled):
        watcher = XOSWatcher(sync_steps)
        watcher_thread = threading.Thread(target=watcher.run, name='watcher')
        watcher_thread.start()

    # start model policies thread
    policies_dir = getattr(Config(), "observer_model_policies_dir", None)
    if policies_dir:
        from synchronizers.model_policy import run_policy
        model_policy_thread = threading.Thread(target=run_policy)
        model_policy_thread.start()
    else:
        model_policy_thread = None
        logger.info(
            "Skipping model policies thread due to no model_policies dir.")

    # Idle until Ctrl-C, then tear down the helper threads and exit.
    while True:
        try:
            time.sleep(1000)
        except KeyboardInterrupt:
            print "exiting due to keyboard interrupt"
            # TODO: See about setting the threads as daemons
            # NOTE(review): _Thread__stop is private CPython 2 API and does
            # not reliably stop a running thread.
            observer_thread._Thread__stop()
            if model_policy_thread:
                model_policy_thread._Thread__stop()
            sys.exit(1)
def get_extra_attributes(self, o):
    """Collect extra template attributes for the Monitoring Channel.

    We need to know:
      1) allowed tenant ids
      2) Ceilometer API service endpoint URL if running externally
      3) credentials to access the Ceilometer API service
    """
    instance = self.get_instance(o)

    # Missing config means a full setup by default.
    try:
        full_setup = Config().observer_full_setup
    except:
        full_setup = True

    controller = instance.controller
    return {
        "unique_id": o.id,
        "allowed_tenant_ids": o.tenant_list,
        "auth_url": controller.auth_url,
        "admin_user": controller.admin_user,
        "admin_password": controller.admin_password,
        "admin_tenant": controller.admin_tenant,
        "full_setup": full_setup,
    }
def fetch_pending(self, deleted):
    """Register on-disk ``.img`` files as Image objects and return the
    images pending enactment.  Images come from the back end and are
    never deleted through this step."""
    if deleted:
        logger.info("SyncImages: returning because deleted=True")
        return []

    # get list of images on disk
    images_path = Config().observer_images_directory
    logger.info("SyncImages: deleted=False, images_path=%s" % images_path)

    available_images = {}
    if os.path.exists(images_path):
        for entry in os.listdir(images_path):
            candidate = os.path.join(images_path, entry)
            if os.path.isfile(candidate) and candidate.endswith(".img"):
                available_images[entry] = candidate

    logger.info("SyncImages: available_images = %s" % str(available_images))

    existing_names = [img.name for img in Image.objects.all()]

    for file_name in available_images:
        # remove file extension
        base_name = ".".join(file_name.split('.')[:-1])
        if base_name not in existing_names:
            logger.info("SyncImages: adding %s" % base_name)
            Image(name=base_name,
                  disk_format='raw',
                  container_format='bare',
                  path=available_images[file_name]).save()

    return Image.objects.filter(
        Q(enacted__lt=F('updated')) | Q(enacted=None))
def load_sync_step_modules(self, step_dir=None):
    """Import every step module in *step_dir* and append the SyncStep
    subclasses found there to self.sync_steps."""
    if step_dir is None:
        step_dir = Config().observer_steps_dir

    for entry in os.listdir(step_dir):
        full_path = os.path.join(step_dir, entry)
        is_step_file = (os.path.isfile(full_path)
                        and entry.endswith(".py")
                        and entry != "__init__.py")
        if not is_step_file:
            continue

        module = imp.load_source(entry[:-3], full_path)
        for attr_name in dir(module):
            candidate = getattr(module, attr_name, None)
            # Only concrete SyncStep descendants are registered; abstract
            # bases are filtered out because they lack a 'provides' field.
            if not (inspect.isclass(candidate) and issubclass(candidate, SyncStep)):
                continue
            if hasattr(candidate, "provides") and candidate not in self.sync_steps:
                self.sync_steps.append(candidate)

    logger.info('loaded sync steps: %s' %
                ",".join([x.__name__ for x in self.sync_steps]))
def save_controller_network(self, controller_network):
    """Build the field dictionary used to provision the network via the
    Ansible playbook (the subnet is always auto-allocated here).

    Side effect: stores the allocated cidr on self.
    """
    network_name = controller_network.network.name
    subnet_name = '%s-%d' % (network_name, controller_network.pk)
    # The subnet is deterministically derived from the primary key.
    cidr = self.alloc_subnet(controller_network.pk)
    self.cidr = cidr
    slice = controller_network.network.owner

    network_fields = {
        'endpoint': controller_network.controller.auth_url,
        'endpoint_v3': controller_network.controller.auth_url_v3,
        'admin_user': slice.creator.email,
        'tenant_name': slice.name,
        'admin_password': slice.creator.remote_password,
        'domain': controller_network.controller.domain,
        'name': network_name,
        'subnet_name': subnet_name,
        'ansible_tag': '%s-%s@%s' % (network_name, slice.slicename, controller_network.controller.name),
        'cidr': cidr,
        # NOTE(review): the sibling implementation calls alloc_gateway(cidr),
        # while this one passes the network pk -- confirm which argument
        # alloc_gateway actually expects.
        'gateway': self.alloc_gateway(controller_network.pk),
        'use_vtn': getattr(Config(), "networking_use_vtn", False),
        'delete': False
    }
    return network_fields
def Observer(request):
    """Health-check view reporting the observer's recent activity.

    Health is ":-)" when the observer ran within twice its last loop
    duration plus a 300-second grace period, ":-X" otherwise (including
    when no Diag record exists yet).

    Returns:
        HttpResponse with a JSON body containing health/time/comp plus
        the raw backend_register contents.
    """
    try:
        observer_name = Config().observer_name
    except AttributeError:
        observer_name = 'openstack'

    diag = Diag.objects.filter(name=observer_name).first()
    if not diag:
        return HttpResponse(json.dumps({"health": ":-X",
                                        "time": time.time(),
                                        "comp": 0}))

    t = time.time()
    d = json.loads(diag.backend_register)

    # Robustness fix: a Diag that has never completed a loop may lack these
    # keys; treat missing data as "never ran" instead of raising a 500.
    comp = d.get('last_run', 0) + d.get('last_duration', 0) * 2 + 300

    if comp > t:
        d['health'] = ':-)'
    else:
        d['health'] = ':-X'
    d['time'] = t
    d['comp'] = comp

    return HttpResponse(json.dumps(d))
import inspect import time import traceback import commands import threading import json import pdb from core.models import * from util.logger import Logger, logging logger = Logger(level=logging.INFO) missing_links = {} try: dep_data = open(Config().dependency_graph).read() except: dep_data = open(XOS_DIR + '/model-deps').read() dependencies = json.loads(dep_data) inv_dependencies = {} for k, lst in dependencies.items(): for v in lst: try: inv_dependencies[v].append(k) except KeyError: inv_dependencies[v] = [k] def plural(name):
def get_extra_attributes(self, o):
    """Collect the extra template attributes for a vCPE tenant.

    This is a place to include extra attributes that aren't part of the
    object itself. In the case of vCPE, we need to know:
      1) the addresses of dnsdemux, to setup dnsmasq in the vCPE
      2) CDN prefixes, so we know what URLs to send to dnsdemux
      3) BroadBandShield server addresses, for parental filtering
      4) vlan_ids, for setting up networking in the vCPE VM
    """
    vcpe_service = self.get_vcpe_service(o)

    dnsdemux_ip = None
    cdn_prefixes = []

    cdn_config_fn = "/opt/xos/synchronizers/vcpe/cdn_config"
    if os.path.exists(cdn_config_fn):
        # manual CDN configuration
        #   the first line is the address of dnsredir
        #   the remaining lines are domain names, one per line
        lines = file(cdn_config_fn).readlines()
        if len(lines)>=2:
            dnsdemux_ip = lines[0].strip()
            cdn_prefixes = [x.strip() for x in lines[1:] if x.strip()]
    else:
        # automatic CDN configuration
        #   it learns everything from CDN objects in XOS
        #   not tested on pod.
        if vcpe_service.backend_network_label:
            # Connect to dnsdemux using the network specified by
            #     vcpe_service.backend_network_label
            for service in HpcService.objects.all():
                for slice in service.slices.all():
                    if "dnsdemux" in slice.name:
                        for instance in slice.instances.all():
                            for ns in instance.ports.all():
                                if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                                    dnsdemux_ip = ns.ip
            if not dnsdemux_ip:
                logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label,extra=o.tologdict())
        else:
            # Connect to dnsdemux using the instance's public address
            for service in HpcService.objects.all():
                for slice in service.slices.all():
                    if "dnsdemux" in slice.name:
                        for instance in slice.instances.all():
                            # NOTE(review): dnsdemux_ip is initialized to
                            # None above, so this =="none" comparison never
                            # matches on the first pass -- confirm whether
                            # the initial value should be the string "none".
                            if dnsdemux_ip=="none":
                                try:
                                    dnsdemux_ip = socket.gethostbyname(instance.node.name)
                                except:
                                    pass
            if not dnsdemux_ip:
                logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())

        for prefix in CDNPrefix.objects.all():
            cdn_prefixes.append(prefix.prefix)

    # Normalize "not found" to the sentinel string expected by the template.
    dnsdemux_ip = dnsdemux_ip or "none"

    # Broadbandshield can either be set up internally, using vcpe_service.bbs_slice,
    # or it can be setup externally using vcpe_service.bbs_server.
    bbs_addrs = []
    if vcpe_service.bbs_slice:
        if vcpe_service.backend_network_label:
            for bbs_instance in vcpe_service.bbs_slice.instances.all():
                for ns in bbs_instance.ports.all():
                    if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
                        bbs_addrs.append(ns.ip)
        else:
            logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
        if not bbs_addrs:
            logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
    elif vcpe_service.bbs_server:
        bbs_addrs.append(vcpe_service.bbs_server)
    else:
        logger.info("neither bbs_slice nor bbs_server is configured in the vCPE",extra=o.tologdict())

    vlan_ids = []
    s_tags = []
    c_tags = []
    if o.volt:
        vlan_ids.append(o.volt.vlan_id)  # XXX remove this
        s_tags.append(o.volt.s_tag)
        c_tags.append(o.volt.c_tag)

    try:
        full_setup = Config().observer_full_setup
    except:
        full_setup = True

    # Collect MACs of users whose parental-control level is G or PG for
    # safe-browsing enforcement.
    safe_macs=[]
    if vcpe_service.url_filter_kind == "safebrowsing":
        if o.volt and o.volt.subscriber:
            for user in o.volt.subscriber.users:
                level = user.get("level",None)
                mac = user.get("mac",None)
                if level in ["G", "PG"]:
                    if mac:
                        safe_macs.append(mac)

    # NOTE(review): "container_name" indexes s_tags[0]/c_tags[0], which
    # raises IndexError when o.volt is unset -- confirm callers guarantee
    # a volt association.
    fields = {"vlan_ids": vlan_ids,   # XXX remove this
              "s_tags": s_tags,
              "c_tags": c_tags,
              "dnsdemux_ip": dnsdemux_ip,
              "cdn_prefixes": cdn_prefixes,
              "bbs_addrs": bbs_addrs,
              "full_setup": full_setup,
              "isolation": o.instance.isolation,
              "safe_browsing_macs": safe_macs,
              "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
              "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
              "url_filter_kind": vcpe_service.url_filter_kind }

    # add in the sync_attributes that come from the SubscriberRoot object
    if o.volt and o.volt.subscriber and hasattr(o.volt.subscriber, "sync_attributes"):
        for attribute_name in o.volt.subscriber.sync_attributes:
            fields[attribute_name] = getattr(o.volt.subscriber, attribute_name)

    return fields
from core.models import Service, Slice, Tag from services.cord.models import VSGService, VSGTenant, VOLTTenant from services.hpc.models import HpcService, CDNPrefix from xos.logger import Logger, logging # hpclibrary will be in steps/.. parentdir = os.path.join(os.path.dirname(__file__),"..") sys.path.insert(0,parentdir) from broadbandshield import BBS logger = Logger(level=logging.INFO) ENABLE_QUICK_UPDATE=False CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False) class SyncVSGTenant(SyncInstanceUsingAnsible): provides=[VSGTenant] observes=VSGTenant requested_interval=0 template_name = "sync_vcpetenant.yaml" service_key_name = "/opt/xos/synchronizers/vcpe/vcpe_private_key" def __init__(self, *args, **kwargs): super(SyncVSGTenant, self).__init__(*args, **kwargs) def fetch_pending(self, deleted): if (not deleted): objs = VSGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False)) else:
def get_node_key(self, node):
    """Return the path of the SSH key used to reach *node*; configurable
    via observer_node_key, with a CORD-profile default."""
    default_key_path = "/opt/cord_profile/node_key"
    return getattr(Config(), "observer_node_key", default_key_path)