def config_accessor_grpcapi():
    """Configure the gRPC-API model accessor and run the twisted reactor."""
    global orig_sigint

    endpoint = Config.get("accessor.endpoint")
    username = Config.get("accessor.username")
    password = Config.get("accessor.password")

    # A leading "@" means the password lives in a file rather than inline.
    if password.startswith("@"):
        password_fn = password[1:]
        if not os.path.exists(password_fn):
            raise Exception("%s does not exist" % password_fn)
        password = open(password_fn).readline().strip()

    from xosapi.xos_grpc_client import SecureClient
    from twisted.internet import reactor

    grpcapi_client = SecureClient(
        endpoint=endpoint, username=username, password=password)
    grpcapi_client.set_reconnect_callback(
        functools.partial(grpcapi_reconnect, grpcapi_client, reactor))
    grpcapi_client.start()

    # Reactor takes over SIGINT during reactor.run() and does not restore it
    # when reactor.stop() is called, so remember the original handler first.
    orig_sigint = signal.getsignal(signal.SIGINT)

    # Running the reactor connects the client and then executes
    # grpcapi_callback().
    reactor.run()
def wait_for_database(db_host, db_port, retry_interval):
    """Block until a PostgreSQL connection to db_host:db_port succeeds.

    Credentials come from the "database.username" / "database.password"
    config keys. Retries forever, sleeping retry_interval seconds between
    attempts.
    """
    retry_count = 0
    db_user = Config.get("database.username")
    db_password = Config.get("database.password")
    while True:
        try:
            myConnection = psycopg2.connect(
                host=db_host,
                port=db_port,
                user=db_user,
                password=db_password,
                connect_timeout=5,
            )
            # Exit on successful connection
            print("Database is available")
            myConnection.close()
            return
        except psycopg2.OperationalError:
            # timeout reached, retrying
            retry_count += 1
            print("Timeout connecting to db, retrying (retry count: %d)"
                  % retry_count)
        except BaseException:
            # BUG FIX: traceback.print_exc() takes a traceback *limit* as its
            # first argument, not a message; print the message separately.
            print("Unknown exception while connecting to db")
            traceback.print_exc()
        # sleep for the retry interval between retries
        time.sleep(float(retry_interval))
def start(self):
    """Validate event-bus configuration and launch one thread per event step."""
    bus_kind = Config.get("event_bus.kind")
    bus_endpoint = Config.get("event_bus.endpoint")

    # Guard clauses: refuse to start on missing or unsupported configuration.
    if not bus_kind:
        self.log.error(
            "Eventbus kind is not configured in synchronizer config file.")
        return

    if bus_kind not in ["kafka"]:
        self.log.error(
            "Eventbus kind is set to a technology we do not implement.",
            eventbus_kind=bus_kind,
        )
        return

    if not bus_endpoint:
        self.log.error(
            "Eventbus endpoint is not configured in synchronizer config file."
        )
        return

    for step in self.event_steps:
        if step.technology == "kafka":
            thread = XOSKafkaThread(
                step, [bus_endpoint], self.model_accessor, self.log)
            thread.start()
            self.threads.append(thread)
        else:
            self.log.error(
                "Unknown technology. Skipping step",
                technology=step.technology,
                step=step.__name__,
            )
def grpcapi_reconnect(client, reactor):
    """Reconnect callback: push our models to the core, retrying on failure.

    FIX: use the Python-2.6+/3-compatible ``except ... as e`` syntax; the
    comma form used before is a syntax error on Python 3 (and the rest of
    this file already uses ``as e``).
    """
    global model_accessor

    # Make sure to try to load models before trying to initialize the ORM. It
    # might be the ORM is broken because it is waiting on our models.
    if Config.get("models_dir"):
        try:
            ModelLoadClient(client).upload_models(
                Config.get("name"), Config.get("models_dir"))
        except Exception as e:  # TODO: narrow exception scope
            if (hasattr(e, "code") and callable(e.code)
                    and hasattr(e.code(), "name")
                    and (e.code().name) == "UNAVAILABLE"):
                # We need to make sure we force a reconnection, as it's
                # possible that we will end up downloading a new xos API.
                log.info("grpc unavailable during loadmodels. Force a reconnect")
                client.connected = False
                client.connect()
                return
            log.exception("failed to onboard models")
            # If it's some other error, then we don't need to force a
            # reconnect. Just try the LoadModels() again.
            reactor.callLater(
                10, functools.partial(grpcapi_reconnect, client, reactor))
            return
def run(self):
    """Main synchronizer loop: start observer, watcher, and policy-engine
    threads as configured, then idle until interrupted.

    FIX: use print() call form (works on Python 2 and 3) instead of the
    Python-2-only print statement.
    """
    observer_thread = None
    watcher_thread = None
    model_policy_thread = None

    model_accessor.update_diag(sync_start=time.time(),
                               backend_status="Synchronizer Start")

    steps_dir = Config.get("steps_dir")
    if steps_dir:
        sync_steps = self.load_sync_step_modules(steps_dir)
        if sync_steps:
            # start the observer
            observer = XOSObserver(sync_steps, log=self.log)
            observer_thread = threading.Thread(target=observer.run,
                                               name='synchronizer')
            observer_thread.start()

            # start the watcher thread
            if (watchers_enabled):
                watcher = XOSWatcher(sync_steps)
                watcher_thread = threading.Thread(target=watcher.run,
                                                  name='watcher')
                watcher_thread.start()
    else:
        self.log.info(
            "Skipping observer and watcher threads due to no steps dir.")

    # start model policies thread
    policies_dir = Config.get("model_policies_dir")
    if policies_dir:
        policy_engine = XOSPolicyEngine(policies_dir=policies_dir, log=self.log)
        model_policy_thread = threading.Thread(target=policy_engine.run,
                                               name="policy_engine")
        model_policy_thread.start()
    else:
        self.log.info(
            "Skipping model policies thread due to no model_policies dir.")

    if (not observer_thread) and (not watcher_thread) and (
            not model_policy_thread):
        self.log.info("No sync steps and no policies. Synchronizer exiting.")
        # the caller will exit with status 0
        return

    while True:
        try:
            time.sleep(1000)
        except KeyboardInterrupt:
            print("exiting due to keyboard interrupt")
            # TODO: See about setting the threads as daemons
            if observer_thread:
                observer_thread._Thread__stop()
            if watcher_thread:
                watcher_thread._Thread__stop()
            if model_policy_thread:
                model_policy_thread._Thread__stop()
            sys.exit(1)
def init():
    """Create the module-level Kafka producer from configured bootstrap
    servers. Raises if the producer was already initialized."""
    global log
    global kafka_producer

    if not log:
        log = create_logger(Config().get("logging"))

    if kafka_producer:
        raise Exception("XOSKafkaProducer already initialized")

    log.info("Connecting to Kafka with bootstrap servers: %s"
             % Config.get("kafka_bootstrap_servers"))

    try:
        producer_config = {
            "bootstrap.servers": ",".join(Config.get("kafka_bootstrap_servers"))
        }
        kafka_producer = confluent_kafka.Producer(**producer_config)
        log.info("Connected to Kafka: %s" % kafka_producer)
    except confluent_kafka.KafkaError as e:
        log.exception("Kafka Error: %s" % e)
def config_accessor_grpcapi():
    """Set up the gRPC-API accessor client and hand control to the reactor."""
    global orig_sigint

    grpcapi_endpoint = Config.get("accessor.endpoint")
    grpcapi_username = Config.get("accessor.username")
    grpcapi_password = Config.get("accessor.password")

    # Passwords beginning with "@" are indirected through a file on disk.
    if grpcapi_password.startswith("@"):
        fn = grpcapi_password[1:]
        if not os.path.exists(fn):
            raise Exception("%s does not exist" % fn)
        grpcapi_password = open(fn).readline().strip()

    from xosapi.xos_grpc_client import SecureClient
    from twisted.internet import reactor

    grpcapi_client = SecureClient(
        endpoint=grpcapi_endpoint,
        username=grpcapi_username,
        password=grpcapi_password,
    )
    grpcapi_client.set_reconnect_callback(
        functools.partial(grpcapi_reconnect, grpcapi_client, reactor))
    grpcapi_client.start()

    # Reactor will take over SIGINT during reactor.run(), but does not return
    # it when reactor.stop() is called — save the original handler now.
    orig_sigint = signal.getsignal(signal.SIGINT)

    # Start reactor. This will cause the client to connect and then execute
    # grpcapi_callback().
    reactor.run()
def test_config_not_initialized(self):
    """
    [XOS-Config] Raise if accessing properties without initialization
    """
    with self.assertRaises(Exception) as e:
        Config.get("database")
    # FIX: BaseException.message was removed in Python 3; str() of the
    # exception yields the same text on both versions.
    self.assertEqual(
        str(e.exception),
        "[XOS-Config] Module has not been initialized")
def init():
    """Initialize the global Kafka producer; must be called exactly once."""
    global log
    global kafka_producer

    if not log:
        log = create_logger(Config().get("logging"))

    if kafka_producer:
        raise Exception("XOSKafkaProducer already initialized")
    else:
        log.info(
            "Connecting to Kafka with bootstrap servers: %s"
            % Config.get("kafka_bootstrap_servers")
        )
        try:
            # confluent_kafka wants a comma-separated server list
            servers = ",".join(Config.get("kafka_bootstrap_servers"))
            kafka_producer = confluent_kafka.Producer(
                **{"bootstrap.servers": servers})
            log.info("Connected to Kafka: %s" % kafka_producer)
        except confluent_kafka.KafkaError as e:
            log.exception("Kafka Error: %s" % e)
def test_config_not_initialized(self):
    """
    [XOS-Config] Raise if accessing properties without initialization
    """
    with self.assertRaises(Exception) as e:
        Config.get("database")
    # FIX: BaseException.message was removed in Python 3; compare against
    # str(e.exception) instead, which works on both versions.
    self.assertEqual(
        str(e.exception),
        "[XOS-Config] Module has not been initialized")
def __init__(self):
    """Build the secure and insecure gRPC endpoint strings from config."""
    self.client = None
    # NOTE(review): both lookups read the same "gprc_endpoint" key (note the
    # "gprc" spelling) and the endpoints differ only by port — confirm this
    # key name and the duplication are intentional.
    insecure_host = Config.get("gprc_endpoint")
    secure_host = Config.get("gprc_endpoint")
    self.grpc_secure_endpoint = secure_host + ":50051"
    self.grpc_insecure_endpoint = insecure_host + ":50055"
def test_config_override(self):
    """
    [XOS-Config] If an override is provided for the config, it should return
    the overridden value
    """
    Config.init(sample_conf, "xos-config-schema.yaml", override_conf)
    # base value survives where the override file is silent...
    self.assertEqual(Config.get("logging.level"), "info")
    # ...and the override wins where it speaks
    self.assertEqual(Config.get("database.password"), "overridden_password")
def test_config_override(self):
    """
    [XOS-Config] If an override is provided for the config, it should return
    the overridden value
    """
    Config.init(sample_conf, "xos-config-schema.yaml", override_conf)
    level = Config.get("logging.level")
    password = Config.get("database.password")
    self.assertEqual(level, "info")
    self.assertEqual(password, "overridden_password")
def test_config_extend(self):
    """
    [XOS-Config] If an override is provided for the config, it should return
    the overridden value (also if not defined in the base one)
    """
    Config.init(sample_conf, "xos-config-schema.yaml", extend_conf)
    # key only present in the extension file
    self.assertEqual(Config.get("xos_dir"), "/opt/xos")
    # key present in the base file is untouched
    self.assertEqual(Config.get("database.password"), "safe")
def test_config_extend(self):
    """
    [XOS-Config] If an override is provided for the config, it should return
    the overridden value (also if not defined in the base one)
    """
    Config.init(sample_conf, "xos-config-schema.yaml", extend_conf)
    xos_dir = Config.get("xos_dir")
    db_password = Config.get("database.password")
    self.assertEqual(xos_dir, "/opt/xos")
    self.assertEqual(db_password, "safe")
def __init__(self, user=None, clientid=None):
    """Set up the Fofum event sender using configured feefie credentials.

    FIX: narrow the bare ``except:`` to ``except Exception`` (so ^C and
    SystemExit are not swallowed) and use the print() call form, which is
    valid on both Python 2 and 3.
    """
    user = Config.get("feefie.client_user")

    try:
        clid = Config.get("feefie.client_id")
    except Exception:
        clid = get_random_client_id()
        print("EventSender: no feefie_client_id configured. Using random id %s" % clid)

    if fofum_enabled:
        self.fofum = Fofum(user=user)
        self.fofum.make(clid)
def __init__(self, user=None, clientid=None):
    """Set up the Fofum event sender using configured feefie credentials.

    FIX: narrowed the bare ``except:`` and replaced the Python-2-only print
    statement with the print() call form.
    """
    user = Config.get("feefie.client_user")

    try:
        clid = Config.get("feefie.client_id")
    except Exception:
        clid = get_random_client_id()
        print("EventSender: no feefie_client_id configured. Using random id %s" % clid)

    if fofum_enabled:
        self.fofum = Fofum(user=user)
        self.fofum.make(clid)
def config_accessor():
    """Configure the model accessor per "accessor.kind", then import any
    configured ORM wrapper modules."""
    accessor_kind = Config.get("accessor.kind")

    if accessor_kind == "testframework":
        config_accessor_mock()
    elif accessor_kind == "grpcapi":
        config_accessor_grpcapi()
    else:
        raise Exception("Unknown accessor kind %s" % accessor_kind)

    # now import any wrappers that the synchronizer needs to add to the ORM
    if Config.get("wrappers"):
        for wrapper_name in Config.get("wrappers"):
            importlib.import_module(wrapper_name)
def config_accessor():
    """Dispatch accessor setup on "accessor.kind" and load ORM wrappers."""
    kind = Config.get("accessor.kind")
    if kind == "testframework":
        config_accessor_mock()
    elif kind == "grpcapi":
        config_accessor_grpcapi()
    else:
        raise Exception("Unknown accessor kind %s" % kind)

    # now import any wrappers that the synchronizer needs to add to the ORM
    if Config.get("wrappers"):
        for wrapper_name in Config.get("wrappers"):
            importlib.import_module(wrapper_name)
def load_dependency_graph(self):
    """Load the model dependency graph from config (empty graph by default)
    and store forward/reverse DiGraphs in self.model_dependency_graph.

    FIXES (aligning with the already-ported sibling implementation in this
    file): wrap dict.items() in list() so the + concatenation works on
    Python 3, and pass edge attributes as keyword arguments since the
    positional attr_dict form of add_edge was removed in networkx 2.x.
    """
    try:
        if Config.get("dependency_graph"):
            self.log.debug(
                "Loading model dependency graph",
                path=Config.get("dependency_graph"),
            )
            dep_graph_str = open(Config.get("dependency_graph")).read()
        else:
            self.log.debug("Using default model dependency graph", graph={})
            dep_graph_str = "{}"

        # joint_dependencies is of the form { Model1 -> [(Model2, src_port, dst_port), ...] }
        # src_port is the field that accesses Model2 from Model1
        # dst_port is the field that accesses Model1 from Model2
        static_dependencies = json.loads(dep_graph_str)
        dynamic_dependencies = [
        ]  # Dropped Service and ServiceInstance dynamic dependencies

        joint_dependencies = dict(
            list(static_dependencies.items()) + dynamic_dependencies)

        model_dependency_graph = DiGraph()
        for src_model, deps in joint_dependencies.items():
            for dep in deps:
                dst_model, src_accessor, dst_accessor = dep
                if src_model != dst_model:
                    edge_label = {
                        "src_accessor": src_accessor,
                        "dst_accessor": dst_accessor,
                    }
                    model_dependency_graph.add_edge(
                        src_model, dst_model, **edge_label)

        model_dependency_graph_rev = model_dependency_graph.reverse(copy=True)
        self.model_dependency_graph = {
            # deletion
            True: model_dependency_graph_rev,
            False: model_dependency_graph,
        }
        self.log.debug("Loaded dependencies",
                       edges=model_dependency_graph.edges())
    except Exception as e:
        self.log.exception("Error loading dependency graph", e=e)
        raise e
def load_dependency_graph(self):
    """Construct self.model_dependency_graph: a dict mapping deletion=True to
    the reversed dependency graph and deletion=False to the forward graph."""
    try:
        if Config.get("dependency_graph"):
            self.log.debug(
                "Loading model dependency graph",
                path=Config.get("dependency_graph"),
            )
            dep_graph_str = open(Config.get("dependency_graph")).read()
        else:
            self.log.debug("Using default model dependency graph", graph={})
            dep_graph_str = "{}"

        # Static dependencies come from the JSON file, of the form
        # { Model1 -> [(Model2, src_port, dst_port), ...] }. Dynamic
        # (Service / ServiceInstance) dependencies were dropped.
        static_dependencies = json.loads(dep_graph_str)
        dynamic_dependencies = []
        joint_dependencies = dict(
            list(static_dependencies.items()) + dynamic_dependencies
        )

        forward_graph = DiGraph()
        for src_model, deps in joint_dependencies.items():
            for dst_model, src_accessor, dst_accessor in deps:
                if src_model == dst_model:
                    continue
                # src_accessor reaches dst_model from src_model;
                # dst_accessor goes the other way.
                forward_graph.add_edge(
                    src_model,
                    dst_model,
                    src_accessor=src_accessor,
                    dst_accessor=dst_accessor,
                )

        reverse_graph = forward_graph.reverse(copy=True)
        self.model_dependency_graph = {
            True: reverse_graph,   # deletion order
            False: forward_graph,
        }
        self.log.debug("Loaded dependencies", edges=forward_graph.edges())
    except Exception as e:
        self.log.exception("Error loading dependency graph", e=e)
        raise e
def spawn_instance(self, name, key_name=None, availability_zone=None,
                   hostname=None, image_id=None, security_group=None,
                   pubkeys=None, nics=None, metadata=None, userdata=None,
                   flavor_name=None):
    """Create and return a nova server.

    Flavor and security group fall back to the "nova.default_flavor" /
    "nova.default_security_group" config values when not given.

    FIX: pubkeys defaulted to a mutable list ([]), which is shared across
    calls; default to None and normalize inside (backward compatible).
    """
    if pubkeys is None:
        pubkeys = []

    if not flavor_name:
        flavor_name = Config.get("nova.default_flavor")

    flavor = self.shell.nova.flavors.find(name=flavor_name)

    if not security_group:
        security_group = Config.get("nova.default_security_group")

    files = {}
    # if pubkeys:
    #     files["/root/.ssh/authorized_keys"] = "\n".join(pubkeys).encode('base64')

    hints = {}

    # determine availability zone and compute host
    availability_zone_filter = None
    if availability_zone is None or not availability_zone:
        availability_zone_filter = 'nova'
    else:
        availability_zone_filter = availability_zone
        if hostname:
            availability_zone_filter += ':%s' % hostname

    server = self.shell.nova.servers.create(
        name=name,
        key_name=key_name,
        flavor=flavor.id,
        image=image_id,
        security_group=security_group,
        # files=files,
        scheduler_hints=hints,
        availability_zone=availability_zone_filter,
        nics=nics,
        networks=nics,
        meta=metadata,
        userdata=userdata)
    return server
def _test_get_child_level(self):
    """
    [XOS-Config] Should return a child level param
    """
    Config.init(sample_conf)
    # dotted paths walk into nested config sections
    self.assertEqual(Config.get("nested.parameter.for"), "testing")
def test_get_missing_param(self):
    """
    [XOS-Config] Should return None reading a missing param
    """
    # FIX: the docstring previously claimed this "should raise", but the
    # assertion (and the sibling copy of this test) expect None.
    Config.init(sample_conf)
    res = Config.get("foo")
    self.assertEqual(res, None)
def test_get_default_val_for_missing_param(self):
    """
    [XOS-Config] Should get the default value if nothing is specified
    """
    Config.init(basic_conf)
    # renamed from `dir`, which shadowed the builtin of the same name
    xos_dir = Config.get("xos_dir")
    self.assertEqual(xos_dir, "/opt/xos")
def setUp(self):
    """Point sys.path at the synchronizer sources, init Config, build the
    mock model accessor, and construct the event engine under test."""
    global XOSKafkaThread, Config, log

    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    for extra in (
        xos_dir,
        os.path.join(xos_dir, "synchronizers", "new_base"),
        os.path.join(xos_dir, "synchronizers", "new_base", "tests", "event_steps"),
    ):
        sys.path.append(extra)

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, "synchronizer-config-schema.yaml")

    from synchronizers.new_base.mock_modelaccessor_build import (
        build_mock_modelaccessor,
    )
    build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])

    # config references tests/model-deps
    os.chdir(os.path.join(test_path, ".."))

    from event_engine import XOSKafkaThread, XOSEventEngine

    self.event_steps_dir = Config.get("event_steps_dir")
    self.event_engine = XOSEventEngine(log)
def _test_get_child_level(self):
    """
    [XOS-Config] Should return a child level param
    """
    Config.init(sample_conf)
    res = Config.get("nested.parameter.for")
    self.assertEqual(res, "testing")
def setUp(self):
    """Prepare an XOSObserver over the mock model accessor for event-loop
    tests (Python 2: builtin reload)."""
    global mock_enumerator, event_loop

    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    for extra in (
        xos_dir,
        os.path.join(xos_dir, 'synchronizers', 'new_base'),
        os.path.join(xos_dir, 'synchronizers', 'new_base', 'tests', 'steps'),
    ):
        sys.path.append(extra)

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, 'synchronizer-config-schema.yaml')

    from synchronizers.new_base.mock_modelaccessor_build import build_mock_modelaccessor
    build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])

    # config references tests/model-deps
    os.chdir(os.path.join(test_path, '..'))

    import event_loop
    reload(event_loop)
    import backend
    reload(backend)
    from mock_modelaccessor import mock_enumerator
    from modelaccessor import model_accessor

    # import all class names to globals
    for (k, v) in model_accessor.all_model_classes.items():
        globals()[k] = v

    b = backend.Backend()
    steps_dir = Config.get("steps_dir")
    self.steps = b.load_sync_step_modules(steps_dir)
    self.synchronizer = event_loop.XOSObserver(self.steps)
def setUp(self):
    """Build an XOSObserver wired to the mock model accessor."""
    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, "synchronizer-config-schema.yaml")

    from xossynchronizer.mock_modelaccessor_build import (
        build_mock_modelaccessor,
    )
    build_mock_modelaccessor(
        sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])

    # config references xos-synchronizer-tests/model-deps
    os.chdir(os.path.join(test_path, ".."))

    import xossynchronizer.event_loop
    reload(xossynchronizer.event_loop)
    import xossynchronizer.backend
    reload(xossynchronizer.backend)

    from xossynchronizer.modelaccessor import model_accessor

    # import all class names to globals
    for (k, v) in model_accessor.all_model_classes.items():
        globals()[k] = v

    b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
    steps_dir = Config.get("steps_dir")
    self.steps = b.load_sync_step_modules(steps_dir)
    self.synchronizer = xossynchronizer.event_loop.XOSObserver(
        self.steps, model_accessor)
def test_get_child_level(self):
    """
    [XOS-Config] Should return a child level param
    """
    Config.init(sample_conf)
    # dotted path reads inside the "database" section
    self.assertEqual(Config.get("database.name"), "xos")
def create_secure_client(self, username, password, arg):
    """
    Return a deferred that fires with `arg` once a SecureClient for this
    username/password pair is ready; reuses the stored ORM classes in
    RESOURCES when this credential pair has been seen before.
    """
    deferred = defer.Deferred()
    key = "%s~%s" % (username, password)

    if key in RESOURCES:
        # Already set up: fire the callback on the next reactor turn.
        reactor.callLater(0, deferred.callback, arg)
    else:
        local_cert = Config.get("local_cert")
        client = SecureClient(
            endpoint=self.grpc_secure_endpoint,
            username=username,
            password=password,
            cacert=local_cert,
        )
        client.restart_on_disconnect = True
        # SecureClient is preceeded by an insecure client, so treat all secure
        # clients as previously connected. See CORD-3152
        client.was_connected = True
        client.set_reconnect_callback(
            functools.partial(self.setup_resources, client, key, deferred, arg))
        client.start()

    return deferred
def setUp(self):
    """Init Config, build the mock model accessor, and create the event
    engine under test."""
    global XOSKafkaThread, Config, log

    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, "synchronizer-config-schema.yaml")

    from xossynchronizer.mock_modelaccessor_build import (
        build_mock_modelaccessor,
    )
    build_mock_modelaccessor(
        sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])

    from xossynchronizer.modelaccessor import model_accessor

    # The test config.yaml references files in `xos-synchronizer-tests/` so
    # make sure we're in the parent directory of the test directory.
    os.chdir(os.path.join(test_path, ".."))

    from xossynchronizer.event_engine import XOSKafkaThread, XOSEventEngine

    self.event_steps_dir = Config.get("event_steps_dir")
    self.event_engine = XOSEventEngine(model_accessor=model_accessor, log=log)
def setUp(self):
    """Build the synchronizer under test and clear leftover marker files.

    FIX: the unconditional os.remove() calls raise OSError when the marker
    files don't exist (e.g. on a fresh checkout); cleanup is best-effort.
    """
    b = backend.Backend()
    steps_dir = Config.get("steps_dir")
    self.steps = b.load_sync_step_modules(steps_dir)
    self.synchronizer = event_loop.XOSObserver(self.steps)
    for marker in ('/tmp/sync_ports', '/tmp/delete_ports'):
        try:
            os.remove(marker)
        except OSError:
            pass  # file not present; nothing to clean
def main():
    """Wait for the data model to become reachable, then run the model
    policy engine."""
    log = create_logger(Config().get("logging"))

    models_active = False
    wait = False
    while not models_active:
        try:
            # Probe two representative models; success means the ORM is up.
            _ = Instance.objects.first()
            _ = NetworkTemplate.objects.first()
            models_active = True
        except Exception as e:
            log.exception("Exception", e=e)
            log.info("Waiting for data model to come up before starting...")
            time.sleep(10)
            wait = True

    if wait:
        # Safety factor, seeing that we stumbled waiting for the data model
        # to come up.
        time.sleep(60)

    # start model policies thread
    policies_dir = Config.get("model_policies_dir")
    XOSPolicyEngine(policies_dir=policies_dir, log=log).run()
def unload_models(client, reactor, version):
    """Ask the core to unload this synchronizer's models; reschedules itself
    every 30 seconds until it succeeds (driven by a Timer)."""
    # This function is called by a timer until it succeeds.
    log.info("unload_models initiated by timer")

    try:
        loader = ModelLoadClient(client)
        result = loader.unload_models(
            Config.get("name"),
            version=version,
            cleanup_behavior=ModelLoadClient.AUTOMATICALLY_CLEAN)
        log.debug("Unload response", result=result)

        if result.status in [result.SUCCESS, result.SUCCESS_NOTHING_CHANGED]:
            log.info("Models successfully unloaded. Exiting with status", code=0)
            sys.exit(0)
        if result.status == result.TRYAGAIN:
            log.info("TRYAGAIN received. Expect to try again in 30 seconds.")
    except Exception:
        # If the synchronizer is operational, then assume the ORM's
        # restart_on_disconnect will deal with the connection being lost.
        log.exception("Error while unloading. Expect to try again in 30 seconds.")

    Timer(30, functools.partial(unload_models, client, reactor, version)).start()
def update_diag(diag_class, loop_end=None, loop_start=None, syncrecord_start=None,
                sync_start=None, backend_status=None, backend_code=0):
    """Update (creating if needed) the Diag object named after this
    synchronizer with loop/sync timing and backend status information.

    FIX: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception``.
    """
    observer_name = Config.get("name")

    try:
        diag = diag_class.objects.filter(name=observer_name).first()
        if (not diag):
            if hasattr(diag_class.objects, "new"):
                # api style
                diag = diag_class.objects.new(name=observer_name)
            else:
                # django style
                diag = diag_class(name=observer_name)

        # backend_register is stored as a JSON string blob
        br_str = diag.backend_register
        if br_str:
            br = json.loads(br_str)
        else:
            br = {}

        if loop_end:
            br['last_run'] = loop_end
        if loop_end and loop_start:
            br['last_duration'] = loop_end - loop_start
        if syncrecord_start:
            br['last_syncrecord_start'] = syncrecord_start
        if sync_start:
            br['last_synchronizer_start'] = sync_start
        if backend_status:
            diag.backend_status = backend_status

        diag.backend_register = json.dumps(br)
        diag.save()
    except Exception:
        log.exception("Exception in update_diag")
        traceback.print_exc()
def test_get_default_val_for_missing_param(self):
    """
    [XOS-Config] Should get the default value if nothing is specified
    """
    Config.init(basic_conf)
    # renamed from `dir`, which shadowed the builtin of the same name
    xos_dir = Config.get("xos_dir")
    self.assertEqual(xos_dir, "/opt/xos")
def setUp(self):
    """Construct the backend and observer used by these tests."""
    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, "synchronizer-config-schema.yaml")

    from xossynchronizer.mock_modelaccessor_build import (
        build_mock_modelaccessor,
    )
    build_mock_modelaccessor(
        sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])

    # The test config.yaml references files in `xos-synchronizer-tests/` so
    # make sure we're in the parent directory of the test directory.
    os.chdir(os.path.join(test_path, ".."))

    import xossynchronizer.event_loop
    reload(xossynchronizer.event_loop)
    import xossynchronizer.backend
    reload(xossynchronizer.backend)

    from xossynchronizer.modelaccessor import model_accessor

    b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
    steps_dir = Config.get("steps_dir")
    self.steps = b.load_sync_step_modules(steps_dir)
    self.synchronizer = xossynchronizer.event_loop.XOSObserver(
        self.steps, model_accessor)
def setUp(self):
    """Build the synchronizer under test and clear leftover marker files.

    FIX: tolerate the marker files being absent instead of letting
    os.remove() raise OSError and fail the whole fixture.
    """
    b = backend.Backend()
    steps_dir = Config.get("steps_dir")
    self.steps = b.load_sync_step_modules(steps_dir)
    self.synchronizer = event_loop.XOSObserver(self.steps)
    for marker in ('/tmp/sync_ports', '/tmp/delete_ports'):
        try:
            os.remove(marker)
        except OSError:
            pass  # best-effort cleanup
def setUp(self):
    """Initialize Config and the mock model accessor, then create the
    backend/observer pair exercised by the tests."""
    self.sys_path_save = sys.path
    self.cwd_save = os.getcwd()

    config = os.path.join(test_path, "test_config.yaml")
    from xosconfig import Config
    Config.clear()
    Config.init(config, "synchronizer-config-schema.yaml")

    from xossynchronizer.mock_modelaccessor_build import (
        build_mock_modelaccessor,
    )
    build_mock_modelaccessor(
        sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])

    # The test config.yaml references files in `xos-synchronizer-tests/` so
    # make sure we're in the parent directory of the test directory.
    os.chdir(os.path.join(test_path, ".."))

    import xossynchronizer.event_loop
    reload(xossynchronizer.event_loop)
    import xossynchronizer.backend
    reload(xossynchronizer.backend)

    from xossynchronizer.modelaccessor import model_accessor

    backend_obj = xossynchronizer.backend.Backend(model_accessor=model_accessor)
    self.steps = backend_obj.load_sync_step_modules(Config.get("steps_dir"))
    self.synchronizer = xossynchronizer.event_loop.XOSObserver(
        self.steps, model_accessor)
def run_playbook(ansible_hosts, ansible_config, fqp, opts):
    """Run an ansible playbook via ansible_main.py in a subprocess, passing
    arguments through a pickled temp file and reading a pickled result back.

    FIXES: (1) result is a dict, so ``hasattr(result, "exception")`` was
    always False and playbook exceptions were never logged — test key
    membership instead; (2) Python-3-compatible except syntax (was
    ``except Exception,e``, with e unused).
    """
    args = {"ansible_hosts": ansible_hosts,
            "ansible_config": ansible_config,
            "fqp": fqp,
            "opts": opts,
            "config_file": Config.get_config_file()}

    keep_temp_files = Config.get("keep_temp_files")

    work_dir = tempfile.mkdtemp()
    args_fn = None
    result_fn = None
    try:
        log.info("creating args file", dir=work_dir)

        args_fn = os.path.join(work_dir, "args")
        result_fn = os.path.join(work_dir, "result")

        open(args_fn, "w").write(pickle.dumps(args))

        ansible_main_fn = os.path.join(os.path.dirname(__file__), "ansible_main.py")

        os.system("python %s %s %s" % (ansible_main_fn, args_fn, result_fn))

        result = pickle.loads(open(result_fn).read())

        if "exception" in result:
            log.error("Exception in playbook", exception=result["exception"])

        stats = result.get("stats", None)
        aresults = result.get("aresults", None)
    except Exception:
        log.exception("Exception running ansible_main")
        stats = None
        aresults = None
def unload_models(client, reactor, version):
    """Ask the core to unload this synchronizer's models; reschedules itself
    every 30 seconds until it succeeds (driven by a Timer).

    FIX: dropped the unused ``as e`` binding on the except clause.
    """
    # This function is called by a timer until it succeeds.
    log.info("unload_models initiated by timer")
    try:
        result = ModelLoadClient(client).unload_models(
            Config.get("name"),
            version=version,
            cleanup_behavior=ModelLoadClient.AUTOMATICALLY_CLEAN)
        log.debug("Unload response", result=result)
        if result.status in [result.SUCCESS, result.SUCCESS_NOTHING_CHANGED]:
            log.info("Models successfully unloaded. Exiting with status", code=0)
            sys.exit(0)
        if result.status == result.TRYAGAIN:
            log.info("TRYAGAIN received. Expect to try again in 30 seconds.")
    except Exception:
        # If the synchronizer is operational, then assume the ORM's
        # restart_on_disconnect will deal with the connection being lost.
        log.exception(
            "Error while unloading. Expect to try again in 30 seconds.")

    Timer(30, functools.partial(unload_models, client, reactor, version)).start()
def load_dependency_graph(self):
    """Load the model dependency graph from the configured JSON file and
    store forward/reverse DiGraphs in self.model_dependency_graph.

    FIX: pass edge attributes to add_edge as keyword arguments; the
    positional attr_dict form was removed in networkx 2.x, while the
    keyword form behaves the same on 1.x and 2.x (and matches the other
    load_dependency_graph implementation in this codebase).
    """
    dep_path = Config.get("dependency_graph")
    self.log.info('Loading model dependency graph', path=dep_path)

    try:
        dep_graph_str = open(dep_path).read()

        # joint_dependencies is of the form { Model1 -> [(Model2, src_port, dst_port), ...] }
        # src_port is the field that accesses Model2 from Model1
        # dst_port is the field that accesses Model1 from Model2
        joint_dependencies = json.loads(dep_graph_str)

        model_dependency_graph = DiGraph()
        for src_model, deps in joint_dependencies.items():
            for dep in deps:
                dst_model, src_accessor, dst_accessor = dep
                if src_model != dst_model:
                    edge_label = {'src_accessor': src_accessor,
                                  'dst_accessor': dst_accessor}
                    model_dependency_graph.add_edge(
                        src_model, dst_model, **edge_label)

        model_dependency_graph_rev = model_dependency_graph.reverse(copy=True)
        self.model_dependency_graph = {
            # deletion
            True: model_dependency_graph_rev,
            False: model_dependency_graph
        }
        self.log.info("Loaded dependencies",
                      edges=model_dependency_graph.edges())
    except Exception as e:
        self.log.exception("Error loading dependency graph", e=e)
        raise e
def test_get_child_level(self):
    """
    [XOS-Config] Should return a child level param
    """
    Config.init(sample_conf)
    res = Config.get("database.name")
    self.assertEqual(res, "xos")
def test_get_missing_param(self):
    """
    [XOS-Config] Should return None reading a missing param
    """
    Config.init(sample_conf)
    # missing keys resolve to None rather than raising
    self.assertEqual(Config.get("foo"), None)
def __init__(self, username=None, password=None, tenant=None, url=None,
             token=None, endpoint=None, controller=None, cacert=None,
             admin=True, *args, **kwds):
    """Initialize OpenStack connection settings from a controller record,
    allowing each field to be overridden by an explicit argument."""
    self.has_openstack = has_openstack
    self.url = controller.auth_url

    # Start from the controller's admin credentials (or nothing at all),
    # then let explicit keyword arguments override field by field.
    if admin:
        self.username = controller.admin_user
        self.password = controller.admin_password
        self.tenant = controller.admin_tenant
    else:
        self.username = None
        self.password = None
        self.tenant = None

    if username:
        self.username = username
    if password:
        self.password = password
    if tenant:
        self.tenant = tenant
    if url:
        self.url = url
    if token:
        self.token = token
    if endpoint:
        self.endpoint = endpoint

    if cacert:
        self.cacert = cacert
    else:
        # Fall back to the configured CA certificate.
        self.cacert = Config.get("nova.ca_ssl_cert")
def run(self):
    """Listen for Fofum events using the configured (or random) client id.

    FIX: narrowed the bare ``except:`` to ``except Exception`` and replaced
    the Python-2-only print statement with the print() call form.
    """
    # This is our unique client id, to be used when firing and receiving
    # events. It needs to be generated once and placed in the config file.
    user = Config.get("feefie.client_user")

    try:
        clid = Config.get("feefie.client_id")
    except Exception:
        clid = get_random_client_id()
        print("EventListener: no feefie_client_id configured. Using random id %s" % clid)

    if fofum_enabled:
        f = Fofum(user=user)
        listener_thread = threading.Thread(
            target=f.listen_for_event, args=(clid, self.handle_event))
        listener_thread.start()
def run(self):
    """Start observer, watcher, and model-policy threads per configuration,
    then idle until interrupted.

    FIX: use print() call form (works on Python 2 and 3) instead of the
    Python-2-only print statement.
    """
    observer_thread = None
    watcher_thread = None
    model_policy_thread = None

    model_accessor.update_diag(sync_start=time.time(),
                               backend_status="Synchronizer Start")

    steps_dir = Config.get("steps_dir")
    if steps_dir:
        sync_steps = self.load_sync_step_modules(steps_dir)
        if sync_steps:
            # start the observer
            observer = XOSObserver(sync_steps, log=self.log)
            observer_thread = threading.Thread(target=observer.run,
                                               name='synchronizer')
            observer_thread.start()

            # start the watcher thread
            if (watchers_enabled):
                watcher = XOSWatcher(sync_steps)
                watcher_thread = threading.Thread(target=watcher.run,
                                                  name='watcher')
                watcher_thread.start()
    else:
        self.log.info("Skipping observer and watcher threads due to no steps dir.")

    # start model policies thread
    policies_dir = Config.get("model_policies_dir")
    if policies_dir:
        policy_engine = XOSPolicyEngine(policies_dir=policies_dir, log=self.log)
        model_policy_thread = threading.Thread(target=policy_engine.run,
                                               name="policy_engine")
        model_policy_thread.start()
    else:
        self.log.info("Skipping model policies thread due to no model_policies dir.")

    while True:
        try:
            time.sleep(1000)
        except KeyboardInterrupt:
            print("exiting due to keyboard interrupt")
            # TODO: See about setting the threads as daemons
            if observer_thread:
                observer_thread._Thread__stop()
            if watcher_thread:
                watcher_thread._Thread__stop()
            if model_policy_thread:
                model_policy_thread._Thread__stop()
            sys.exit(1)
def extract_context(self, cur):
    """Add the synchronizer name to the logging context dict and sanitize it.

    FIX: narrowed the bare ``except:`` (which also caught SystemExit and
    KeyboardInterrupt) to ``except Exception``.
    """
    try:
        observer_name = Config.get("name")
        cur['synchronizer_name'] = observer_name
    except Exception:
        # Config may not be initialized yet; context enrichment is best-effort.
        pass

    self.sanitize_extra_args(cur)
    return cur
def config_accessor():
    """Dispatch to the accessor implementation named by "accessor.kind"."""
    accessor_kind = Config.get("accessor.kind")
    if accessor_kind == "testframework":
        # mock accessor: nothing to configure
        pass
    elif accessor_kind == "grpcapi":
        config_accessor_grpcapi()
    else:
        raise Exception("Unknown accessor kind %s" % accessor_kind)
def test_get_first_level(self):
    """
    [XOS-Config] Should return a first level param
    """
    Config.init(sample_conf)
    # NOTE we are using Config2 here to be sure that the configuration is
    # readable from any import, not only from the one that has been used to
    # initialize it
    db_section = Config2.get("database")
    self.assertEqual(
        db_section,
        {"name": "xos", "username": "******", "password": "******"})
def config_accessor():
    """Configure the accessor backend named by ``accessor.kind``."""
    # Dispatch table: kind -> setup callable.
    handlers = {
        "testframework": lambda: None,  # the test harness wires its own accessor
        "grpcapi": config_accessor_grpcapi,
    }
    accessor_kind = Config.get("accessor.kind")
    if accessor_kind not in handlers:
        raise Exception("Unknown accessor kind %s" % accessor_kind)
    handlers[accessor_kind]()
def __init__(self, sync_steps):
    """Initialize the observer over the given list of sync steps."""
    self.observer_name = Config.get("name")
    self.driver = DRIVER
    # The Condition object that gets signalled by Feefie events
    self.event_cond = threading.Condition()
    self.step_lookup = {}
    self.sync_steps = sync_steps
    self.load_sync_steps()
def extract_context(self, cur):
    """Stamp the observer's configured name into the log context dict *cur*.

    Failures to read the config are ignored (best-effort enrichment).
    Returns *cur* after sanitizing extra args.
    """
    try:
        observer_name = Config.get("name")
        cur['synchronizer_name'] = observer_name
    except Exception:
        # BUG FIX: was a bare "except:", which also caught SystemExit and
        # KeyboardInterrupt; keep the best-effort behavior but narrow it.
        pass
    self.sanitize_extra_args(cur)
    return cur
def admin_driver(self, tenant=None, controller=None):
    """Return an OpenStackDriver bound to *controller*'s admin user.

    Args:
        tenant: admin tenant; defaults to controller.admin_tenant.
        controller: a Controller instance, or a controller id (int) which
            is resolved to an instance first.
    """
    if isinstance(controller, int):
        # BUG FIX: the original did Controller.objects.get(id=controller.id),
        # which raises AttributeError because an int has no .id attribute;
        # the int itself is the id to look up.
        controller = Controller.objects.get(id=controller)
    if not tenant:
        tenant = controller.admin_tenant
    client = OpenStackClient(
        tenant=tenant, controller=controller, cacert=Config.get("nova.ca_ssl_cert")
    )
    driver = OpenStackDriver(client=client)
    driver.admin_user = client.keystone.users.find(name=controller.admin_user)
    driver.controller = controller
    return driver
def __init__(self, sync_steps, log = log):
    """Initialize the observer.

    Args:
        sync_steps: list of sync step classes to schedule.
        log: structured logger (defaults to the module-level logger).
    """
    self.log = log
    self.observer_name = Config.get("name")
    self.driver = DRIVER
    # The Condition object via which events are received
    self.event_cond = threading.Condition()
    self.step_lookup = {}
    self.sync_steps = sync_steps
    self.load_sync_steps()
    self.load_dependency_graph()
def wait_for_database():
    """Block until a connection to the xos-db database succeeds.

    Retries forever, sleeping one second between attempts.
    """
    # Credentials are loop-invariant; read them once instead of every retry.
    db_user = Config.get("database.username")
    db_password = Config.get("database.password")
    db_host = "xos-db"  # TODO: this should be configurable
    db_port = 5432  # TODO: this should be configurable
    while True:
        try:
            myConnection = psycopg2.connect(
                host=db_host, port=db_port, user=db_user, password=db_password
            )
            myConnection.close()
            # Exit on successful connection
            print("Database is available")
            return
        except Exception:
            # BUG FIX: was "except BaseException", which also trapped
            # KeyboardInterrupt/SystemExit, making the wait uninterruptible.
            # Also, the original called traceback.print_exc("message") -- the
            # first parameter of print_exc is ``limit``, so passing a string
            # is a TypeError on Python 3. Print the message separately.
            print("Exception while connecting to db")
            traceback.print_exc()
        time.sleep(1)
def test_get_default_val_for_missing_param(self):
    """
    [XOS-Config] Should get the default value if nothing is specified
    """
    Config.init(basic_conf)
    expected = {
        "level": "info",
        "channels": ["file", "console"],
        "logstash_hostport": "cordloghost:5617",
        "file": "/var/log/xos.log",
    }
    actual = Config.get("logging")
    self.assertEqual(actual, expected)
def setUp(self):
    """Build a backend, load its sync steps, and create the observer under test."""
    b = backend.Backend()
    self.steps = b.load_sync_step_modules(Config.get("steps_dir"))
    self.synchronizer = event_loop.XOSObserver(self.steps)
def upload_models(self, name, dir, version="unknown"):
    """Build and send a dynamicload LoadModelsRequest for a service.

    Collects .xproto files, models.py, attic files, convenience methods and
    migrations from the service directory *dir*, then invokes the
    dynamicload LoadModels API.

    Args:
        name: service name to load the models under.
        dir: directory holding the service's model files.
        version: service version string (default "unknown").

    Returns:
        The LoadModels RPC result.
    """
    request = self.api.dynamicload_pb2.LoadModelsRequest(name=name, version=version)

    def _read(path):
        # BUG FIX: use a context manager so file handles are closed promptly
        # (the original left every open() unclosed).
        with open(path) as f:
            return f.read()

    def _add_files(collection, directory, suffix, exclude_tests=False):
        # Append every file in *directory* matching *suffix* to the given
        # request collection. Raises OSError if *directory* does not exist,
        # matching the original os.listdir behavior -- callers guard optional
        # directories themselves.
        for fn in os.listdir(directory):
            if not fn.endswith(suffix):
                continue
            if exclude_tests and "test" in fn:
                continue
            item = collection.add()
            item.filename = fn
            item.contents = _read(os.path.join(directory, fn))

    _add_files(request.xprotos, dir, ".xproto")

    models_fn = os.path.join(dir, "models.py")
    if os.path.exists(models_fn):
        item = request.decls.add()
        item.filename = "models.py"
        item.contents = _read(models_fn)

    attic_dir = os.path.join(dir, "attic")
    if os.path.exists(attic_dir):
        log.warn(
            "Attics are deprecated, please use the legacy=True option in xProto"
        )
        _add_files(request.attics, attic_dir, ".py")

    api_convenience_dir = os.path.join(dir, "convenience")
    if os.path.exists(api_convenience_dir):
        _add_files(request.convenience_methods, api_convenience_dir, ".py", exclude_tests=True)

    # migrations directory is a sibling to the models directory
    migrations_dir = os.path.join(dir, "..", "migrations")
    if os.path.exists(migrations_dir):
        _add_files(request.migrations, migrations_dir, ".py", exclude_tests=True)

    # loading core requested version from synchronizer config
    core_version = Config.get("core_version")
    if core_version is None:
        log.warn("Core version is not set in the config file")
        # BUG FIX: the original assigned None to the protobuf string field
        # anyway, which raises TypeError right after the warning; fall back
        # to the same "unknown" sentinel used for the service version.
        core_version = "unknown"
    request.core_version = core_version

    result = self.api.dynamicload.LoadModels(request)
    return result