def main():
    """
    Broker entry function.

    Starts the container, exposes Prometheus metrics, then loops running the
    broker's periodic processing until interrupted.
    """
    try:
        # Uncomment when testing as app running
        # Globals.config_file = './test.yaml'
        # Constants.SUPERBLOCK_LOCATION = './state_recovery.lock'
        # Constants.MAINTENANCE_LOCATION = './maintenance.lock'
        with GracefulInterruptHandler() as h:
            GlobalsSingleton.get().start(force_fresh=False)

            runtime_config = GlobalsSingleton.get().get_config().get_runtime_config()

            # prometheus server
            prometheus_port = int(runtime_config.get(Constants.PROPERTY_CONF_PROMETHEUS_REST_PORT, None))
            prometheus_client.start_http_server(prometheus_port)

            while True:
                time.sleep(0.0001)
                # Broker-specific periodic processing (the authority main has no equivalent)
                BrokerKernelSingleton.get().do_periodic()
                if h.interrupted:
                    GlobalsSingleton.get().stop()
    except Exception:
        traceback.print_exc()
def validate(self) -> dict:
    """
    Validate the token
    @raise Exception in case of error
    """
    try:
        # validate the token
        from fabric_cf.actor.core.container.globals import GlobalsSingleton
        jwt_validator = GlobalsSingleton.get().get_jwt_validator()
        verify_exp = GlobalsSingleton.get().get_config().get_oauth_config().get(
            Constants.PROPERTY_CONF_O_AUTH_VERIFY_EXP, True)

        # Guard clause: refuse the token outright when no validator is configured
        if jwt_validator is None:
            raise TokenException("JWT Token validator not initialized, skipping validation")

        self.logger.info("Validating CI Logon token")
        code, token_or_exception = jwt_validator.validate_jwt(token=self.encoded_token,
                                                              verify_exp=verify_exp)
        if code is not ValidateCode.VALID:
            self.logger.error(f"Unable to validate provided token: {code}/{token_or_exception}")
            raise TokenException(f"Unable to validate provided token: {code}/{token_or_exception}")

        self.decoded_token = token_or_exception
        self.logger.debug(json.dumps(self.decoded_token))
        return self.decoded_token
    except Exception as e:
        self.logger.error(traceback.format_exc())
        self.logger.error("Exception occurred while validating the token e: {}".format(e))
        raise e
def cancel_timer(self):
    """
    Cancel a timer if started
    """
    if self.timer is None:
        return
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    GlobalsSingleton.get().timer_scheduler.cancel(self.timer)
def test_create(self):
    """Exercise the Ansible handler processor create flow with two NoOp-handled units."""
    processor = AnsibleHandlerProcessor()
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    processor.set_plugin(plugin=GlobalsSingleton.get().get_container().get_actor().get_plugin())
    processor.set_logger(logger=GlobalsSingleton.get().get_logger())

    # Map VM node type to the NoOp handler so no real provisioning happens
    no_op_mapping = ConfigurationMapping()
    no_op_mapping.set_class_name(class_name="NoOpHandler")
    no_op_mapping.set_module_name(module_name="fabric_cf.actor.handlers.no_op_handler")
    no_op_mapping.set_key(key=NodeType.VM.name)
    processor.add_config_mapping(mapping=no_op_mapping)
    processor.start()

    units = [
        self.create_sliver(name="n1", pci_address="0000:25:00.0", gpu_name="gpu1"),
        self.create_sliver(name="n2", pci_address="0000:81:00.0", gpu_name="gpu2"),
    ]
    for unit in units:
        processor.create(unit=unit)

    # Give the asynchronous handler time to complete both creates
    time.sleep(30)
    for unit in units:
        self.assertIsNotNone(unit.sliver.label_allocations.instance)
        self.assertIsNotNone(unit.sliver.management_ip)
    processor.shutdown()
def recovery_ended(self):
    """
    Recovery ended
    """
    # Notify both collaborators that recovery has finished
    for component in (self.plugin, self.policy):
        component.recovery_ended()
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    container_globals = GlobalsSingleton.get()
    # Clear the model-reload marker once recovery has consumed it
    if container_globals.can_reload_model():
        container_globals.delete_reload_model_state_file()
def __init__(self):
    """
    Initialize the BQM publisher from container-wide configuration.
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    # Hoist the singleton and the BQM config: the original fetched the
    # singleton five times and walked the config chain twice.
    container_globals = GlobalsSingleton.get()
    self.logger = container_globals.get_logger()
    self.broker = container_globals.get_container().get_actor()
    self.producer = container_globals.get_simple_kafka_producer()
    bqm_config = container_globals.get_config().get_global_config().get_bqm_config()
    # Kafka topic and publish interval come from the bqm config section
    self.kafka_topic = bqm_config.get(Constants.KAFKA_TOPIC, None)
    self.publish_interval = bqm_config.get(Constants.PUBLISH_INTERVAL, None)
    # Timestamp of the last model query; None until the first publish
    self.last_query_time = None
def register_actor(self):
    """
    Register actor
    @raises ConfigurationException in case of error
    """
    try:
        # Import stays inside the try so any failure is wrapped uniformly
        from fabric_cf.actor.core.container.globals import GlobalsSingleton
        container = GlobalsSingleton.get().get_container()
        container.register_actor(actor=self.actor)
    except Exception as e:
        raise ConfigurationException(
            f"Could not register actor: {self.actor.get_name()} {e}")
def __setstate__(self, state):
    """
    Restore state after unpickling; the logger is re-fetched from the
    container and non-picklable references are reset.
    """
    self.__dict__.update(state)
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.logger = GlobalsSingleton.get().get_logger()
    # These references are re-established lazily after deserialization
    self.resource_delegation_factory = None
    self.actor = None
    self.initialized = False
def recover_proxy(*, proxy_reload_from_db: ABCProxy, register: bool) -> ABCProxy:
    """
    Restore a proxy deserialized from the database.

    Re-attaches the container logger and, when requested, registers the proxy
    (or its callback form) with the ActorRegistry so it is visible to the
    rest of the system.

    @param proxy_reload_from_db proxy object reloaded from the database
    @param register if True, register the proxy with the container's ActorRegistry
    @return the restored proxy
    @throws Exception in case of error
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    proxy_reload_from_db.set_logger(logger=GlobalsSingleton.get().get_logger())
    if register:
        registry = ActorRegistrySingleton.get()
        if proxy_reload_from_db.callback:
            registry.register_callback(callback=proxy_reload_from_db)
        else:
            registry.register_proxy(proxy=proxy_reload_from_db)
    return proxy_reload_from_db
def check_access(*, action_id: ActionId, resource_type: ResourceType, token: str,
                 resource_id: str = None, logger=None, actor_type: ActorType) -> FabricToken:
    """
    Check access for Incoming operation against Policy Decision Point PDP
    :param action_id action id
    :param resource_type resource type
    :param token fabric token
    :param resource_id resource id
    :param logger logger
    :param actor_type actor type
    :returns decoded fabric token on success; throws exception in case of failure
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    pdp_config = GlobalsSingleton.get().get_config().get_global_config().get_pdp_config()

    # Token must validate before the PDP is consulted
    fabric_token = FabricToken(logger=logger, token=token)
    fabric_token.validate()

    PdpAuth(config=pdp_config, logger=logger).check_access(
        fabric_token=fabric_token.get_decoded_token(),
        actor_type=actor_type,
        action_id=action_id,
        resource_type=resource_type,
        resource_id=resource_id)
    return fabric_token
def make_actor_instance(*, actor_config: ActorConfig) -> ABCActorMixin:
    """
    Creates Actor instance
    @param actor_config actor config
    @raises ConfigurationException in case of error
    """
    actor_type = ActorType.get_actor_type_from_string(actor_type=actor_config.get_type())

    # Dispatch table instead of an if/elif chain
    factory_by_type = {
        ActorType.Orchestrator: Controller,
        ActorType.Broker: Broker,
        ActorType.Authority: Authority,
    }
    if actor_type not in factory_by_type:
        raise ConfigurationException(f"Unsupported actor type: {actor_type}")
    actor = factory_by_type[actor_type]()

    configured_guid = actor_config.get_guid()
    actor_guid = ID() if configured_guid is None else ID(uid=configured_guid)
    actor.set_identity(token=AuthToken(name=actor_config.get_name(), guid=actor_guid))

    description = actor_config.get_description()
    if description is not None:
        actor.set_description(description=description)

    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    actor.set_actor_clock(clock=GlobalsSingleton.get().get_container().get_actor_clock())
    return actor
def __init__(self):
    """
    Wire up the orchestrator handler with the controller state, container
    globals, logger, JWKS URL and PDP configuration.
    """
    self.controller_state = OrchestratorKernelSingleton.get()
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.globals = GlobalsSingleton.get()
    self.logger = self.globals.get_logger()
    config = self.globals.get_config()
    self.jwks_url = config.get_oauth_config().get(Constants.PROPERTY_CONF_O_AUTH_JWKS_URL, None)
    self.pdp_config = config.get_global_config().get_pdp_config()
def get_local_container(*, caller: AuthToken):
    """
    Build a LocalContainer management proxy for the given caller.
    @param caller auth token of the caller
    @return LocalContainer proxy bound to the container management object
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    object_manager = GlobalsSingleton.get().get_container().get_management_object_manager()
    manager = object_manager.get_management_object(
        key=ID(uid=Constants.CONTAINER_MANAGMENT_OBJECT_ID))
    return LocalContainer(manager=manager, auth=caller)
def process_neo4j(self, substrate_file: str, actor_name: str) -> Dict:
    """
    Create ARM and Inventory Slices
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.container = GlobalsSingleton.get().get_container()

    slice_result = self.substrate.get_inventory_slice_manager().create_inventory_slice(
        slice_id=ID(), name=actor_name,
        rtype=ResourceType(resource_type=Constants.PROPERTY_AGGREGATE_RESOURCE_MODEL))
    if slice_result.code != InventorySliceManagerError.ErrorNone:
        raise AggregateResourceModelCreatorException(
            f"Could not create ARM: {actor_name}. error={slice_result.code}")
    self.logger.debug(f"Created aggregate manager resource slice# {slice_result.slice}")

    if slice_result.slice.get_graph_id() is not None:
        # load the graph from Neo4j database
        self.logger.debug(f"Reloading an existing graph for resource slice# {slice_result.slice}")
        self.arm_graph = FimHelper.get_arm_graph(graph_id=slice_result.slice.get_graph_id())
        slice_result.slice.set_graph(graph=self.arm_graph)
    else:
        # First boot: build the ARM graph from the substrate file and persist it
        self.arm_graph = FimHelper.get_arm_graph_from_file(filename=substrate_file)
        slice_result.slice.set_graph(graph=self.arm_graph)
        self.substrate.get_inventory_slice_manager().update_inventory_slice(
            slice_obj=slice_result.slice)
        self.logger.debug(f"Created new graph for resource slice# {slice_result.slice}")

    for resource_config in self.resources.values():
        self.logger.debug(f"Registering resource_handler for resource_type: "
                          f"{resource_config.get_resource_type_label()} "
                          f"for Actor {actor_name}")
        self.register_handler(resource_config=resource_config)

    return self.arm_graph.generate_adms()
def new_proxy(self, *, identity: ABCActorIdentity, location: ActorLocation, proxy_type: str = None) -> ABCProxy:
    """
    Create a Kafka proxy for the actor identified by identity/location.

    If the actor is registered locally, the proxy is built from the actor's
    own registered descriptor; otherwise proxy_type selects which Kafka
    proxy class to construct from the raw location.

    @param identity actor identity
    @param location actor location; carries the Kafka topic / descriptor
    @param proxy_type actor type name; consulted only when the actor is not
           registered locally
    @return the constructed proxy, or None when a local actor has no usable
            descriptor (no match falls through with result == None)
    @raises ProxyException when the actor is remote and proxy_type is
            missing or unsupported
    """
    result = None
    actor = ActorRegistrySingleton.get().get_actor(
        actor_name_or_guid=identity.get_name())
    if actor is not None:
        # Local actor: derive topic and identity from its registered descriptor
        descriptor = location.get_descriptor()
        if descriptor is not None and descriptor.get_location() is not None:
            if isinstance(actor, ABCAuthority):
                result = KafkaAuthorityProxy(
                    kafka_topic=descriptor.get_location(),
                    identity=actor.get_identity(),
                    logger=actor.get_logger())
            elif isinstance(actor, ABCBrokerMixin):
                result = KafkaBrokerProxy(
                    kafka_topic=descriptor.get_location(),
                    identity=actor.get_identity(),
                    logger=actor.get_logger())
    else:
        # Remote actor: proxy_type decides which proxy class to build
        kafka_topic = location.get_location()
        if proxy_type is not None:
            from fabric_cf.actor.core.container.globals import GlobalsSingleton
            if proxy_type.lower() == ActorType.Authority.name.lower():
                result = KafkaAuthorityProxy(
                    kafka_topic=kafka_topic,
                    identity=identity.get_identity(),
                    logger=GlobalsSingleton.get().get_logger())
            elif proxy_type.lower() == ActorType.Broker.name.lower():
                result = KafkaBrokerProxy(
                    kafka_topic=kafka_topic,
                    identity=identity.get_identity(),
                    logger=GlobalsSingleton.get().get_logger())
            else:
                raise ProxyException(
                    "Unsupported proxy type: {}".format(proxy_type))
        else:
            raise ProxyException(
                Constants.NOT_SPECIFIED_PREFIX.format("proxy type"))
    return result
def __init__(self, *, substrate: AuthoritySubstrate = None, resources: dict = None,
             neo4j_config: dict = None):
    """
    ARM creator: holds the substrate, per-type resource configs and Neo4j
    settings used to build the aggregate resource model.
    """
    self.substrate = substrate
    self.resources = resources
    self.neo4j_config = neo4j_config
    # Populated later, during processing
    self.container = None
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.logger = GlobalsSingleton.get().get_logger()
    self.arm_graph = None
def initialize(self, *, config: Configuration):
    """
    Initialize container and actor

    Transitions the container Unknown -> Starting -> Started/Failed. On a
    fresh boot, cleans Neo4j state and instantiates actors from the
    configuration before writing the superblock.

    @param config parsed container configuration
    @raises ContainerException if config is None or the container is not in
            the Unknown state
    """
    if config is None:
        raise ContainerException("handlers cannot be null")
    with self.container_lock:
        # Only a container that has never been initialized may start
        if self.state != ContainerState.Unknown:
            raise ContainerException(f"Cannot initialize container in state: {self.state}")
        self.state = ContainerState.Starting
    self.config = config
    failed = False
    try:
        self.determine_boot_mode()
        self.create_database()
        self.boot()
        if self.is_fresh():
            # Fresh boot: wipe Neo4j and instantiate actors from the config
            try:
                from fabric_cf.actor.core.container.globals import GlobalsSingleton
                GlobalsSingleton.get().cleanup_neo4j()
                from fabric_cf.actor.boot.configuration_loader import ConfigurationLoader
                loader = ConfigurationLoader()
                loader.process(config=self.config)
            except Exception as e:
                self.logger.error(traceback.format_exc())
                self.logger.error(f"Failed to instantiate actors {e}")
                self.logger.error("This container may need to be restored to a clean state")
                raise e
            # Create State file only after successful fresh boot
            self.create_super_block()
    except Exception as e:
        self.logger.error(traceback.format_exc())
        self.logger.error(e)
        failed = True
        raise e
    finally:
        # Final state reflects the outcome regardless of how we exit
        with self.container_lock:
            if failed:
                self.state = ContainerState.Failed
            else:
                self.state = ContainerState.Started
def __init__(self):
    """
    Default-construct with a fresh ID and the container logger; the
    remaining fields stay empty until initialize() runs.
    """
    self.type_id = None
    self.proxies = None
    self.id = ID()
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.logger = GlobalsSingleton.get().get_logger()
    self.initialized = False
    self.serial = None
def start_actor(self):
    """
    Start the actor: attach its Kafka producer to the RPC manager, start the
    RPC manager, then start the actor itself.
    """
    self.logger.debug("Starting the Actor")
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    from fabric_cf.actor.core.kernel.rpc_manager_singleton import RPCManagerSingleton
    producer = GlobalsSingleton.get().get_kafka_producer_with_poller(actor=self.actor)
    rpc_manager = RPCManagerSingleton.get()
    rpc_manager.set_producer(producer=producer)
    rpc_manager.start()
    self.actor.start()
    self.logger.debug(f"Actor {self.actor.get_name()} started")
def setup_kafka_producer(self):
    """
    Create the Kafka producer from the container globals.
    @raises Exception if the producer cannot be created
    """
    try:
        from fabric_cf.actor.core.container.globals import GlobalsSingleton
        self.producer = GlobalsSingleton.get().get_kafka_producer()
    except Exception as e:
        self.logger.error(traceback.format_exc())
        # Fixed: was `self.logger.eror(...)` — the typo raised AttributeError
        # inside this handler and masked the original failure.
        self.logger.error("Exception occurred while loading schemas {}".format(e))
        raise e
def get_networkx_importer() -> ABCGraphImporter:
    """
    get fim graph importer
    :return: NetworkXGraphImporter
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    return NetworkXGraphImporter(logger=GlobalsSingleton.get().get_logger())
def get_neo4j_importer() -> ABCGraphImporter:
    """
    get fim graph importer
    :return: Neo4jGraphImporter
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    neo4j_config = GlobalsSingleton.get().get_config().get_global_config().get_neo4j_config()
    logger = GlobalsSingleton.get().get_logger()
    return Neo4jGraphImporter(url=neo4j_config["url"],
                              user=neo4j_config["user"],
                              pswd=neo4j_config["pass"],
                              import_host_dir=neo4j_config["import_host_dir"],
                              import_dir=neo4j_config["import_dir"],
                              logger=logger)
class SubstrateTestBase(BaseTestCase, unittest.TestCase):
    # Class-body setup runs once at class definition time: point the
    # container at the test configuration and force a fresh start.
    from fabric_cf.actor.core.container.globals import Globals
    Globals.config_file = "./config/config.test.yaml"
    Constants.SUPERBLOCK_LOCATION = './state_recovery.lock'
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    GlobalsSingleton.get().start(force_fresh=True)
    # Busy-wait until container startup completes before any test runs
    while not GlobalsSingleton.get().start_completed:
        time.sleep(0.0001)

    def make_actor_database(self) -> ABCDatabase:
        """Build a substrate actor database bound to the test DB credentials."""
        db = SubstrateActorDatabase(user=self.db_user, password=self.db_pwd,
                                    database=self.db_name, db_host=self.db_host,
                                    logger=self.logger)
        return db

    def test_actor(self):
        """The configured actor should be retrievable from the container."""
        actor = self.get_actor()
        self.assertIsNotNone(actor)
def get_logger(self):
    """
    Get logger, lazily fetching the container-wide logger on first use.
    :return: logger instance
    """
    if self.logger is not None:
        return self.logger
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.logger = GlobalsSingleton.get().get_logger()
    return self.logger
def initialize(self):
    """
    Initialize the actor: validate required collaborators, resolve defaults
    (name, clock, logger) from the container, initialize the plugin and
    policy, and build the kernel wrapper. Idempotent via self.initialized.

    @raises ActorException when identity, plugin, policy, name or clock
            cannot be resolved
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    if not self.initialized:
        if self.identity is None or self.plugin is None or self.policy is None:
            raise ActorException(
                f"The actor is not properly created: identity: {self.identity} "
                f"plugin: {self.plugin} policy: {self.policy}")
        # Default the name from the identity when not set explicitly
        if self.name is None:
            self.name = self.identity.get_name()
        if self.name is None:
            raise ActorException(
                "The actor is not properly created: no name")
        # Default the clock from the container when not set explicitly
        if self.clock is None:
            self.clock = GlobalsSingleton.get().get_container(
            ).get_actor_clock()
        if self.clock is None:
            raise ActorException(
                "The actor is not properly created: no clock")
        if self.logger is None:
            self.logger = GlobalsSingleton.get().get_logger()
        # Wire plugin and policy back to this actor before initializing them
        self.plugin.set_actor(actor=self)
        self.plugin.set_logger(logger=self.logger)
        self.plugin.initialize()
        self.policy.set_actor(actor=self)
        self.policy.initialize()
        self.policy.set_logger(logger=self.logger)
        self.wrapper = KernelWrapper(actor=self, plugin=self.plugin,
                                     policy=self.policy)
        # -1 marks "no cycle processed yet"
        self.current_cycle = -1
        self.setup_message_service()
        self.initialized = True
def __init__(self, *, config: Configuration):
    """
    Configuration processor state: holds the actor under construction plus
    the resources, exports and advertisements gathered while processing.
    """
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    self.logger = GlobalsSingleton.get().get_logger()
    self.config = config
    self.actor = None
    # Accumulated during configuration processing
    self.to_export = []
    self.to_advertise = []
    self.resources = {}
    self.controls = None
    self.aggregate_delegation_models = None
def main():
    """
    Authority entry function
    """
    try:
        with GracefulInterruptHandler() as handler:
            GlobalsSingleton.get().start(force_fresh=False)
            runtime_config = GlobalsSingleton.get().get_config().get_runtime_config()

            # prometheus server
            prometheus_port = int(
                runtime_config.get(Constants.PROPERTY_CONF_PROMETHEUS_REST_PORT, None))
            prometheus_client.start_http_server(prometheus_port)

            while True:
                time.sleep(0.0001)
                if handler.interrupted:
                    GlobalsSingleton.get().stop()
    except Exception:
        traceback.print_exc()
class ActorDatabaseTest(BaseTestCase, unittest.TestCase):
    # Class-body setup runs once at class definition time: point the
    # container at the test configuration and force a fresh start.
    from fabric_cf.actor.core.container.globals import Globals
    Globals.config_file = "./config/config.test.yaml"
    Constants.SUPERBLOCK_LOCATION = './state_recovery.lock'
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    GlobalsSingleton.get().start(force_fresh=True)
    # Busy-wait until container startup completes before any test runs
    while not GlobalsSingleton.get().start_completed:
        time.sleep(0.0001)

    def get_clean_database(self) -> ABCDatabase:
        """Return the actor database reset to a clean state."""
        db = self.get_actor_database()
        db.set_actor_name(name=self.actor_name)
        db.set_reset_state(state=True)
        db.initialize()
        return db

    def test_a_create(self):
        # Smoke test: a clean database can be created
        self.get_clean_database()

    def prepare_actor_database(self):
        """Re-register the test actor with the container database."""
        container_db = self.get_container_database()
        actor = self.get_actor()
        container_db.remove_actor(actor_name=actor.get_name())
        container_db.add_actor(actor=actor)
        actor.actor_added()
        return actor

    def test_b_create_2(self):
        self.prepare_actor_database()

    def get_database_to_test(self):
        """Return the plugin database of a freshly registered actor."""
        actor = self.prepare_actor_database()
        return actor.get_plugin().get_database()

    def test_c_add_slice(self):
        # Round-trip a slice: add, fetch back by id, then remove
        db = self.get_database_to_test()
        slice_obj = SliceFactory.create(slice_id=ID(), name="slice_to_add")
        self.assertEqual("slice_to_add", slice_obj.get_name())
        db.add_slice(slice_object=slice_obj)
        slice2 = db.get_slice(slice_id=slice_obj.get_slice_id())
        self.assertIsNotNone(slice2)
        db.remove_slice(slice_id=slice_obj.get_slice_id())
def add_pending_request(self, *, guid: ID, request: RPCRequest): try: self.pending_lock.acquire() from fabric_cf.actor.core.container.globals import GlobalsSingleton logger = GlobalsSingleton.get().get_logger() logger.debug("Added request with rid: {}".format(guid)) self.pending[guid] = request logger.debug("Pending Queue: {}".format(self.pending)) finally: self.pending_lock.release()
def recover_broker_slice(self, *, slice_obj: ABCSlice):
    """
    Recover broker slice at the AM, do the following if the model.reload file is detected
    - Close the existing delegations
    - Create the new delegations from the reloaded ARM
    - Add the delegations to the Broker Slice
    @param slice_obj Slice object
    @raises ActorException if delegations cannot be fetched from the database
    """
    # Only an Authority (AM) recovers broker-client slices
    if self.get_type() != ActorType.Authority:
        return False
    if not slice_obj.is_broker_client():
        return False
    from fabric_cf.actor.core.container.globals import GlobalsSingleton
    # Nothing to do unless a model reload has been requested
    if not GlobalsSingleton.get().can_reload_model():
        return False
    self.logger.info(
        f"Closing old delegations and adding new delegations to the slice: {slice_obj}!"
    )
    delegation_names = []
    try:
        delegations = self.plugin.get_database(
        ).get_delegations_by_slice_id(slice_id=slice_obj.get_slice_id())
    except Exception as e:
        self.logger.error(e)
        raise ActorException(
            f"Could not fetch delegations records for slice {slice_obj} from database"
        )
    # Close every existing delegation, remembering each name so a
    # replacement can be created from the reloaded model below
    for d in delegations:
        self.logger.info(f"Closing delegation: {d}!")
        d.set_graph(graph=None)
        d.transition(prefix="closed as part of recovers",
                     state=DelegationState.Closed)
        delegation_names.append(d.get_delegation_name())
        self.plugin.get_database().update_delegation(delegation=d)
    # Regenerate per-delegation ADMs from the reloaded aggregate resource model
    adms = self.policy.aggregate_resource_model.generate_adms()
    # Create new delegations and add to the broker slice; they will be re-registered with the policy in the recovery
    for name in delegation_names:
        new_delegation_graph = adms.get(name)
        dlg_obj = DelegationFactory.create(
            did=new_delegation_graph.get_graph_id(),
            slice_id=slice_obj.get_slice_id(),
            delegation_name=name)
        dlg_obj.set_slice_object(slice_object=slice_obj)
        dlg_obj.set_graph(graph=new_delegation_graph)
        dlg_obj.transition(prefix="Reload Model",
                           state=DelegationState.Delegated)
        self.plugin.get_database().add_delegation(delegation=dlg_obj)
    # NOTE(review): the success path falls through and returns None (not
    # True) — confirm callers rely only on the early False returns.