def _load_resources(self, scope):
    """
    Load all registered resources.

    Walks every entity type registered as a resource, creates a Resource
    for each model instance of that type and registers it with this
    exporter. Finally lets the Resource class resolve the collected
    requires relations.
    """
    for entity_type in resource.get_entity_resources():
        type_instances = self._get_instances_of_types([entity_type])
        for model_instance in type_instances[entity_type]:
            new_resource = Resource.create_from_model(self, entity_type, model_instance)
            self.add_resource(new_resource)

    # post-process the requires relations of all created resources
    Resource.convert_requires()
def update_resource(self, resource_data):
    """
    Update a resource. Broadcast it on the bus and store the update in the
    database.

    :param resource_data: the serialized resource to store and broadcast
    """
    store = DataStore.instance()
    parsed = R.deserialize(resource_data)

    # persist this version of the resource
    new_version = Version(parsed.id)
    new_version.data = resource_data
    new_version.save()

    # make sure the resource (and the agent that manages it) exists in the
    # datastore; the agent check only runs for newly seen resources
    if not store.contains(Resource, new_version.resource_id):
        stored = Resource(new_version.resource_id)
        stored.save()

        if not store.contains(Agent, stored.agent_name):
            new_agent = Agent(stored.agent_name)
            new_agent.save()

    # broadcast the update on the message bus
    routing = "%s.%s" % (parsed.id.agent_name, parsed.id.entity_type)
    message = amqp.Message(json.dumps({"operation" : "UPDATE", "resource": resource_data}))
    message.content_type = "application/json"
    self._channel.basic_publish(message, exchange = self._exchange_name,
                                routing_key = "resources.%s" % routing)
def run(self, scope, offline = False):
    """
    Run the export functions.

    :param scope: the root scope of the compiled configuration model
    :param offline: when True, do not commit resources to the server
    :return: the exported resources serialized as JSON bytes
    """
    self._offline = offline
    self._scope = scope
    self._version = int(time.time())
    Resource.clear_cache()

    # first run other export plugins
    self._run_export_plugins()

    # then process the configuration model to submit it to the mgmt server
    self._load_resources(scope)

    # call dependency managers
    self._call_dep_manager(scope)

    # filter out any resource that belongs to a host with unknown values
    skipped = [rid for rid in self._resources
               if self._resource_to_host[rid] in self._unknown_hosts]
    for rid in skipped:
        del self._resources[rid]

    if len(self._unknown_hosts) > 0:
        LOGGER.info("The configuration of the following hosts is not exported due to unknown configuration parameters:")
        for host in sorted(list(self._unknown_hosts)):
            LOGGER.info(" - %s" % host)

    # validate the dependency graph
    self._validate_graph()

    json_data = self.resources_to_json()

    if self.options and self.options.json:
        # dump the export to a file instead of contacting the server
        with open(self.options.json, "wb+") as fd:
            fd.write(json_data)
    else:
        self.deploy_code(self._version)

        if len(self._resources) > 0 and not offline:
            self.commit_resources(self._version, json_data)
            LOGGER.info("Committed resources with version %d" % self._version)

    return json_data
def setup_stack_deps(stack):
    """
    Wire up the dependencies inside a "stack": every service requires the
    stack's packages and config files, and every config file requires the
    stack's packages.

    :param stack: a mapping with 'srv', 'pkg' and 'cfg' lists of references
        whose ``.value`` resolves to a model instance
        (assumption from usage — TODO confirm against callers)
    """
    for srv_ref in stack['srv']:
        srv = Resource.get_resource(srv_ref.value)

        # services depend on all packages in the stack
        for pkg_ref in stack['pkg']:
            pkg = Resource.get_resource(pkg_ref.value)
            print(pkg)
            if pkg.id not in srv.requires:
                srv.requires.add(pkg.id)
                # bug fix: the original concatenated the resource objects
                # directly into a str, raising TypeError; format them instead
                print("%s toegevoegd aan %s" % (pkg.id, srv))

        # services depend on all config files in the stack
        for cfg_ref in stack['cfg']:
            cfg = Resource.get_resource(cfg_ref.value)
            if cfg.id not in srv.requires:
                srv.requires.add(cfg.id)
                print("%s toegevoegd aan %s" % (cfg.id, srv))

    # config files depend on all packages in the stack
    # bug fix: the original iterated stack[cfg], indexing with the leaked
    # loop variable (a resource) instead of the 'cfg' key
    for cfg_ref in stack['cfg']:
        cfg = Resource.get_resource(cfg_ref.value)
        for pkg_ref in stack['pkg']:
            pkg = Resource.get_resource(pkg_ref.value)
            if pkg.id not in cfg.requires:
                cfg.requires.add(pkg.id)
                print("%s toegevoegd aan %s" % (pkg.id, cfg))
def deploy(config, root_scope, remote = None, dry_run = True, ip_address = None):
    """
    Export the model offline and deploy the resources of the matching host
    through an offline agent.

    :param config: the configuration to pass to the exporter and the agent
    :param root_scope: the root scope of the compiled model
    :param remote: deploy to this remote hostname instead of the local one
    :param dry_run: when True the agent does not actually deploy
    :param ip_address: the address used to reach the remote machine
    """
    hostname = socket.gethostname() if remote is None else remote
    print("Deploying on %s (dry-run = %s)" % (hostname, dry_run))

    # find the Host instance in the model that matches the target hostname
    deploy_host = None
    all_names = []
    try:
        for server in root_scope.get_variable("Host", ["std"]).value:
            all_names.append(server.name)
            if server.name == hostname:
                deploy_host = server
    except Exception:
        print("The std module is not loaded or does not contain the definition of Host")
        return

    if deploy_host is None:
        print("Unable to find a host to deploy on the current machine %s" % hostname)
        print("Host found in model: " + ", ".join(all_names))
        return

    # run an offline export to obtain the serialized resources
    export = Exporter(config)
    json_data = export.run(root_scope, offline = True)
    files = export.get_offline_files()

    if remote is not None:
        ip_address = remote

    # feed every resource that belongs to the target host to an offline agent
    agent = Agent(config, False, hostname, offline = True, deploy = not dry_run, remote = ip_address)
    agent._offline_files = files

    host_id = "[%s," % deploy_host.name
    for item in json.loads(json_data.decode("utf-8")):
        if host_id in item["id"]:
            agent.update(Resource.deserialize(item))

    if agent._queue.size() == 0:
        print("No configuration found for host %s" % hostname)
        return

    print("Deploying config")
    agent.deploy_config()
def dir_before_file(model, resources):
    """
    If a file is defined on a host, then make the file depend on its parent
    directory.
    """
    # scan all resources for std::File instances
    for _id, res in resources.items():
        model_cls = res.model.__class__
        if not (res.model.__module__ == "std" and model_cls.__name__ == "File"):
            continue

        file_host = res.model.host
        for directory in file_host.directories:
            directory_res = Resource.get_resource(directory)
            if directory_res is None:
                continue

            # the directory is managed on this host and is the file's
            # parent: make the file require it
            if os.path.dirname(res.path) == directory_res.path:
                res.requires.add(directory_res.id)
def file_before_service(model, resources):
    """
    Make every std::Service resource depend on all std::File resources
    defined on the same host.

    NOTE(review): the original docstring described package->service
    dependencies, but the code actually wires file->service dependencies
    (it iterates host.files) — docstring corrected to match the code.
    """
    # scan all resources for std::Service instances
    for _id, res in resources.items():
        model_cls = res.model.__class__
        if res.model.__module__ == "std" and model_cls.__name__ == "Service":
            service_host = res.model.host

            # the service requires every file managed on the same host
            for file_ref in service_host.files:
                file_res = Resource.get_resource(file_ref)
                if file_res is not None:
                    res.requires.add(file_res.id)
def bootstrap(config):
    """
    Bootstrap IMP on a remote server by provisioning a VM, configuring it and
    starting the IMP server there.

    Phases (each depends on the previous one completing):
      1. compile the model and boot the management server through the IaaS agent
      2. poll for its facts (ip_address) and wait for ssh to answer
      3. deploy our key, recompile and deploy the config to the mgmt server
      4. boot every other host, collect their facts, deploy key + config
      5. recompile with bootstrap off and do a final deploy to every host

    :param config: the configuration passed to the Agent/Exporter/compiler
    """
    # log to the console at INFO level for the duration of the bootstrap
    console = logging.StreamHandler()
    LOGGER.addHandler(console)
    LOGGER.setLevel(logging.INFO)

    root_scope = compile_model(config)
    mgmt_server = get_mgmt_server_from_model(root_scope)

    # provision the mgmt server
    iaas_agent = Agent(config, False, mgmt_server.iaas.name, offline=True, deploy=True)
    export = Exporter(config)
    export.run(root_scope, offline=True)
    mgmt_vm_resource = Resource.get_resource(mgmt_server)
    iaas_agent.update(mgmt_vm_resource)

    print("Bootstrapping IaaS config and booting management server")
    # drain the agent queue until the VM resource has been deployed
    while iaas_agent._queue.size() > 0:
        iaas_agent.deploy_config()

    # wait for the vm to come online and get its ip by polling the IaaS facts
    print("Waiting for %s to become available" % mgmt_server.name)
    facts = None
    while facts is None:
        all_facts = iaas_agent.get_facts(mgmt_vm_resource)
        for vm_key in all_facts.keys():
            # only accept the fact set once it contains an ip_address
            if vm_key == mgmt_vm_resource.id.resource_str() and "ip_address" in all_facts[vm_key]:
                facts = all_facts[vm_key]

        if facts is None:
            print("No response, waiting 5s for retry")
            time.sleep(5)

    # wait for the server to respond to ssh (a successful echo means sshd is up)
    while True:
        result = run("/usr/bin/ssh", ARGS + ["ec2-user@" + facts["ip_address"], "echo 'OK'"])
        if result[0] == "OK":
            break
        else:
            time.sleep(5)

    # now add our ssh key to the root user
    deploy_key(facts["ip_address"], mgmt_vm_resource.key_value)

    # now recompile the model for the mgmt server and do a remote deploy;
    # the facts are stored offline so the compile can use them
    Offline.get().set_facts(str(mgmt_vm_resource.id.resource_str()), facts)
    root_scope = compile_model(config)
    deploy(config, root_scope, remote=mgmt_server.name, dry_run=False, ip_address=facts["ip_address"])

    ## now boot all other servers, these are already available in the root_scope of the previous compile
    servers = set(root_scope.get_variable("Host", ["std"]).value)
    vm_servers = []
    for server in servers:
        vm_resource = Resource.get_resource(server)
        if vm_resource is not None:
            vm_servers.append(vm_resource)
            iaas_agent.update(vm_resource)

    print("Booting all other servers")
    while iaas_agent._queue.size() > 0:
        iaas_agent.deploy_config()

    # collect facts about all servers; keep polling until every VM reported
    print("Waiting for servers to become available")
    facts = {}
    while len(vm_servers) > len(facts):
        for vm in vm_servers:
            if vm.id.resource_str() not in facts:
                all_facts = iaas_agent.get_facts(vm)
                for vm_key in all_facts.keys():
                    if vm_key not in facts and "ip_address" in all_facts[vm_key]:
                        # store the facts offline for the next compile as well
                        Offline.get().set_facts(vm_key, all_facts[vm_key])
                        facts[vm_key] = all_facts[vm_key]

        if len(vm_servers) > len(facts):
            print("No response, waiting 5s for retry")
            time.sleep(5)

    # now recompile the model once again with all facts and deploy to all servers
    root_scope = compile_model(config)

    # wait for the server to come online, deploy the ssh key and deploy the config
    print("Waiting for the server to come online, add our key to the root user and deploy the configuration")
    vm_todo = list(vm_servers)
    while len(vm_todo) > 0:
        # iterate a copy because vm_todo is mutated inside the loop
        for vm in list(vm_todo):
            ip = facts[vm.id.resource_str()]["ip_address"]
            result = run("/usr/bin/ssh", ARGS + ["ec2-user@" + ip, "echo 'OK'"])
            if result[0] == "OK":
                print("%s up" % vm.id.attribute_value)
                deploy_key(ip, vm.key_value)
                deploy(config, root_scope, remote=vm.name, dry_run=False, ip_address=ip)
                print("%s done" % vm.id.attribute_value)
                vm_todo.remove(vm)
        time.sleep(5)

    # now a final run with bootstrap off
    # now recompile the model once again with all facts and deploy to all servers
    root_scope = compile_model(config, bootstrap="false")

    # wait for the server to come online, deploy the ssh key and deploy the config
    print("Deploying final non-bootstrap configuration")
    vm_todo = list(vm_servers)
    for vm in list(vm_todo):
        ip = facts[vm.id.resource_str()]["ip_address"]
        deploy(config, root_scope, remote=vm.name, dry_run=False, ip_address=ip)
        vm_todo.remove(vm)
def _handle_op(self, operation, message):
    """
    Handle an operation received on the control channel.

    :param operation: the operation name; one of PING, UPDATE, UPDATED,
        STATUS, FACTS, QUEUE, DEPLOY, INFO, DUMP or MODULE_UPDATE
    :param message: the decoded payload for the operation
    """
    if operation == "PING":
        LOGGER.info("Got ping request, sending pong back")
        response = {"hostname" : self._hostnames }
        self._mq_send("control", "PONG", response)

    elif operation == "UPDATE":
        LOGGER.debug("Received update for %s", message["resource"]["id"])
        resource = Resource.deserialize(message["resource"])
        self.update(resource)

    elif operation == "UPDATED":
        rid = Id.parse_id(message["id"])
        version = message["version"]
        reload = message["reload"]
        self._dm.resource_update(rid.resource_str(), version, reload)

    elif operation == "STATUS":
        resource = Id.parse_id(message["id"]).get_instance()
        if resource is None:
            self._mq_send("control", "STATUS_REPLY", {"code" : 404})
            return

        try:
            provider = Commander.get_provider(self, resource.id)
        except Exception:
            LOGGER.exception("Unable to find a handler for %s" % resource)
            # bug fix: the original fell through here with 'provider'
            # unbound and raised NameError below; reply with 404 instead
            self._mq_send("control", "STATUS_REPLY", {"code" : 404})
            return

        try:
            result = provider.check_resource(resource)
            self._mq_send("control", "STATUS_REPLY", result)
        except Exception:
            LOGGER.exception("Unable to check status of %s" % resource)
            self._mq_send("control", "STATUS_REPLY", {"code" : 404})

    elif operation == "FACTS":
        resource_id = Id.parse_id(message["id"])
        try:
            resource = Resource.deserialize(message["resource"])
            provider = Commander.get_provider(self, resource_id)
            try:
                result = provider.facts(resource)
                response = {"operation" : "FACTS_REPLY",
                            "subject" : str(resource_id),
                            "facts" : result}
                self._mq_send("control", "FACTS_REPLY", response)
            except Exception:
                LOGGER.exception("Unable to retrieve fact")
                self._mq_send("control", "FACTS_REPLY",
                              {"subject" : str(resource_id), "code": 404})
        except Exception:
            LOGGER.exception("Unable to find a handler for %s" % resource_id)

    elif operation == "QUEUE":
        response = {"queue" : ["%s,v=%d" % (x.id, x.version) for x in self._queue.all()]}
        self._mq_send("control", "QUEUE_REPLY", response)

    elif operation == "DEPLOY":
        self.deploy_config()

    elif operation == "INFO":
        # report the live threads and the state of the resource queue
        response = {"threads" : [x.name for x in enumerate()],
                    "queue length" : self._queue.size(),
                    "queue ready length" : self._queue.ready_size(),
                    }
        self._mq_send("control", "INFO_REPLY", response)

    elif operation == "DUMP":
        LOGGER.info("Dumping!")
        self._queue.dump()

    elif operation == "MODULE_UPDATE":
        version = message["version"]
        modules = message["modules"]
        self._loader.deploy_version(version, modules)