def _load_resources(self, types):
    """
        Load all registered resources
    """
    entities = resource.get_entity_resources()
    resource_mapping = {}
    ignored_set = set()

    for entity in entities:
        if entity not in types:
            continue

        instances = types[entity].get_all_instances()
        if len(instances) > 0:
            for instance in instances:
                try:
                    res = Resource.create_from_model(self, entity, DynamicProxy.return_value(instance))
                    resource_mapping[instance] = res
                    self.add_resource(res)
                except UnknownException:
                    ignored_set.add(instance)
                    # We get this exception when the attribute that is used to create the object id contains
                    # an unknown. We can safely ignore this resource, i.e. prune it.
                    LOGGER.debug("Skipped resource of type %s because its id contains an unknown (location: %s)",
                                 entity, instance.location)
                except IgnoreResourceException:
                    ignored_set.add(instance)
                    LOGGER.info("Ignoring resource of type %s because it requested to be ignored. (location: %s)",
                                entity, instance.location)

    Resource.convert_requires(resource_mapping, ignored_set)
def get_facts(self, resource):
    """
        Get the facts for the given resource by calling check_facts on its handler and report them to the server.
    """
    with (yield self.ratelimiter.acquire()):
        yield self.process._ensure_code(self._env_id, resource["model"], [resource["resource_type"]])
        ctx = handler.HandlerContext(resource)

        provider = None
        try:
            data = resource["attributes"]
            data["id"] = resource["id"]
            resource_obj = Resource.deserialize(data)
            version = resource_obj.id.version
            try:
                self._cache.open_version(version)
                provider = handler.Commander.get_provider(self._cache, self, resource_obj)
                provider.set_cache(self._cache)
                result = yield self.thread_pool.submit(provider.check_facts, ctx, resource_obj)
                parameters = [{"id": name, "value": value, "resource_id": resource_obj.id.resource_str(),
                               "source": "fact"} for name, value in result.items()]
                yield self.get_client().set_parameters(tid=self._env_id, parameters=parameters)
            except Exception:
                LOGGER.exception("Unable to retrieve fact")
            finally:
                self._cache.close_version(version)

        except Exception:
            LOGGER.exception("Unable to find a handler for %s", resource["id"])
            return 500
        finally:
            if provider is not None:
                provider.close()

    return 200
def do_restore(self, restore_id, snapshot_id, resources):
    """
        Restore the given resources from a snapshot and report the result of each restore to the server.
    """
    with (yield self.ratelimiter.acquire()):
        LOGGER.info("Start a restore %s", restore_id)

        yield self.process._ensure_code(self._env_id, resources[0][1]["model"],
                                        [res[1]["resource_type"] for res in resources])

        version = resources[0][1]["model"]
        self._cache.open_version(version)

        for restore, resource in resources:
            start = datetime.datetime.now()
            provider = None
            try:
                data = resource["attributes"]
                data["id"] = resource["id"]
                resource_obj = Resource.deserialize(data)
                provider = handler.Commander.get_provider(self._cache, self, resource_obj)
                provider.set_cache(self._cache)

                if not hasattr(resource_obj, "allow_restore") or not resource_obj.allow_restore:
                    yield self.get_client().update_restore(
                        tid=self._env_id, id=restore_id, resource_id=str(resource_obj.id), start=start,
                        stop=datetime.datetime.now(), success=False, error=False,
                        msg="Resource %s does not allow restore" % resource["id"])
                    continue

                try:
                    yield self.thread_pool.submit(provider.restore, resource_obj, restore["content_hash"])
                    yield self.get_client().update_restore(
                        tid=self._env_id, id=restore_id, resource_id=str(resource_obj.id), success=True,
                        error=False, start=start, stop=datetime.datetime.now(), msg="")
                except NotImplementedError:
                    yield self.get_client().update_restore(
                        tid=self._env_id, id=restore_id, resource_id=str(resource_obj.id), success=False,
                        error=False, start=start, stop=datetime.datetime.now(),
                        msg="The handler for resource %s does not support restores" % resource["id"])

            except Exception:
                LOGGER.exception("Unable to find a handler for %s", resource["id"])
                yield self.get_client().update_restore(
                    tid=self._env_id, id=restore_id, resource_id=resource_obj.id.resource_str(), success=False,
                    error=False, start=start, stop=datetime.datetime.now(),
                    msg="Unable to find a handler to restore a snapshot of resource %s" % resource["id"])
            finally:
                if provider is not None:
                    provider.close()

        self._cache.close_version(version)

    return 200
def do_run_dryrun(self, version, dry_run_id):
    """
        Run a dryrun for the given model version and report the changes under dry_run_id.
    """
    with (yield self.dryrunlock.acquire()):
        with (yield self.ratelimiter.acquire()):
            result = yield self.get_client().get_resources_for_agent(tid=self._env_id, agent=self.name,
                                                                     version=version)
            if result.code == 404:
                LOGGER.warning("Version %s does not exist, can not run dryrun", version)
                return
            elif result.code != 200:
                LOGGER.warning("Got an error while pulling resources for agent %s and version %s",
                               self.name, version)
                return

            resources = result.result["resources"]
            restypes = set([res["resource_type"] for res in resources])

            # TODO: handle different versions for dryrun and deploy!
            yield self.process._ensure_code(self._env_id, version, restypes)

            self._cache.open_version(version)
            for res in resources:
                ctx = handler.HandlerContext(res, True)
                started = datetime.datetime.now()
                provider = None
                try:
                    data = res["attributes"]
                    data["id"] = res["id"]
                    resource = Resource.deserialize(data)
                    LOGGER.debug("Running dryrun for %s", resource.id)

                    try:
                        provider = handler.Commander.get_provider(self._cache, self, resource)
                        provider.set_cache(self._cache)
                    except Exception as e:
                        ctx.exception("Unable to find a handler for %(resource_id)s (exception: %(exception)s)",
                                      resource_id=str(resource.id), exception=str(e))
                        self._client.dryrun_update(tid=self._env_id, id=dry_run_id, resource=res["id"], changes={})
                    else:
                        yield self.thread_pool.submit(provider.execute, ctx, resource, dry_run=True)
                        yield self.get_client().dryrun_update(tid=self._env_id, id=dry_run_id, resource=res["id"],
                                                              changes=ctx.changes)
                except TypeError:
                    ctx.exception("Unable to process resource for dryrun.")
                finally:
                    if provider is not None:
                        provider.close()

                    finished = datetime.datetime.now()
                    self.get_client().resource_action_update(tid=self._env_id, resource_ids=[res["id"]],
                                                             action_id=ctx.action_id,
                                                             action=const.ResourceAction.dryrun,
                                                             started=started, finished=finished,
                                                             messages=ctx.logs, status=const.ResourceState.dry)

            self._cache.close_version(version)
def get_latest_version_for_agent(self):
    """
        Get the latest version for the given agent (this is also how we are notified)
    """
    if not self._can_get_resources():
        return

    with (yield self.critical_ratelimiter.acquire()):
        if not self._can_get_resources():
            return

        LOGGER.debug("Getting latest resources for %s", self.name)
        self._getting_resources = True
        start = time.time()
        try:
            result = yield self.get_client().get_resources_for_agent(tid=self._env_id, agent=self.name)
        finally:
            self._getting_resources = False

        end = time.time()
        self._get_resource_duration = end - start
        self._get_resource_timeout = GET_RESOURCE_BACKOFF * self._get_resource_duration + end

        if result.code == 404:
            LOGGER.info("No released configuration model version available for agent %s", self.name)
        elif result.code != 200:
            LOGGER.warning("Got an error while pulling resources for agent %s. %s", self.name, result.result)
        else:
            restypes = set([res["resource_type"] for res in result.result["resources"]])

            resources = []
            yield self.process._ensure_code(self._env_id, result.result["version"], restypes)
            try:
                undeployable = {}
                for res in result.result["resources"]:
                    state = const.ResourceState[res["status"]]
                    if state in const.UNDEPLOYABLE_STATES:
                        undeployable[res["id"]] = state

                    data = res["attributes"]
                    data["id"] = res["id"]
                    resource = Resource.deserialize(data)
                    resources.append(resource)
                    LOGGER.debug("Received update for %s", resource.id)
            except TypeError:
                LOGGER.exception("Failed to receive update")

            if len(resources) > 0:
                self._nq.reload(resources, undeployable)
def do_snapshot(self, snapshot_id, resources):
    """
        Create a snapshot of the given resources and upload the snapshot data to the server.
    """
    with (yield self.ratelimiter.acquire()):
        LOGGER.info("Start snapshot %s", snapshot_id)

        yield self.process._ensure_code(self._env_id, resources[0]["model"],
                                        [res["resource_type"] for res in resources])

        version = resources[0]["model"]
        self._cache.open_version(version)

        for resource in resources:
            start = datetime.datetime.now()
            provider = None
            try:
                data = resource["attributes"]
                data["id"] = resource["id"]
                resource_obj = Resource.deserialize(data)
                provider = handler.Commander.get_provider(self._cache, self, resource_obj)
                provider.set_cache(self._cache)

                if not hasattr(resource_obj, "allow_snapshot") or not resource_obj.allow_snapshot:
                    yield self.get_client().update_snapshot(
                        tid=self._env_id, id=snapshot_id, resource_id=resource_obj.id.resource_str(),
                        snapshot_data="", start=start, stop=datetime.datetime.now(), size=0, success=False,
                        error=False, msg="Resource %s does not allow snapshots" % resource["id"])
                    continue

                try:
                    result = yield self.thread_pool.submit(provider.snapshot, resource_obj)
                    if result is not None:
                        sha1sum = hashlib.sha1()
                        sha1sum.update(result)
                        content_id = sha1sum.hexdigest()
                        yield self.get_client().upload_file(id=content_id,
                                                            content=base64.b64encode(result).decode("ascii"))

                        yield self.get_client().update_snapshot(
                            tid=self._env_id, id=snapshot_id, resource_id=resource_obj.id.resource_str(),
                            snapshot_data=content_id, start=start, stop=datetime.datetime.now(),
                            size=len(result), success=True, error=False, msg="")
                    else:
                        raise Exception("Snapshot returned no data")

                except NotImplementedError:
                    yield self.get_client().update_snapshot(
                        tid=self._env_id, id=snapshot_id, error=False, resource_id=resource_obj.id.resource_str(),
                        snapshot_data="", start=start, stop=datetime.datetime.now(), size=0, success=False,
                        msg="The handler for resource %s does not support snapshots" % resource["id"])

                except Exception:
                    LOGGER.exception("An exception occurred while creating the snapshot of %s", resource["id"])
                    yield self.get_client().update_snapshot(
                        tid=self._env_id, id=snapshot_id, snapshot_data="",
                        resource_id=resource_obj.id.resource_str(), error=True, start=start,
                        stop=datetime.datetime.now(), size=0, success=False,
                        msg="An exception occurred while creating the snapshot of %s" % resource["id"])

            except Exception:
                LOGGER.exception("Unable to find a handler for %s", resource["id"])
                yield self.get_client().update_snapshot(
                    tid=self._env_id, id=snapshot_id, snapshot_data="",
                    resource_id=resource_obj.id.resource_str(), error=False, start=start,
                    stop=datetime.datetime.now(), size=0, success=False,
                    msg="Unable to find a handler for %s" % resource["id"])
            finally:
                if provider is not None:
                    provider.close()

        self._cache.close_version(version)

    return 200