def __init__(self, project_id, zk_client, index_manager, datastore_access):
  """ Creates a new ProjectIndexManager.

  Args:
    project_id: A string specifying a project ID.
    zk_client: A KazooClient.
    index_manager: An IndexManager used for checking lock status.
    datastore_access: A DatastoreDistributed object.
  """
  self.project_id = project_id
  self.indexes_node = '/appscale/projects/{}/indexes'.format(self.project_id)
  self.active = True
  self.update_event = AsyncEvent()

  self._creation_times = {}
  self._index_manager = index_manager
  self._zk_client = zk_client
  self._ds_access = datastore_access

  self._zk_client.DataWatch(self.indexes_node, self._update_indexes_watch)

  # Since this manager can be used synchronously, ensure that the indexes
  # are populated for this IOLoop iteration.
  try:
    encoded_indexes = self._zk_client.get(self.indexes_node)[0]
  except NoNodeError:
    encoded_indexes = '[]'

  self.indexes = [DatastoreIndex.from_dict(self.project_id, index)
                  for index in json.loads(encoded_indexes)]
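
# A minimal standalone sketch (not part of the class above) of the same
# "read the node synchronously, fall back to an empty list" pattern the
# constructor uses to populate self.indexes. Only the public kazoo API is
# used; the ZooKeeper host and project ID below are illustrative assumptions.
import json

from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError


def read_index_definitions(zk_client, indexes_node):
  """ Returns the list of index definitions stored at indexes_node,
  or an empty list if the node does not exist yet. """
  try:
    encoded_indexes = zk_client.get(indexes_node)[0]
  except NoNodeError:
    encoded_indexes = '[]'

  return json.loads(encoded_indexes)


if __name__ == '__main__':
  zk_client = KazooClient(hosts='localhost:2181')  # assumed host
  zk_client.start()
  print(read_index_definitions(
    zk_client, '/appscale/projects/guestbook/indexes'))  # assumed project ID
  zk_client.stop()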
def __init__(self, client, path, identifier=None):
  """ Creates an AsyncKazooLock.

  Args:
    client: A KazooClient.
    path: The lock path to use.
    identifier: The name to use for this lock contender. This can be useful
      for querying to see who the current lock contenders are.
  """
  self.client = client
  self.tornado_kazoo = TornadoKazoo(client)
  self.path = path

  # some data is written to the node. this can be queried via
  # contenders() to see who is contending for the lock
  self.data = str(identifier or "").encode('utf-8')
  self.node = None

  self.wake_event = AsyncEvent()

  # props to Netflix Curator for this trick. It is possible for our
  # create request to succeed on the server, but for a failure to
  # prevent us from getting back the full path name. We prefix our
  # lock name with a uuid and can check for its presence on retry.
  self.prefix = uuid.uuid4().hex + self._NODE_NAME
  self.create_path = self.path + "/" + self.prefix

  self.create_tried = False
  self.is_acquired = False
  self.assured_path = False
  self.cancelled = False
  self._retry = AsyncKazooRetry(max_tries=-1)
  self._lock = AsyncLock()
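
# A minimal usage sketch, assuming AsyncKazooLock exposes acquire() and
# release() coroutines analogous to kazoo.recipe.lock.Lock; the import path,
# lock path, and ZooKeeper host below are assumptions for illustration only.
from kazoo.client import KazooClient
from tornado import gen
from tornado.ioloop import IOLoop

from appscale.datastore.zkappscale.tornado_kazoo import AsyncKazooLock  # assumed path


@gen.coroutine
def run_exclusive(zk_client):
  """ Runs a critical section while holding an AsyncKazooLock. """
  lock = AsyncKazooLock(zk_client, '/appscale/example-lock',  # assumed node
                        identifier='worker-1')
  yield lock.acquire()
  try:
    # Critical section: only one contender runs this at a time.
    pass
  finally:
    yield lock.release()


if __name__ == '__main__':
  zk_client = KazooClient(hosts='localhost:2181')  # assumed host
  zk_client.start()
  IOLoop.current().run_sync(lambda: run_exclusive(zk_client))
  zk_client.stop()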
def __init__(self, zk_client, datastore_access, perform_admin=False):
  """ Creates a new IndexManager.

  Args:
    zk_client: A kazoo.client.KazooClient object.
    datastore_access: A DatastoreDistributed object.
    perform_admin: A boolean specifying whether or not to perform admin
      operations.
  """
  self.projects = {}
  self._wake_event = AsyncEvent()
  self._zk_client = zk_client
  self.admin_lock = AsyncKazooLock(self._zk_client, self.ADMIN_LOCK_NODE)

  # TODO: Refactor so that this dependency is not needed.
  self._ds_access = datastore_access

  self._zk_client.ensure_path('/appscale/projects')
  self._zk_client.ChildrenWatch('/appscale/projects', self._update_projects)

  # Since this manager can be used synchronously, ensure that the projects
  # are populated for this IOLoop iteration.
  project_ids = self._zk_client.get_children('/appscale/projects')
  self._update_projects_sync(project_ids)

  if perform_admin:
    IOLoop.current().spawn_callback(self._contend_for_admin_lock)
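
# A minimal standalone sketch of the "watch for changes, but also fetch the
# current children synchronously" pattern the constructor uses so callers have
# project data within the current IOLoop iteration. Only the public kazoo API
# is used; the ZooKeeper host below is an assumption for illustration.
from kazoo.client import KazooClient


def watch_projects(zk_client, callback):
  """ Registers callback for project list changes and also returns the
  current list synchronously, mirroring IndexManager's startup behavior. """
  zk_client.ensure_path('/appscale/projects')
  zk_client.ChildrenWatch('/appscale/projects', callback)
  return zk_client.get_children('/appscale/projects')


if __name__ == '__main__':
  zk_client = KazooClient(hosts='localhost:2181')  # assumed host
  zk_client.start()
  current = watch_projects(
    zk_client, lambda project_ids: print('projects now:', project_ids))
  print('projects at startup:', current)
  zk_client.stop()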
def __init__(self, project_id, coordinator, zk_client, db_access, thread_pool,
             index_manager):
  """ Creates a new ProjectGroomer.

  Args:
    project_id: A string specifying a project ID.
    coordinator: A GroomingCoordinator.
    zk_client: A KazooClient.
    db_access: A DatastoreProxy.
    thread_pool: A ThreadPoolExecutor.
    index_manager: An IndexManager object.
  """
  self.project_id = project_id

  self._coordinator = coordinator
  self._zk_client = zk_client
  self._tornado_zk = TornadoKazoo(self._zk_client)
  self._db_access = db_access
  self._thread_pool = thread_pool
  self._index_manager = index_manager
  self._project_node = '/appscale/apps/{}'.format(self.project_id)
  self._containers = []
  self._inactive_containers = set()
  self._batch_resolver = BatchResolver(self.project_id, self._db_access)

  self._zk_client.ensure_path(self._project_node)
  self._zk_client.ChildrenWatch(self._project_node, self._update_containers)

  self._txid_manual_offset = 0
  self._offset_node = '/'.join([self._project_node, OFFSET_NODE])
  self._zk_client.DataWatch(self._offset_node, self._update_offset)

  self._stop_event = AsyncEvent()
  self._stopped_event = AsyncEvent()

  # Keeps track of cleanup results for each round of grooming.
  self._txids_cleaned = 0
  self._oldest_valid_tx_time = None

  self._worker_queue = AsyncQueue(maxsize=MAX_CONCURRENCY)
  for _ in range(MAX_CONCURRENCY):
    IOLoop.current().spawn_callback(self._worker)

  IOLoop.current().spawn_callback(self.start)
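
# A minimal standalone sketch of the bounded worker-pool pattern used above,
# assuming AsyncQueue refers to tornado.queues.Queue; MAX_CONCURRENCY and the
# integer task payloads are illustrative values, not taken from the class.
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

MAX_CONCURRENCY = 4  # assumed value


@gen.coroutine
def worker(queue):
  """ Consumes tasks from the queue until the IOLoop stops. """
  while True:
    task = yield queue.get()
    try:
      # A real groomer would clean up one transaction/container here.
      print('processing', task)
    finally:
      queue.task_done()


@gen.coroutine
def main():
  queue = Queue(maxsize=MAX_CONCURRENCY)
  for _ in range(MAX_CONCURRENCY):
    IOLoop.current().spawn_callback(worker, queue)

  for task in range(10):
    yield queue.put(task)  # blocks when the queue is full

  yield queue.join()  # wait for every task to be marked done


if __name__ == '__main__':
  IOLoop.current().run_sync(main)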