import threading
from collections import deque

# CephfsClient and JobThread are provided by the surrounding Ceph mgr module.

def __init__(self, volume_client, name_pfx, nr_concurrent_jobs):
    threading.Thread.__init__(self, name="{0}.tick".format(name_pfx))
    self.vc = volume_client
    # queue of volumes for starting async jobs
    self.q = deque()  # type: deque
    # volume => job tracking
    self.jobs = {}
    # lock, cv for kickstarting jobs
    self.lock = threading.Lock()
    self.cv = threading.Condition(self.lock)
    # cv for job cancelation
    self.waiting = False
    self.stopping = threading.Event()
    self.cancel_cv = threading.Condition(self.lock)
    self.nr_concurrent_jobs = nr_concurrent_jobs
    self.name_pfx = name_pfx
    # each async job group uses its own libcephfs connection (pool)
    self.fs_client = CephfsClient(self.vc.mgr)

    self.threads = []
    for i in range(self.nr_concurrent_jobs):
        self.threads.append(
            JobThread(self, volume_client, name="{0}.{1}".format(self.name_pfx, i)))
        self.threads[-1].start()
    self.start()
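
# --- illustrative sketch (not part of the original source) ---
# A minimal, self-contained version of the worker/dispatch pattern used above:
# a fixed pool of threads blocks on a shared Condition and drains work from a
# deque. The names (MiniJobGroup, _worker, queue_job) are hypothetical; Ceph's
# real JobThread carries volume-aware state that is elided here.

class MiniJobGroup:
    def __init__(self, nr_workers):
        self.q = deque()
        self.lock = threading.Lock()
        self.cv = threading.Condition(self.lock)
        self.stopping = threading.Event()
        self.threads = [threading.Thread(target=self._worker,
                                         name="worker.{0}".format(i))
                        for i in range(nr_workers)]
        for t in self.threads:
            t.start()

    def _worker(self):
        while True:
            with self.lock:
                # sleep until a job is queued or shutdown is requested
                while not self.q and not self.stopping.is_set():
                    self.cv.wait()
                if self.q:
                    job = self.q.popleft()
                else:
                    # stopping is set and the queue is drained
                    return
            job()  # run outside the lock so other workers can proceed

    def queue_job(self, job):
        with self.lock:
            self.q.append(job)
            self.cv.notify()  # wake one idle worker

    def shutdown(self):
        with self.lock:
            self.stopping.set()
            self.cv.notify_all()
        for t in self.threads:
            t.join()

# usage sketch:
#   grp = MiniJobGroup(nr_workers=2)
#   grp.queue_job(lambda: print("job ran"))
#   grp.shutdown()   # drains queued jobs, then stops the workers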
def __init__(self, mgr):
    self.mgr = mgr
    self.rados = mgr.rados
    # policy state tracked per filesystem; rebuilt by refresh_pool_policy()
    self.pool_policy = {}
    # cached copy of the cluster's filesystem map
    self.fs_map = self.mgr.get('fs_map')
    self.lock = threading.Lock()
    self.refresh_pool_policy()
    # local filesystem access via its own libcephfs connection (pool)
    self.local_fs = CephfsClient(mgr)
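
# --- illustrative sketch (not part of the original source) ---
# The constructor above caches fs_map and immediately calls refresh_pool_policy().
# Below is a hedged guess at the reconciliation such a refresh performs: sync the
# pool_policy dict with the filesystems currently in fs_map. The fs_map layout
# ('filesystems' -> 'mdsmap' -> 'fs_name') matches what mgr.get('fs_map') returns;
# everything else here is hypothetical, not Ceph's actual implementation.

def refresh_pool_policy_sketch(self):
    with self.lock:
        fs_names = {fs['mdsmap']['fs_name']
                    for fs in self.fs_map.get('filesystems', [])}
        # forget policy state for filesystems that no longer exist
        for name in list(self.pool_policy):
            if name not in fs_names:
                del self.pool_policy[name]
        # seed empty policy state for filesystems seen for the first time
        for name in fs_names:
            self.pool_policy.setdefault(name, {})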