def __init__(self):
    """Initialise Redis-backed state for the attendance batch queue."""
    self.key = 'attbatch:datalist'
    # Pending attendance records, kept in a Redis list.
    self.__oo = List(self.key)
    # Path of the source file currently being processed.
    self.store = String('attbatch:store')
    # Serial number of the originating device.
    self.sn = String('attbatch:sn')
    # Distributed refill lock; the 120 s timeout keeps a crashed holder
    # from blocking other workers forever.
    self.lock = Lock(self.__oo._client, 'att_batch_lock', timeout=120)
    self.multi = False
    self.m_bat = None
def __store_entry(cls, entry_url, content):
    """Store *content* as a Redis hash at *entry_url*, serialised by a
    per-entry lock so concurrent writers cannot interleave updates.
    """
    entry_url = cls.__get_url(entry_url)
    redis_client = cls.__get_redis_api_client()
    lock_name = entry_url + ":lock"
    logger.debug("Acquire Redis lock for update: " + lock_name)
    # timeout=None means the lock never expires on its own, so it MUST be
    # released on every path -- hence the try/finally below.
    lock = Lock(redis_client, lock_name, timeout=None, sleep=0.1)
    acquired = lock.acquire(blocking=True)
    logger.debug("Lock acquired: " + str(acquired))
    try:
        # NOTE(review): hmset() is deprecated in modern redis-py; the
        # replacement is hset(entry_url, mapping=content).
        redis_client.hmset(entry_url, content)
    finally:
        # Bug fix: an exception from hmset() previously skipped release(),
        # leaving a non-expiring lock held forever.
        lock.release()
    logger.debug("Stored Redis entry at: " + entry_url + " Content: " + str(json.dumps(content)))
def __store_entry(cls, entry_url, content):
    """Write *content* into the Redis hash at *entry_url* under a
    per-entry update lock.
    """
    entry_url = cls.__get_url(entry_url)
    redis_client = cls.__get_redis_api_client()
    lock_name = entry_url + ":lock"
    logger.debug("Acquire Redis lock for update: " + lock_name)
    # The lock has no expiry (timeout=None): it must be released on every
    # code path, including failures.
    lock = Lock(redis_client, lock_name, timeout=None, sleep=0.1)
    acquired = lock.acquire(blocking=True)
    logger.debug("Lock acquired: " + str(acquired))
    try:
        redis_client.hmset(entry_url, content)
    finally:
        # Bug fix: previously an exception in hmset() left the
        # non-expiring lock held forever.
        lock.release()
    logger.debug("Stored Redis entry at: " + entry_url + " Content: " + str(json.dumps(content)))
class att_batch(object):
    """Redis-backed batch queue for attendance records.

    Records are loaded from an uploaded file (via ``load_att_file``) into a
    Redis list and handed out in chunks of ``ATT_DEAL_BAT_SIZE``.
    """

    def __init__(self):
        self.key = 'attbatch:datalist'
        self.__oo = List(self.key)             # pending records (Redis list)
        self.store = String('attbatch:store')  # current source file path
        self.sn = String('attbatch:sn')        # originating device serial no.
        # Refill lock; 120 s timeout prevents a crashed holder from
        # blocking other workers forever.
        self.lock = Lock(self.__oo._client, 'att_batch_lock', timeout=120)
        self.multi = False
        self.m_bat = None

    def _pop_batch(self):
        """Take up to ATT_DEAL_BAT_SIZE records off the front of the queue."""
        self.m_bat = self.__oo[:ATT_DEAL_BAT_SIZE]
        m_rtn = [e for e in self.m_bat if e]
        del self.__oo[:ATT_DEAL_BAT_SIZE]
        return m_rtn

    def get(self):
        """Return the next batch of records, refilling from file when empty.

        Returns an empty list when no data is available.
        """
        if len(self.__oo) > 0:
            return self._pop_batch()
        self.lock.acquire()
        try:
            try:
                self.done()
            except Exception:
                # Best effort: the finished file may already be gone.
                pass
            self.set()
        finally:
            # Bug fix: an exception in set() used to skip release(),
            # leaving the lock held until its 120 s timeout expired.
            self.lock.release()
        if len(self.__oo) > 0:
            return self._pop_batch()
        return []

    def set(self, file=None):
        """Load records from *file* (or the next pending upload) into the queue."""
        m_store, m_sn, m_list = load_att_file(file)
        if m_store:
            self.store.set(m_store)
            self.sn.set(m_sn)
        if m_list:
            for e in m_list:
                if e:
                    self.__oo.rpush(e)

    def init_data(self):
        """Re-load the queue from the remembered source file, if any."""
        file = self.store.get()
        if file:
            self.set(file)

    def done(self):
        """Delete the source file once it has been fully processed."""
        os.remove(self.store.get())

    def get_sn(self):
        """Return the serial number recorded for the current batch file."""
        return self.sn.get()
def __store_entry_item(cls, entry_url, item_key, item_value):
    """Set one field of the Redis hash at *entry_url*, serialised by the
    same per-entry lock used by __store_entry.
    """
    entry_url = cls.__get_url(entry_url)
    redis_client = cls.__get_redis_api_client()
    lock_name = entry_url + ":lock"
    logger.debug("Acquire Redis lock for update: " + lock_name)
    # timeout=None: the lock never expires on its own, so it MUST be
    # released on every path -- hence the try/finally below.
    lock = Lock(redis_client, lock_name, timeout=None, sleep=0.1)
    acquired = lock.acquire(blocking=True)
    logger.debug("Lock acquired: " + str(acquired))
    try:
        redis_client.hset(entry_url, item_key, item_value)
    finally:
        # Bug fix: an exception from hset() previously skipped release(),
        # leaving a non-expiring lock held forever.
        lock.release()
    logger.debug("Stored Redis entry at: " + entry_url + " Key: " + str(json.dumps(item_key)) + " Value: " + str(json.dumps(item_value)))
def __store_entry_item(cls, entry_url, item_key, item_value):
    """Update a single hash field at *entry_url* under the per-entry lock."""
    entry_url = cls.__get_url(entry_url)
    redis_client = cls.__get_redis_api_client()
    lock_name = entry_url + ":lock"
    logger.debug("Acquire Redis lock for update: " + lock_name)
    # The lock has no expiry (timeout=None): it must be released on every
    # code path, including failures.
    lock = Lock(redis_client, lock_name, timeout=None, sleep=0.1)
    acquired = lock.acquire(blocking=True)
    logger.debug("Lock acquired: " + str(acquired))
    try:
        redis_client.hset(entry_url, item_key, item_value)
    finally:
        # Bug fix: previously an exception in hset() left the
        # non-expiring lock held forever.
        lock.release()
    logger.debug("Stored Redis entry at: " + entry_url + " Key: " + str(json.dumps(item_key)) + " Value: " + str(json.dumps(item_value)) )
def lock(self, name, timeout=None, sleep=0.1):
    """Create a ``Lock`` bound to this client under key ``name``.

    The returned object mimics ``threading.Lock``.  ``timeout`` caps the
    lock's lifetime; with the default of ``None`` it persists until
    ``release()`` is called.  ``sleep`` is the poll interval (seconds)
    used while blocking on a lock another client currently holds.
    """
    return Lock(self, name, timeout=timeout, sleep=sleep)
def func(*args, **kwargs):
    """Run *task* at most once across workers, guarded by a Redis lock.

    Raises ``OnlyOne(key)`` (or returns ``None`` when ``silent_fail``)
    if the lock is already held.  Returns whatever *task* returns.
    """
    key = name or task.__name__
    acquired = Lock(rc, key, timeout=timeout, blocking=blocking,
                    blocking_timeout=blocking_timeout).acquire()
    if not acquired:
        if silent_fail:
            return None
        else:
            raise OnlyOne(key)
    try:
        # Bug fix: the task's return value was previously discarded,
        # so the decorated function always returned None on success.
        return task(*args, **kwargs)
    finally:
        if release:
            # NOTE(review): deleting the key frees the lock without any
            # ownership check -- if our lock timed out and another worker
            # re-acquired it, this deletes *their* lock.  Lock.release()
            # on the held lock object would be safer; behavior kept as-is.
            rc.delete(key)
def __init__(self):
    """Set up Redis-backed state for the POS IC-card batch queue."""
    self.key = 'posbatch:datalist'
    # Pending POS records, kept in a Redis list.
    self.__oo = List(self.key)
    # Path of the source file currently being processed.
    self.store = String('posbatch:store')
    # Header payload extracted from that file.
    self.head_data = String('posbatch:head_data')
    # Distributed lock shared by all workers; expires after 120 seconds.
    self.lock = Lock(self.__oo._client, 'pos_ic_batch_lock', timeout=120)
class ic_pos_batch(object):
    """Redis-backed batch queue for POS (point-of-sale) IC-card records.

    Records are loaded from an uploaded file (via ``load_pos_file``) into a
    Redis list and handed out in chunks of ``POS_DEAL_BAT_SIZE``.  Two
    consumption modes exist: :meth:`get_is_sistributed` pops atomically
    (multi-worker deployments), while :meth:`get` only peeks and relies on
    a later :meth:`del_oo_item` call.
    """

    def __init__(self):
        self.key = 'posbatch:datalist'
        self.__oo = List(self.key)                     # pending records
        self.store = String('posbatch:store')          # current source file path
        self.head_data = String('posbatch:head_data')  # file header payload
        # Refill lock; 120 s timeout prevents a crashed holder from
        # blocking other workers forever.
        self.lock = Lock(self.__oo._client, 'pos_ic_batch_lock', timeout=120)

    def _take_batch(self):
        """Atomically pop up to POS_DEAL_BAT_SIZE records under the lock."""
        self.lock.acquire()
        try:
            m_bat = self.__oo[:POS_DEAL_BAT_SIZE]
            del self.__oo[:POS_DEAL_BAT_SIZE]
        finally:
            self.lock.release()
        return [e for e in m_bat if e]

    def _refill(self):
        """Archive the finished file and load the next one, under the lock."""
        self.lock.acquire()
        try:
            try:
                self.done()
            except Exception:
                # Best effort: there may be no finished file to archive.
                pass
            self.set()
        finally:
            # Bug fix: an exception here used to skip release(), leaving
            # the lock held until its 120 s timeout expired.
            self.lock.release()

    def get_is_sistributed(self):
        """Return the next batch, removing it atomically (distributed mode).

        Name kept as-is (sic, "distributed") for existing callers.
        """
        if len(self.__oo) > 0:
            return self._take_batch()
        self._refill()
        if len(self.__oo) > 0:
            return self._take_batch()
        return []

    def get(self):
        """Peek at the next batch WITHOUT removing it (single-node mode).

        Callers must invoke :meth:`del_oo_item` after processing succeeds.
        """
        if len(self.__oo) > 0:
            m_bat = self.__oo[:POS_DEAL_BAT_SIZE]
            return [e for e in m_bat if e]
        self._refill()
        if len(self.__oo) > 0:
            m_bat = self.__oo[:POS_DEAL_BAT_SIZE]
            return [e for e in m_bat if e]
        return []

    def del_oo_item(self):
        """Drop the batch last returned by :meth:`get`; return remaining count."""
        self.lock.acquire()
        try:
            del self.__oo[:POS_DEAL_BAT_SIZE]
        finally:
            self.lock.release()
        return len(self.__oo)

    def set(self, file=None):
        """Load records from *file* (or the next pending upload) into the queue.

        Failures are swallowed deliberately: a malformed or missing file
        must not break the polling loop.
        """
        try:
            m_store, m_head_data, m_list = load_pos_file(file)
            if m_store:
                self.store.set(m_store)
                self.head_data.set(m_head_data)
            if m_list:
                for e in m_list:
                    if e:
                        self.__oo.rpush(e)
        except Exception:
            pass

    def init_data(self):
        """Clear the queue and re-load it from the remembered source file."""
        file = self.store.get()
        if file:
            del self.__oo[:]
            self.set(file)

    def done(self):
        """Archive the processed source file to the backup tree, then delete it.

        File names look like ``<sn>_<YYYYMMDD...>``; the backup directory is
        ``<WORK_PATH>/files/zkpos/<sn>/<YYYYMMDD>/``.
        """
        file_path = self.store.get()
        file_name = file_path.split("/")[-1].split("_")
        sn = file_name[0]
        f_dir = file_name[1][:8]  # date portion of the timestamp
        cf_path = settings.WORK_PATH + "/files/zkpos/%s/" + f_dir + "/"
        cf_path = cf_path % sn  # backup directory for this device/date
        if not os.path.exists(cf_path):
            os.makedirs(cf_path)
        shutil.copy(file_path, cf_path)
        os.remove(file_path)

    def get_head_data(self):
        """Return the header payload of the currently loaded file."""
        return self.head_data.get()