def schedule_task_with_lock(self, task):
    """Crawler scheduler: push a crawler task's seed URLs onto its queue.

    Disabled tasks and unknown queues are skipped. A per-task Redis lock
    plus a last-run timestamp in TIMER_RECORDER ensure the task fires at
    most once per window (the 'internal' config key, in minutes) even
    with several schedulers running.

    Returns True when the URLs were queued, None when the task is
    skipped or not yet due, False when the lock could not be acquired.
    """
    if not task.get('enable'):
        return None
    task_queue = task.get('task_queue')
    if task_queue not in self.task_queues:
        return None

    conn = get_redis_conn()
    task_name = task.get('name')
    window_minutes = task.get('internal')  # config key is 'internal' (sic) — presumably means "interval"
    urls = task.get('resource')

    lock_token = acquire_lock(conn, task_name)
    if not lock_token:
        return False

    pipe = conn.pipeline(True)
    try:
        now = int(time.time())
        pipe.hget(TIMER_RECORDER, task_name)
        last_run = pipe.execute()[0]
        due = not last_run or (now - int(last_run.decode('utf-8'))) >= window_minutes * 60
        if not due:
            return None
        pipe.lpush(task_queue, *urls)
        pipe.hset(TIMER_RECORDER, task_name, now)
        pipe.execute()
        return True
    finally:
        release_lock(conn, task_name, lock_token)
def schedule_task_with_lock(self, task):
    """Crawler scheduler filters tasks according to task type.

    Pushes the task's seed URLs onto its queue at most once every
    ``interval`` minutes, coordinated across scheduler instances with a
    per-task Redis lock and a last-run timestamp kept in TIMER_RECORDER.

    Returns True when queued, None when skipped or not yet due, and
    False when the lock is unavailable.
    """
    if not task.get('enable'):
        return None
    task_queue = task.get('task_queue')
    if task_queue not in self.task_queues:
        return None

    conn = get_redis_conn()
    task_name = task.get('name')
    interval = task.get('interval')
    urls = task.get('resource')

    identifier = acquire_lock(conn, task_name)
    if not identifier:
        return False

    pipe = conn.pipeline(True)
    try:
        now = int(time.time())
        pipe.hget(TIMER_RECORDER, task_name)
        last = pipe.execute()[0]
        # Skip when the previous run is still inside the interval window.
        if last and (now - int(last.decode('utf-8'))) < interval * 60:
            return None
        pipe.lpush(task_queue, *urls)
        pipe.hset(TIMER_RECORDER, task_name, now)
        pipe.execute()
        return True
    finally:
        release_lock(conn, task_name, identifier)
def add(self, key):
    """Add *key* to the Bloom filter.

    :param key: the value to record
    :return: True on success, False if the distributed lock on *key*
        could not be acquired
    :raises IndexError: once the filter is more than half full

    >>> bf = BloomFilter(data_size=100000, error_rate=0.001)
    >>> bf.add("test")
    True

    NOTE(review): the bit positions are derived from ``keyname`` (the
    shard name), not from ``key`` itself, so every key that maps to the
    same shard sets identical bits — confirm this is intended (matches
    is_exists(), which probes the same way).
    """
    if self._is_half_fill():
        raise IndexError("The capacity is insufficient")
    # Shard keys across blocks via the code-point sum modulo block count.
    keyname = self.redis_key + str(sum(map(ord, key)) % self._block_num)
    bit_positions = [
        mmh3.hash(keyname, self._hash_seed[i]) % self._bit_num
        for i in range(self._hash_num)
    ]
    lock = acquire_lock_with_timeout(self.server, key)
    if not lock:
        return False
    for pos in bit_positions:
        self.server.setbit(keyname, pos, 1)
    self._data_count += 1
    release_lock(self.server, key, lock)
    return True
def is_exists(self, key):
    """Probe the filter for *key*.

    :return: False as soon as any probed bit is 0 (the key is
        definitely absent); True otherwise (possibly present).

    NOTE(review): unlike add(), the result of the lock acquisition is
    not checked — probing proceeds even if it failed. Confirm intended.
    NOTE(review): bits are derived from ``keyname``, not ``key`` —
    see the matching note on add().
    """
    keyname = self.redis_key + str(sum(map(ord, key)) % self._block_num)
    lock = acquire_lock_with_timeout(self.server, key)
    found = True
    for i in range(self._hash_num):
        pos = mmh3.hash(keyname, self._hash_seed[i]) % self._bit_num
        if not int(self.server.getbit(keyname, pos)):
            found = False
            break
    release_lock(self.server, key, lock)
    return found
def schedule_task_with_lock(self, task):
    """Validator scheduler: filters tasks by task name (the name stands
    for the task type).

    Drains the resource zset (highest score first) into the task's
    validator set at most once per window ('internal' config key, in
    minutes). A per-task Redis lock and a last-run timestamp kept in
    TIMER_RECORDER coordinate multiple schedulers.

    Returns True on success, None when skipped / not due / no proxies
    available, False when the lock could not be acquired.
    """
    if not task.get('enable'):
        return None
    task_queue = task.get('task_queue')
    if task_queue not in self.task_queues:
        return None

    conn = get_redis_conn()
    window_minutes = task.get('internal')  # config key is 'internal' (sic) — presumably "interval"
    task_name = task.get('name')
    resource_queue = task.get('resource')

    lock_token = acquire_lock(conn, task_name)
    if not lock_token:
        return False

    pipe = conn.pipeline(True)
    try:
        now = int(time.time())
        pipe.hget(TIMER_RECORDER, task_name)
        pipe.zrevrangebyscore(resource_queue, '+inf', '-inf')
        last_run, proxies = pipe.execute()
        # Still inside the window -> nothing to do.
        if last_run and (now - int(last_run.decode('utf-8'))) < window_minutes * 60:
            return None
        if not proxies:
            print('fetched no proxies from task {}'.format(task_name))
            return None
        pipe.sadd(task_queue, *proxies)
        pipe.hset(TIMER_RECORDER, task_name, now)
        pipe.execute()
        print(
            'validator task {} has been stored into redis successfully'
            .format(task_name))
        return True
    finally:
        release_lock(conn, task_name, lock_token)
def schedule_task_with_lock(self, task):
    """Validator scheduler filters tasks according to task name since
    its task name stands for task type.

    Every ``interval`` minutes, moves all members of the resource zset
    (highest score first) into the task's validator set. A per-task
    Redis lock plus a last-run timestamp in TIMER_RECORDER prevent
    double scheduling across instances.

    Returns True on success, None when skipped / not due / no proxies,
    False when the lock cannot be acquired.
    """
    if not task.get('enable'):
        return None
    task_queue = task.get('task_queue')
    if task_queue not in self.task_queues:
        return None

    conn = get_redis_conn()
    interval = task.get('interval')
    task_name = task.get('name')
    resource_queue = task.get('resource')

    token = acquire_lock(conn, task_name)
    if not token:
        return False

    pipe = conn.pipeline(True)
    try:
        now = int(time.time())
        pipe.hget(TIMER_RECORDER, task_name)
        pipe.zrevrangebyscore(resource_queue, '+inf', '-inf')
        stamp, proxies = pipe.execute()
        due = not stamp or (now - int(stamp.decode('utf-8'))) >= interval * 60
        if not due:
            return None
        if not proxies:
            print('fetched no proxies from task {}'.format(task_name))
            return None
        pipe.sadd(task_queue, *proxies)
        pipe.hset(TIMER_RECORDER, task_name, now)
        pipe.execute()
        return True
    finally:
        release_lock(conn, task_name, token)
import utils

if __name__ == '__main__':
    # Clear any leftover lock from a previous (possibly crashed) run.
    # strict=False presumably tolerates a missing lock rather than
    # raising — TODO confirm against utils.release_lock.
    utils.release_lock(strict=False)
# applicable. try: if config.USE_LOCKS: utils.set_lock() logger.info('connecting to DB') connect() logger.info('setting parameters') set_params() logger.info('checking height') check_node_height() logger.info('calculating payouts') rawpayouts, timestamp = calculate() logger.info('formatting payouts') formatted_payouts = format_payments(payouts=rawpayouts, timestamp=timestamp) if config.PAYOUTCALCULATOR_TEST: logger.info('FORMATTED PAYMENTS') for i in formatted_payouts: logger.info('{} ---- {}'.format( i, formatted_payouts[i] / info.ARK)) else: logger.info('transmitting payouts') transmit_payments(payouts=formatted_payouts) if config.USE_LOCKS: utils.release_lock() except Exception: logger.exception('caught exception in plugandplay') raise
def test_release_lock(self):
    """release_lock(strict=False) must tolerate a missing lock, while
    strict=True must raise LockError in that situation."""
    import utils
    utils.release_lock(strict=False)  # ensure no lock is held
    with self.assertRaises(utils.LockError):
        utils.release_lock(strict=True)
def setUp(self):
    # Start every test from a clean state: drop any leftover lock.
    # strict=False: do not fail if no lock is currently held.
    import utils
    utils.release_lock(strict=False)