def set_current_block_level_sync(block_id: str, level: int) -> None:
    """Set the current verification level for a block (synchronous)

    Note: docstring previously said "(async)", but this is the sync variant
    (it calls redis.set_sync directly).

    Args:
        block_id: block_id to set
        level: verification level to set for block_id
    """
    redis.set_sync(state_key(block_id), str(level))
def register(retry: bool = True) -> None:
    """Register self with matchmaking

    Args:
        retry: Whether or not to retry this request if it fails
            (added for consistency with the retry-aware variant of this
            function; default True preserves the original behavior)
    """
    path = "/registration"
    body = get_matchmaking_config()
    body["token"] = os.environ.get("REGISTRATION_TOKEN")
    make_matchmaking_request("POST", path, body, retry)
    # Value doesn't matter; only the key's presence/TTL is used for re-register timing
    redis.set_sync(REREGISTER_TIMING_KEY, "a", ex=REREGISTER_TIME_AMOUNT)
def update_funded_flag(flag_value: bool) -> None:
    """Record the chain's funded status in redis, then re-register with matchmaking.

    Args:
        flag_value: True to mark the chain as funded, False to clear the flag
    """
    funded_key = "dc:isFunded"
    if flag_value:
        # Presence of the key is the flag itself; the stored value is irrelevant
        redis.set_sync(funded_key, "a")
    else:
        redis.delete_sync(funded_key)
    register()
def enqueue_generic(content: dict, queue: str, deadline: int) -> None:
    """Serialize content and push it onto the given redis queue.

    Args:
        content: dict payload to enqueue (JSON-serialized compactly)
        queue: name of the redis list to push onto
        deadline: seconds after which this item may be discarded entirely;
            falsy to set no deadline

    Raises:
        RuntimeError: if the redis push fails
    """
    _log.info(f"Enqueueing content to {queue} queue")
    serialized = json.dumps(content, separators=(",", ":"))
    pushed = redis.lpush_sync(queue, serialized)
    if not pushed:
        raise RuntimeError("Failed to enqueue")
    if deadline:
        # Record a deadline beyond which this item will be discarded completely
        deadline_key = get_deadline_key(serialized.encode("utf8"))
        redis.set_sync(deadline_key, "a", deadline)  # Value is irrelevant
def register(retry: bool = True) -> None:
    """Register self with matchmaking

    Args:
        retry: Whether or not to retry this request if it fails
    """
    registration_path = "/registration"
    payload = get_matchmaking_config()
    payload["token"] = os.environ.get("REGISTRATION_TOKEN")
    make_matchmaking_request("POST", registration_path, payload, retry)
    # Value doesn't matter; the key's TTL drives re-registration timing
    redis.set_sync(REREGISTER_TIMING_KEY, "a", ex=REREGISTER_TIME_AMOUNT)
def save_matchmaking_auth_key(auth_key: str) -> bool:
    """Register a new matchmaking auth key.

    !This will overwrite the existing matchmaking key for this chain!

    Args:
        auth_key: auth_key to add for matchmaking

    Returns:
        Boolean if successful
    """
    try:
        redis.set_sync(MATCHMAKING_KEY_LOCATION, auth_key)
    except Exception:
        # Deliberately best-effort: signal failure via the return value
        return False
    return True
def signature_is_replay(request_signature: str) -> bool:
    """Check whether a request signature was seen before, marking it as used if new.

    Args:
        request_signature: string of the request id to check

    Returns:
        True if this signature is a replay, False if not
    """
    redis_key = f"{REQUEST_PREFIX_KEY}{request_signature}"
    already_seen = redis.get_sync(redis_key, decode=False)
    if already_seen:
        # Key exists in redis, so this signature has been used before
        return True
    # Record this new signature (value doesn't matter). The TTL includes a
    # safety margin in case the chain's clock re-adjusts slightly (NTP).
    redis.set_sync(redis_key, "a", ex=60)
    return False
def increment_storage_error_sync(block_id: str, current_level: int) -> None:
    """When getting a storage error/inconsistency between redis/storage, this should be called

    This will roll back a block to a previous level for verifications if
    FAULT_TOLERATION is surpassed for a block. The state in redis can be a
    mis-representation of what's in actual storage; if this occurs, we need to
    roll back the block verification state and remove any bogus verifications
    from redis that aren't truly saved in storage, which happens on occasion.

    Args:
        block_id: the block_id to increment a storage error
        current_level: the current block verification level state (should be in broadcast:block:state)
    """
    # Don't do anything at or below level 2: no verifications are required yet
    if current_level <= 2:
        return
    error_key = storage_error_key(block_id)
    current_count = int(redis.get_sync(error_key, decode=False) or 0)
    if current_count < FAULT_TOLERATION:
        # Still within toleration: just bump the error counter
        redis.set_sync(error_key, str(current_count + 1))
        return
    # Beyond fault toleration, we must roll back this block.
    # First find all verifications actually in storage.
    prefix = f"BLOCK/{block_id}-l{current_level - 1}"
    good_verifications = set()
    for key in storage.list_objects(prefix):
        # Keys look like f"{prefix}-<verification-id>". Slice off the prefix
        # instead of regex-matching so regex-special characters in block_id
        # cannot break (or silently corrupt) the extraction.
        good_verifications.add(key[len(prefix) + 1:])
    # Now find all verifications the system thinks we have in redis
    redis_verifications_key = verifications_key(block_id, current_level - 1)
    all_verifications = redis.smembers_sync(redis_verifications_key)
    # NOTE(review): assumes smembers_sync yields the same (str) type as the
    # storage-derived ids so the set difference is meaningful — confirm.
    bad_verifications = all_verifications.difference(good_verifications)
    # Remove bad verifications recorded in redis that aren't in storage,
    # clear the error counter, and demote the block to the previous level
    p = redis.pipeline_sync()
    if bad_verifications:
        # SREM with zero members is a redis error; only issue it when needed
        p.srem(redis_verifications_key, *bad_verifications)
    p.delete(error_key)
    p.set(state_key(block_id), str(current_level - 1))
    p.execute()
def test_set_sync(self):
    """set_sync should forward key/value to the client with no expiry by default."""
    key = value = "banana"
    redis.set_sync(key, value)
    redis.redis_client.set.assert_called_once_with(key, value, ex=None)