def __init__(self, account_id, block_hash, work_block_hash, difficulty=None):
    """Initialize a work-request record for an account.

    block_hash may be falsy (no frontier known yet) and is then stored as
    None; work_block_hash is mandatory and always validated. A falsy
    difficulty falls back to the module-level WORK_DIFFICULTY default.
    The work attribute starts out unset (None) until a result arrives.
    """
    self.account_id = account_id
    self.block_hash = validate_block_hash(block_hash) if block_hash else None
    self.work_block_hash = validate_block_hash(work_block_hash)
    # Falsy difficulty -> default, then validate whichever value is used
    self.difficulty = validate_difficulty(difficulty or WORK_DIFFICULTY)
    self.work = None
async def service_handler(self, data):
    """Handle a work request from a registered service.

    Required keys in `data`: 'user', 'api_key', 'hash'. Optional keys:
    'account', 'difficulty', 'reward', 'timeout'. Returns a dict with the
    computed (or cached) 'work' and the 'hash' it belongs to.

    Raises InvalidRequest for malformed or unauthorized input,
    RequestTimeout when no work arrives in time, and RetryRequest when the
    pending future was cancelled without a result in the database.
    """
    if not {'hash', 'user', 'api_key'} <= data.keys():
        raise InvalidRequest("Incorrect submission. Required information: user, api_key, hash")
    service, api_key = data['user'], data['api_key']
    api_key = hash_key(api_key)

    # Verify API Key
    db_key = await self.database.hash_get(f"service:{service}", "api_key")
    if db_key is None:
        logger.info(f"Received request with non existing service {service}")
        raise InvalidRequest("Invalid credentials")
    elif not api_key == db_key:
        logger.info(f"Received request with non existing api key {api_key} for service {service}")
        raise InvalidRequest("Invalid credentials")

    # Per-service throttle so one service cannot monopolize the server
    async with self.service_throttlers[service]:
        block_hash = data['hash']
        account = data.get('account', None)
        difficulty = data.get('difficulty', None)
        reward = data.get("reward", True)
        if not isinstance(reward, bool):
            reward = True

        try:
            block_hash = nanolib.validate_block_hash(block_hash)
            if account:
                if not Validations.validate_address(account):
                    raise nanolib.InvalidAccount()
            if difficulty:
                nanolib.validate_difficulty(difficulty)
        except nanolib.InvalidBlockHash:
            raise InvalidRequest("Invalid hash")
        except nanolib.InvalidAccount:
            raise InvalidRequest("Invalid account")
        # NOTE(fix): nanolib.InvalidDifficulty derives from ValueError, so it
        # must be caught BEFORE ValueError; with the previous order the
        # "Difficulty too low" branch was unreachable.
        except nanolib.InvalidDifficulty:
            raise InvalidRequest("Difficulty too low")
        except ValueError:
            raise InvalidRequest("Invalid difficulty")

        if difficulty:
            difficulty_multiplier = nanolib.work.derive_work_multiplier(difficulty, base_difficulty='fffffe0000000000')
            # Clamp the requested difficulty to the server-wide maximum
            if difficulty_multiplier > BpowServer.MAX_DIFFICULTY_MULTIPLIER:
                difficulty = nanolib.work.derive_work_difficulty(BpowServer.MAX_DIFFICULTY_MULTIPLIER, base_difficulty='fffffe0000000000')
                difficulty_multiplier = BpowServer.MAX_DIFFICULTY_MULTIPLIER

        # Check if hash in redis db, if so return work
        work = await self.database.get(f"block:{block_hash}")
        if work is None:
            # Set reward
            await self.database.insert_expire(f"doreward:{block_hash}", "yes" if reward else "no", 500)
            # Set incomplete work
            await self.database.insert_expire(f"block:{block_hash}", BpowServer.WORK_PENDING, BpowServer.BLOCK_EXPIRY)
        # These services never receive rewards, regardless of the request
        if service in ["tixwallet", "bananovault", "natrium", "kalium"]:
            await self.database.insert_expire(f"doreward:{block_hash}", "no", 500)

        work_type = "ondemand"
        if work and work != BpowServer.WORK_PENDING:
            work_type = "precache"
            if difficulty:
                precached_multiplier = nanolib.work.derive_work_multiplier(hex(nanolib.work.get_work_value(block_hash, work))[2:], base_difficulty='fffffe0000000000')
                if precached_multiplier < difficulty_multiplier:
                    # Force ondemand since the precache difficulty is not close enough to requested difficulty
                    work_type = "ondemand"
                    await self.database.insert(f"block:{block_hash}", BpowServer.WORK_PENDING)
                    logger.debug(f"Forcing ondemand: precached {precached_multiplier} vs requested {difficulty_multiplier}")

        if work_type == "ondemand":
            # Set work type
            await self.database.insert_expire(f"work-type:{block_hash}", work_type, BpowServer.BLOCK_EXPIRY)
            if block_hash not in self.work_futures:
                # Create a Future to be set with work when complete
                self.work_futures[block_hash] = loop.create_future()
            # If account is not provided, service runs a risk of the next work not being precached for
            # There is still the possibility we recognize the need to precache based on the previous block
            if account:
                # Update account frontier
                asyncio.ensure_future(self.database.insert_expire(f"account:{account}", block_hash, BpowServer.ACCOUNT_EXPIRY))
            # Set difficulty in DB if provided
            if difficulty:
                await self.database.insert_expire(f"block-difficulty:{block_hash}", difficulty, BpowServer.DIFFICULTY_EXPIRY)
            # Base difficulty if not provided
            difficulty = difficulty or self.DEFAULT_WORK_DIFFICULTY
            # Ask for work on demand
            queue = await self.get_next_queue()
            await self.mqtt.send(f"work/ondemand/{queue}", f"{block_hash},{difficulty}", qos=QOS_0)

            timeout = data.get('timeout', 10)
            try:
                timeout = int(timeout)
                if timeout < 1 or timeout > 30:
                    # NOTE(fix): was a bare `raise` with no active exception
                    # (which surfaces as RuntimeError); raise explicitly.
                    raise ValueError("timeout out of range")
            except (TypeError, ValueError):
                raise InvalidRequest("Timeout must be an integer between 1 and 30")

            try:
                work = await asyncio.wait_for(self.work_futures[block_hash], timeout=timeout)
            except asyncio.CancelledError:
                # The future can be cancelled by a concurrent request's
                # cleanup; fall back to whatever landed in the database.
                logger.debug(f"Future was cancelled for {block_hash}")
                work = await self.database.get(f"block:{block_hash}")
                if not work:
                    logger.error("Future was cancelled and work result not set in database")
                    raise RetryRequest()
            except asyncio.TimeoutError:
                # NOTE(fix): logger.warn is a deprecated alias of logger.warning
                logger.warning(f"Timeout of {timeout} reached for {block_hash}")
                raise RequestTimeout()
            finally:
                # Best-effort cleanup of the pending future
                try:
                    future = self.work_futures.pop(block_hash)
                    future.cancel()
                except Exception:
                    pass
            # logger.info(f"Work received: {work}")
        else:
            # logger.info(f"Work in cache: {work}")
            pass

        # Increase the work type counter for this service
        asyncio.ensure_future(self.database.hash_increment(f"service:{service}", work_type))
        # Send stats update to services topic
        asyncio.ensure_future(self.mqtt.send(f"service/{service}", f"{block_hash},{work_type}", qos=QOS_0))

        response = {'work': work, 'hash': block_hash}
        logger.debug(f"Request handled for {service} -> {work_type}")
        return response
async def service_handler(self, data):
    """Handle a work request from a registered service.

    Required keys in `data`: 'user', 'api_key', 'hash'. Optional keys:
    'account', 'difficulty', 'multiplier', 'timeout'. A 'multiplier' is
    converted to a concrete difficulty. Returns {'work': ..., 'hash': ...}.

    Raises InvalidRequest for malformed or unauthorized input,
    RequestTimeout when no work arrives in time, and RetryRequest when the
    pending future was cancelled without a database result.
    """
    if not {'hash', 'user', 'api_key'} <= data.keys():
        raise InvalidRequest(
            "Incorrect submission. Required information: user, api_key, hash"
        )
    # pop() removes the key so the final info log of `data` does not leak it
    service, api_key = data['user'], data.pop('api_key')
    api_key = hash_key(api_key)

    # Verify API Key
    db_key = await self.database.hash_get(f"service:{service}", "api_key")
    if db_key is None:
        logger.info(f"Received request with non existing service {service}")
        raise InvalidRequest("Invalid credentials")
    elif not api_key == db_key:
        logger.info(f"Received request with non existing api key {api_key} for service {service}")
        raise InvalidRequest("Invalid credentials")

    # Per-service throttle so one service cannot monopolize the server
    async with self.service_throttlers[service]:
        block_hash = data['hash']
        account = data.get('account', None)
        difficulty = data.get('difficulty', None)
        multiplier = data.get('multiplier', None)

        if multiplier:
            try:
                multiplier = float(multiplier)
            # NOTE(fix): bare `except:` narrowed; float() raises TypeError or ValueError
            except (TypeError, ValueError):
                raise InvalidRequest("Multiplier must be a float")
            # A multiplier overrides any explicit difficulty
            difficulty = nanolib.work.derive_work_difficulty(multiplier, base_difficulty=self.base_difficulty)

        try:
            block_hash = nanolib.validate_block_hash(block_hash)
            if account:
                account = account.replace("xrb_", "nano_")
                nanolib.validate_account_id(account)
            if difficulty:
                nanolib.validate_difficulty(difficulty)
        except nanolib.InvalidBlockHash:
            raise InvalidRequest("Invalid hash")
        except nanolib.InvalidAccount:
            raise InvalidRequest("Invalid account")
        # InvalidDifficulty derives from ValueError, so it must come first
        except nanolib.InvalidDifficulty:
            raise InvalidRequest("Difficulty too low")
        except ValueError:
            raise InvalidRequest("Invalid difficulty")

        if difficulty:
            multiplier = nanolib.work.derive_work_multiplier(difficulty, base_difficulty=self.base_difficulty)
            if multiplier > config.max_multiplier:
                raise InvalidRequest(
                    f"Difficulty too high. Maximum: {nanolib.work.derive_work_difficulty(config.max_multiplier, base_difficulty=self.base_difficulty)} ( {config.max_multiplier} multiplier )"
                )
            elif multiplier < 1.0:
                raise InvalidRequest("Difficulty too low. Minimum: 1.0")

        # Check if hash in redis db, if so return work
        work = await self.database.get(f"block:{block_hash}")
        if work is None:
            # Set incomplete work
            await self.database.insert_expire(f"block:{block_hash}", DpowServer.WORK_PENDING, config.block_expiry)

        work_type = "ondemand"
        if work and work != DpowServer.WORK_PENDING:
            work_type = "precache"
            if difficulty:
                precached_difficulty = nanolib.work.get_work_value(block_hash, work, as_hex=True)
                precached_multiplier = nanolib.work.derive_work_multiplier(precached_difficulty, base_difficulty=self.base_difficulty)
                if precached_multiplier < DpowServer.FORCE_ONDEMAND_THRESHOLD * multiplier:
                    # Force ondemand since the precache difficulty is not close enough to requested difficulty
                    work_type = "ondemand"
                    await self.database.insert_expire(f"block:{block_hash}", DpowServer.WORK_PENDING, config.block_expiry)
                    logger.info(f"Forcing ondemand: precached {precached_multiplier} vs requested {multiplier}")
                else:
                    # Good enough: report the difficulty the cached work achieves
                    difficulty = precached_difficulty

        if work_type == "ondemand":
            # Set work type
            await self.database.insert_expire(f"work-type:{block_hash}", work_type, config.block_expiry)

            if block_hash not in self.work_futures:
                # If account is not provided, service runs a risk of the next work not being precached for
                # There is still the possibility we recognize the need to precache based on the previous block
                if account:
                    # Update account frontier
                    asyncio.ensure_future(self.database.insert_expire(f"account:{account}", block_hash, config.account_expiry))
                # Set difficulty in DB if provided
                if difficulty:
                    await self.database.insert_expire(f"block-difficulty:{block_hash}", difficulty, DpowServer.DIFFICULTY_EXPIRY)
                # Base difficulty if not provided
                difficulty = difficulty or self.base_difficulty
                # Create a Future to be set with work when complete
                self.work_futures[block_hash] = loop.create_future()
                # Ask for work on demand
                await self.mqtt.send(f"work/ondemand", f"{block_hash},{difficulty}", qos=QOS_0)

            timeout = data.get('timeout', 5)
            try:
                timeout = int(timeout)
                if timeout < 1 or timeout > 30:
                    # NOTE(fix): was a bare `raise` with no active exception
                    # (which surfaces as RuntimeError); raise explicitly.
                    raise ValueError("timeout out of range")
            except (TypeError, ValueError):
                raise InvalidRequest("Timeout must be an integer between 1 and 30")

            try:
                work = await asyncio.wait_for(self.work_futures[block_hash], timeout=timeout)
            except asyncio.CancelledError:
                # The future can be cancelled by a concurrent request's
                # cleanup; fall back to whatever landed in the database.
                logger.debug(f"Future was cancelled for {block_hash}")
                work = await self.database.get(f"block:{block_hash}")
                if not work:
                    logger.error("Future was cancelled and work result not set in database")
                    raise RetryRequest()
            except asyncio.TimeoutError:
                # NOTE(fix): logger.warn is a deprecated alias of logger.warning
                logger.warning(f"Timeout of {timeout} reached for {block_hash}")
                raise RequestTimeout()
            finally:
                # Best-effort cleanup of the pending future
                try:
                    future = self.work_futures.pop(block_hash)
                    future.cancel()
                except Exception:
                    pass
            # logger.info(f"Work received: {work}")
        else:
            # logger.info(f"Work in cache: {work}")
            pass

        # Increase the work type counter for this service
        asyncio.ensure_future(self.database.hash_increment(f"service:{service}", work_type))

        # Final work validation
        try:
            nanolib.validate_work(block_hash, work, difficulty=difficulty or self.base_difficulty)
        except nanolib.InvalidWork:
            db_difficulty = await self.database.get(f"block-difficulty:{block_hash}")
            logger.critical(
                f"Work could not be validated! Request difficulty {difficulty or self.base_difficulty} result difficulty {nanolib.work.get_work_value(block_hash, work, as_hex=True)} , hash {block_hash} work {work} type {work_type} DB difficulty {db_difficulty}"
            )

        response = {'work': work, 'hash': block_hash}
        logger.info(f"Request handled for {service} -> {work_type} : {data} : {work}")
        return response
def set_block_hash(self, block_hash):
    """Validate *block_hash* and store it on the instance.

    Propagates whatever validate_block_hash raises for an invalid hash.
    """
    validated = validate_block_hash(block_hash)
    self._block_hash = validated
def parse(self, val):
    """Return *val* unchanged if it is a valid block hash.

    Raises ValueError with a short message (the caller is expected to
    prefix the offending value) when validation fails.
    """
    try:
        validate_block_hash(val)
    except ValueError:
        raise ValueError("is not a valid block hash")
    return val
async def service_handler(self, data):
    """Handle a work request from a service, returning a response dict.

    Unlike the newer handlers, this version reports problems via an
    'error' key in the response instead of raising, echoes back a request
    'id' when one was supplied, and sets 'timeout': True when the wait for
    on-demand work timed out.
    """
    error = None
    # NOTE(fix): the boolean response flag used to share the name `timeout`
    # with the numeric wait timeout; a late unexpected exception could then
    # spuriously emit 'timeout': <int> in the response. Kept separate now.
    timed_out = False
    response = {}
    try:
        if {'hash', 'user', 'api_key'} <= data.keys():
            service, api_key = data['user'], data['api_key']
            api_key = hash_key(api_key)
            # Verify API Key
            db_key = await self.database.hash_get(f"service:{service}", "api_key")
            if db_key is None:
                logger.info(f"Received request with non existing service {service}")
                error = "Invalid credentials"
            elif not api_key == db_key:
                logger.info(f"Received request with non existing api key {api_key} for service {service}")
                error = "Invalid credentials"

            if not error:
                block_hash = data['hash']
                account = data.get('account', None)
                difficulty = data.get('difficulty', None)
                try:
                    block_hash = nanolib.validate_block_hash(block_hash)
                    if account:
                        account = account.replace("xrb_", "nano_")
                        nanolib.validate_account_id(account)
                    if difficulty:
                        nanolib.validate_difficulty(difficulty)
                except nanolib.InvalidBlockHash:
                    error = "Invalid hash"
                except nanolib.InvalidAccount:
                    error = "Invalid account"
                # NOTE(fix): nanolib.InvalidDifficulty derives from ValueError,
                # so it must be caught BEFORE ValueError; with the previous
                # order the "Difficulty too low" branch was unreachable.
                except nanolib.InvalidDifficulty:
                    error = "Difficulty too low"
                except ValueError:
                    error = "Invalid difficulty"

            if not error and difficulty:
                difficulty_multiplier = nanolib.work.derive_work_multiplier(difficulty)
                if difficulty_multiplier > DpowServer.MAX_DIFFICULTY_MULTIPLIER:
                    error = f"Difficulty too high. Maximum: {nanolib.work.derive_work_difficulty(DpowServer.MAX_DIFFICULTY_MULTIPLIER)} ( {DpowServer.MAX_DIFFICULTY_MULTIPLIER} multiplier )"

            if not error:
                # Check if hash in redis db, if so return work
                work = await self.database.get(f"block:{block_hash}")
                if work is None:
                    # Set incomplete work
                    await self.database.insert_expire(f"block:{block_hash}", DpowServer.WORK_PENDING, DpowServer.BLOCK_EXPIRY)

                work_type = "ondemand"
                if work and work != DpowServer.WORK_PENDING:
                    work_type = "precache"
                    if difficulty:
                        precached_multiplier = nanolib.work.derive_work_multiplier(hex(nanolib.work.get_work_value(block_hash, work))[2:])
                        if precached_multiplier < DpowServer.FORCE_ONDEMAND_THRESHOLD * difficulty_multiplier:
                            # Force ondemand since the precache difficulty is not close enough to requested difficulty
                            work_type = "ondemand"
                            await self.database.insert(f"block:{block_hash}", DpowServer.WORK_PENDING)
                            # NOTE(fix): logger.warn is a deprecated alias of logger.warning
                            logger.warning(f"Forcing ondemand: precached {precached_multiplier} vs requested {difficulty_multiplier}")

                if work_type == "ondemand":
                    # If account is not provided, service runs a risk of the next work not being precached for
                    # There is still the possibility we recognize the need to precache based on the previous block
                    if account:
                        # Update account frontier
                        asyncio.ensure_future(self.database.insert_expire(f"account:{account}", block_hash, DpowServer.ACCOUNT_EXPIRY))
                    # Create a Future to be set with work when complete
                    self.work_futures[block_hash] = loop.create_future()
                    # Set difficulty in DB if provided
                    if difficulty:
                        await self.database.insert_expire(f"block-difficulty:{block_hash}", difficulty, DpowServer.DIFFICULTY_EXPIRY)
                    # Base difficulty if not provided
                    difficulty = difficulty or nanolib.work.WORK_DIFFICULTY
                    # Ask for work on demand
                    await self.mqtt.send(f"work/ondemand", f"{block_hash},{difficulty}", qos=QOS_0)
                    # Wait on the work for some time
                    wait_timeout = max(int(data.get('timeout', 5)), 1)
                    try:
                        work = await asyncio.wait_for(self.work_futures[block_hash], timeout=wait_timeout)
                    except asyncio.TimeoutError:
                        logger.warning(f"Timeout of {wait_timeout} reached for {block_hash}")
                        error = "Timeout reached without work"
                        timed_out = True
                    finally:
                        # Best-effort cleanup of the pending future
                        try:
                            future = self.work_futures.pop(block_hash)
                            future.cancel()
                        except Exception:
                            pass
                    # logger.info(f"Work received: {work}")
                else:
                    # logger.info(f"Work in cache: {work}")
                    pass

                # Increase the work type counter for this service
                asyncio.ensure_future(self.database.hash_increment(f"service:{service}", work_type))
        else:
            error = "Incorrect submission. Required information: user, api_key, hash"
    except Exception as e:
        # Last-resort handler: never let an unexpected error escape without
        # a response the service can report back.
        logger.critical(f"Unknown exception: {e}")
        if not error:
            error = f"Unknown error, please report the following timestamp to the maintainers: {datetime.datetime.now()}"

    if 'id' in data:
        response['id'] = data['id']
    if error:
        response['error'] = error
        if timed_out:
            response['timeout'] = timed_out
    else:
        response['work'] = work
    return response