async def client_work_handler(self, topic, block_hash, work, client):
    """Handle a work result submitted by a client over MQTT.

    Validates the submitted work, takes a short-lived DB lock so only the
    first valid result wins, cancels outstanding work on other clients,
    stores the result, resolves any in-memory future, and finally credits
    the submitting client (unless rewards are disabled for this block).
    """
    try:
        # Check if work is needed
        # - Block is removed from DB once account frontier that contained it is updated
        # - Block corresponding value is WORK_PENDING if work is pending
        available = await self.database.get(f"block:{block_hash}")
        if not available or available != BpowServer.WORK_PENDING:
            return
        work_type = await self.database.get(f"work-type:{block_hash}")
        if not work_type:
            work_type = "precache"  # expired ?
        difficulty = await self.database.get(f"block-difficulty:{block_hash}")
        try:
            nanolib.validate_work(block_hash, work, difficulty=difficulty or self.DEFAULT_WORK_DIFFICULTY)
        except nanolib.InvalidWork:
            # logger.debug(f"Client {client} provided invalid work {work} for {block_hash}")
            return
        except Exception as e:
            # BUG FIX: was `except (nanolib.InvalidWork, Exception)`, which is just a
            # silent `except Exception` — unexpected validation failures are now logged.
            logger.error(f"Unknown error when setting work future: {e}")
            return

        # Used as a lock - if value already existed, then some other client finished before
        if not await self.database.insert_if_noexist_expire(f"block-lock:{block_hash}", '1', 5):
            return

        # As we've got work now send cancel command to clients
        asyncio.ensure_future(self.mqtt.send(f"cancel/{work_type}", block_hash, qos=QOS_1))
        logger.debug(f"CANCEL: {work_type}/{block_hash}")

        # Set work result in DB
        await self.database.insert_expire(f"block:{block_hash}", work, BpowServer.BLOCK_EXPIRY)

        # Set Future result if in memory
        try:
            resulting_work = self.work_futures[block_hash]
            if not resulting_work.done():
                resulting_work.set_result(work)
        except KeyError:
            pass
        except Exception as e:
            logger.error(f"Unknown error when setting work future: {e}")
    except Exception as e:
        logger.error(f"Unknown error when handling block {block_hash} - {e}")
        # BUG FIX: without this return, execution continued into the reward path
        # with `work_type` and `difficulty` potentially unbound -> NameError.
        return

    if not Validations.validate_address(client):
        await self.mqtt.send(f"client/{client}", ujson.dumps({"error": f"Work accepted but account {client} is invalid"}))
        return

    # Account information and DB update
    doreward = await self.database.get(f"doreward:{block_hash}")
    if doreward != "no":
        await asyncio.gather(
            self.client_update(client, work_type, block_hash, difficulty or self.DEFAULT_WORK_DIFFICULTY),
            self.database.increment(f"stats:{work_type}"),
            self.database.set_add("clients", client)
        )
def solved(self): if not self.work: return False try: validate_work(block_hash=self.work_block_hash, work=self.work, difficulty=self.difficulty) return True except InvalidDifficulty: return False
async def rpc(request): requestjson = await request.json() log.server_logger.info(f"Received request {str(requestjson)}") if 'action' not in requestjson or requestjson['action'] != 'work_generate': return web.HTTPBadRequest(reason='invalid action') elif 'hash' not in requestjson: return web.HTTPBadRequest(reason='Missing hash in request') difficulty = requestjson[ 'difficulty'] if 'difficulty' in requestjson else None reward = requestjson['reward'] if 'reward' in requestjson else True # See if work is in cache try: work = await request.app['redis'].get( f"{requestjson['hash']}:{difficulty}" if difficulty is not None else requestjson['hash']) if work is not None: # Validate test_difficulty = difficulty if difficulty is not None else DPOWClient.NANO_DIFFICULTY_CONST if BPOW_FOR_NANO else 'fffffe0000000000' try: nanolib.validate_work(requestjson['hash'], work, difficulty=test_difficulty) return web.json_response({"work": work}) except nanolib.InvalidWork: pass except Exception: pass # Not in cache, request it from peers try: request.app['busy'] = True # Halts the precaching process respjson = await work_generate(requestjson['hash'], request.app, difficulty=difficulty, reward=reward) if respjson is None: request.app['busy'] = False return web.HTTPInternalServerError(reason="Couldn't generate work") request.app['busy'] = False return web.json_response(respjson) except Exception as e: request.app['busy'] = False log.server_logger.exception(e) return web.HTTPInternalServerError(reason=str(sys.exc_info()))
def attach_precomputed_work(self, block): """ Add the precomputed work to a new block if it is available and valid. In either case, discard the precomputed work afterwards. """ if self.precomputed_work: try: validate_work(block_hash=block.work_block_hash, work=self.precomputed_work.work, difficulty=self.precomputed_work.difficulty) is_valid_work = True except InvalidWork: is_valid_work = False if is_valid_work: block.difficulty = self.precomputed_work.difficulty block.work = self.precomputed_work.work else: logger.warning("Invalid precomputed work found, discarding.") self.precomputed_work = None
async def service_handler(self, data):
    """Handle a work request from a registered service.

    Verifies the service API key, validates the request fields, serves
    precached work when its difficulty is close enough, otherwise requests
    on-demand work from clients over MQTT and waits for a result.

    data: dict with required keys 'hash', 'user', 'api_key' and optional
          'account', 'difficulty', 'multiplier', 'timeout'.
    Returns {'work': ..., 'hash': ...}.
    Raises InvalidRequest, RetryRequest, RequestTimeout.
    """
    if not {'hash', 'user', 'api_key'} <= data.keys():
        raise InvalidRequest("Incorrect submission. Required information: user, api_key, hash")
    service, api_key = data['user'], data.pop('api_key')
    api_key = hash_key(api_key)

    # Verify API Key
    db_key = await self.database.hash_get(f"service:{service}", "api_key")
    if db_key is None:
        logger.info(f"Received request with non existing service {service}")
        raise InvalidRequest("Invalid credentials")
    elif not api_key == db_key:
        logger.info(f"Received request with non existing api key {api_key} for service {service}")
        raise InvalidRequest("Invalid credentials")

    async with self.service_throttlers[service]:
        block_hash = data['hash']
        account = data.get('account', None)
        difficulty = data.get('difficulty', None)
        multiplier = data.get('multiplier', None)

        if multiplier:
            try:
                multiplier = float(multiplier)
            except (TypeError, ValueError):
                # BUG FIX: was a bare `except:`; only conversion failures are a client error
                raise InvalidRequest("Multiplier must be a float")
            difficulty = nanolib.work.derive_work_difficulty(multiplier, base_difficulty=self.base_difficulty)

        try:
            block_hash = nanolib.validate_block_hash(block_hash)
            if account:
                account = account.replace("xrb_", "nano_")
                nanolib.validate_account_id(account)
            if difficulty:
                nanolib.validate_difficulty(difficulty)
        except nanolib.InvalidBlockHash:
            raise InvalidRequest("Invalid hash")
        except nanolib.InvalidAccount:
            raise InvalidRequest("Invalid account")
        except nanolib.InvalidDifficulty:
            raise InvalidRequest("Difficulty too low")
        except ValueError:
            raise InvalidRequest("Invalid difficulty")

        if difficulty:
            multiplier = nanolib.work.derive_work_multiplier(difficulty, base_difficulty=self.base_difficulty)
            if multiplier > config.max_multiplier:
                raise InvalidRequest(
                    f"Difficulty too high. Maximum: {nanolib.work.derive_work_difficulty(config.max_multiplier, base_difficulty=self.base_difficulty)} ( {config.max_multiplier} multiplier )"
                )
            elif multiplier < 1.0:
                raise InvalidRequest("Difficulty too low. Minimum: 1.0")

        # Check if hash in redis db, if so return work
        work = await self.database.get(f"block:{block_hash}")
        if work is None:
            # Set incomplete work
            await self.database.insert_expire(f"block:{block_hash}", DpowServer.WORK_PENDING, config.block_expiry)

        work_type = "ondemand"
        if work and work != DpowServer.WORK_PENDING:
            work_type = "precache"
            if difficulty:
                precached_difficulty = nanolib.work.get_work_value(block_hash, work, as_hex=True)
                precached_multiplier = nanolib.work.derive_work_multiplier(precached_difficulty, base_difficulty=self.base_difficulty)
                if precached_multiplier < DpowServer.FORCE_ONDEMAND_THRESHOLD * multiplier:
                    # Force ondemand since the precache difficulty is not close enough to requested difficulty
                    work_type = "ondemand"
                    await self.database.insert_expire(f"block:{block_hash}", DpowServer.WORK_PENDING, config.block_expiry)
                    logger.info(f"Forcing ondemand: precached {precached_multiplier} vs requested {multiplier}")
                else:
                    difficulty = precached_difficulty

        if work_type == "ondemand":
            # Set work type
            await self.database.insert_expire(f"work-type:{block_hash}", work_type, config.block_expiry)

            if block_hash not in self.work_futures:
                # If account is not provided, service runs a risk of the next work not being precached for
                # There is still the possibility we recognize the need to precache based on the previous block
                if account:
                    # Update account frontier
                    asyncio.ensure_future(self.database.insert_expire(f"account:{account}", block_hash, config.account_expiry))
                # Set difficulty in DB if provided
                if difficulty:
                    await self.database.insert_expire(f"block-difficulty:{block_hash}", difficulty, DpowServer.DIFFICULTY_EXPIRY)
                # Base difficulty if not provided
                difficulty = difficulty or self.base_difficulty
                # Create a Future to be set with work when complete
                self.work_futures[block_hash] = loop.create_future()
                # Ask for work on demand
                await self.mqtt.send("work/ondemand", f"{block_hash},{difficulty}", qos=QOS_0)

            timeout = data.get('timeout', 5)
            # BUG FIX: previously an out-of-range timeout triggered a bare
            # `raise` (a RuntimeError) swallowed by another bare `except:`;
            # validate explicitly instead.
            try:
                timeout = int(timeout)
            except (TypeError, ValueError):
                raise InvalidRequest("Timeout must be an integer between 1 and 30")
            if timeout < 1 or timeout > 30:
                raise InvalidRequest("Timeout must be an integer between 1 and 30")

            try:
                work = await asyncio.wait_for(self.work_futures[block_hash], timeout=timeout)
            except asyncio.CancelledError:
                logger.debug(f"Future was cancelled for {block_hash}")
                work = await self.database.get(f"block:{block_hash}")
                if not work:
                    logger.error("Future was cancelled and work result not set in database")
                    raise RetryRequest()
            except asyncio.TimeoutError:
                # BUG FIX: logger.warn is deprecated in favor of logger.warning
                logger.warning(f"Timeout of {timeout} reached for {block_hash}")
                raise RequestTimeout()
            finally:
                # Always drop and cancel the future so stale entries never linger
                try:
                    future = self.work_futures.pop(block_hash)
                    future.cancel()
                except Exception:
                    pass
            # logger.info(f"Work received: {work}")
        else:
            # logger.info(f"Work in cache: {work}")
            pass

        # Increase the work type counter for this service
        asyncio.ensure_future(self.database.hash_increment(f"service:{service}", work_type))

        # Final work validation
        try:
            nanolib.validate_work(block_hash, work, difficulty=difficulty or self.base_difficulty)
        except nanolib.InvalidWork:
            db_difficulty = await self.database.get(f"block-difficulty:{block_hash}")
            logger.critical(
                f"Work could not be validated! Request difficulty {difficulty or self.base_difficulty} result difficulty {nanolib.work.get_work_value(block_hash, work, as_hex=True)} , hash {block_hash} work {work} type {work_type} DB difficulty {db_difficulty}"
            )

        response = {'work': work, 'hash': block_hash}
        logger.info(f"Request handled for {service} -> {work_type} : {data} : {work}")
        return response
async def client_handler(self, topic, content):
    """Handle a work result published by a client.

    Parses the CSV payload "block,work,client", validates the work, takes a
    short-lived DB lock so only the first valid submission wins, records the
    result, resolves any waiting future, cancels other clients, and credits
    the submitting account.
    """
    try:
        # Content is expected as CSV block,work,client
        block_hash, work, client = content.split(',')
        # logger.info(f"Message {block_hash} {work} {client}")
    except Exception:
        # logger.warn(f"Could not parse message: {e}")
        return

    # Check if work is needed
    # - Block is removed from DB once account frontier that contained it is updated
    # - Block corresponding value is WORK_PENDING if work is pending
    available = await self.database.get(f"block:{block_hash}")
    if not available or available != DpowServer.WORK_PENDING:
        return

    work_type = await self.database.get(f"work-type:{block_hash}")
    if not work_type:
        work_type = "precache"  # expired ?
    difficulty = await self.database.get(f"block-difficulty:{block_hash}")
    try:
        nanolib.validate_work(block_hash, work, difficulty=difficulty or self.base_difficulty)
    except nanolib.InvalidWork:
        # logger.debug(f"Client {client} provided invalid work {work} for {block_hash}")
        return
    except Exception:
        # BUG FIX: was a bare `except:`, which also trapped SystemExit,
        # KeyboardInterrupt and asyncio.CancelledError; those now propagate.
        return

    # Used as a lock - if value already existed, then some other client finished before
    if not await self.database.insert_if_noexist_expire(f"block-lock:{block_hash}", '1', 5):
        return

    # Set work result in DB
    await self.database.insert_expire(f"block:{block_hash}", work, config.block_expiry)

    # Set Future result if in memory
    try:
        resulting_work = self.work_futures[block_hash]
        if not resulting_work.done():
            resulting_work.set_result(work)
    except KeyError:
        pass
    except Exception as e:
        logger.error(f"Unknown error when setting work future: {e}")

    # As we've got work now send cancel command to clients and do a stats update
    await self.mqtt.send(f"cancel/{work_type}", block_hash, qos=QOS_0)

    try:
        nanolib.validate_account_id(client)
    except nanolib.InvalidAccount:
        await self.mqtt.send(f"client/{client}", ujson.dumps({"error": f"Work accepted but account {client} is invalid"}))
        return

    # Account information and DB update
    await asyncio.gather(
        self.client_update(client, work_type, block_hash),
        self.database.increment(f"stats:{work_type}"),
        self.database.set_add("clients", client)
    )
async def client_handler(self, topic, content): try: # Content is expected as CSV block,work,client block_hash, work, client = content.split(',') # We expect result/{work_type} as topic work_type = topic.split('/')[-1] if work_type not in ('precache', 'ondemand'): logger.error( f"Unexpected topic {topic} -> Extracted work_type {work_type}" ) return # logger.info(f"Message {block_hash} {work} {client}") except Exception as e: # logger.warn(f"Could not parse message: {e}") return # Check if work is needed # - Block is removed from DB once account frontier that contained it is updated # - Block corresponding value is WORK_PENDING if work is pending available = await self.database.get(f"block:{block_hash}") if not available or available != DpowServer.WORK_PENDING: return difficulty = await self.database.get(f"block-difficulty:{block_hash}") try: nanolib.validate_work(block_hash, work, difficulty=difficulty or nanolib.work.WORK_DIFFICULTY) except nanolib.InvalidWork: # logger.debug(f"Client {client} provided invalid work {work} for {block_hash}") return # Used as a lock - if value already existed, then some other client finished before if not await self.database.insert_if_noexist_expire( f"block-lock:{block_hash}", '1', 5): return # Set Future result if in memory try: resulting_work = self.work_futures[block_hash] if not resulting_work.done(): resulting_work.set_result(work) except KeyError: pass except Exception as e: logger.error(f"Unknown error when setting work future: {e}") # Account information and DB update await asyncio.gather( self.client_update(client, work_type, block_hash), self.database.insert_expire(f"block:{block_hash}", work, DpowServer.BLOCK_EXPIRY)) # As we've got work now send cancel command to clients and do a stats update # No need to wait on this here await asyncio.gather( self.mqtt.send(f"cancel/{work_type}", block_hash, qos=QOS_1), self.database.increment(f"stats:{work_type}"), self.database.set_add(f"clients", client))