def dispatch_worker(self, contract_address, execution_id, commitments):
    """ Dispatch a worker thread to perform the execution and proof generation.
    Args:
        contract_address: string, contract address.
        execution_id: int, the execution id identifying a unique execution of this contract.
        commitments: commitments released by the given contract.
    """
    if contract_address not in self.worker_pool:
        self.worker_pool[contract_address] = {}
    if execution_id not in self.worker_pool[contract_address] or \
            self.worker_pool[contract_address][execution_id].ready():
        worker_thread = self.create_worker(contract_address, execution_id, commitments,
                                           self.execution_queue)
        self.worker_pool[contract_address][execution_id] = worker_thread
        worker_thread.start()
        if self.debug:
            LogUtils.info("Started the worker thread for contract@" + contract_address +
                          " with execution_id: " + str(execution_id))
    else:
        LogUtils.error("Cannot create more than one worker for the same contract and the same "
                       "execution id at the same time.")
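# Illustrative sketch (not part of the executor): worker_pool is a nested dict keyed
# first by contract address and then by execution id, and a dispatch is only accepted
# when no worker exists for that pair or the existing worker has already finished.
# `_DummyWorker` is a hypothetical stand-in for the real worker thread class.
class _DummyWorker:
    def ready(self):
        return True  # the real worker reports True once execution and proof generation finish

worker_pool = {'0xabc': {0: _DummyWorker()}}
contract, execution_id = '0xabc', 0
can_dispatch = (execution_id not in worker_pool.get(contract, {})
                or worker_pool[contract][execution_id].ready())
print(can_dispatch)  # True, because the previous worker for this pair is done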
def wait_for_verify_and_settle_event(self, contract_id, execution_id):
    """ Wait for the verifyAndSettle event from the contract and return the result.
    Args:
        contract_id: string, the unique identifier for the contract; it serves as the key anchor
            for the proving key and contract within the proving key path and contract path.
        execution_id: int, the identity number for a specific execution of the same contract.
    Returns:
        Boolean, True if verification succeeds, False otherwise.
    """
    retry_times = 3
    while retry_times > 0:
        try:
            self.__chain_interface.wait_for_verify_and_settle_event(
                self.verify_and_settle_event, execution_id,
                self._put_result_into_queue, self)
            break
        except Exception:
            retry_times -= 1
            LogUtils.error('Failed to wait for settle event, %d retry(ies) remaining' % retry_times)
            if retry_times > 0:
                traceback.print_exc()
                LogUtils.info('Retrying after 5 seconds')
                sleep(5)
            else:
                raise
    return self.__verification_result.get()
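# Minimal sketch of the same retry-with-fixed-delay pattern, factored into a reusable
# helper. `wait_fn`, the retry count, and the delay are illustrative assumptions; the
# method above inlines this logic for the settle event only.
import time
import traceback

def _retry_blocking_call(wait_fn, retries=3, delay_seconds=5):
    while True:
        try:
            return wait_fn()
        except Exception:
            retries -= 1
            if retries <= 0:
                raise
            traceback.print_exc()
            time.sleep(delay_seconds)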
def _clean_worker_pool(self, contract_address):
    cleaned_worker_cnt = 0
    if contract_address in self.worker_pool:
        for execution_id in list(self.worker_pool[contract_address].keys()):
            if not self.worker_pool[contract_address][execution_id].ready():
                LogUtils.error("Error, worker thread is still alive for contract@" +
                               contract_address + ", execution_id: " + str(execution_id) +
                               ". Force killed")
                self.worker_pool[contract_address][execution_id].stop()
                self.worker_pool[contract_address][execution_id].join()
            cleaned_worker_cnt += 1
            del self.worker_pool[contract_address][execution_id]
        del self.worker_pool[contract_address]
    if self.debug:
        LogUtils.info('Cleaned ' + str(cleaned_worker_cnt) + ' worker tasks for @' +
                      contract_address)
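# The worker objects above expose ready() / stop() / join(); the real worker class is not
# shown in this file. A minimal sketch of one way to get those semantics with a cooperative
# stop flag on top of threading.Thread (names and behavior are assumptions for illustration):
import threading

class StoppableWorker(threading.Thread):
    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()
        self._finished = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            # ... perform one unit of execution / proof work, then break when done ...
            break
        self._finished.set()

    def ready(self):
        # True once the worker has finished its task.
        return self._finished.is_set()

    def stop(self):
        # Ask the worker to exit at the next check of the stop flag.
        self._stop_event.set()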
def _get_options_from_config_file(config_path):
    parsed_options = {}
    with open(config_path) as f:
        config_lines = f.readlines()
    for line in config_lines:
        config_fields = line.rstrip().split('=')
        if len(config_fields) != 2:
            LogUtils.error("Invalid config item: " + line)
            exit(0)
        if config_fields[0] in ('listener_poll_interval', 'service_port'):
            parsed_options[config_fields[0]] = int(config_fields[1])
        elif config_fields[0] in ('use_existing_data', 'debug_mode'):
            parsed_options[config_fields[0]] = config_fields[1] in ['true', 'True', '1']
        else:
            parsed_options[config_fields[0]] = config_fields[1]
    return parsed_options
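# A hypothetical config file in the key=value format parsed above. The keys are the ones
# consumed elsewhere in this module; every value below is a placeholder, not a real
# deployment setting.
SAMPLE_CONFIG = """\
service_port=8080
listener_poll_interval=2
chain_provider_type=http
http_uri=http://127.0.0.1:8545
local_proving_key_path=/path/to/proving_keys
local_code_path=/path/to/code
local_working_path=/path/to/workdir
local_abi_path=/path/to/abi
zokrates_binary_path=/path/to/zokrates
account_private_key=0x0
account_public_key=0x0
encryption_type=rsa
rsa_key_path=/path/to/rsa_key
use_existing_data=false
debug_mode=true
"""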
def _generate_options_from_commandline_args(self, input_args):
    """ Generate the options based on the command-line args.
    Args:
        input_args: args of ArgumentParser.
    """
    config_options = {}
    if input_args.config_file_path is not None and input_args.config_file_path != '':
        config_file = Path(input_args.config_file_path)
        if not config_file.is_file():
            LogUtils.error("Could not find provided config file: " + input_args.config_file_path)
            exit(0)
        config_options = self._get_options_from_config_file(input_args.config_file_path)
    # Command-line args override config file values; defaults fill in whatever is still missing.
    for arg in vars(input_args):
        if getattr(input_args, arg) is not None:
            config_options[arg] = getattr(input_args, arg)
    for key, value in self.DEFAULT_OPTIONS.items():
        if key not in config_options or config_options[key] is None:
            config_options[key] = value
    self.options['service_port'] = config_options['service_port']
    self.options['proving_key_path'] = config_options['local_proving_key_path']
    self.options['code_path'] = config_options['local_code_path']
    self.options['working_path'] = config_options['local_working_path']
    self.options['zokrates_path'] = config_options['zokrates_binary_path']
    self.options['abi_path'] = config_options['local_abi_path']
    self.options['chain_config'] = {
        'provider_type': config_options['chain_provider_type'],
        'abi_path': config_options['local_abi_path'],
        'private_key': config_options['account_private_key'],
        'public_key': config_options['account_public_key'],
        'default_account': config_options['account_public_key']
    }
    self.options['encryption_info'] = {
        'type': config_options['encryption_type'],
        'rsa_key': config_options['rsa_key_path']
    }
    self.options['poll_interval'] = config_options['listener_poll_interval']
    self.options['use_existing_data'] = config_options['use_existing_data']
    self.options['debug_mode'] = config_options['debug_mode']
    if config_options['chain_provider_type'] == 'http':
        self.options['chain_config']['http_uri'] = config_options['http_uri']
    elif config_options['chain_provider_type'] == 'ipc':
        self.options['chain_config']['ipc_path'] = config_options['ipc_path']
    elif config_options['chain_provider_type'] == 'websocket':
        self.options['chain_config']['websocket_uri'] = config_options['websocket_uri']
    else:
        raise Exception("Unsupported chain provider type: " +
                        config_options['chain_provider_type'])
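# Minimal sketch of the option-merging order used above: config file values first, then
# non-None command-line arguments, then defaults for anything still unset. All names and
# values below are hypothetical.
from types import SimpleNamespace

file_options = {'service_port': 8080, 'debug_mode': False}
cli_args = SimpleNamespace(service_port=9090, debug_mode=None)
defaults = {'service_port': 80, 'debug_mode': True, 'listener_poll_interval': 2}

merged = dict(file_options)
for name in vars(cli_args):
    if getattr(cli_args, name) is not None:
        merged[name] = getattr(cli_args, name)
for key, value in defaults.items():
    if merged.get(key) is None:
        merged[key] = value
# merged == {'service_port': 9090, 'debug_mode': False, 'listener_poll_interval': 2}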
def update_worker_status(self, contract_address, status, info=None):
    if status == TaskStatus.REGISTERING:
        if contract_address in self.__task_status and \
                self.__task_status[contract_address]['status'] != TaskStatus.UNREGISTERED:
            LogUtils.error("Cannot register an already registered contract@" + contract_address)
            return
    else:
        if contract_address not in self.__task_status:
            LogUtils.error("Cannot update status:" + TaskStatus.get_status_info(status) +
                           " for unregistered contract@" + contract_address)
            return
    if status == TaskStatus.REGISTERING:
        self.__task_status[contract_address] = {
            'status': status,
            'finished_task': 0,
            'successful_task': 0,
            'progress': 0.0,
            'failed_tasks': {},
            'info': ''
        }
    elif status == TaskStatus.LISTENING:
        self.__task_status[contract_address]['status'] = status
    elif status == TaskStatus.EXECUTING:
        assert self.__task_status[contract_address]['status'] in {
            TaskStatus.LISTENING, TaskStatus.EXECUTING
        }
        self.__task_status[contract_address]['status'] = status
        progress = \
            1.0 * self.registered_contract_verification_result_count[contract_address] / \
            self.registered_contracts_execution_count[contract_address]
        self.__task_status[contract_address]['progress'] = progress
        for failed_execution_id, failure_info in \
                self.registered_contract_verification_failed_result_count[contract_address].items():
            if failed_execution_id not in self.__task_status[contract_address]['failed_tasks']:
                self.__task_status[contract_address]['failed_tasks'][failed_execution_id] = failure_info
    elif status == TaskStatus.FINISHED:
        self.__task_status[contract_address]['finished_task'] += 1
        if not self.registered_contract_verification_failed_result_count[contract_address]:
            self.__task_status[contract_address]['successful_task'] += 1
        self.__task_status[contract_address]['progress'] = 0.0
        if contract_address in self.listener_pool and \
                not self.listener_pool[contract_address].ready():
            # Go back to the listening status if the listener is still up after the task is
            # finished, but keep the finished task count incremented.
            self.__task_status[contract_address]['status'] = TaskStatus.LISTENING
        else:
            self.__task_status[contract_address]['status'] = status
    elif status == TaskStatus.UNREGISTERING:
        self.__task_status[contract_address]['status'] = status
    elif status == TaskStatus.UNREGISTERED:
        self.__task_status[contract_address]['status'] = status
    elif status == TaskStatus.FAILED_TO_REGISTER:
        self.__task_status[contract_address]['status'] = status
        if info is not None:
            self.__task_status[contract_address]['info'] += (info + '; ')
    else:
        raise Exception("Unknown TaskStatus:" + str(status))
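# Lifecycle sketch implied by the branches above (a hedged reconstruction; the real
# TaskStatus class is defined elsewhere):
#   REGISTERING -> LISTENING -> EXECUTING -> FINISHED -> (LISTENING again while the
#   listener stays up, or UNREGISTERING -> UNREGISTERED); FAILED_TO_REGISTER marks a
#   failed setup.
# Progress while EXECUTING is simply verified results over expected executions:
results_received = 3          # hypothetical counts for illustration
expected_executions = 8
progress = 1.0 * results_received / expected_executions   # 0.375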
def run(self):
    """ Listen on all the registered contracts' commitment opening events. If an event is
    triggered, dispatch an ExecutorWorker thread to finish the proof.
    """
    while not self.should_exit:
        if not self.event_queue.empty():
            # Check the listener setup status first.
            contract_address, commitments, status, debug_msg = self.handle_event_from_queue()
            if status == EventListenerStatus.SETUP_SUCCEEDED:
                if self.debug:
                    LogUtils.info("Status update for contract@" + contract_address + ": " +
                                  EventListenerStatus.STATUS_EXPLANATION[status])
                self.update_worker_status(contract_address, TaskStatus.LISTENING)
            elif status == EventListenerStatus.SETUP_FAILED:
                if self.debug:
                    LogUtils.error("Status update for contract@" + contract_address + ": " +
                                   EventListenerStatus.STATUS_EXPLANATION[status])
                self.update_worker_status(contract_address, TaskStatus.FAILED_TO_REGISTER,
                                          debug_msg)
            elif commitments is not None:
                if self.debug:
                    LogUtils.info("Received commitment with length [" + str(len(commitments)) +
                                  "] from contract@" + contract_address)
                single_execution_commitment_size = \
                    self._get_single_execution_commitment_size(contract_address)
                if self.debug:
                    LogUtils.info("single_execution_commitment_size for contract:" +
                                  contract_address + " is [" +
                                  str(single_execution_commitment_size) + "]")
                if single_execution_commitment_size is None:
                    LogUtils.error("No single_execution_commitment_size found!")
                    self.update_worker_status(contract_address, TaskStatus.FINISHED)
                else:
                    single_execution_commitment_length = \
                        single_execution_commitment_size * ExecutorConstants.ENCRYPTED_DATA_SIZE
                    if len(commitments) % single_execution_commitment_length != 0:
                        LogUtils.error("Invalid commitment length, cannot be divided by the "
                                       "single execution commitment size")
                        self.update_worker_status(contract_address, TaskStatus.FINISHED)
                    else:
                        execution_num = int(len(commitments) / single_execution_commitment_length)
                        self.registered_contracts_execution_count[contract_address] = execution_num
                        self.update_worker_status(contract_address, TaskStatus.EXECUTING)
                        for execution_id in range(execution_num):
                            single_execution_commitments = \
                                commitments[execution_id * single_execution_commitment_length:
                                            (execution_id + 1) * single_execution_commitment_length]
                            if self.debug:
                                LogUtils.info("Execute commitment[" +
                                              str(single_execution_commitments) +
                                              "] from contract@" + contract_address)
                            self.dispatch_worker(contract_address, execution_id,
                                                 single_execution_commitments)
                            # Wait for 10 seconds before starting another worker thread for a
                            # new task, to avoid overloading the machine.
                            time.sleep(10)
        # Check the result queue.
        if not self.execution_queue.empty():
            execution_result = self.execution_queue.get()
            if self.debug:
                LogUtils.info('Received execution result ' + str(execution_result))
            contract_address = execution_result['contract_address']
            self.registered_contract_verification_result_count[contract_address] += 1
            # If the execution failed, record the failure reason so it can be reported.
            if execution_result['execution_result'] != ExecutionResult.SUCCESS:
                execution_id = execution_result['execution_id']
                self.registered_contract_verification_failed_result_count[contract_address][execution_id] = \
                    str(execution_result['execution_result'])
                if execution_result['debug_msg'] is not None:
                    self.registered_contract_verification_failed_result_count[contract_address][execution_id] += \
                        ' (' + execution_result['debug_msg'] + ')'
            # Always update the worker status to EXECUTING after receiving each execution id's result.
            self.update_worker_status(contract_address, TaskStatus.EXECUTING)
            if self.registered_contract_verification_result_count[contract_address] == \
                    self.registered_contracts_execution_count[contract_address]:
                self.update_worker_status(contract_address, TaskStatus.FINISHED)
                # Once finished, clean up all the execution counters.
                self.registered_contract_verification_result_count[contract_address] = 0
                self.registered_contract_verification_failed_result_count[contract_address].clear()
                self.registered_contracts_execution_count[contract_address] = 0
                if self.debug:
                    LogUtils.info('Start to clean worker pool for @' + contract_address)
                self._clean_worker_pool(contract_address)
        time.sleep(1)
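# Worked sketch of the commitment slicing in run() above. The numbers are hypothetical;
# ENCRYPTED_DATA_SIZE comes from ExecutorConstants in the real code.
ENCRYPTED_DATA_SIZE = 2                  # assumed value for illustration
single_execution_commitment_size = 3     # commitments per execution
commitments = list(range(12))            # 12 = 2 executions * 3 * 2

single_execution_commitment_length = single_execution_commitment_size * ENCRYPTED_DATA_SIZE  # 6
assert len(commitments) % single_execution_commitment_length == 0
execution_num = len(commitments) // single_execution_commitment_length                       # 2
chunks = [commitments[i * single_execution_commitment_length:
                      (i + 1) * single_execution_commitment_length]
          for i in range(execution_num)]
# chunks[0] == [0, 1, 2, 3, 4, 5], chunks[1] == [6, 7, 8, 9, 10, 11]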