def on_task_init(self, task_id, task):
    """This method is called before a task is executed.

    Pass our request context to the task.

    http://docs.celeryproject.org/en/latest/userguide/tasks.html#context

    .. note::

        The same request object is recycled over and over again.
        Pyramid does not have correct mechanisms for a retryable
        request factory.
    """
    # TODO: How does Celery handle retries?

    # We must not have an ongoing transaction when the worker spawns a task
    # - otherwise it means the init code has left a transaction open
    ensure_transactionless("Thread local TX was ongoing when Celery fired up a new task {}: {}".format(task_id, task))

    # Kill the thread-local transaction manager to minimize issues
    # with different Celery threading models.
    # Always use request.tm instead.
    import transaction
    transaction.manager = None

    # Each task gets a new request with its own transaction manager and dbsession
    request = make_routable_request(dbsession=None, registry=self.request.registry)
    task.request.update(request=request)
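# For context: the ensure_transactionless() guard used throughout these
# examples asserts that no transaction is open on the given (or thread-local)
# transaction manager before a new unit of work starts. A minimal sketch,
# assuming the transaction package's TransactionManager and its private
# _txn slot -- an illustration, not websauna's actual implementation:

import transaction


class TransactionAlreadyInProgress(Exception):
    """Raised when a new unit of work starts while a transaction is open."""


def ensure_transactionless_sketch(msg=None, transaction_manager=None):
    # Fall back to the package-level thread-local manager
    tm = transaction_manager or transaction.manager
    # TransactionManager stores the active transaction in _txn;
    # it is None when no transaction has been begun
    if getattr(tm, "_txn", None) is not None:
        raise TransactionAlreadyInProgress(msg or "Transaction already in progress")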
def on_task_init(self, task_id, task):
    """This method is called before a task is executed.

    Pass our request context to the task.

    http://docs.celeryproject.org/en/latest/userguide/tasks.html#context

    .. note::

        The same request object is recycled over and over again.
        Pyramid does not have correct mechanisms for a retryable
        request factory.
    """
    # TODO: How does Celery handle retries?

    # We must not have an ongoing transaction when the worker spawns a task
    # - otherwise it means the init code has left a transaction open
    ensure_transactionless(
        "Thread local TX was ongoing when Celery fired up a new task {}: {}"
        .format(task_id, task))

    # When using Celery groups the request is not available, so we set it here
    if not hasattr(self, 'request'):
        self._set_request()

    # Each task gets a new request with its own transaction manager and dbsession
    request = make_routable_request(dbsession=None, registry=self.request.registry)
    task.request.update(request=request)
def finalize_pending_crypto_ops(dbsession, timeout=90):
    """Wait for all pending operations to complete.

    This assumes you have an Ethereum service running in the background.
    """
    tm = dbsession.transaction_manager

    ensure_transactionless()

    # Get the list of ops we need to clear
    @retryable(tm=tm)
    def fetch_ids():
        ops = dbsession.query(CryptoOperation).filter(
            CryptoOperation.state.in_([
                CryptoOperationState.waiting,
                CryptoOperationState.pending,
            ]))
        return [op.id for op in ops]

    @retryable(tm=tm)
    def check_op_completion(id):
        op = dbsession.query(CryptoOperation).get(id)

        network = op.network
        if not is_network_alive(network):
            time.sleep(5)  # Give some extra time to recover
            if not is_network_alive(network):
                raise RuntimeError("Tried to complete against dead network: {}, op {}".format(network, op))

        # This item has cleared
        if op.completed_at:
            logger.info("Finished %s", op)
            return True

        if op.failed_at:
            raise RuntimeError("Op failed while waiting: {}".format(op))

        return False

    # Wait until all ops clear correctly
    deadline = time.time() + timeout

    ids = fetch_ids()
    logger.info("Waiting for %d operations to finish", len(ids))

    while time.time() < deadline:
        if not ids:
            # All ops cleared
            logger.info("All ops clear")
            return

        # Filter out completed operations
        ids = [id for id in ids if not check_op_completion(id)]

        time.sleep(1)

    raise RuntimeError("Could not confirm all operations")
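# The @retryable(tm=tm) decorator above runs each helper in its own
# short-lived transaction. A minimal sketch of the pattern, assuming the
# transaction package's begin/commit/abort API; the blanket retry and the
# max_attempts parameter are illustrative simplifications, not websauna's
# actual implementation (which retries only transient conflicts such as
# serialization failures):

from functools import wraps


def retryable_sketch(tm, max_attempts=3):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                txn = tm.begin()  # open a fresh transaction for this attempt
                try:
                    result = func(*args, **kwargs)
                    txn.commit()
                    return result
                except Exception:
                    txn.abort()
                    if attempt == max_attempts - 1:
                        raise  # out of attempts, propagate the error
        return wrapper
    return decorator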
def process_log(self, contract_address, event_hash: str, change: dict) -> bool:
    """Return True if there were changes in the database."""
    event_name, log_data = self.parse_log_data(event_hash, change)
    if event_name:
        # handle_event() does its own transaction management
        ensure_transactionless(transaction_manager=self.tm)
        return self.handle_event(event_name, contract_address, log_data, change)
    else:
        return False
def run_event_cycle(self, cycle_num=None) -> Tuple[int, int]:
    """Run a full event cycle for all operations."""
    total_success = total_failure = 0

    for func in (self.run_waiting_operations, self.run_listener_operations, self.run_confirmation_updates):
        # Make sure all transactions are closed before and after running ops
        ensure_transactionless("TX management error. Starting to process {} in event cycle {}".format(func, cycle_num))
        success, failure = func()
        ensure_transactionless()
        total_success += success
        total_failure += failure

    self.update_heartbeat()
    return total_success, total_failure
def scan_logs(self, from_block, to_block) -> Tuple[int, int]:
    """Look for new deposits.

    Assume addresses are hosted wallet smart contract addresses
    and scan for their event logs.
    """
    # get_monitored_addresses() does its own transaction management
    ensure_transactionless(transaction_manager=self.tm)
    addresses = self.get_monitored_addresses()
    if not addresses:
        return 0, 0
    logs = self.client.get_logs(from_block=from_block, to_block=to_block, address=addresses)
    return self.process_logs(logs, addresses)
def scan_txs(self) -> Tuple[int, int]:
    """Look for new deposits.

    Assume addresses are hosted wallet smart contract addresses
    and scan for their event logs.

    :return: (performed updates, failed updates)
    """
    updates = failures = 0
    current_block = self.client.get_block_number()

    ensure_transactionless(transaction_manager=self.tm)

    # Don't repeat the update for the same block
    with self.tm:
        network = self.dbsession.query(AssetNetwork).get(self.network_id)
        last_block = network.other_data.get("last_database_confirmation_updater_block")
        if current_block == last_block:
            logger.debug("No new blocks, still on %d, skipping confirmation updater", current_block)
            return 0, 0

    ensure_transactionless(transaction_manager=self.tm)

    txs = list(self.get_monitored_transactions())
    logger.debug("Block %d, updating confirmations for %d transactions", current_block, len(txs))

    ensure_transactionless(transaction_manager=self.tm)

    for tx in txs:
        receipt = self.client.get_transaction_receipt(tx)
        txinfo = self.client.get_transaction_by_hash(tx)
        if not receipt:
            # This withdraw transaction is still in the memory pool
            # and has not been mined into a block yet
            continue

        try:
            new_updates, new_failures = self.update_tx(current_block, txinfo, receipt)
            updates += new_updates
            failures += new_failures
        except Exception as e:
            logger.error("Could not update transaction %s", tx)
            logger.exception(e)
            failures += 1

    with self.tm:
        network = self.dbsession.query(AssetNetwork).get(self.network_id)
        network.other_data["last_database_confirmation_updater_block"] = current_block

    ensure_transactionless(transaction_manager=self.tm)

    return updates, failures
def import_all_users(mailgun: Mailgun, dbsession, address: str, tm: Optional[TransactionManager] = None) -> int:
    """Update the mailing list subscriber database from the Websauna internal database.

    :return: Imported count
    """
    if tm is None:
        tm = dbsession.transaction_manager

    count = 0

    # Make sure we don't have a transaction in progress, as we do batching ourselves
    ensure_transactionless(transaction_manager=tm)

    @retryable(tm=tm)
    def tx1():
        """Get user ids in the first transaction."""
        return [u.id for u in dbsession.query(User.id).all()]

    @retryable(tm=tm)
    def tx_n(id):
        """For each user, import it in a subsequent transaction."""
        u = dbsession.query(User).get(id)
        if import_subscriber(mailgun, address, u):
            return 1
        else:
            return 0

    user_ids = tx1()
    for id in user_ids:
        count += tx_n(id)

    logger.info("Imported %d users", count)
    return count
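# Hypothetical call site for import_all_users(); `mailgun` and `dbsession`
# are placeholders that would come from the application setup. Because the
# function commits one transaction per user, no transaction may be open
# when it is entered:
count = import_all_users(mailgun, dbsession, "newsletter@example.com")
logger.info("Imported %d subscribers", count)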