Example #1
 def test_singleton(self):
     """Daemon is a singleton: repeated get_solo() calls hit the same row."""
     # No Daemon rows exist before the factory runs.
     self.assertEqual(0, Daemon.objects.all().count())
     DaemonFactory()
     first = Daemon.get_solo()
     self.assertEqual(1, Daemon.objects.all().count())
     second = Daemon.get_solo()
     # A second lookup must not create another row.
     self.assertEqual(1, Daemon.objects.all().count())
     # Both lookups resolve to the same database record.
     self.assertEqual(first.pk, second.pk)
    def test_reorg_centralized_oracle(self):
        """A provider restart (simulated reorg) must make the listener roll
        back the indexed oracle while keeping the daemon block pointer.

        NOTE(review): python testrpc cannot produce a real reorg, so the
        chain is reset and one stored block hash is patched by hand instead.
        """
        # initial transaction, to set reorg init
        accounts = self.web3.eth.accounts
        self.web3.eth.sendTransaction({
            'from': accounts[0],
            'to': accounts[1],
            'value': 5000000
        })
        self.assertEqual(0, Block.objects.all().count())
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(1, self.web3.eth.blockNumber)

        # Create centralized oracle
        tx_hash = self.centralized_oracle_factory.transact(
            self.tx_data).createCentralizedOracle(
                'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG')
        self.assertIsNotNone(tx_hash)
        self.listener_under_test.execute()
        # Listener indexed the oracle and backed up both mined blocks.
        self.assertEqual(CentralizedOracle().length(), 1)
        self.assertEqual(2, Daemon.get_solo().block_number)
        self.assertEqual(2, Block.objects.all().count())
        self.assertEqual(2, self.web3.eth.blockNumber)

        # Reset blockchain (simulates reorg)
        self.provider.server.shutdown()
        self.provider.server.server_close()
        self.provider = TestRPCProvider()
        web3_service = Web3Service(self.provider)
        self.web3 = web3_service.web3
        self.assertEqual(0, self.web3.eth.blockNumber)

        # Rebuild the new chain with fresh transactions; node ends at block 2.
        self.web3.eth.sendTransaction({
            'from': accounts[0],
            'to': accounts[1],
            'value': 1000000
        })
        self.web3.eth.sendTransaction({
            'from': accounts[0],
            'to': accounts[1],
            'value': 1000000
        })
        self.web3.eth.sendTransaction({
            'from': accounts[0],
            'to': accounts[1],
            'value': 1000000
        })
        self.assertEqual(2, self.web3.eth.blockNumber)

        # force block_hash change (cannot recreate a real reorg with python testrpc)
        # Overwrite the stored hash of block 1 with the new chain's hash so
        # the reorg checker treats block 1 as the last common block.
        block_hash = remove_0x_head(self.web3.eth.getBlock(1)['hash'])
        Block.objects.filter(block_number=1).update(block_hash=block_hash)

        self.listener_under_test.execute()
        # Rollback removed the indexed oracle; daemon re-processed to block 2.
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(2, Daemon.get_solo().block_number)
        self.assertEqual(2, Block.objects.all().count())
Example #3
    def test_create_centralized_oracle(self):
        """Creating an oracle on-chain is picked up by one listener pass."""
        # Nothing indexed yet; daemon still at block 0.
        self.assertEqual(len(centralized_oracles), 0)
        self.assertEqual(0, Daemon.get_solo().block_number)

        # Fire the factory contract, then run the listener once.
        ipfs_hash = 'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG'
        tx_hash = self.centralized_oracle_factory.transact(
            self.tx_data).createCentralizedOracle(ipfs_hash)
        self.assertIsNotNone(tx_hash)
        self.listener_under_test.execute()
        # One oracle recorded and the daemon advanced one block.
        self.assertEqual(len(centralized_oracles), 1)
        self.assertEqual(1, Daemon.get_solo().block_number)
Example #4
    def test_deadlock_checker(self):
        """deadlock_checker releases a stale lock but keeps a fresh one.

        Fix: ``assertEquals`` is a deprecated alias of ``assertEqual``
        (removed in Python 3.12); use the canonical name.
        """
        daemon = DaemonFactory(listener_lock=True)
        # Sleep so the lock's modification timestamp looks older than the
        # checker interval, simulating a crashed/stale Daemon worker.
        sleep(2)
        deadlock_checker(2000)  # 2 seconds
        daemon_test = Daemon.get_solo()
        # Test deadlock detection: the stale lock must have been released.
        self.assertEqual(daemon_test.listener_lock, False)

        # Re-acquire the lock; now the timestamp is fresh, so the checker
        # (default interval) must leave it alone.
        daemon.listener_lock = True
        daemon.save()
        deadlock_checker()
        daemon_test = Daemon.get_solo()
        self.assertEqual(daemon_test.listener_lock, True)
Example #5
    def handle(self, *args, start_block_number, **options):
        """Stop the event listener, clean the database, optionally rewind
        to a block number, and re-create the periodic listener task.

        :param start_block_number: if given, processing restarts at this block
        """
        # Remove the periodic task first so no new listener run starts
        # while we clean up.
        PeriodicTask.objects.filter(
            task='django_eth_events.tasks.event_listener').delete()
        time.sleep(5)
        call_command('cleandatabase')
        call_command('resync_daemon')
        self.stdout.write(
            self.style.SUCCESS('Making sure no process was running'))
        # Second pass: a listener run in flight during the first clean may
        # have written again.
        time.sleep(5)
        call_command('cleandatabase')
        call_command('resync_daemon')

        if start_block_number is not None:
            Block.objects.all().delete()

            daemon = Daemon.get_solo()
            # The listener resumes at block_number + 1.
            daemon.block_number = start_block_number - 1
            daemon.save()

            self.stdout.write(
                self.style.SUCCESS('Restart processing at block {}'.format(
                    start_block_number)))

        # auto-create celery task. Fix: get_or_create instead of an
        # unconditional save(), which inserted a duplicate IntervalSchedule
        # row on every invocation of this command.
        interval, _ = IntervalSchedule.objects.get_or_create(
            every=5, period='seconds')
        if not PeriodicTask.objects.filter(
                task='django_eth_events.tasks.event_listener').count():
            PeriodicTask.objects.create(
                name='Event Listener',
                task='django_eth_events.tasks.event_listener',
                interval=interval)
            self.stdout.write(
                self.style.SUCCESS(
                    'Created Periodic Task for Event Listener every 5s'))
    def rollback(self, block_number):
        """Undo every event applied after ``block_number`` and trim the
        backups belonging to the abandoned chain."""
        stale_blocks = Block.objects.filter(
            block_number__gt=block_number).order_by('-block_number')
        logger.info('rolling back {} blocks, until block number {}'.format(
            stale_blocks.count(), block_number))

        for stale in stale_blocks:
            logs = loads(stale.decoded_logs)
            logger.info('rolling back {} block, {} logs'.format(
                stale.block_number, len(logs)))
            if not len(logs):
                continue
            block_info = {
                'hash': stale.block_hash,
                'number': stale.block_number,
                'timestamp': stale.timestamp
            }
            # Revert in reverse order: logs inside one block may depend on
            # earlier ones, so undo last-applied first.
            for log in reversed(logs):
                self.revert_events(log['event_receiver'], log['event'],
                                   block_info)

        # Remove backups from future blocks (old chain).
        stale_blocks.delete()

        # Rewind the daemon pointer to the last common block.
        daemon = Daemon.get_solo()
        daemon.block_number = block_number
        daemon.save()
    def test_create_centralized_oracle(self):
        """Listener indexes a freshly created oracle and backs up its block."""
        # Clean slate: nothing indexed, nothing backed up.
        self.assertEqual(CentralizedOracle().length(), 0)
        self.assertEqual(0, Daemon.get_solo().block_number)
        self.assertEqual(0, Block.objects.all().count())

        # Deploy an oracle through the factory contract and let the
        # listener process the resulting block.
        ipfs_hash = 'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG'
        tx_hash = self.centralized_oracle_factory.transact(
            self.tx_data).createCentralizedOracle(ipfs_hash)
        self.assertIsNotNone(tx_hash)
        self.listener_under_test.execute()
        self.assertEqual(CentralizedOracle().length(), 1)
        self.assertEqual(1, Daemon.get_solo().block_number)

        # Check backup: the processed block was stored with its decoded log.
        self.assertEqual(1, Block.objects.all().count())
        backup = Block.objects.get(block_number=1)
        self.assertEqual(1, len(loads(backup.decoded_logs)))
Example #8
    def get_lock(self):
        """
        Try to take the Daemon listener lock inside one transaction.

        :return: True if lock can be acquired, False otherwise
        """
        with transaction.atomic():
            daemon = Daemon.get_solo()
            if daemon.listener_lock:
                # Someone else holds the lock; report and back off.
                self.stdout.write(self.style.SUCCESS('LOCK already being imported by another worker'))
                return False
            # Lock is free: claim it within the same transaction.
            self.stdout.write(self.style.SUCCESS('LOCK acquired by db_dump task'))
            daemon.listener_lock = True
            daemon.save()
            return True
Example #9
def deadlock_checker(lock_interval=60000):
    """
    Verifies whether celery tasks over the Daemon table are deadlocked.

    A Daemon row still holding ``listener_lock`` whose ``modified``
    timestamp is older than ``lock_interval`` is considered deadlocked
    and its lock is released.

    :param lock_interval: milliseconds a lock may be held before stale
    """
    try:
        # Lazy %-args (instead of eager `%` formatting): the message is
        # only rendered if the record is actually emitted.
        logger.info("Deadlock checker, lock_interval %d", lock_interval)
        daemon = Daemon.get_solo()
        valid_interval = datetime.now() - timedelta(milliseconds=lock_interval)
        # `is True` was redundant; listener_lock is used as a boolean.
        if daemon.modified < valid_interval and daemon.listener_lock:
            # daemon is deadlocked: the worker that took the lock is
            # presumed dead, so release it.
            logger.info('Found deadlocked Daemon task, block number %d',
                        daemon.block_number)
            with transaction.atomic():
                daemon.listener_lock = False
                daemon.save()
    except Exception as err:
        # Best-effort task: log and notify, never propagate.
        logger.error(str(err))
        send_email(traceback.format_exc())
Example #10
File: setup.py  Project: jzvelc/gnosisdb
    def handle(self, *args, **options):
        """Wipe indexed data, reset the Daemon pointer to block 0 and make
        sure the periodic event-listener task exists."""
        EventDescription.objects.all().delete()
        Block.objects.all().delete()
        daemon = Daemon.get_solo()
        daemon.block_number = 0
        daemon.last_error_block_number = 0
        daemon.save()
        self.stdout.write(self.style.SUCCESS('DB Successfully cleaned.'))

        # auto-create celery task. Fix: get_or_create instead of an
        # unconditional save(), which inserted a duplicate IntervalSchedule
        # row every time the command ran.
        interval, _ = IntervalSchedule.objects.get_or_create(
            every=5, period='seconds')
        if not PeriodicTask.objects.filter(
                task='django_eth_events.tasks.event_listener').count():
            PeriodicTask.objects.create(
                name='Event Listener',
                task='django_eth_events.tasks.event_listener',
                interval=interval)
            self.stdout.write(
                self.style.SUCCESS(
                    'Created Periodic Task for Event Listener every 5s.'))
Example #11
def event_listener():
    """Celery task: run the event listener under a cache-based lock.

    On failure, e-mails the error at most once per block: only when the
    current block advanced past the last block that already errored.
    """
    with cache_lock('eth_events', oid) as acquired:
        if acquired:
            bot = EventListener()
            try:
                bot.execute()
            except Exception as err:
                logger.error(str(err))
                daemon = Daemon.get_solo()
                # get last error block number database
                last_error_block_number = daemon.last_error_block_number
                # get current block number from database
                current_block_number = daemon.block_number
                logger.info(
                    "Current block number: {}, Last error block number: {}".
                    format(current_block_number, last_error_block_number))
                if last_error_block_number < current_block_number:
                    # Fix: exceptions have no `.message` attribute in
                    # Python 3 (the original raised AttributeError here);
                    # use str(err) instead.
                    send_email(str(err))
                    # save block number into cache
                    daemon.last_error_block_number = current_block_number
                    daemon.save()
Example #12
    def get_last_mined_blocks(self):
        """
        Returns the block numbers of blocks mined since last event_listener execution
        :return: [int]
        """
        daemon = Daemon.get_solo()
        current = self.web3.eth.blockNumber

        logger.info('blocks mined, daemon: {} current: {}'.format(
            daemon.block_number, current))
        # Guard clause: nothing new mined.
        if daemon.block_number >= current:
            return []

        max_blocks_to_process = int(
            getattr(settings, 'ETH_PROCESS_BLOCKS', '10000'))
        if current - daemon.block_number > max_blocks_to_process:
            # Cap the batch size. Off-by-one fix: range() excludes its
            # stop value, so stop must be start + max_blocks_to_process;
            # the original yielded one block fewer than the configured max.
            blocks_to_update = range(
                daemon.block_number + 1,
                daemon.block_number + 1 + max_blocks_to_process)
        else:
            blocks_to_update = range(daemon.block_number + 1, current + 1)
        return blocks_to_update
Example #13
    def test_event_listener(self):
        """End-to-end run of the event_listener celery task against a
        deployed centralized-oracle factory contract.

        Fix: ``assertEquals`` is a deprecated alias of ``assertEqual``
        (removed in Python 3.12).
        """
        daemon_factory = DaemonFactory(listener_lock=False)
        # Number of blocks analyzed by Event Listener
        n_blocks = Block.objects.all().count()
        # Create centralized oracle factory contract
        centralized_contract_factory = self.web3.eth.contract(
            abi=centralized_oracle_abi, bytecode=centralized_oracle_bytecode)
        tx_hash = centralized_contract_factory.deploy()
        centralized_oracle_factory_address = self.web3.eth.getTransactionReceipt(
            tx_hash).get('contractAddress')
        centralized_oracle_factory = self.web3.eth.contract(
            centralized_oracle_factory_address, abi=centralized_oracle_abi)

        # Event receiver (address stored without the 0x prefix)
        centralized_event_receiver = {
            'NAME': 'Centralized Oracle Factory',
            'EVENT_ABI': centralized_oracle_abi,
            'EVENT_DATA_RECEIVER':
            'django_eth_events.tests.test_celery.DummyEventReceiver',
            'ADDRESSES': [centralized_oracle_factory_address[2::]]
        }

        self.event_receivers.append(centralized_event_receiver)
        setattr(settings, 'ETH_EVENTS', self.event_receivers)

        # Start Celery Task
        event_listener()
        # Create centralized oracle
        centralized_oracle_factory.transact(
            self.tx_data).createCentralizedOracle(
                'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG')
        # Run event listener again
        event_listener()
        # Do checks: daemon advanced one block, one block backed up,
        # and the lock was released.
        daemon = Daemon.get_solo()
        self.assertEqual(daemon.block_number, daemon_factory.block_number + 1)
        self.assertEqual(Block.objects.all().count(), n_blocks + 1)
        self.assertFalse(daemon.listener_lock)
Example #14
 def next_block(self):
     """Return the block number the Daemon singleton currently points at."""
     daemon = Daemon.get_solo()
     return daemon.block_number
Example #15
    def execute(self):
        """Run one listener pass: detect a reorg and roll back if needed,
        then fetch, decode, persist and back up logs for every newly mined
        block. Only runs while the daemon status is 'EXECUTING'.
        """
        # Check daemon status
        daemon = Daemon.get_solo()
        if daemon.status == 'EXECUTING':
            # Check reorg
            had_reorg, reorg_block_number = check_reorg(provider=self.provider)

            if had_reorg:
                # Undo events above the last block common with the node.
                self.rollback(reorg_block_number)

            # update block number
            # get blocks and decode logs
            last_mined_blocks = self.get_last_mined_blocks()
            if len(last_mined_blocks):
                logger.info('{} blocks mined from {} to {}'.format(
                    len(last_mined_blocks), last_mined_blocks[0],
                    last_mined_blocks[-1]))
            else:
                logger.info('no blocks mined')
            for block in last_mined_blocks:
                # first get un-decoded logs and the block info
                logs, block_info = self.get_logs(block)
                logger.info('got {} logs in block {}'.format(
                    len(logs), block_info['number']))

                ###########################
                # Decode logs #
                ###########################
                if len(logs):
                    for contract in self.contract_map:
                        # Add ABI
                        self.decoder.add_abi(contract['EVENT_ABI'])

                        # Get watched contract addresses
                        watched_addresses = self.get_watched_contract_addresses(
                            contract)

                        # Filter logs by relevant addresses
                        target_logs = [
                            log for log in logs
                            if normalize_address_without_0x(log['address']) in
                            watched_addresses
                        ]

                        logger.info('{} logs'.format(len(target_logs)))

                        # Decode logs
                        decoded_logs = self.decoder.decode_logs(target_logs)

                        logger.info('{} decoded logs'.format(
                            len(decoded_logs)))

                        if len(decoded_logs):
                            for log in decoded_logs:
                                # Save events
                                instance = self.save_event(
                                    contract, log, block_info)

                                # Only valid data is saved in backup
                                if instance is not None:
                                    max_blocks_to_backup = int(
                                        getattr(settings, 'ETH_BACKUP_BLOCKS',
                                                '100'))
                                    # NOTE(review): `block` is always <=
                                    # last_mined_blocks[-1], so this
                                    # difference is <= 0 and the test is
                                    # always true; the operands look
                                    # swapped — confirm intent.
                                    if (block - last_mined_blocks[-1]
                                        ) < max_blocks_to_backup:
                                        self.backup(
                                            remove_0x_head(block_info['hash']),
                                            block_info['number'],
                                            block_info['timestamp'], log,
                                            contract['EVENT_DATA_RECEIVER'])

                # TODO refactor to be faster
                # Persist progress after every block so a crash resumes here.
                daemon = Daemon.get_solo()
                daemon.block_number = block
                daemon.save()

                max_blocks_to_backup = int(
                    getattr(settings, 'ETH_BACKUP_BLOCKS', '100'))
                # NOTE(review): same always-true condition as above.
                if (block - last_mined_blocks[-1]) < max_blocks_to_backup:
                    # backup block if haven't been backed up (no logs, but we saved the hash for reorg checking anyway)
                    Block.objects.get_or_create(
                        block_number=block,
                        block_hash=remove_0x_head(block_info['hash']),
                        defaults={'timestamp': block_info['timestamp']})

            if len(last_mined_blocks):
                # Update block number after execution
                logger.info('update daemon block_number={}'.format(
                    last_mined_blocks[-1]))
                self.update_block_number(last_mined_blocks[-1])

                # Remove older backups
                self.clean_old_backups()
Example #16
    def clean_old_backups(self):
        """Delete backup blocks older than the configured retention window
        (ETH_BACKUP_BLOCKS, default 100) behind the daemon's block."""
        retention = int(getattr(settings, 'ETH_BACKUP_BLOCKS', '100'))
        current_block = Daemon.get_solo().block_number
        oldest_kept = current_block - retention
        Block.objects.filter(block_number__lt=oldest_kept).delete()
Example #17
def _find_rollback_point(web3, blocks, saved_block_number,
                         current_block_number):
    """Scan backed-up ``blocks`` (newest first) for the last one whose hash
    still matches the node's chain.

    :return: Tuple (True|False, None|Block number)
    :raise UnknownBlockReorg: the node does not know a backed-up block
    :raise NoBackup: no backed-up block matches the node's chain
    """
    for block in blocks:
        try:
            node_block_hash = remove_0x_head(
                web3.eth.getBlock(block.block_number)['hash'])
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise UnknownBlockReorg
        if block.block_hash == node_block_hash:
            # if is last saved block, no reorg
            if block.block_number == saved_block_number:
                return False, None
            # there was a reorg from a saved block, we can do rollback
            return True, block.block_number

    # Exception, no saved history enough
    errors = {
        'saved_block_number': saved_block_number,
        'current_block_number': current_block_number,
        # (sic) legacy key name kept for callers reading this dict
        'las_saved_block_hash': blocks[0].block_hash
    }
    raise NoBackup(
        message='Not enough backup blocks, reorg cannot be rollback',
        errors=errors)


def check_reorg(provider=None):
    """
    Checks for reorgs to happening
    :param provider: optional Web3 provider instance
    :return: Tuple (True|False, None|Block number)
    :raise NetworkReorgException
    :raise UnknownBlockReorg
    :raise NoBackup

    Refactor: the two near-identical branch bodies of the original are
    unified into _find_rollback_point; behavior is unchanged.
    """
    saved_block_number = Daemon.get_solo().block_number

    try:
        web3 = Web3Service(provider=provider).web3
        if not web3.isConnected():
            raise Exception()
        current_block_number = web3.eth.blockNumber
    except Exception:
        raise NetworkReorgException(
            'Unable to get block number from current node. Check the node is up and running.'
        )

    if current_block_number >= saved_block_number:
        # Node is at or ahead of us: verify all saved hashes, newest first.
        blocks = Block.objects.all().order_by('-block_number')
    else:
        # Node is behind us: only compare blocks it can know about.
        blocks = Block.objects.filter(
            block_number__lte=current_block_number).order_by('-block_number')

    if not blocks.count():
        # No backup data, nothing to compare against.
        return False, None
    return _find_rollback_point(web3, blocks, saved_block_number,
                                current_block_number)
Example #18
    def handle(self, *args, start_block_number, **options):
        """Full system reset: stop all periodic tasks, clean the database,
        optionally rewind to ``start_block_number``, then re-create every
        periodic task and whitelist the default account.

        :param start_block_number: if given, processing restarts at this block
        """
        PeriodicTask.objects.filter(task__in=[
            'django_eth_events.tasks.event_listener',
            'tradingdb.relationaldb.tasks.calculate_scoreboard',
            'tradingdb.relationaldb.tasks.issue_tokens',
            'tradingdb.relationaldb.tasks.clear_issued_tokens_flag',
        ]).delete()
        time.sleep(5)
        call_command('cleandatabase')
        call_command('resync_daemon')
        self.stdout.write(
            self.style.SUCCESS('Making sure no process was running'))

        # Second pass: a task in flight during the first clean may have
        # written again.
        time.sleep(5)
        call_command('cleandatabase')
        call_command('resync_daemon')

        if start_block_number is not None:
            Block.objects.all().delete()

            daemon = Daemon.get_solo()
            # The listener resumes at block_number + 1.
            daemon.block_number = start_block_number - 1
            daemon.save()

            self.stdout.write(
                self.style.SUCCESS('Restart processing at block {}'.format(
                    start_block_number)))

        # auto-create celery task. Fix: get_or_create instead of the
        # original unconditional save(), which inserted four duplicate
        # IntervalSchedule rows on every run of this command.
        five_seconds_interval, _ = IntervalSchedule.objects.get_or_create(
            every=5, period='seconds')
        ten_minutes_interval, _ = IntervalSchedule.objects.get_or_create(
            every=10, period='minutes')
        one_minute_interval, _ = IntervalSchedule.objects.get_or_create(
            every=1, period='minutes')
        one_day_interval, _ = IntervalSchedule.objects.get_or_create(
            every=1, period='days')

        PeriodicTask.objects.create(
            name='Event Listener',
            task='django_eth_events.tasks.event_listener',
            interval=five_seconds_interval,
        )
        self.stdout.write(
            self.style.SUCCESS(
                'Created Periodic Task for Event Listener every 5 seconds'))

        PeriodicTask.objects.create(
            name='Scoreboard Calculation',
            task='tradingdb.relationaldb.tasks.calculate_scoreboard',
            interval=ten_minutes_interval,
        )
        self.stdout.write(
            self.style.SUCCESS(
                'Created Periodic Task for Scoreboard every 10 minutes'))

        PeriodicTask.objects.create(
            name='Token issuance',
            task='tradingdb.relationaldb.tasks.issue_tokens',
            interval=one_minute_interval,
        )
        self.stdout.write(
            self.style.SUCCESS(
                'Created Periodic Task for Token Issuance every minute'))

        PeriodicTask.objects.create(
            name='Token issuance flag clear',
            task='tradingdb.relationaldb.tasks.clear_issued_tokens_flag',
            interval=one_day_interval,
        )
        self.stdout.write(
            self.style.SUCCESS(
                'Created Periodic Task for Token Issuance flag clear every day'
            ))

        TournamentWhitelistedCreator.objects.create(
            address=normalize_address_without_0x(
                settings.ETHEREUM_DEFAULT_ACCOUNT),
            enabled=True)
        self.stdout.write(
            self.style.SUCCESS('Added User {} to Tournament Whitelist'.format(
                settings.ETHEREUM_DEFAULT_ACCOUNT)))
Example #19
 def update_block_number(self, block_number):
     """Persist ``block_number`` as the Daemon's last processed block."""
     singleton = Daemon.get_solo()
     singleton.block_number = block_number
     singleton.save()
Example #20
def event_listener():
    """Celery task: run the blockchain event listener exactly once.

    Takes the Daemon listener lock under a row-level SELECT FOR UPDATE so
    concurrent workers do not run simultaneously. Connection-level errors
    are logged and retried on the next beat; any other error halts the
    daemon and e-mails the traceback at most once per block.
    """
    with transaction.atomic():
        daemon = Daemon.objects.select_for_update().first()
        if not daemon:
            logger.debug('Daemon singleton row was not created, creating')
            daemon = Daemon.get_solo()
        locked = daemon.listener_lock
        if not locked:
            logger.debug('LOCK acquired')
            daemon.listener_lock = True
            daemon.save()
    if locked:
        logger.debug('LOCK already being imported by another worker')
    else:
        bot = EventListener()
        try:
            bot.execute()
        except UnknownTransaction:
            logger.error('Unknown Transaction hash, might be a reorg')
        except UnknownBlock:
            logger.error('Unknown Block hash, might be a reorg')
        except UnknownBlockReorg:
            logger.error('Unknown Block hash, might be a reorg')
        except NetworkReorgException as nrex:
            # Fix: exceptions have no `.message` attribute in Python 3
            # (the original raised AttributeError here); str() renders
            # the exception's args instead.
            logger.error(
                'An error occurred while calling ethereum node on reorgs checker. %s'
                % str(nrex))
        except Exception as err:
            # Not halting system for connection error cases
            if hasattr(err, 'errno') and (err.errno == errno.ECONNABORTED
                                          or err.errno == errno.ECONNRESET
                                          or err.errno == errno.ECONNREFUSED):
                logger.error(
                    "An error has occurred, errno: {}, trace: {}".format(
                        err.errno, str(err)))
            elif (isinstance(err, HTTPError) or isinstance(err, PoolError)
                  or isinstance(err, LocationValueError)
                  or isinstance(err, RequestException)):
                # Fix: these exception types need not carry `errno`; the
                # original `err.errno` could raise AttributeError while
                # handling the error.
                logger.error(
                    "An error has occurred, errno: {}, trace: {}".format(
                        getattr(err, 'errno', None), str(err)))
            else:
                # Unknown failure: stop the daemon until operators intervene.
                logger.error("Halting system due to error {}".format(str(err)))
                daemon = Daemon.get_solo()
                daemon.status = 'HALTED'
                daemon.save()
                # get last error block number database
                last_error_block_number = daemon.last_error_block_number
                # get current block number from database
                current_block_number = daemon.block_number
                logger.info(
                    "Current block number: {}, Last error block number: {}".
                    format(current_block_number, last_error_block_number))
                if last_error_block_number < current_block_number:
                    send_email(traceback.format_exc())
                    # save block number into cache
                    daemon.last_error_block_number = current_block_number
                    daemon.save()
        finally:
            logger.info('Releasing LOCK')
            with transaction.atomic():
                daemon = Daemon.objects.select_for_update().first()
                daemon.listener_lock = False
                daemon.save()